\\n\\n')\n webfile.write('\\n\\n')\n webfile.close()\n\n\nprint('Generating Logfile...')\ninitializetemps()\nlatestreading()\naveragetemp()\nlastfive()\nshowmax()\nwriteout()\ngenweb()\nprint('Logfile Generation Complete.')\n","repo_name":"sudoecho/templogger","sub_path":"templogoutput.py","file_name":"templogoutput.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26781676070","text":"import random\n\nMAX_GUESSES = 5\nSTART, END = 1, 20\n\n\ndef get_random_number():\n \"\"\"Get a random number between START and END, returns int\"\"\"\n return random.randint(START, END)\n\n\nclass Game:\n \"\"\"Number guess class, make it callable to initiate game\"\"\"\n\n def __init__(self):\n \"\"\"Init _guesses, _answer, _win to set(), get_random_number(), False\"\"\"\n self._guesses = set()\n self._answer = get_random_number()\n self._win = False\n\n def guess(self):\n \"\"\"Ask user for input, convert to int, raise ValueError outputting\n the following errors when applicable:\n 'Please enter a number'\n 'Should be a number'\n 'Number not in range'\n 'Already guessed'\n If all good, return the int\"\"\"\n guess = input()\n try:\n guess_int = int(guess)\n except:\n raise ValueError(\"Please enter a number\")\n\n if guess_int in self._guesses:\n raise ValueError(\"Already guessed\")\n elif guess_int < 1 or guess_int > 20:\n raise ValueError(\"Number not in range\")\n else:\n self._guesses.add(guess_int)\n\n return guess_int\n\n def _validate_guess(self, guess):\n \"\"\"Verify if guess is correct, print the following when applicable:\n {guess} is correct!\n {guess} is too low\n {guess} is too high\n Return a boolean\"\"\"\n if guess > self._answer:\n print(f\"{guess} is too high\")\n elif guess < self._answer:\n print(f\"{guess} is too low\")\n else:\n print(f\"{guess} is correct!\")\n return True\n return False\n\n def __call__(self):\n \"\"\"Entry point / game loop, use a loop break/continue,\n see the tests for the exact win/lose messaging\"\"\"\n guess_count = 0\n while not self._win and guess_count < 5:\n try:\n guess = self.guess()\n except ValueError as e:\n print(e.args[0])\n continue\n\n if self._validate_guess(guess):\n self._win = True\n print(f\"It took you {guess_count+ 1} guesses\")\n guess_count += 1\n\n if not self._win:\n print(f\"Guessed 5 times, answer was {self._answer}\")\n ...\n\n\nif __name__ == \"__main__\":\n game = Game()\n game()\n","repo_name":"kaysagoe/pybites","sub_path":"42/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34366690592","text":"\"\"\"Run a `runjob` action.\"\"\"\r\n\r\n\r\nimport pyntelope\r\n\r\n\r\ndata = [\r\n pyntelope.Data(\r\n name=\"worker\",\r\n value=pyntelope.types.Name(\"open.facings\"),\r\n ),\r\n pyntelope.Data(\r\n name=\"nonce\",\r\n value=pyntelope.types.Uint64(123),\r\n ),\r\n]\r\n\r\nauth = pyntelope.Authorization(actor=\"youraccount\", permission=\"active\")\r\n\r\naction = pyntelope.Action(\r\n account=\"open.facings\",\r\n name=\"runjobs\",\r\n data=data,\r\n authorization=[auth],\r\n)\r\n\r\nraw_transaction = pyntelope.Transaction(actions=[action])\r\n\r\nnet = pyntelope.WaxTestnet()\r\nlinked_transaction = raw_transaction.link(net=net)\r\n\r\nkey = \"a_very_secret_key\"\r\nsigned_transaction = linked_transaction.sign(key=key)\r\n\r\nresp = signed_transaction.send()\r\n","repo_name":"FACINGS/pyntelope","sub_path":"examples/run_a_simple_action.py","file_name":"run_a_simple_action.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"}
+{"seq_id":"17827435405","text":"import os\nfrom os.path import join as opj\nimport argparse\nimport time\nfrom datetime import datetime\n\nimport cv2\nimport torch\nimport torch.distributed as dist\n\nfrom datasets.dataloader import get_dataloader, get_test_dataloader\nfrom utils.util import *\nfrom models.cyclegan import AttentionCycleGAN\n\n\ndef build_args(is_test=False):\n parser = argparse.ArgumentParser()\n\n #### dataset ####\n parser.add_argument(\"--data_root_dir\", type=str, default=\"/home/data/\")\n parser.add_argument(\"--data_name\", type=str, default=\"horse2zebra\")\n parser.add_argument(\"--in_ch\", type=int, default=3)\n parser.add_argument(\"--out_ch\", type=int, default=3)\n parser.add_argument(\"--use_crop_A\", type=bool, default=False)\n parser.add_argument(\"--use_crop_B\", type=bool, default=False)\n parser.add_argument(\"--resize_type\", type=str, default=\"scale_height\", choices=[\"resize\", \"scale_height\"])\n parser.add_argument(\"--resize_H\", type=int, default=1080, help=\"scale 또는 crop있을 때 resize할때 사이즈\")\n parser.add_argument(\"--resize_W\", type=int, default=1920)\n parser.add_argument(\"--img_H\", type=int, default=512)\n parser.add_argument(\"--img_W\", type=int, default=512, help=\"crop할때 이미지 사이즈. crop안하면 이거로 resize\")\n \n #### model ####\n parser.add_argument(\"--G_attn_A_name\", type=str, default=\"basic_attn\")\n parser.add_argument(\"--G_attn_B_name\", type=str, default=\"basic_attn\")\n parser.add_argument(\"--G_AB_name\", type=str, default=\"res_9blks\")\n parser.add_argument(\"--G_BA_name\", type=str, default=\"res_9blks\")\n parser.add_argument(\"--D_AB_name\", type=str, default=\"basic\")\n parser.add_argument(\"--D_BA_name\", type=str, default=\"basic\")\n\n #### train ####\n parser.add_argument(\"--batch_size\", type=int, default=1)\n parser.add_argument(\"--n_workers\", type=int, default=4)\n parser.add_argument(\"--start_epoch\", type=int, default=1)\n parser.add_argument(\"--n_epochs\", type=int, default=10000)\n parser.add_argument(\"--linearlr_epochs\", type=int, default=50, help=\"linear decay ratio for linear lr scheduler\")\n parser.add_argument(\"--target_real_label\", type=float, default=1.0)\n parser.add_argument(\"--target_gene_label\", type=float, default=0.0)\n parser.add_argument(\"--G_lr\", type=float, default=2e-4)\n parser.add_argument(\"--D_lr\", type=float, default=2e-4)\n parser.add_argument(\"--G_betas\", type=tuple, default=(0.5, 0.999))\n parser.add_argument(\"--D_betas\", type=tuple, default=(0.5, 0.999))\n parser.add_argument(\"--gan_loss_name\", type=str, default=\"lsgan\", choices=[\"lsgan\", \"wgangp\", \"vanilla\"])\n parser.add_argument(\"--lr_scheduler\", type=str, default=\"linear\", choices=[\"linear\", \"step\", \"plateau\", \"cosine\"])\n parser.add_argument(\"--lambda_ID\", type=float, default=0.5)\n parser.add_argument(\"--lambda_A\", type=float, default=10.0)\n parser.add_argument(\"--lambda_B\", type=float, default=10.0)\n parser.add_argument(\"--pool_size\", type=int, default=50)\n parser.add_argument(\"--no_vgg\", action=\"store_true\")\n parser.add_argument(\"--attn_thres\", type=float, default=0.1)\n parser.add_argument(\"--use_mask_for_D\", type=bool, default=False, help=\"True이면 논문의 equation 7을 사용한다. 즉, D에 들어갈때 mask를 적용해서 들어간다. 
그런데 thresh는 적용이 안됨.\") \n parser.add_argument(\"--stop_attn_learning_epoch\", type=int, default=30, help=\"이 에폭이후로 attn은 학습 안됨.\")\n\n #### save ####\n parser.add_argument(\"--no_save\", action=\"store_true\")\n parser.add_argument(\"--save_root_dir\", type=str, default=\"/media/data1/jeonghokim/VFP290K_GAN/save/cyclegan_attention\")\n parser.add_argument(\"--save_name\", type=str, default=f\"{datetime.now().strftime('%Y%m%d')}\")\n parser.add_argument(\"--log_save_iter_freq\", type=int, default=100)\n parser.add_argument(\"--img_save_iter_freq\", type=int, default=100)\n parser.add_argument(\"--model_save_iter_freq\", type=int, default=500)\n parser.add_argument(\"--n_save_images\", type=int, default=8)\n\n #### config ####\n parser.add_argument(\"--use_DDP\", type=bool, default=False)\n args = parser.parse_args()\n args.is_test = is_test\n if is_test:\n args.use_DDP = False\n args.no_save = True\n if args.use_DDP: args.local_rank = int(os.environ[\"LOCAL_RANK\"])\n else: args.local_rank = 0\n args.save_dir = opj(args.save_root_dir, args.save_name)\n args.img_save_dir = opj(args.save_dir, \"save_images\")\n args.model_save_dir = opj(args.save_dir, \"save_models\")\n args.log_path = opj(args.save_dir, \"log.txt\")\n args.config_path = opj(args.save_dir, \"config.json\")\n if not args.no_save:\n os.makedirs(args.img_save_dir, exist_ok=True)\n os.makedirs(args.model_save_dir, exist_ok=True)\n os.makedirs(opj(args.img_save_dir, \"A2B\"), exist_ok=True)\n os.makedirs(opj(args.img_save_dir, \"B2A\"), exist_ok=True)\n return args\n\ndef main_worker(args, logger):\n train_loader, valid_loader = get_dataloader(args)\n args.total_iter = args.n_epochs * len(train_loader)\n logger.write(f\"[Train] # of imgs A : {train_loader.dataset.n_A}, # of imgs B : {train_loader.dataset.n_B}\")\n logger.write(f\"[Valid] # of imgs A : {valid_loader.dataset.n_A}, # of imgs B : {valid_loader.dataset.n_B}\")\n logger.write(f\"1 epoch = {len(train_loader)} iters\")\n model = AttentionCycleGAN(args)\n cur_iter = 1\n start_time = time.time()\n for epoch in range(args.start_epoch, args.n_epochs+1):\n loss_G_meter = AverageMeter()\n loss_D_meter = AverageMeter()\n for data in train_loader:\n img_A = data['img_A'].cuda(args.local_rank)\n img_B = data[\"img_B\"].cuda(args.local_rank)\n model.set_input(img_A, img_B)\n model.train(epoch)\n\n BS = img_A.shape[0]\n loss_G_meter.update(model.loss_G.item(), BS)\n loss_D_meter.update(model.loss_D.item(), BS)\n if cur_iter % args.log_save_iter_freq == 0:\n msg = f\"[iter - {cur_iter}/{args.total_iter}]_[time - {time.time() - start_time:.2f}sec]_[loss G - {loss_G_meter.avg:.4f}]_[loss D - {loss_D_meter.avg:.4f}]\"\n logger.write(msg)\n if cur_iter % args.img_save_iter_freq <= args.n_save_images:\n real_A_img = tensor2img(img_A)\n real_B_img = tensor2img(img_B)\n gene_A_img = tensor2img(model.gene_A)\n gene_B_img = tensor2img(model.gene_B)\n attn_A_img = tensor2img(model.attn_A_viz)\n attn_B_img = tensor2img(model.attn_B_viz)\n A2B_to_path = opj(args.img_save_dir, \"A2B\", f\"{cur_iter}_{cur_iter % args.img_save_iter_freq}.png\")\n A2B_save_img = np.concatenate([real_A_img, real_B_img, gene_B_img, attn_A_img], axis=1)\n if args.local_rank == 0:\n cv2.imwrite(A2B_to_path, A2B_save_img[:,:,::-1])\n \n B2A_to_path = opj(args.img_save_dir, \"B2A\", f\"{cur_iter}_{cur_iter % args.img_save_iter_freq}.png\")\n B2A_save_img = np.concatenate([real_B_img, real_A_img, gene_A_img, attn_B_img], axis=1)\n if args.local_rank == 0:\n cv2.imwrite(B2A_to_path, B2A_save_img[:,:,::-1])\n \n if cur_iter 
% args.model_save_iter_freq == 0:\n to_path = opj(args.model_save_dir, f\"[iter - {cur_iter}].pth\")\n model.save(to_path)\n cur_iter += 1\n model.scheduler_G.step()\n model.scheduler_D.step()\n G_lr_val = get_lr(model.optimizer_G)\n D_lr_val = get_lr(model.optimizer_D)\n msg = f\"[Epoch - {epoch}/{args.n_epochs}]_[time - {time.time() - start_time:.2f}sec]_[loss G - {loss_G_meter.avg:.4f}]_[loss D - {loss_D_meter.avg:.4f}]_[G lr - {G_lr_val}]_[D lr - {D_lr_val}]\"\n logger.write(msg)\n \n\nif __name__ == \"__main__\":\n args = build_args()\n logger = Logger(args.local_rank)\n logger.open(\"asd.txt\")\n print_args(args, logger)\n save_args(args, args.config_path)\n if args.use_DDP:\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(backend=\"nccl\")\n main_worker(args, logger)\n \n","repo_name":"rlawjdghek/Generative_Models","sub_path":"GANs/Attention_CycleGAN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33551585682","text":"from pndni import forceqform\nimport nibabel\nimport numpy as np\nimport pytest\n\n\ndef test_ang():\n x = np.array([1.0, 0.0, 0.0])\n y = np.array([0.0, 2.0, 0.0])\n z = np.array([0.0, 0.0, 3.0])\n for i in [x, y, z]:\n for j in [x, y, z]:\n if i is j:\n assert np.allclose(forceqform._ang(i, j), np.pi / 2)\n else:\n assert np.allclose(forceqform._ang(i, j), 0)\n\n\ndef test_check_ang():\n R1 = np.array([[1.0, -2.0, 0.0],\n [1.0, 2.0, 0.0],\n [0.0, 0.0, 3.0]])\n forceqform._check_ang(R1, 1e-6)\n R2 = np.array([[1.0, 0.0, 0.0],\n [1.0, 2.0, 0.0],\n [0.0, 0.0, 3.0]])\n with pytest.raises(RuntimeError):\n forceqform._check_ang(R2, 1e-6)\n with pytest.raises(RuntimeError):\n forceqform._check_ang(R2, np.pi / 4 - 0.01)\n forceqform._check_ang(R2, np.pi / 4 + 0.01)\n\n\n@pytest.mark.parametrize('testtype,shear,maxang', [('qform', False, None),\n ('sform', False, None),\n ('both', False, None),\n ('none', False, None),\n ('sform', True, None),\n ('sform', True, np.pi)])\ndef test_forceqform(tmp_path, testtype, shear, maxang):\n i1 = tmp_path / 'image1.nii'\n affine = np.array([[1.0, 0.0, 0.0, -20.0],\n [0.0, 2.0, 0.0, -30.0],\n [0.0, 0.0, 4.0, -40.0],\n [0.0, 0.0, 0.0, 1.0]])\n if shear:\n affine[0, 2] = 2.0\n img = np.arange(24).reshape(2, 3, 4)\n nii = nibabel.Nifti1Image(img, None)\n\n if testtype == 'qform':\n nii.set_qform(affine)\n elif testtype == 'sform':\n nii.set_sform(affine)\n elif testtype == 'both':\n nii.set_qform(affine)\n nii.set_sform(affine * 2)\n elif testtype == 'none':\n pass\n else:\n raise RuntimeError()\n nii.to_filename(str(i1))\n parser = forceqform.get_parser()\n toparse = [str(i1), str(tmp_path / 'out.nii')]\n if maxang is not None:\n toparse.extend(['--maxangle', str(maxang)])\n args = parser.parse_args(toparse)\n if testtype != 'none' and not (shear and maxang is None):\n forceqform.forceqform(args.input_file, args.output_file, maxangle=args.maxangle)\n else:\n with pytest.raises(RuntimeError):\n forceqform.forceqform(args.input_file, args.output_file, maxangle=args.maxangle)\n return\n nout = nibabel.load(str(args.output_file))\n if maxang is None:\n assert np.all(nout.affine == affine)\n assert np.all(nout.get_qform() == affine)\n assert nout.get_sform(coded=True)[1] == 0\n","repo_name":"pndni/pndni_utils","sub_path":"tests/test_forceqform.py","file_name":"test_forceqform.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3912065472","text":"import pprint\nimport pygame\nimport time\nimport queue\n\nclass PS4Controller(object):\n \"\"\"Class representing the PS4 controller. Pretty straightforward functionality.\"\"\"\n\n controller = None\n axis_data = None\n button_data = None\n hat_data = None\n mailbox = None\n debug = 0\n\n def init(self, mailbox):\n \"\"\"Initialize the joystick components\"\"\"\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.mailbox = mailbox\n\n def listen(self):\n \"\"\"Listen for events to happen\"\"\"\n \n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n time.sleep(.1)\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n if event.button == 0:\n self.mailbox.put(\"square\")\n elif event.button == 1:\n self.mailbox.put(\"cross\")\n elif event.button == 2:\n self.mailbox.put(\"circle\")\n elif event.button == 3:\n self.mailbox.put(\"triangle\")\n elif event.button == 4:\n self.mailbox.put(\"L1\")\n elif event.button == 5:\n self.mailbox.put(\"R1\")\n elif event.button == 8:\n self.mailbox.put(\"select\")\n elif event.button == 9:\n self.mailbox.put(\"start\")\n #0: square\n #1: cross\n #2: circle\n #3: triangle\n #4: L1\n #5: R1\n #8: Share\n #9: Options\n if self.debug == 1: print(event.button)\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\nif __name__ == \"__main__\":\n ps4 = PS4Controller()\n ps4.init(queue.Queue())\n ps4.debug = 1\n ps4.listen()\n","repo_name":"dhruvm96/SocialDrones","sub_path":"cfcli/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31066122461","text":"with open(r\"D:\\New folder\\files\\New folder\\Python\\Day 13\\iris.csv\", \"r\") as iris_file:\r\n iris_data = iris_file.readlines()\r\n print(iris_data)\r\n\r\nirises = []\r\n\r\nfor row in iris_data[1:]:\r\n sepal_length, sepal_width, petal_length, petal_width, species = row.strip().split(\",\")\r\n\r\n irises.append({\r\n \"sepal_length\": sepal_length,\r\n \"sepal_width\": sepal_width,\r\n \"petal_length\": petal_length,\r\n \"petal_width\": petal_width,\r\n \"species\": species\r\n })\r\n\r\n\r\n#alternative way\r\nwith open(\"iris.csv\", \"r\") as iris_file:\r\n iris_data = iris_file.readlines()\r\n\r\nheaders = iris_data[0].strip().split(\",\")\r\nirises = []\r\n\r\nfor row in iris_data[1:]:\r\n iris = row.strip().split(\",\")\r\n iris_dict = dict(zip(headers, iris))\r\n\r\n irises.append(iris_dict)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"imxal/Python","sub_path":"Day 13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33321811259","text":"\"\"\"\nRotina de extração de notícias da aba News do Google a partir de string de busca e dias de início e fim.\n\n\"\"\"\nimport datetime as DT\nimport logging\nimport os\nimport time\nfrom datetime import date\nfrom os import path\n\nimport pandas as pd\nfrom GoogleNews import GoogleNews\n\nfrom noticias_ner import config\nfrom noticias_ner.noticias.google import extrai_noticias_google\n\n\ndef __extrai_noticias_gnews(q, dia_inicio, dia_fim, num_limite_paginas=1, lang='pt-BR', sleep=1, tentativas=5):\n \"\"\"\n Retorna data frame com as notícias obtidas na aba News do Google\n\n Parâmetros\n ----------\n q : str\n String de busca\n\n data_inicio, dta_fim : datatime.Date\n Datas de início e fim para realização da busca\n\n num_limite_num_limite_paginas : int\n Número máxima de páginas que serão obtidas.\n\n lang : str\n Código da lingua para realização da busca (padrão pt-BR)\n\n sleep : int\n Número de segundos para esperar entre tentativas após cada erro de obtenção de página\n\n tentativas : int\n Número de tentativas de obnteção de uma página antes de se considerar a extração concluída\n\n Retorno\n -------\n resultados : DataFrame\n Dataframe com os reulstados de busca\n \"\"\"\n\n # String de busca formatado adequadamente para URL\n # q = urllib.parse.quote(q)\n\n # Strings com as datas no formato esperado pela lib GoogleNews\n formato_data = '%m/%d/%Y'\n dia_inicio_formatado = dia_inicio.strftime(formato_data)\n dia_fim_formatado = dia_fim.strftime(formato_data)\n\n # Instancia interface de busca ao Google News com idioma pt-BR e período adequado\n gn = GoogleNews(lang=lang, start=dia_inicio_formatado, end=dia_fim_formatado)\n\n # Inicializa lista para armazenar os resultados de busca\n resultados = []\n\n # Realiza busca da primeira página\n logger = logging.getLogger('covidata')\n logger.info(f'Buscando página 1')\n gn.search(q)\n resultados = resultados + gn.result()\n gn.clear()\n\n # Para a página 2 em diante (p2 corresponde ao índice 1)\n for i in range(2, num_limite_paginas + 1):\n\n logger.info(f'Buscando página {i}')\n\n # Busca a página\n gn.getpage(i)\n\n # Adiciona reusltado à lista\n resultados = resultados + gn.result()\n\n # Caso a consulta à página não tenha gerado resultados\n if gn.result() == []:\n logger.info(f'A consulta à página {i} não retornou nehnum resultado')\n\n # Diminui o contador de tentaivas\n tentativas = tentativas - 1\n logger.info(f'*** Há {tentativas} restantes ***')\n\n # Caso o número de tentativas tenha chegado a zero, interrompe a execução\n if tentativas < 1:\n break\n\n # Caso contrário\n else:\n # Pausa script por sleep segundos antes de buscar a próxima página\n logger.info(f'Execução interrompida por {sleep} segundos')\n time.sleep(sleep)\n\n # Apaga cache do resultado\n gn.clear()\n\n # Cria e retorna dataframe\n return pd.DataFrame(resultados)\n\n\ndef executar_busca(data_inicial, q):\n dia_inicio = __get_dia_inicio(data_inicial)\n dia_fim = date.today()\n\n # Número limite de páginas\n num_limite_paginas = 100\n\n df_google = extrai_noticias_google(q, dia_inicio)\n\n # Realiza busca\n df_gnews = __extrai_noticias_gnews(q, dia_inicio, dia_fim, num_limite_paginas=num_limite_paginas, sleep=10,\n tentativas=10)\n\n # Salva resultados\n if not path.exists(config.diretorio_dados):\n os.makedirs(config.diretorio_dados)\n\n df = pd.concat([df_google, df_gnews])\n caminho_arquivo_resultante = os.path.join(config.diretorio_dados, f'noticias_n_{len(df)}.xlsx')\n\n df.to_excel(caminho_arquivo_resultante)\n\n 
return caminho_arquivo_resultante, dia_inicio\n\n\ndef __get_dia_inicio(data_inicial):\n if not data_inicial:\n today = DT.date.today()\n dia_inicio = today - DT.timedelta(days=7)\n else:\n dia_inicio = date.fromisoformat(data_inicial)\n return dia_inicio\n","repo_name":"SecexSaudeTCU/noticias_ner","sub_path":"noticias_ner/noticias/gnews.py","file_name":"gnews.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"pt","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"}
+{"seq_id":"71725173266","text":"\"\"\"\n每个点可以有四种颜色的选择,只要两个相邻点的颜色不同就行\n\n输入:n = 3, paths = [[1,2],[2,3],[3,1]]\n输出:[1,2,3]\n解释:\n花园 1 和 2 花的种类不同。\n花园 2 和 3 花的种类不同。\n花园 3 和 1 花的种类不同。\n因此,[1,2,3] 是一个满足题意的答案。其他满足题意的答案有 [1,2,4]、[1,4,2] 和 [3,2,1]\n\"\"\"\n\n\nclass Solution(object):\n def gardenNoAdj(self, N, paths):\n \"\"\"\n :type N: int\n :type paths: List[List[int]]\n :rtype: List[int]\n \"\"\"\n res = [0] * N\n neigh = [[] for _ in range(N)]\n \n # 每个点的相邻的点都记录起来\n for i, j in paths:\n neigh[i - 1].append(j - 1)\n neigh[j - 1].append(i - 1)\n\n for cur in range(N):\n # 当前i可以选择哪种花色\n flowers = [1, 2, 3, 4]\n for j in neigh[cur]:\n if res[j] in flowers:\n flowers.remove(res[j])\n\n # 反正剩出来的都可以选,随意选\n res[cur] = flowers[0]\n\n return res","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/Graph/二分图bipartition(图染色法)/1042m. 不邻接植花(图染色法).py","file_name":"1042m. 不邻接植花(图染色法).py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70134877586","text":"a = \"\\n\".join([\n \".W.\",\n \".W.\",\n \"...\"\n])\n\na = a.split('/n')\ndef make_2d (str):\n a = str.split('/n')\n new_a = []\n for x in a:\n new_a.append(list(x))\n return new_a\n","repo_name":"mjyrhee9/IB-Comp-Sci-Projects","sub_path":"bfs2d.py","file_name":"bfs2d.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"43517188272","text":"# N행 M열의 표 A가 있고, 표의 각 칸에는 숫자가 하나씩 적혀있다.\r\n#\r\n# 연두는 서로 다른 1개 이상의 칸을 선택하려고 하는데, 행의 번호가 선택한 순서대로 등차수열을 이루고 있어야 하고, 열의 번호도 선택한 순서대로 등차수열을 이루고 있어야 한다. 이렇게 선택한 칸에 적힌 수를 순서대로 이어붙이면 정수를 하나 만들 수 있다.\r\n#\r\n# 연두가 만들 수 있는 정수 중에서 가장 큰 완전 제곱수를 구해보자. 완전 제곱수란 어떤 정수를 제곱한 수이다.\r\n\r\nimport math\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nN, M = map(int, input().split())\r\ntable = [input().strip() for _ in range(N)]\r\nanswer = -1\r\n\r\ndef func(x):\r\n x = int(x)\r\n num = math.sqrt(x)\r\n if int(num) * int(num) == x:\r\n return True\r\n return False\r\n\r\nif N == 1 and M == 1:\r\n result = int(''.join(map(str, table)))\r\n if func(result):\r\n print(result)\r\n else:\r\n print(answer)\r\nelse:\r\n for y in range(N):\r\n for x in range(M):\r\n for dy in range(-N + 1, N):\r\n for dx in range(-M + 1, M):\r\n num = \"\"\r\n current_y = y\r\n current_x = x\r\n if dx == 0 and dy == 0:\r\n continue\r\n while 0 <= current_x < M and 0 <= current_y < N:\r\n num += table[current_y][current_x]\r\n current_x += dx\r\n current_y += dy\r\n if func(num):\r\n answer = max(answer, int(num))\r\n print(answer)","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"1000~1999/1025.py","file_name":"1025.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15582111383","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\n\ndef exponentialRegression(closing):\n x = np.arange(1,len(closing) + 1).reshape((-1, 1))\n y_normalized = np.divide(closing, closing[0])\n y_ln = np.log(y_normalized)\n model = LinearRegression()\n model.fit(x, y_ln)\n scalar = np.exp(model.intercept_) * closing[0]\n base = np.power(np.exp(model.coef_)[0], 252)\n # annualReturn = (base - 1) * 100\n # equation = \"y = {}({})^x\".format(scalar, base,)\n rSquared = model.score(x, y_ln)\n return {\n \"scalar\": scalar,\n \"roi\": ((base - 1) * 100),\n # \"annualReturn\": annualReturn,\n # \"equation\": equation,\n \"r2\": rSquared\n }\n","repo_name":"blakesanie/Stock-Analysis","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36923199342","text":"import argparse\nimport movie_data\nimport content_recommender\n\n\ndef show_user_id_prompt():\n while True:\n try:\n user_id = int(input(\"Enter the desired user ID: \"))\n return user_id\n except ValueError:\n print(\"Please input integer only...\")\n\n\ndef print_user_profile(user_profile):\n sorted_profile = {k: v for k, v in sorted(user_profile.items(), key=lambda item: item[1], reverse=True)}\n for genre in sorted_profile:\n print('{0}: {1}'.format(genre, sorted_profile[genre]))\n\n\nif __name__ == \"__main__\":\n try:\n # Command line arguments\n # --user set the user_id for which to generate recommendations\n # --strategy strategy to be used in recommendation process\n # --sample set the number of randomly sampled movies used in recommendation process\n # (does not influence user profile creation)\n parser = argparse.ArgumentParser()\n parser.add_argument('--user', dest='user_id', type=int, default=-1,\n help='id of user to generate recommendations for')\n parser.add_argument('--strategy', dest='strategy', type=str, default='all',\n help='strategy to use for recommendations (overlap, popularity, count, all)')\n parser.add_argument('--sample', dest='sample_size', type=int, default=-1,\n help='set the number of randomly sampled movies used in recommendation process (does not '\n 'influence user profile creation)')\n\n args = parser.parse_args()\n # if user_id was not set via cl arguments then prompt user for input\n if args.user_id == -1:\n args.user_id = show_user_id_prompt()\n\n if args.strategy not in ('overlap', 'popularity', 'count', 'all'):\n raise ValueError('Strategy must be in (overlap, popularity, count, all)')\n\n print('Loading data...')\n md = movie_data.MovieData()\n # if sample size > actual movie count\n if md.get_movie_count() < args.sample_size:\n raise ValueError('Sample size must be smaller than user and movie count ({0})'\n .format(md.get_movie_count()))\n\n print('\\nUser has previously rated:')\n print(md.get_rated_movies_df(args.user_id)[['Title', 'Genres']])\n\n # create user profile and print\n recommender = content_recommender.ContentBasedRecommender(data=md, sample_size=args.sample_size)\n user_profile = recommender.get_user_profile(args.user_id)\n print('\\nUSER PROFILE:')\n print_user_profile(user_profile)\n\n print('\\nGenerating recommendations...')\n\n # recommendation strategy 1\n if args.strategy in ('overlap', 'all'):\n print('\\nRECOMMENDATIONS (OVERLAP):')\n df_recommendations = recommender.get_recommendations_overlap(args.user_id)\n print(df_recommendations[['Title', 'Genres']].head(30))\n\n # recommendation strategy 2\n if args.strategy in ('popularity', 'all'):\n print('\\nRECOMMENDATIONS (POPULARITY):')\n df_recommendations = recommender.get_recommendations_popularity(args.user_id)\n print(df_recommendations[['Title', 'Genres']].head(30))\n\n # recommendation strategy 3\n if args.strategy in ('count', 'all'):\n print('\\nRECOMMENDATIONS (COUNT):')\n df_recommendations = recommender.get_recommendations_count(args.user_id)\n print(df_recommendations[['Title', 'Genres']].head(30))\n\n except (ValueError, KeyError) as e:\n print('Error: ' + str(e))\n except:\n print('unknown error occurred')\n","repo_name":"elsantner/recommender-systems-assignments","sub_path":"assignment04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41859303670","text":"from helpers import analytics, primes\nanalytics.monitor()\n\nlimit = int(1e6)\n\ndef main(limit):\n phi = primes.totients(limit)\n return max(range(1,len(phi)), key=lambda i:i/phi[i])\n\nprint(main(limit), analytics.lap(), analytics.maxMem())","repo_name":"Phyisis/Problems","sub_path":"src/1-100/P069.py","file_name":"P069.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"34609675666","text":"from distutils.core import setup\nimport os\n\nreadme_fname = os.path.join(os.path.dirname(__file__), \"README.rst\")\nreadme_text = open(readme_fname).read()\n\nsetup(name=\"ftptool\", version=\"0.7.1\",\n url=\"https://github.com/bloggse/ftptool\",\n description=\"Higher-level interface to ftplib\",\n author=\"Blogg Esse AB\",\n author_email=\"teknik@blogg.se\",\n requires=[\"six\"],\n long_description=readme_text,\n py_modules=[\"ftptool\"])\n","repo_name":"bloggse/ftptool","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"}
+{"seq_id":"5786255722","text":"import json\n\nclass Project:\n def __init__(self, title, description, due_date):\n self.title = title\n self.description = description\n self.due_date = due_date\n self.tasks = []\n\n def add_task(self, task):\n self.tasks.append(task)\n\n def remove_task(self, task):\n self.tasks.remove(task)\n\n def complete_task(self, task):\n task.complete()\n\n def get_completed_tasks(self):\n return [task for task in self.tasks if task.completed]\n\n def get_incomplete_tasks(self):\n return [task for task in self.tasks if not task.completed]\n\n def display_tasks(self):\n print(\"Aktualne zadania w projekcie:\")\n print(\"Tytuł\\tOpis\\tPriorytet\\tStatus\")\n for task in self.tasks:\n completed = \"Zakończone\" if task.completed else \"Nie zakończone\"\n print(f\"{task.title}\\t{task.description}\\t{task.priority}\\t{completed}\")\n print(\"--------------------\")\n\nclass Task:\n def __init__(self, title, description, priority, completed=False):\n self.title = title\n self.description = description\n self.priority = priority\n self.completed = completed\n\n def complete(self):\n self.completed = True\n\n def edit(self, new_title, new_description, new_priority):\n self.title = new_title\n self.description = new_description\n self.priority = new_priority\n\nclass ProjectManager:\n def __init__(self):\n self.projects = []\n\n def create_project(self, title, description, due_date):\n project = Project(title, description, due_date)\n self.projects.append(project)\n return project\n\n def remove_project(self, project):\n self.projects.remove(project)\n\n def save_projects(self, filename):\n data = []\n for project in self.projects:\n project_data = {\n \"title\": project.title,\n \"description\": project.description,\n \"due_date\": project.due_date,\n \"tasks\": [\n {\n \"title\": task.title,\n \"description\": task.description,\n \"priority\": task.priority,\n \"completed\": task.completed\n }\n for task in project.tasks\n ]\n }\n data.append(project_data)\n\n with open(filename, \"w\") as file:\n json.dump(data, file, indent=4)\n\n def load_projects(self, filename):\n with open(filename, \"r\") as file:\n data = json.load(file)\n\n self.projects = []\n for project_data in data:\n project = Project(project_data[\"title\"], project_data[\"description\"], project_data[\"due_date\"])\n for task_data in project_data[\"tasks\"]:\n task = Task(task_data[\"title\"], task_data[\"description\"], task_data[\"priority\"])\n task.completed = task_data[\"completed\"]\n project.add_task(task)\n\n self.projects.append(project)\n\ndef save_tasks(tasks, filename):\n with open(filename, 'w') as file:\n json.dump(tasks, file)\n\ndef load_tasks(filename):\n try:\n with open(filename, 'r') as file:\n return json.load(file)\n except FileNotFoundError:\n return []\n\nproject_manager = ProjectManager()\n\nproject1 = project_manager.create_project(\"Aplikacja do zarządzania zadaniami\", \"Aplikacja webowa do śledzenia zadań\", \"2023-06-30\")\n\ntasks = load_tasks('tasks.json')\nfor task_data in tasks:\n task = Task(task_data['title'], task_data['description'], task_data['priority'], task_data['completed'])\n project1.add_task(task)\n\ndef add_new_task(project):\n title = input(\"Podaj tytuł zadania: \")\n description = input(\"Podaj opis zadania: \")\n priority = input(\"Podaj priorytet zadania: \")\n task = Task(title, description, priority)\n project.add_task(task)\n print(\"Dodano nowe zadanie:\")\n print(\"Tytuł:\", task.title)\n print(\"Opis:\", task.description)\n print(\"Priorytet:\", 
task.priority)\n print(\"--------------------\")\n\n\nwhile True:\n command = input(\"Wprowadź komendę (add - dodaj nowe zadanie, delete - usuń zadanie, complete - oznacz zadanie jako zakończone, display - wyświetl zadania, quit - wyjście): \")\n if command == \"add\":\n add_new_task(project1)\n elif command == \"delete\":\n task_title = input(\"Podaj tytuł zadania do usunięcia: \")\n for task in project1.tasks:\n if task.title == task_title:\n project1.remove_task(task)\n print(f\"Usunięto zadanie o tytule: {task_title}\")\n break\n else:\n print(\"Zadanie o podanym tytule nie zostało znalezione.\")\n elif command == \"complete\":\n task_title = input(\"Podaj tytuł zadania do oznaczenia jako zakończone: \")\n for task in project1.tasks:\n if task.title == task_title:\n task.complete()\n print(f\"Zadanie o tytule {task_title} zostało oznaczone jako zakończone.\")\n break\n else:\n print(\"Zadanie o podanym tytule nie zostało znalezione.\")\n elif command == \"display\":\n project1.display_tasks()\n elif command == \"quit\":\n\n tasks = []\n for task in project1.tasks:\n task_data = {\n 'title': task.title,\n 'description': task.description,\n 'priority': task.priority,\n 'completed': task.completed\n }\n tasks.append(task_data)\n save_tasks(tasks, 'tasks.json')\n\n print(\"Program zakończony.\")\n break\n","repo_name":"Seppyo/planerProjektow","sub_path":"planerZadań.py","file_name":"planerZadań.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5006077652","text":"import random\n\n# A stone of mass 0.05 kg is thrown vertically upwards. Give the direction and magnitude of the net force on the pebble,\n#(a) during its upward motion,\n#(b) during its downward motion,\n#(c) at the highest point where it is momentarily at rest. \n# Do your answers change if the pebble was thrown at an angle of 45° with the horizontal direction\n\n# Give the magnitude and direction of the net force acting on a stone of mass 0.1 kg,\n#(a) just after it is dropped from the window of a stationary train,\n#(b) just after it is dropped from the window of a train running at a constant velocity of 36 km/ h,\n#(c) just after it is dropped from the window of a train accelerating with 1 ms-2,\n#(d) lying on the floor of a train which is accelerating with 1 m s~2, the stone being at rest relative to the train.\n\n# A rocket with a lift-off mass 20,000 kg is blasted upwards with an initial acceleration of 5.0 ms-2. Calculate the initial thrust (force) of the blast.\n\nqns = open('./questions.txt', 'w') \nans = open('./answers.txt','w')\nno_of_samples = 2500000\n# no_of_samples = 30\n\nfor i in range(no_of_samples):\n type = random.randint(1,5)\n if type == 1 or type == 3:\n m = random.randint(1,200)\n v = random.randint(1,200)\n angle = random.randint(0,90)\n q = \"A stone of mass \"+str(m)+\" kg is thrown vertically upwards with a velocity of \"+str(v)+\" ms-1. Give the direction and magnitude of the net force on the stone,\"\n t2 = random.randint(1,3)\n if t2 == 1:\n q = q + \" during its upward motion, \"\n elif t2 == 2:\n q = q + \" during its downward motion, \"\n else:\n q = q + \" at the highest point where it is momentarily at rest, \"\n q = q + \" do the answer change if the stone was thrown at an angle of \"+str(angle)+\" degree with the horizontal direction?\\n\"\n a = str(m*10)+\" newton, no answer does not change in 2nd case also.\\n\"\n elif type == 2 or type == 4:\n m = random.randint(10,2000)\n q = \"Give the magnitude and direction of the net force acting on a stone of mass \"+str(m)+\" g,\"\n a = str(round(m/100,1))+\" newton, vertically downwards\\n\"\n t2 = random.randint(1,5)\n if t2 == 1:\n l = random.randint(100,2000)\n q = q + \" just after it is dropped from the window of a stationary train of length \"+str(l)+\" m?\\n\"\n elif t2 == 2:\n v = random.randint(1,2000)\n q = q + \" just after it is dropped from the window of a train running at a constant velocity of \"+str(v)+\" ms-1?\\n\"\n elif t2 == 3:\n acc = random.randint(1,2000)\n q = q + \" just after it is dropped from the window of a train accelerating with \"+str(acc)+\" ms-2?\\n\"\n else:\n acc = random.randint(10,2000)\n q = q + \" lying on the floor of a train which is accelerating with \"+str(acc)+\" ms-2, the stone being at rest relative to the train?\\n\"\n a = str(round((m*acc)/1000,1)) + \" newton, along the direction of motion of train\\n\"\n else:\n m = random.randint(1000,20000)\n acc = random.randint(20,300)\n q = \"A rocket with a lift-off mass \"+str(m)+\" kg is blasted upwards with an initial acceleration of \"+str(acc)+\" ms-2. Calculate the initial thrust (force) of the blast.\\n\"\n a = str(m*a) + \" newton\\n\"\n qns.write(q)\n ans.write(a)\n # print(q)\n # print(a)\nqns.close()\nans.close()","repo_name":"misterpawan/scimat2","sub_path":"science/LawsOfMotion/Force/Force.py","file_name":"Force.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"8579654026","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import Personaje\nfrom .serializers import PersonajeSerializer\n\n\nclass PersonajeList(APIView):\n def get(self, request):\n personajes = Personaje.objects.all()\n serializer = PersonajeSerializer(personajes, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n serializer = PersonajeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PersonajeDetail(APIView):\n def get_object(self, pk):\n try:\n return Personaje.objects.get(pk=pk)\n except Personaje.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n snippet = self.get_object(pk)\n serializer = PersonajeSerializer(snippet)\n return Response(serializer.data)\n\n def put(self, request, pk):\n snippet = self.get_object(pk)\n serializer = PersonajeSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk):\n snippet = self.get_object(pk)\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"RenatoPeG/CF_Server","sub_path":"cholofighter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"7814827767","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\ndr = [-1, 1, 0, 0]\ndc = [0, 0, -1, 1]\n\n\ndef gravity(arr):\n for c in range(W):\n s = []\n for r in range(H):\n if arr[r][c]:\n s.append(arr[r][c])\n arr[r][c] = 0\n h = H-1\n while s:\n arr[h][c] = s.pop()\n h -= 1\n\n\ndef shot(row, col, arr1):\n stack = [(row, col)]\n while stack:\n r, c = stack.pop()\n if arr1[r][c] > 1:\n for i in range(4):\n nr, nc = r + dr[i], c + dc[i]\n for j in range(arr1[r][c] - 1):\n if 0 <= nr < H and 0 <= nc < W:\n if arr1[nr][nc] > 1:\n stack.append((nr, nc))\n else:\n arr1[nr][nc] = 0\n nr, nc = nr + dr[i], nc + dc[i]\n else:\n break\n arr1[r][c] = 0\n gravity(arr1)\n\ndef remains(arr):\n cnt = 0\n for c in range(W):\n for r in range(H-1, -1, -1):\n if not arr[r][c]:\n break\n cnt += 1\n return cnt\n\ndef dfs(idx, arr):\n global bricks\n if not bricks:\n return\n if idx == N:\n res = remains(arr)\n if res < bricks:\n bricks = res\n return\n\n for w in range(W):\n arr1 = [list(arr[_]) for _ in range(H)]\n for h in range(H):\n if arr1[h][w]:\n shot(h, w, arr1)\n if not remains(arr1):\n bricks = 0\n return\n dfs(idx+1, arr1)\n break\n\nfor tc in range(1, T+1):\n N, W, H = map(int, input().split())\n matrix = [list(map(int, input().split())) for i in range(H)]\n bricks = 987654321\n dfs(0, matrix)\n print('#{} {}'.format(tc, bricks))\n\n","repo_name":"asooso1/ssafy_algorithm","sub_path":"1012/박근석/5656_벽돌_깨기/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34536305847","text":"# Sort it first on the basis of age then on the basis of salary.\nimport pandas as pd\n\na = [{'name': 'a', 'salary': 20000, 'age': 25},\n {'name': 'b', 'salary': 25000, 'age': 23},\n {'name': 'c', 'salary': 34000, 'age': 25},\n {'name': 'd', 'salary': 13000, 'age': 30}]\n\ndf1 = pd.DataFrame(a)\nprint(df1.sort_values(by='age'))\nprint(df1.sort_values(by='salary'))","repo_name":"PiyushBadule/Python_Codes","sub_path":"Sort_on_basis_of_objects.py","file_name":"Sort_on_basis_of_objects.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15201476304","text":"from time import time\nfrom random import choice as ch\nimport tkinter as tk\n\nclass App:\n def __init__(self):\n with open('../text.txt', 'r', encoding='UTF-8') as f:\n self.text = ch(f.readlines())\n for chars in [['ё', 'е'], ['Ё', 'е'], ['«', '\"'], ['»', '\"']]:\n self.text.replace(chars[0], chars[1])\n\n self.startTime = self.endTime = self.start = self.wrong = self.characters = self.words = self.cpm = self.wpm = self.accuracy = self.cpmWpm = 0\n\n\n self.root = tk.Tk()\n root = self.root\n\n self.main_frm = tk.Frame(root, bg='#0d0124', width=550, height=350)\n main_frm = self.main_frm\n self.frm = tk.Frame(root, bg='#0b1f07', width=550, height=350)\n frm = self.frm\n\n self.lbl = tk.Label(main_frm, text='Начните печатать текст, чтобы начать', font=('Comic', 20), fg='green',\n bg='#0d0124')\n self.ent = tk.Entry(main_frm, textvariable=True, font=('Arial', 20), width=25, state='normal', fg='purple',\n bg='#0d0124')\n self.ent.insert(0, self.text)\n self.btn = tk.Button(main_frm, command=self.cpm_wpm, text='Завершить', font=('Times New Roman', 15), fg='green',\n bg='#0d0124')\n self.lbl_end = tk.Label(frm,\n text=f'Твой результат: \\ncpm: {self.cpm}\\nwpm: {self.wpm}\\nточность: {self.accuracy}%',\n font=('Sans', 30), fg='purple', bg='#0b1f07', width=22)\n self.again = tk.Button(frm, command=self.Again, text='Еще раз', font=('Times New Roman', 15), fg='yellow',\n bg='#0b1f07', width=7)\n self.quit = tk.Button(frm, command=self.Quit, text='Выйти', font=('Times New Roman', 15), fg='red',\n bg='#0b1f07', width=7)\n\n\n def Window(self):\n root = self.root\n root['bg'] = 'black'\n root.title('cpm_wpm')\n #root.geometry('550x350')\n #root.resizable(width=False, height=False)\n root.bind('', self.keyboard)\n\n root.mainloop()\n\n def Main_Frame(self):\n self.lbl.pack(padx=10, pady=50)\n self.ent.pack(padx=10, pady=10)\n self.btn.pack(pady=50)\n self.main_frm.grid()\n\n def Frame(self):\n frm = self.frm\n self.lbl_end.pack(pady=10)\n self.again.pack(pady=10)\n self.quit.pack(pady=10)\n frm.grid()\n\n def Text(self):\n with open('../text.txt', 'r', encoding='UTF-8') as f:\n self.text = ch(f.readlines())\n for chars in [['ё', 'е'], ['Ё', 'е'], ['«', '\"'], ['»', '\"']]:\n self.text.replace(chars[0], chars[1])\n self.ent.delete(0, 'end')\n self.ent.insert(0, self.text)\n\n def Again(self):\n self.startTime = self.endTime = self.start = self.wrong = self.characters = self.words = self.cpm = self.wpm = self.accuracy = self.cpmWpm = 0\n\n self.frm.grid_forget()\n self.main_frm.grid()\n\n self.Text()\n\n self.cpmWpm = 0\n\n def Quit(self):\n self.root.quit()\n\n def cpm_wpm(self):\n self.endTime = round(time())\n endTime = self.endTime\n startTime = self.startTime\n\n root = self.root\n\n\n inputTime = (endTime - startTime)\n\n characters = self.characters\n words = self.words\n wrong = self.wrong\n\n\n minLen = min(characters, len(self.text))\n try:\n cpm = round(characters / inputTime * 60)\n wpm = round(words / inputTime * 60)\n accuracy = 100 - round(wrong / minLen * 100)\n except ZeroDivisionError:\n cpm = 0\n wpm = 0\n accuracy = 0\n\n\n self.main_frm.grid_forget()\n self.cpm, self.wpm, self.accuracy = cpm, wpm, accuracy\n self.lbl_end.configure(text=f'Твой результат: \\ncpm: {self.cpm}\\nwpm: {self.wpm}\\nточность: {self.accuracy}%')\n self.Frame()\n\n self.cpmWpm = 1\n\n def keyboard(self, event):\n root = self.root\n if event.keysym == 'Escape':\n root.quit()\n elif event.keysym == 'Return':\n if self.cpmWpm == 0:\n self.cpmWpm = 1\n self.cpm_wpm()\n else:\n 
self.Again()\n try:\n if len(self.ent.get()) == 0:\n self.cpm_wpm()\n except:\n pass\n else:\n if event.char == self.ent.get()[0]:\n if self.start == 0:\n self.startTime = round(time())\n self.start = 1\n self.characters += 1\n if event.char == ' ':\n self.words += 1\n self.ent.delete(0, 1)\n else:\n if self.start == 1:\n self.wrong += 1\n\n def main(self):\n self.Main_Frame()\n self.Window()\n\n\nif __name__ == '__main__':\n App().main()\n","repo_name":"Kuso0taku/cpm_wpm","sub_path":"GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20231254447","text":"import string\nimport re\n# Part 1\nwith open('day3-input.txt', 'r') as f:\n rucksacks = f.readlines()\n\nvalue_string = '{}{}'.format(string.ascii_lowercase, string.ascii_uppercase)\n\nsum = 0\nfor r in [r.strip() for r in rucksacks]:\n compartment1 = r[0:int((len(r)+1)/2)]\n compartment2 = r[int((len(r)+1)/2):]\n share = list(set(compartment1).intersection(compartment2))\n sum += value_string.index(share[0]) + 1 # index starts at 0\nprint(sum)\n\n# Part 2\nsum = 0\nwith open('day3-input.txt', 'r') as f:\n rucksacks = [r.strip() for r in f.readlines()]\n for i in range(0, len(rucksacks), 3):\n sacks = rucksacks[i:i+3]\n share = list(set(sacks[0]).intersection(sacks[1]).intersection(sacks[2]))\n #print(share)\n sum += value_string.index(share[0]) + 1 # index starts at 0\nprint(sum)","repo_name":"knastase/AoC2022","sub_path":"day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5287271416","text":"import networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\nG=nx.read_adjlist(\"graphe1.txt\", create_using=nx.DiGraph())\r\n\r\ndef AffichageGraphe(graphe):\r\n pos = nx.planar_layout(graphe)\r\n\r\n nx.draw_networkx_nodes(graphe, pos)\r\n nx.draw_networkx_edges(graphe, pos)\r\n nx.draw_networkx_labels(graphe, pos)\r\n\r\n\r\n print(f\"Ce graphe contient {graphe.number_of_nodes()} sommets et {graphe.number_of_edges()} arêtes.\")\r\n plt.show()\r\n\r\ndef arcs_arrivant(graphe):\r\n liste_arrivant = []\r\n\r\n for s in graphe.edges:\r\n if s[1] == \"2\":\r\n liste_arrivant.append(s)\r\n return liste_arrivant\r\n\r\ndef arcs_arrivantV2(graphe):\r\n liste_arrivant = []\r\n\r\n for s in graphe.predecessors(\"2\"):\r\n arc = (s, \"2\")\r\n liste_arrivant.append(arc)\r\n return liste_arrivant\r\n\r\nprint(list(G.successors(\"2\")))\r\nprint(arcs_arrivantV2(G))\r\nAffichageGraphe(G)\r\n","repo_name":"AlexandroAR/SAE3.02","sub_path":"TP/lecture_adj.py","file_name":"lecture_adj.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"5863517980","text":"from twisted.internet import defer\nfrom player import Player\n\nclass Server:\n\t\"\"\" this represents a bf3 server \"\"\"\n\n\tdef __init__(self, rcon):\n\t\tself.teams = {}\n\t\tself.players = {}\n\t\tself.rcon = rcon\n\t\n\tdef addPlayer(self, name, guid):\n\t\tlname = name.lower()\n\t\tif lname in self.players:\n\t\t\treturn self.players[lname]\n\t\tph = Player(name, guid)\n\t\tself.players[ph.lname] = ph\n\t\treturn ph\n\t\n\tdef delPlayer(self, name):\n\t\tlname = name.lower()\n\t\tif lname in self.players:\n\t\t\tph = self.players[lname]\n\t\t\tph.finalize()\n\t\t\tdel self.players[lname]\n\t\t\tdel ph\n\t\n\t@defer.inlineCallbacks\n\tdef getPlayer(self, name):\n\t\tlname = name.lower()\n\t\tif lname in self.players:\n\t\t\tdefer.returnValue(self.players[lname])\n\t\t\treturn\n\t\t### player not found, so let's create him\n\t\tpl = yield self.rcon.admin_listOnePlayer(name)\n\t\tph = self.addPlayer(pl['name'], pl['guid'])\n\t\tdefer.returnValue(ph)\n\t\treturn\n\t\n\n","repo_name":"ragzilla/txfbrcon","sub_path":"serverstate/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"26961951943","text":"# -*- coding: utf-8 -*-\nfrom conf import *\nfrom tool.utils import OSUtils\n\nos_utils = OSUtils()\n\n\ndef sync_common_data_files():\n if not os_utils.directory_exists(COMMON_DIR):\n raise ValueError(\"common directory not existed!\")\n if not os_utils.directory_exists(SERVER_DIR):\n raise ValueError(\"server directory not existed!\")\n if not os_utils.directory_exists(CLIENT_DATA_DIR):\n raise ValueError(\"client data directory not existed!\")\n for root_dir, _, file_names in os_utils.walk(COMMON_DIR):\n for filename in file_names:\n source = os_utils.joinpath(root_dir, filename)\n server_dist = os_utils.joinpath(SERVER_DIR, filename)\n client_dist = os_utils.joinpath(CLIENT_DATA_DIR, filename)\n os_utils.copy(source, server_dist)\n os_utils.copy(source, client_dist)\n print(\"copy file %s to server/client ok.\" % filename)\n\n\ndef package_client():\n if not os_utils.directory_exists(CLIENT_DIR):\n raise ValueError(\"client directory not existed!\")\n with os_utils.open_zip(UPDATE_FILE, \"w\",\n os_utils.ZIP_DEFLATED) as zipped:\n prefix_len = len(CLIENT_DIR) + 1\n for root, _, file_names in os_utils.walk(CLIENT_DIR):\n for filename in file_names:\n full_path = os_utils.joinpath(root, filename)\n zip_path = full_path[prefix_len:]\n zipped.write(full_path, zip_path)\n print(\"package client to data.7z\")\n\n\nif __name__ == '__main__':\n sync_common_data_files()\n package_client()\n","repo_name":"dwdw520533/mhzx","sub_path":"tool/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"6511840186","text":"import os\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom elections.constants import (\n PEOPLE_FOR_BALLOT_KEY_FMT,\n POLLING_STATIONS_KEY_FMT,\n POSTCODE_TO_BALLOT_KEY_FMT,\n)\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--full\",\n action=\"store_true\",\n dest=\"full\",\n default=False,\n help=\"Import all data, not just people\",\n )\n\n def handle(self, **options):\n if options[\"full\"]:\n commands = [\n (\"import_parties\",),\n (\"import_ballots\",),\n (\"import_people\",),\n ]\n else:\n commands = [(\"import_people\", \"--recently-updated\")]\n\n for command in commands:\n print(\" \".join(command))\n call_command(*command)\n\n # Delete the cache on a full import\n if options[\"full\"] and hasattr(cache, \"delete_pattern\"):\n for fmt in (\n POLLING_STATIONS_KEY_FMT,\n POSTCODE_TO_BALLOT_KEY_FMT,\n PEOPLE_FOR_BALLOT_KEY_FMT,\n ):\n cache.delete_pattern(fmt.format(\"*\"))\n\n # Unset dirty file if it exists\n if getattr(settings, \"CHECK_HOST_DIRTY\", False):\n dirty_file_path = os.path.expanduser(\n getattr(settings, \"DIRTY_FILE_PATH\")\n )\n\n if os.path.exists(dirty_file_path):\n os.remove(dirty_file_path)\n","repo_name":"DemocracyClub/WhoCanIVoteFor","sub_path":"wcivf/apps/core/management/commands/init_data.py","file_name":"init_data.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"}
+{"seq_id":"1199504526","text":"import random\n\nimport requests\nimport json\n\nimport tornado.gen\nimport tornado.web\n\nfrom common.web import requestsManager\nfrom common.sentry import sentry\nfrom objects import glob\n\nMODULE_NAME = \"direct_download\"\nclass handler(requestsManager.asyncRequestHandler):\n\t\"\"\"\n\tHandler for /d/\n\t\"\"\"\n\t@tornado.web.asynchronous\n\t@tornado.gen.engine\n\t@sentry.captureTornado\n\tdef asyncGet(self, bid):\n\t\ttry:\n\t\t\tnoVideo = bid.endswith(\"n\")\n\t\t\tif noVideo:\n\t\t\t\tbid = bid[:-1]\n\t\t\tbid = int(bid)\n\t\t\trequestIP = requestsManager.getRequestIP(self)\n\t\t\tipa = requests.get(\"http://ip-api.com/json/{}?fields=continent,country\".format(requestIP)).text\n\t\t\tjsonOut = json.loads(ipa)\n\t\t\t\"\"\"\n\t\t\tif jsonOut[\"continent\"] == \"North America\" or jsonOut[\"continent\"] == \"South America\":\n\t\t\t\tmirror = \"https://aoba-proxy-us.herokuapp.com\"\n\t\t\t\ttry:\n\t\t\t\t\tc_mirror = \"https://aoba-proxy-us.herokuapp.com\"\n\t\t\t\t\trequests.get(c_mirror)\n\t\t\t\t\tprint(\"US SERVER OK\")\n\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\tprint(\"US DOWNLOAD WORKS\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"US SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tprint(\"US SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\telif jsonOut[\"continent\"] == \"Europe\" or jsonOut[\"continent\"] == \"Africa\":\n\t\t\t\teu_mirror = ['https://storage.ainu.pw', 'https://aoba-proxy-eu.herokuapp.com']\n\t\t\t\ttry:\n\t\t\t\t\tc_mirror = \"https://aoba-proxy-eu.herokuapp.com\"\n\t\t\t\t\trequests.get(c_mirror)\n\t\t\t\t\tprint(\"EU SERVER OK\")\n\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\tprint(\"EU DOWNLOAD WORKS\")\n\t\t\t\t\t\teu_mirror = ['https://storage.ainu.pw', 'https://aoba-proxy-eu.herokuapp.com']\n\t\t\t\t\t\tmirror = random.choice(eu_mirror)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"EU SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tprint(\"EU SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\telif jsonOut[\"continent\"] == \"Australia\":\n\t\t\t\tmirror = \"https://bm.realm.so\"\n\t\t\t\ttry:\n\t\t\t\t\tc_mirror = \"https://bm.realm.so\"\n\t\t\t\t\trequests.get(c_mirror)\n\t\t\t\t\tprint(\"AU SERVER OK\")\n\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\tprint(\"AU DOWNLOAD WORKS\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"AU SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tprint(\"AU SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t# Server is too slow, so I disabled it.\n#\t\t\telif jsonOut[\"continent\"] == \"Asia\":\n#\t\t\t\tmirror = \"https://bm-th.ainu.pw\"\n#\t\t\t\ttry:\n#\t\t\t\t\tc_mirror = \"https://bm-th.ainu.pw\"\n#\t\t\t\t\trequests.get(c_mirror)\n#\t\t\t\t\tprint(\"TH/SEA SERVER OK\")\n#\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n#\t\t\t\t\tif response.status_code == 200:\n#\t\t\t\t\t\tprint(\"TH/SEA DOWNLOAD 
WORKS\")\n#\t\t\t\t\telse:\n#\t\t\t\t\t\tprint(\"TH/SEA SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n#\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n#\t\t\t\texcept requests.exceptions.ConnectionError:\n#\t\t\t\t\tprint(\"TH/SEA SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n#\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\telse:\n\t\t\t\"\"\"\n\t\t\tmirror = \"https://storage.rina.place\"\n\n\t\t\tself.set_status(302, \"Moved Temporarily\")\n\t\t\tself.add_header(\"Location\", \"{}/d/{}{}\".format(mirror, bid, \"n\" if noVideo else \"\"))\n\t\t\tself.add_header(\"Cache-Control\", \"no-cache\")\n\t\t\tself.add_header(\"Pragma\", \"no-cache\")\n\t\texcept ValueError:\n\t\t\tself.set_status(400)\n\t\t\tself.write(\"Invalid set id\")","repo_name":"Unny984/eee","sub_path":"downloadMapHandler.py","file_name":"downloadMapHandler.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3066934663","text":"import os\nimport json\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torchvision import datasets, transforms, models\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport argparse\nimport logging\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\n\nclass Predict:\n def __init__(self, image_path):\n self.image_path = image_path\n \n\n def load_checkpoint(self, checkpoint):\n checkpoint = torch.load(checkpoint)\n self.model = models.vgg16(pretrained=True)\n for param in self.model.parameters(): \n param.requires_grad = False\n \n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear (25088, 4096)),\n ('relu1', nn.ReLU ()),\n ('dropout1', nn.Dropout (0.05)),\n ('fc2', nn.Linear (4096, 2048)),\n ('relu2', nn.ReLU ()),\n ('dropout', nn.Dropout (0.05)),\n ('fc3', nn.Linear (2048, 102)),\n ('output', nn.LogSoftmax (dim =1))\n ]))\n \n self.model.classifier = classifier\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.class_to_idx = checkpoint['class_to_idx']\n \n def process_image(self):\n img = Image.open(self.image_path)\n\n original_width, original_height = img.size\n\n if original_width < original_height:\n size=[256, 256**600]\n else: \n size=[256**600, 256]\n \n img.thumbnail(size)\n center = original_width/4, original_height/4\n left, top, right, bottom = center[0]-(244/2), center[1]-(244/2), center[0]+(244/2), center[1]+(244/2)\n img = img.crop((left, top, right, bottom))\n\n numpy_img = np.array(img)/255 \n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n numpy_img = (numpy_img-mean)/std\n\n numpy_img = numpy_img.transpose(2, 0, 1)\n \n return numpy_img\n \n def imshow(self, ax=None, title=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n image = self.image_path.transpose((1, 2, 0))\n \n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n image = np.clip(image, 0, 1)\n ax.imshow(image)\n \n return ax\n \n def predict(self, top_k, device, category_names=None):\n self.model.to(device)\n self.model.eval()\n torch_image = torch.from_numpy(np.expand_dims(self.process_image(), \n axis=0)).type(torch.FloatTensor).to(device)\n\n log_probs = self.model.forward(torch_image)\n linear_probs = torch.exp(log_probs)\n top_probs, top_labels = linear_probs.topk(top_k)\n \n top_probs = np.array(top_probs.detach())[0] \n top_labels = np.array(top_labels.detach())[0]\n \n idx_to_class = {val: key for key, val in self.model.class_to_idx.items()}\n top_labels = [idx_to_class[lab] for lab in top_labels]\n \n if category_names:\n with open(category_names, 'r') as f:\n cat_to_name = json.load(f)\n print(cat_to_name)\n class_name = [cat_to_name[i] for i in top_labels]\n \n \n return top_probs, top_labels, class_name\n \n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Predict based on the model')\n parser.add_argument('image_path', type=str, help='provide an image path')\n parser.add_argument('checkpoint', type=str, help='models checkpoint')\n parser.add_argument('--top_k', type=int, default=5, help=\"return top k most likely calsses\")\n parser.add_argument('--category_names', type=str, help='a mapping of categories to real names from a json file')\n parser.add_argument('--gpu', action='store_true', help = 'enable the GPU')\n\n args = parser.parse_args()\n image_path = args.image_path\n checkpoint = args.checkpoint\n top_k = args.top_k\n category_names = args.category_names\n \n 
cuda = False\n if args.gpu:\n if torch.cuda.is_available():\n cuda = True\n else:\n logging.warning(\"GPU is not exist, use CPU instead\") \n device = \"cuda\" if cuda else \"cpu\" \n \n predict = Predict(image_path)\n predict.load_checkpoint(checkpoint)\n numpy_img = predict.process_image()\n top_probs, top_labels, class_name = predict.predict(top_k, device, category_names)\n print(\"=\"*80)\n print(\" \"*35 + 'FLOWER PREDICTOR')\n print(\"=\"*80)\n print(\"Input label (or labels) = {}\".format(top_labels))\n print(\"Probability confidence(s) = {}\".format(top_probs))\n print(\"Class(es) name(s) = {}\".format(class_name))\n print(\"=\"*80)\n \n \n ","repo_name":"skyicechuchu/AI_Programming_with_python","sub_path":"project2/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18306906451","text":"import unittest\n\nfrom catlizor import Catlizor, Hook\n\n\nclass TaskManager:\n def __init__(self):\n self.tasks = {}\n\n def add_task(self, task: str, *items):\n self.tasks[task] = items\n\n def pop_task(self):\n return self.tasks.popitem()\n\n def get_tasks(self, task: str):\n return self.tasks[task]\n\n\n@Hook.pre\nclass PreHook(Hook):\n methods = [\"add_task\"]\n callbacks = []\n\n\n@Hook.on_call\nclass OnCallHook(Hook):\n methods = [\"get_tasks\"]\n callbacks = []\n\n\n@Hook.post\nclass PostHook(Hook):\n methods = [\"pop_task\"]\n callbacks = []\n\n\nclass TestCatlizor(unittest.TestCase):\n def test_catlizor(self):\n results = []\n\n def callback(result):\n nonlocal results\n if result.result is not None:\n results.append(result.result)\n else:\n results.append(result.args)\n\n PreHook.callbacks = [callback]\n OnCallHook.callbacks = [callback]\n PostHook.callbacks = [callback]\n\n PreHook.update_hookspec()\n OnCallHook.update_hookspec()\n PostHook.update_hookspec()\n\n tm_catlizor = Catlizor.hook(TaskManager, PreHook, OnCallHook, PostHook)\n tm = TaskManager()\n tm.add_task(\"a\", 1, 2)\n tm.get_tasks(\"a\")\n tm.pop_task()\n\n self.assertEqual(results, [(tm, \"a\", 1, 2), (1, 2), {}])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"isidentical-archive/catlizor","sub_path":"tests/test_catlizor.py","file_name":"test_catlizor.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"48"}
+{"seq_id":"551784658","text":"import pickle\nimport torch\n\n\nclass BaseActvMap:\n '''\n Base class for read activation map (if saved), calculate activation map (if not saved), \n and flatten the activation maps with different methods.\n Inherented by CulpritNeuronScore and Uncertainty class.\n Function:\n 1. load saved pkl data\n 2. flatten actv map\n '''\n \n def __init__(self):\n pass \n \n \n def load_pkl(self, path):\n '''\n load pkl files in the folder with the filename: gt, pred, activationMap, map_shape\n '''\n with open(path + 'activationMap.pkl', 'rb') as f:\n actv_map = pickle.load(f)\n with open(path + 'gt.pkl', 'rb') as f:\n gt = pickle.load(f)\n with open(path + 'pred.pkl', 'rb') as f:\n pred_prob = pickle.load(f)\n with open(path + 'map_shape.pkl', 'rb') as f:\n map_shape = pickle.load(f)\n # sanity check for data shape\n assert gt.shape[0] == pred_prob.shape[0], 'pred and gt do not have the same datapoints, pred {}, gt {}'.format(pred_prob.shape, gt.shape)\n for i in range(len(map_shape)):\n assert actv_map[i].size()[1:] == map_shape[i][1:], 'activation map {} and map shape are not at the same length, activateion map {}, map_shape {}.'.format(i, actv_map[i].size(), map_shape[i])\n print('*** actv shape (ignore dim 0 - batch size) is: {} .'.format(map_shape))\n print('*** {} data loaded ***'.format(path))\n return actv_map, gt.numpy(), pred_prob.numpy(), map_shape\n\n \n def flatten_actv_map(self, actv_map, mode):\n '''\n Input:\n - actv_map, a dict of {layer idx: activation map for that layer of shape (datapoints, activations) - FC layer, or (datapoints, 3D activation maps) - conv}\n Output: \n - feature, turnout, of shape (datapoints, neurons). numpy object\n Method:\n 1. flatten the 2D HxW activation map of one channel/unit/neuron to be a 1D scalar. \n mode: average, max, median\n 2. aggregate the neurons/channels at each layer to be single activation vector.\n \n '''\n # flatten activation map.\n mode_dict = {'mean': torch.mean, 'max': torch.max, 'median':torch.median, 'lognormal': 'lognormal'}\n activation = []\n turnouts = [] # appending variable for layerwise turnout\n # i corresponds to layer i in actv_map, of tensor d greater than 2. 
Disregards FC layers etc.\n for i in range(len(actv_map)):\n # conv layer case\n if len(actv_map[i].size()) > 2:\n actv_map_flattened = actv_map[i].reshape(actv_map[i].shape[0], actv_map[i].shape[1], -1)\n if mode == 'max':\n convert_map_to_scalar, _ = mode_dict[mode](actv_map_flattened, dim = 2)\n elif mode != 'lognormal':\n # take mean, median, etc across channel volume\n convert_map_to_scalar = mode_dict[mode](actv_map_flattened, dim = 2)\n activation.append(convert_map_to_scalar)\n elif mode == 'lognormal':\n # extract non-zero activations, log transform, take mean, transform back to initial domain.\n n_val = list(actv_map_flattened.size())[0]\n n_kern = list(actv_map_flattened.size())[1]\n weighted_median = torch.zeros(n_val, n_kern)\n t_out = torch.zeros(n_val, n_kern) #iterable turnout variable\n for img in range(n_val):\n for kern in range(n_kern):\n activations = actv_map_flattened[img][kern] # fetch 1-d length HxW channelwise activations\n nonzero_idx = torch.nonzero(activations)\n t_out[img][kern] = len(nonzero_idx)/len(activations)\n if nonzero_idx.size()[0] == 0: # If nonzero index is empty, then do not pass an empty arg to torch.log()\n weighted_median[img][kern] = 0\n else:\n log_mean = torch.mean(torch.log(activations[nonzero_idx]))\n weighted_median[img][kern] = torch.exp(log_mean) \n # Append output after each layer \n activation.append(weighted_median) # 2d image x channel vector of weighted median activations\n turnouts.append(t_out)\n else:\n # FC layer case\n activation.append(actv_map[i])\n turnouts.append(torch.ones_like(actv_map[i]))\n feature = torch.cat(activation, dim=1)\n turnout = torch.cat(turnouts, dim=1)\n if mode != 'lognormal': \n print('*** feature shape is {}.'.format(feature.shape))\n else:\n print('*** non-zero image specific actv shape: {} | turnout: {} |'.format(feature.shape, turnout.shape))\n # get the actv group for r/w preditions\n# self.right_actv = self.feature[self.label, :]\n# self.wrong_actv = self.feature[self.label==0, :]\n# self.right_actv_weighted_median = self.feature_weighted_median[self.label, :]\n# self.wrong_actv_weighted_median = self.feature_weighted_median[self.label==0, :]\n# print('*** right_actv shape is {}|{}, wrong_actv shape is {}|{}.'.format(self.right_actv.shape, self.right_actv_weigh ted_median.shape, self.wrong_actv.shape, self.wrong_actv_weighted_median.shape)) \n return feature.numpy(), turnout.numpy() # convert to numpy type before return\n \n \n \n\n \n","repo_name":"weinajin/pytorch_classification_template","sub_path":"activation_base_class.py","file_name":"activation_base_class.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"6419339087","text":"from hashlib import md5\nimport time\nimport os\n \ndef calMD5(str):\n m = md5()\n m.update(str)\n \n return m.hexdigest()\n \ndef calMD5ForFile(file):\n statinfo = os.stat(file)\n \n if int(statinfo.st_size)/(1024*1024) >= 1000 :\n return calMD5ForBigFile(file)\n m = md5()\n f = open(file, 'rb')\n m.update(f.read())\n f.close()\n \n return m.hexdigest()\n \ndef calMD5ForFolder(dir,MD5File):\n outfile = open(MD5File,'w',encoding=\"utf-8\")\n for root, subdirs, files in os.walk(dir):\n for file in files:\n filefullpath = os.path.join(root, file)\n \"\"\"print filefullpath\"\"\"\n \n filerelpath = os.path.relpath(filefullpath, dir)\n md5 = calMD5ForFile(filefullpath)\n outfile.write(dir+\"\\\\\"+filerelpath+':'+md5+\"\\n\")\n print(dir+\"\\\\\"+filerelpath+\" Completed!\")\n outfile.close()\n \ndef calMD5ForBigFile(file):\n m = md5()\n f = open(file, 'rb')\n buffer = 8192 # why is 8192 | 8192 is fast than 2048\n \n while 1:\n chunk = f.read(buffer)\n if not chunk : break\n m.update(chunk)\n \n f.close()\n return m.hexdigest()\n \n \n \n \nif __name__ == \"__main__\":\n #print calMD5(\"Hello World!\")\n \n t = time.time()\n #print(calMD5ForFile(\"H:\\\\[WMSUB][Detective_Conan][Movie_24_The Scarlet Bulle][BDRip][GB][1920X1080].mp4\"))\n calMD5ForFolder(\"E:\\\\World of Warcraft\",\"World_of_Warcraft.mdl\")","repo_name":"Shuai-Zuo/Trash_Codes_Archives","sub_path":"Exact time unknown/Python/FolderMd5/md5.py","file_name":"md5.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16293874539","text":"### Import libraries\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom seaborn import violinplot\nimport numpy as np\n\n\n### Functions useful for the drug comparison\n\n\ndef plot_comparison(score_rf, score_vae, score_vae_highcorr, label, title, save = False, out_path = \"figures/comparison_vae_rf.png\"):\n \"\"\"\n Plot a comparison of the random forest and vae across the three metrics.\n \"\"\"\n score = [score_rf, score_vae, score_vae_highcorr]\n xlim = [(-0.5, 0.7), (0, 9), (0.35, 0.75)]\n\n fig = plt.figure(constrained_layout = True, figsize = (15,15))\n fig.suptitle(title, fontsize = 15, fontweight = \"bold\")\n subfigs = fig.subfigures(nrows = 3, ncols = 1)\n\n for i in range(len(subfigs)):\n subfigs[i].suptitle(label[i], fontweight = \"bold\")\n axs = subfigs[i].subplots(nrows = 1, ncols = 3)\n \n for j, ax in enumerate(axs.flat):\n\n ax.grid(True)\n ax.set_title(score[i].columns[j])\n ax.set_xlim(xlim[j])\n ax.boxplot(score[i][score[i].columns[j]], vert = False)\n \n if save:\n plt.savefig(out_path, bbox_inches = 'tight', dpi = 300)\n\ndef plot_violin(score_RF_downsampling, score_RF_veryhigh_corr, score_RF_PCA, score_RF_embedding, score_RF, save = False, out_path = \"figures/embedding_comparisons.png\"):\n \"\"\"\n Plot a comparison of different RF models fitted with different input data, as violin plot.\n \"\"\"\n df1 = pd.DataFrame([score_RF_downsampling[\"Pearson's r\"].tolist(), 349*[\"Down-sampled\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df2 = pd.DataFrame([score_RF_veryhigh_corr[\"Pearson's r\"].tolist(),349*[\"Drug-correlated\"]],\\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df3 = pd.DataFrame([score_RF_PCA[\"Pearson's r\"].tolist(), 349*[\"PCA\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df4 = pd.DataFrame([score_RF_embedding[\"Pearson's r\"].tolist(), 349*[\"VAE embedded\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df5 = pd.DataFrame([score_RF[\"Pearson's r\"].tolist(), 349*[\"All proteins\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df = pd.concat([df1, df2, df3, df4, df5])\n df[\"Pearson's r\"] = df[\"Pearson's r\"].astype(float)\n\n plt.figure(constrained_layout = True, figsize = (8,8))\n ax = violinplot(x = \"Input variables\", y = \"Pearson's r\", data = df)\n ax.axhline(df4[\"Pearson's r\"].median(), ls='-.', color = \"black\")\n ax.axhline(df4[\"Pearson's r\"].quantile(0.25), ls=':', color = \"purple\")\n ax.axhline(df4[\"Pearson's r\"].quantile(0.75), ls=':', color = \"purple\")\n\n plt.suptitle(\"Comparison of Random Forest models fitted with different input data\",\\\n fontsize = 15, fontweight = \"bold\")\n\n if save:\n plt.savefig(out_path, bbox_inches = 'tight', dpi = 300)\n\ndef get_drugs(score, bad_pred = True):\n \"\"\"\n Get a list of drugs that are either well-predicted (bad_pred = False) according to all metrics of the score \n (Pearson's r, RMSE, C-index), or badly-predicted (bad_pred = True).\n \"\"\"\n drug_list = []\n \n if bad_pred:\n for i, elem in enumerate(score.values):\n if elem[0] < score[\"Pearson's r\"].quantile(0.25) and elem[1] > score[\"RMSE\"].quantile(0.75) and elem[2] < score[\"C-index\"].quantile(0.25): # then the drug is badly predicted at all level\n drug_list.append(score.index[i])\n \n else:\n for i, elem in enumerate(score.values):\n if elem[0] > score[\"Pearson's r\"].quantile(0.75) and elem[1] < score[\"RMSE\"].quantile(0.25) and elem[2] > score[\"C-index\"].quantile(0.75): # then the drug is well 
predicted at all level\n drug_list.append(score.index[i])\n \n return(drug_list)\n\ndef plot_drug_distrib(drug, bad_drugs, negative_skew, save = False, out_path = \"figures/drugs_distrib.png\"):\n \"\"\"\n Plot the distributions of badly predicted drugs (bad_drugs) and well-predicted drugs with negative \n skew (negative_skew) to see whether there are noticable differences which could explain the prediction.\n \"\"\"\n fig = plt.figure(constrained_layout = True, figsize = (15,10))\n fig.suptitle(\"Distributions of AUC of outlier drugs\", fontsize = 15, fontweight = \"bold\")\n subfigs = fig.subfigures(nrows = 2, ncols = 1)\n\n subfigs[0].suptitle(\"Well-predicted drugs\", fontweight = \"bold\")\n axs = subfigs[0].subplots(nrows = 1, ncols = 3)\n for j, ax in enumerate(axs.flat):\n ax.grid(True)\n ax.set_title(negative_skew[j])\n ax.set_xlim((-0.5,20))\n ax.set_ylim((0,180))\n ax.hist(drug[negative_skew[j]])\n ax.set_xlabel(\"Drug sensitivity (AUC)\")\n\n subfigs[1].suptitle(\"Badly-predicted drugs\", fontweight = \"bold\")\n axs = subfigs[1].subplots(nrows = 1, ncols = 3)\n for j, ax in enumerate(axs.flat):\n ax.grid(True)\n ax.set_title(bad_drugs[j])\n ax.set_xlim((-0.5,20))\n ax.set_ylim((0,180))\n ax.hist(drug[bad_drugs[j]])\n ax.set_xlabel(\"Drug sensitivity (AUC)\")\n\n if save:\n plt.savefig(out_path, bbox_inches = 'tight', dpi = 300)\n\n\n### Functions useful for the deep-SHAP analysis\n\n\ndef convert_shap_to_gene(shap_values, gene_prot, proteins, shap_threshold = 0.9):\n \"\"\"\n Order proteins by SHAP value absolute mean importance, then convert the list of proteins to set of genes.\n \"\"\"\n # order proteins by shap value absolute mean importance\n df = pd.DataFrame({\n \"mean_abs_shap\": np.mean(np.abs(shap_values), axis=0), \n \"name\": proteins\n })\n df = df.sort_values(\"mean_abs_shap\", ascending=False)\n enriched_prot = df.loc[df.mean_abs_shap > df.mean_abs_shap.quantile(shap_threshold)].name\n\n # convert from protein to genes\n gene_set = gene_prot.loc[gene_prot[\"Uniprot\"].isin(enriched_prot)].Gene_Symbol\n gene_set = set(gene_set)\n\n return(gene_set)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"SimonGrouard/astra","sub_path":"result_interpretation.py","file_name":"result_interpretation.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"19508946335","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nBASE_DIR = os.getcwd()\n\ndef make_path(url,target_path='picture'):\n dir_path = os.path.join(BASE_DIR, target_path)\n if os.path.exists(dir_path):\n pass\n else:\n os.mkdir(dir_path)\n\n response = requests.get(url=url)\n text = response.text\n soup = BeautifulSoup(text, 'html.parser')\n div_obj = soup.find(name='div', attrs={'class': 'lb_box'})\n img_list = div_obj.find_all_next(name='dt')\n dic = {}\n for i in img_list:\n dir_name = i.find(name='img').get(\"alt\").replace(\" \", \"-\")\n dir_href = i.find(name='a').get(\"href\")\n dic[dir_name] = dir_href\n return dic\n\ndef download_img(lst,name,dirname):\n for img in lst:\n img_url = img.get(\"src\").replace(\"113x113\",'740x-')\n img_content = requests.get(img_url).content\n file_name = img_url.rsplit('/', 1)[-1]\n file_path = os.path.join(BASE_DIR, dirname, name, file_name)\n try:\n with open(file_path, 'wb') as f:\n f.write(img_content)\n print(file_path, '爬取完毕')\n except OSError:\n print(file_path, '爬取失败')\n\ndef request_img(url,dirname):\n dic = make_path(url,dirname)\n for name, img_url in dic.items():\n dir_path = os.path.join(BASE_DIR, dirname, name)\n os.mkdir(dir_path)\n response2 = requests.get(url=img_url)\n text2 = response2.text\n soup2 = BeautifulSoup(text2, 'html.parser')\n div_obj2 = soup2.find(name='div', attrs={'class': 'overview'})\n if not div_obj2:\n continue\n img_list2 = div_obj2.find_all(name='img')\n download_img(img_list2, name,dirname)\n\nif __name__ == '__main__':\n url = 'http://www.yesky.com/c/6_20491.shtml'\n request_img(url,'picture3')","repo_name":"myin1994/mylearn","sub_path":"Python项目/day74/pa2.py","file_name":"pa2.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"30258256343","text":"import requests\nimport pprint # json 데이터가 줄바꿈 해서 나오지 않고 한 줄로 나올 때 유용함!\n\nclient_id = \"NQiH7_sc1SAtNUlef0Rp\"\nclient_secret = \"8rqvp8AYlW\"\n\nnaver_open_api = \"https://openapi.naver.com/v1/search/news.json?query=android\"\nheader_params = {\"X-Naver-Client-id\":client_id, \"X-Naver-Client-Secret\":client_secret} # 일종의 JSON임\nres = requests.get(naver_open_api, headers=header_params)\n\nif res.status_code == 200: # 응답코드 200이면 정상. open API도 응답코드가 해당됨\n data = res.json()\n # pprint.pprint(data)\n for idx, item in enumerate(data['items']):\n print(str(idx+1) + '.', item['title'], '/ 링크: ', item['link'])\nelse: print(\"Error Code: \", res.status_code) ","repo_name":"Jiyul-Kim/study","sub_path":"LectureAndCourse/Inflearn__Python-Crawling/02_naver_news_json.py","file_name":"02_naver_news_json.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"24551176637","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n############################################################\n# Created on: 2018-05-03\n# Author: Joe Aaron\n# Email: pant333@163.com\n# Description: 当用户输入敏感词语,则用 星号 * 替换,例如当用户输入「北京是个好城市」,则变成「**是个好城市」。\n############################################################\nimport string\n\nword_filter=set()\n\nwith open('source/0011/filtered_words.txt') as f:\n for w in f.readlines():\n word_filter.add(w.strip())\n \nwhile True:\n s=input()\n if s == 'exit':\n break\n for w in word_filter:\n if w in s:\n s= s.replace(w,'*'*len(w))\n print(s)\n ","repo_name":"joeaaron/LearningPython","sub_path":"Practice/练习册/0012.py","file_name":"0012.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"10129814797","text":"from minaombud.client import MinaOmbudClient\nfrom minaombud.crypto.jwkset import JwkSet\nfrom minaombud.defaults import (\n MINA_OMBUD_API_CLIENT_ID,\n MINA_OMBUD_API_CLIENT_SECRET,\n MINA_OMBUD_API_TOKEN_URL,\n MINA_OMBUD_API_URL,\n MINA_OMBUD_SAMPLE_AUDIENCE,\n MINA_OMBUD_SAMPLE_ISSUER,\n MINA_OMBUD_SAMPLE_KEYS,\n MINA_OMBUD_SAMPLE_USER_DB,\n MINA_OMBUD_TREDJE_MAN\n)\nfrom minaombud.model import Identitetsbeteckning\nfrom minaombud.user import (\n create_user_token,\n load_user_database\n)\n\n\ndef _load_users():\n with open(MINA_OMBUD_SAMPLE_USER_DB) as f:\n return load_user_database(f)\n\n\nKEYS = JwkSet.load(MINA_OMBUD_SAMPLE_KEYS)\nUSERS = _load_users()\n\n\ndef new_user_token(u: str):\n return create_user_token(u, jwks=KEYS, users=USERS,\n audience=MINA_OMBUD_SAMPLE_AUDIENCE,\n issuer=MINA_OMBUD_SAMPLE_ISSUER)\n\n\ndef new_client():\n return MinaOmbudClient(service=\"test_client.py\", scope=\"user:self\",\n client_id=MINA_OMBUD_API_CLIENT_ID,\n client_secret=MINA_OMBUD_API_CLIENT_SECRET,\n url=MINA_OMBUD_API_URL,\n token_url=MINA_OMBUD_API_TOKEN_URL)\n\n\ndef test_sok_fullmakter():\n client = new_client()\n user_token = new_user_token(\"198602262381\")\n response = client.sok_fullmakter(tredjeman=MINA_OMBUD_TREDJE_MAN,\n fullmaktshavare=Identitetsbeteckning.from_id(\"198602262381\"),\n user_token=user_token)\n assert isinstance(response.fullmakter, list)\n\n\ndef test_sok_behorigheter():\n client = new_client()\n user_token = new_user_token(\"198602262381\")\n response = client.sok_behorigheter(tredjeman=MINA_OMBUD_TREDJE_MAN,\n fullmaktshavare=Identitetsbeteckning.from_id(\"198602262381\"),\n user_token=user_token)\n assert isinstance(response.kontext, list)\n","repo_name":"bolagsverket/mina-ombud-samples","sub_path":"python/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"8653822560","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n\n\nlr = 0.002\nepochs = 100\npredict_need = True\n\n\nclass TitanicData(Dataset):\n def __init__(self, filepath):\n # 不取‘Age‘是因为’Age‘有的sample缺省,干脆就不取了\n x_features = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare']\n y_features = ['Survived']\n\n data = pd.read_csv(filepath)\n self.len = data.shape[0]\n\n # get_dummies是one_hot编码,实际上x_data的维度从6变成了7\n # x和y都采取data[features]的形式是为了使y也是矩阵,与x的shape相同\n self.x_data = torch.from_numpy(np.array(pd.get_dummies(data[x_features])))\n self.y_data = torch.from_numpy((np.array(data[y_features])))\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, index):\n return self.x_data[index], self.y_data[index]\n\n\nfilepath = r'../data/titanic/train.csv'\ntrain_set = TitanicData(filepath)\ntrain_loader = DataLoader(dataset=train_set, batch_size=32, shuffle=True, num_workers=0)\n\n\nclass LogisticRegression(torch.nn.Module):\n def __init__(self):\n super(LogisticRegression, self).__init__()\n self.linear1 = torch.nn.Linear(6, 3)\n self.linear2 = torch.nn.Linear(3, 1)\n self.sigmoid = torch.nn.Sigmoid()\n\n def forward(self, x):\n x = self.sigmoid(self.linear1(x))\n x = self.sigmoid(self.linear2(x))\n return x\n\n def predict(self, x):\n with torch.no_grad():\n x = self.sigmoid(self.linear1(x))\n x = self.sigmoid(self.linear2(x))\n print(x.shape)\n y = []\n for i in x:\n y.append(1 if i >= 0.5 else 0)\n return y\n\n\nmodel = LogisticRegression()\n\n\ncriterion = torch.nn.BCELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=lr)\n\n\ndef show_lossimage(epoch_list, loss_list):\n plt.plot(epoch_list, loss_list)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.show()\n\n\nif __name__ == '__main__':\n epoch_list = []\n loss_list = []\n for epoch in range(epochs):\n epoch_loss = 0\n for i, data in enumerate(train_loader):\n x, y = data\n x = x.float() # 要进行数据类型转换,否则会报错,但是是为什么呢?\n y = y.float()\n y_hat = model(x)\n\n loss = criterion(y_hat, y)\n\n loss.backward()\n epoch_loss += loss.item()\n\n optimizer.step()\n optimizer.zero_grad()\n epoch_loss /= (i+1)\n print('epoch={}, loss={}'.format(epoch+1, epoch_loss))\n epoch_list.append(epoch+1)\n loss_list.append(epoch_loss)\n\n show_lossimage(epoch_list, loss_list)\n\n # predict\n if predict_need:\n # prepare the test dataset\n test_filepath = r'../data/titanic/test.csv'\n test_data = pd.read_csv(test_filepath)\n x_features = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare']\n # 数据类型经历df, np.ndarray, tensor\n x = torch.from_numpy(np.array(pd.get_dummies(test_data[x_features])))\n\n # get the prediction\n y_pred = model.predict(x.float())\n\n # save the prediction in csv\n outputs = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_pred})\n outputs.to_csv(r'../TitanicPredict.csv', index=False) # False表示不保存索引","repo_name":"LennonLau/PyTorch-practice","sub_path":"chapter8_Titanic.py","file_name":"chapter8_Titanic.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"29415414863","text":"import pandas as pd\n\n'''data=pd.read_csv('Carcount.csv')\ndata=data.sort_values(by='count', ascending=False)\nprint(data)\n\ndf=data[data['count']>12]\nprint(df)\n# df.to_csv('maxcarcaount_3.csv')\n# df['xmax']-df['xmin'] > threshold\n# df[df['JobTitle'].value_counts()<2])'''\n\ndata = pd.read_csv(\"/home/mayank-s/PycharmProjects/Datasets/Berkely_DeepDrive/berkely_train.csv\")\n# data=pd.read_csv('/home/mayank-s/Desktop/Link to Datasets/aptiveBB/reddy.csv')\nprint(data.head())\nprint(data.groupby('class').count())\n\n# this is done to remove if xmin==xmax and ymin==yamax(which is actuallly wrong)\ndf = data[(data['xmin'] != data['xmax']) & (data['ymin'] != data['ymax'])]\n\nprint(df.head())\n\n# df.to_csv(\"berkely_After_filter.csv\")\n\n# this is most important funtion to count no of class in group\nnew = data.groupby(['filename'])['class'].count()\n\ngb = data.groupby(['filename'])\ngrouped_C = gb['class']\nn = data.groupby(['filename', 'class'])\na = (n.size())\nprint(a)\ngv = a.index[0]\n\nfor file_name, (cls) in enumerate(a):\n print(file_name)\n print(cls)\nnew1 = data.groupby(['filename', 'class'])['xmin'] # .count()\n\n# b=data.groupby(level=['filname', 'class']).sum()\n\n\nmydata = data.groupby('filename')\nprint(data.groupby('class').count())\nlen_group = mydata.ngroups\n# index=mydata.groups['car'].values\nmygroup = mydata.groups\n\n# new=data.groupby(['filename', 'class'])#['car'].count()\n\n# this is most important funtion to count no of class in group\nnew = data.groupby(['filename'])['class'].count()\n\nfor da in mygroup.keys():\n index = mydata.groups[da].values\n for read_index in index():\n print(index)\n print(da)\n break\n\n'''for da in mydata.ngroups():\n\n index = mydata.groups['car'].values\n mydata.groups['0124dfa6-385f1b58'].values\n print(da)'''\n\n# index=mydata.groups['car'].values\n'''pyindex=np.random.choice(index, size=10000, replace=False)\ndata.drop(data.index[pyindex],inplace=True)\nprint(data.groupby('class').count())\n\ndf=data.replace(\"motor\", \"cool\")\ndf=df.replace(\"bike\", \"cool\")\ndf=df.replace(\"cool\", \"motorbike\")\ndf=df.replace(\"traffic light\", \"traffic_light\")\ndf=df.replace(\"traffic sign \", \"traffic_sign \")\nprint(df.groupby('class').count())\ndata.to_csv(\"berkely_train_new_1.csv\")\nprint(1)'''\n","repo_name":"mayanks888/AI","sub_path":"Python/python_code/more data prepocessing.py","file_name":"more data prepocessing.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"17233001959","text":"import pandas as pd\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.impute import SimpleImputer\n\n# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\n# load the data\ndata = pd.read_csv('C:\\\\Users\\\\ehold\\\\Desktop\\\\Folders\\\\Datasets\\\\melb_data.csv')\n\n# Select target\ny = data.Price\n\n# To keep things simple, we'll use only numerical predictors\nmelb_predictors = data.drop(['Price'], axis=1)\nX = melb_predictors.select_dtypes(exclude=['object'])\n\n# Divide data into training and validation subsets\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,\n random_state=0)\ndef score_dataset(X_train, X_valid, y_train, y_valid):\n model = RandomForestRegressor(n_estimators=10, random_state=0)\n model.fit(X_train, y_train)\n preds = model.predict(X_valid)\n return mean_absolute_error(y_valid, preds)\n\ndef missingValues(X_train, X_valid, y_train, y_valid):\n # there are 3 ways to deal with missing values\n # drop columns with missing values - downside is model loses access to alot of potential information\n # imputation - filling in missing values with some number - i.e. the mean of the column - usually leads to a more accurate model than dropping\n # extending imputation - impute the missing values, add a new column to make not of the imputed entries w/ true/false\n\n # examining data\n # Shape of training data (num_rows, num_columns)\n print(X_train.shape)\n\n # Number of missing values in each column of training data\n missing_val_count_by_column = (X_train.isnull().sum())\n print(missing_val_count_by_column[missing_val_count_by_column > 0])\n\n cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]\n\n approach_to_use = 3\n\n if approach_to_use == 1:\n # first approach - dropping missing values\n reduced_X_train = X_train.drop(cols_with_missing, axis=1)\n reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)\n\n print(\"MAE from Approach 1 (Drop columns with missing values):\")\n print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))\n elif approach_to_use == 2:\n # second approach - imputing missing values with the mean of each column with simple imputer\n my_imputer = SimpleImputer()\n imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))\n imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))\n\n # Imputation removed column names; put them back\n imputed_X_train.columns = X_train.columns\n imputed_X_valid.columns = X_valid.columns\n\n print(\"MAE from Approach 2 (Imputation):\")\n print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))\n elif approach_to_use == 3:\n # third approach - imputing missing values with the mean of each column then making a new column to mark the rows imputed\n # Make copy to avoid changing original data (when imputing)\n X_train_plus = X_train.copy()\n X_valid_plus = X_valid.copy()\n\n # Make new columns indicating what will be imputed\n for col in cols_with_missing:\n X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()\n X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()\n\n # Imputation\n my_imputer = SimpleImputer()\n imputed_X_train_plus = 
pd.DataFrame(my_imputer.fit_transform(X_train_plus))\n imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))\n\n # Imputation removed column names; put them back\n imputed_X_train_plus.columns = X_train_plus.columns\n imputed_X_valid_plus.columns = X_valid_plus.columns\n\n print(\"MAE from Approach 3 (An Extension to Imputation):\")\n print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid))\n\n\n\nmissingValues(X_train, X_valid, y_train, y_valid)\n\n","repo_name":"eric-holdener/IntermediateMachineLearning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"11032522453","text":"from ctypes import cdll\nfrom _ctypes import CFuncPtr\n\n\n\ndef make_dll_meta(dll_path):\n class DllMeta(type):\n def __new__(mcs, what, bases, attr_dict):\n import sys\n sys.stderr.write('欢迎使用\\n')\n cls = super().__new__(mcs, what, bases, attr_dict)\n dll = cdll.LoadLibrary(dll_path)\n for f_name, f in vars(cls).items():\n if not callable(f):\n continue\n if hasattr(dll, f_name) and isinstance(getattr(dll, f_name), CFuncPtr):\n setattr(cls, f_name, staticmethod(getattr(dll, f_name)))\n cls.__dll = dll\n return cls\n\n # def close(cls):\n # if hasattr(cls, \"close\"):\n # cls.close(cls)\n #\n # def __del__(self):\n # self.close()\n\n return DllMeta\n","repo_name":"ItGarbager/aimcf_yolov5","sub_path":"utils/now/dll_meta.py","file_name":"dll_meta.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"48"}
+{"seq_id":"23175997382","text":"from django.http import HttpRequest\nfrom django.shortcuts import render\nfrom utils.decorators import permission_checker_decorator_factory\nfrom car_repair_request.models import CarRepairRequest\n\n# Create your views here.\nfrom django.views.generic import FormView\n\n\n@permission_checker_decorator_factory()\ndef index(request: HttpRequest):\n current_user_id = request.user.id\n car_repairs = CarRepairRequest.objects.filter(user_id=current_user_id).all()\n repaired = CarRepairRequest.objects.filter(user_id=current_user_id, is_fixed=True).all()\n working = CarRepairRequest.objects.filter(user_id=current_user_id, is_fixed=False).all()\n print(request.user.username)\n context = {\n 'repairs': car_repairs,\n 'repaired': repaired,\n 'working': working\n }\n return render(request, 'home_module/index.html', context)\n\n\n","repo_name":"mesutfd/repair-it","sub_path":"home_module/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37237068602","text":"\nclass LinkedList:\n def __init__(self, data):\n self.data = data\n self.next = None\n \n def append_node(self, data):\n if self.next == None:\n self.next = LinkedList(data)\n else:\n self.next.append_node(data)\n \n def append_nodes(self, dataList):\n for data in dataList:\n self.append_node(data)\n return self\n\n def insert_node(self, data, position):\n if position == 0:\n new_node = LinkedList(data)\n new_node.next = self\n return new_node\n\n elif position == 1:\n new_node = LinkedList(data)\n new_node.next = self.next\n self.next = new_node\n return self\n\n else:\n if self.next is None:\n raise ValueError(\"Position out of range\")\n self.next = self.next.insert_node(data, position - 1)\n return self\n \n def remove_node(self, data):\n if self.data == data:\n return self.next\n else:\n if self.next is None:\n raise ValueError(\"Data not in list\")\n self.next = self.next.remove_node(data)\n return self\n \n def search_node(self, data):\n if self.data == data:\n return True\n else:\n if self.next is None:\n return False\n return self.next.search_node(data)\n \n def get_list(self):\n print(self.data, end=\", \")\n if self.next != None:\n self.next.get_list()\n\n\nclass BinarySearchTree:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def add_value(self, data):\n if data < self.data:\n if self.left is None:\n self.left = BinarySearchTree(data)\n else:\n self.left.add_value(data)\n elif data > self.data:\n if self.right is None:\n self.right = BinarySearchTree(data)\n else:\n self.right.add_value(data)\n else:\n print(\"Value already in tree\")\n\n def add_values(self, dataList):\n for data in dataList:\n self.add_value(data)\n return self\n\n def remove_value(self, data):\n if data < self.data:\n if self.left is None:\n raise ValueError(\"Data not in tree\")\n self.left = self.left.remove_value(data)\n return self\n elif data > self.data:\n if self.right is None:\n raise ValueError(\"Data not in tree\")\n self.right = self.right.remove_value(data)\n return self\n else:\n if self.left is None and self.right is None:\n return None\n elif self.left is None:\n return self.right\n elif self.right is None:\n return self.left\n else:\n self.data = self.right.get_min()\n self.right = self.right.remove_value(self.data)\n return self\n \n def get_min(self):\n if self.left is None:\n return self.data\n else:\n return self.left.get_min()\n \n def search_value(self, data):\n if data < self.data:\n if self.left is None:\n return False\n return self.left.search_value(data)\n elif data > self.data:\n if self.right is None:\n return False\n return self.right.search_value(data)\n else:\n return True\n \n def get_tree(self):\n print(self.data, end=\", \")\n if self.left != None:\n self.left.get_tree()\n if self.right != None:\n self.right.get_tree()\n\n\nfrom HashMap import HashMap\nfrom Array import Array\nfrom random import randint\nfrom time import time\n\nif __name__ == \"__main__\":\n print(\"\\nTHIS IS THE DATA STRUCTURE COMPARATIVE STUDY\")\n print(\"The available data structures are LinkedLists, Binary Search Trees, Hash Maps, and Arrays.\")\n size = int(input(\"\\nPlease input the size of these data structures: \"))\n items = set()\n while len(items) < size:\n items.add(randint(0, size*2))\n items = list(items)\n\n print(\"\\nCreating Data Structures...\")\n linked_list = LinkedList(items[0])\n linked_list.append_nodes(items[1:])\n\n binary_search_tree = BinarySearchTree(items[0])\n 
binary_search_tree.add_values(items[1:])\n\n hash_map = HashMap()\n hash_map.add_values(items)\n\n array = Array(items)\n print(\"Done!\")\n\n while True:\n choice = int(input(\"\"\"\\nNext, please enter a function you would like to benchmark for each of the data structures:\n1. Insert (at beginning)\n2. Search\n3. Remove\n4. Print\n5. Exit\n\nChoice: \"\"\"))\n \n if choice == 1:\n ele = int(input(\"Please enter the element you would like to insert: \"))\n print(\"Benchmarking...\")\n start = time()\n linked_list = linked_list.insert_node(ele, 0)\n end = time()\n print(f\"Linked List: {end - start}\")\n\n start = time()\n binary_search_tree.add_value(ele)\n end = time()\n print(f\"Binary Search Tree: {end - start}\")\n\n start = time()\n hash_map.add_value(ele)\n end = time()\n print(f\"Hash Map: {end - start}\")\n\n start = time()\n array.insert_value(ele)\n end = time()\n print(f\"Array: {end - start}\")\n\n elif choice == 2:\n ele = int(input(\"Please enter the element you would like to search for: \"))\n print(\"Benchmarking...\")\n start = time()\n linked_list.search_node(ele)\n end = time()\n print(f\"Linked List: {end - start}\")\n\n start = time()\n binary_search_tree.search_value(ele)\n end = time()\n print(f\"Binary Search Tree: {end - start}\")\n\n start = time()\n hash_map.search_value(ele)\n end = time()\n print(f\"Hash Map: {end - start}\")\n\n start = time()\n array.search_value(ele)\n end = time()\n print(f\"Array: {end - start}\")\n\n elif choice == 3:\n ele = int(input(\"Please enter the element you would like to remove: \"))\n print(\"Benchmarking...\")\n start = time()\n linked_list = linked_list.remove_node(ele)\n end = time()\n print(f\"Linked List: {end - start}\")\n\n start = time()\n binary_search_tree.remove_value(ele)\n end = time()\n print(f\"Binary Search Tree: {end - start}\")\n\n start = time()\n hash_map.remove_value(ele)\n end = time()\n print(f\"Hash Map: {end - start}\")\n\n start = time()\n array.remove_value(ele)\n end = time()\n print(f\"Array: {end - start}\")\n\n elif choice == 4:\n print(\"\\nThe data structures currently are:\")\n print(f\"Linked List: \")\n linked_list.get_list()\n print(\"None\")\n\n print(f\"Binary Search Tree: \")\n binary_search_tree.get_tree()\n print(\"End\")\n\n hash_map.print_hash_map_readable()\n array.print_array()\n\n else:\n exit(1)","repo_name":"AdityaHegde712/Compiler-Design-problems","sub_path":"Activity 8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"39606762170","text":"import sys\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication\nfrom GuiTests.Gui.Layouts import LAbsolute, LBoxLayout, LQGridLayout\n\nclass Example(QWidget):\n\n def __init__(self):\n super().__init__()\n layout = LQGridLayout.LQGridLayout()\n\n layout.__initUI__(self)\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())","repo_name":"kmietek/ExchangeSpy","sub_path":"GuiTests/Gui/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28417664189","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.nn import KLDivLoss\nimport os\nimport itertools\nfrom scipy import stats\nimport numpy as np\nimport seaborn as sns\nimport difflib\nfrom itertools import chain\nfrom evodiff.utils import extract_seq_a3m, csv_to_dict, normalize_list, removekey, get_matrix, get_pairs, normalize_matrix, \\\n get_pairwise\n\ndef aa_reconstruction_parity_plot(project_dir, out_path, generate_file, msa=False, idr=False, gen_file=True,\n start_valid=False, start_query=False, start_msa=False):\n \"Parity plots for generated vs test (for sequence models) or valid (for MSA models)\"\n # Load in approx train distribution\n idr_flag = \"\"\n # Eliminate BXJOU for KL since they occur at 0 freq in test dataset\n keys_to_remove = ['B', 'Z', 'J', 'O', 'U'] #, '-']\n if msa:\n if start_valid:\n if start_query:\n valid_file = 'valid_msas_onlymsa.txt'\n elif start_msa:\n valid_file = 'valid_msas_onlyquery.txt'\n keys_to_remove += ['-']\n else:\n valid_file = 'valid_msas.a3m'\n valid_file = out_path + '/' + valid_file\n #print(valid_file)\n aminos = csv_to_dict(valid_file)\n values = list(aminos.values())\n else:\n file = project_dir + 'ref/openfold_ref.csv'\n else:\n file = project_dir + 'ref/uniref50_aa_ref_test.csv' # TODO add file to git\n #print(file)\n if idr:\n idr_flag = 'idr_'\n true_file = out_path + 'data_idr.csv'\n aminos = csv_to_dict(true_file)\n values = aminos.values()\n #print(aminos, values)\n elif not idr and not start_valid:\n df = pd.read_csv(file)\n aminos = df.to_dict('list')\n values = [each[0] for each in aminos.values()]\n if gen_file:\n gen_flag = ''\n # Load in generated seqs and count values\n generate_file = out_path + generate_file\n aminos_gen = csv_to_dict(generate_file)\n #print(\"aminos gen\", aminos_gen)\n else:\n gen_flag = '_train_only'\n # Normalize scores\n a = normalize_list(values) # normalize(list(aminos.values()))\n if start_valid:\n a_kl = normalize_list(list(removekey(aminos, keys_to_remove).values()))\n else:\n #print(aminos)\n a_kl = normalize_list([each[0] for each in removekey(aminos, keys_to_remove).values()])\n if gen_file:\n b_list = list(aminos_gen.values())\n b = normalize_list(b_list) # ADD GAPS IN\n # Save KL to file\n kl_loss = KLDivLoss(reduction=\"sum\")\n if msa:\n b_kl = normalize_list(list(removekey(aminos_gen, keys_to_remove).values()))\n #print(len(a_kl), len(b_kl))\n #print(a_kl, b_kl)\n kl = kl_loss(torch.tensor(a_kl).log(), torch.tensor(b_kl)).item()\n else:\n if idr:\n b_kl = torch.tensor(b[0:20])\n kl = kl_loss(torch.tensor(a[0:20]).log(), torch.tensor(b[0:20])).item()\n else:\n b_kl = torch.tensor(b[0:21])\n kl = kl_loss(torch.tensor(a[0:21]).log(), torch.tensor(b[0:21])).item()\n print(\"KL\", kl)\n with open(out_path + idr_flag + 'generate_metrics.csv', 'w') as f:\n f.write(\"aa freq kl:\" + str(kl))\n f.close()\n kl_label = \"$KL$=%.3f\" % (kl)\n\n # Plot\n colors = ['black', 'grey', 'lightcoral', 'brown', 'tomato', 'peru',\n 'darkorange', 'goldenrod', 'khaki', 'olive', 'yellow', 'olivedrab',\n 'yellowgreen', 'palegreen', 'forestgreen', 'turquoise', 'paleturquoise',\n 'cyan', 'deepskyblue', 'dodgerblue', 'royalblue', 'navy', 'blue',\n 'darkslateblue', 'mediumpurple', 'darkviolet', 'violet', 'mediumvioletred',\n 'crimson', 'lightpink']\n fig, ax = plt.subplots(figsize=(3, 2.5))\n annotations = list(aminos_gen.keys())[0:len(a)]\n plt.axline([0, 0], [0.1, 0.1], c='k', linestyle='dotted', alpha=0.75)\n for i, label in enumerate(annotations):\n 
plt.scatter(a[i], b[i], label=label, c=colors[i], edgecolors='k')\n ax.text(0.05, 0.95, kl_label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.xlabel(\"Test Freq\", fontweight='bold')\n plt.ylabel(\"Gen Freq\", fontweight='bold')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, idr_flag+'parity_scatter.svg'))\n fig.savefig(os.path.join(out_path, idr_flag+'parity_scatter.png'))\n plt.close()\n if not gen_file:\n return a # return train probability distribution\n\n\ndef msa_substitution_rate(generated_msa, train_msa, alphabet, out_path):\n \"Plot substitution rates for generated MSAs\"\n print(alphabet, \"len: \", len(alphabet))\n all_aa = np.arange(len(alphabet))\n all_aa_pairs = list(itertools.product(all_aa, all_aa))\n\n all_pairs_train = get_pairs(train_msa, alphabet)\n train_matrix = get_matrix(all_pairs_train, all_aa_pairs, alphabet)\n print(\"train len\", len(all_pairs_train))\n train_table, train_vals, train_diag_vals = normalize_matrix(train_matrix.T, alphabet)\n\n all_pairs_gen = get_pairs(generated_msa, alphabet)\n print(\"gen len\", len(all_pairs_gen))\n gen_matrix = get_matrix(all_pairs_gen, all_aa_pairs, alphabet)\n gen_table, gen_vals, gen_diag_vals = normalize_matrix(gen_matrix.T, alphabet)\n\n # Plot substitution data as heatmaps\n vmax = 0.4\n fig, ax = plt.subplots(figsize=(3, 2.5))\n sns.heatmap(train_table, annot=False, cmap='Greens', vmin=0, vmax=vmax, ax=ax)\n ax.set_title('Train Substitution Freq', weight='bold', fontsize=14)\n fig.savefig(os.path.join(out_path, 'train_heatmap.svg'))\n fig.savefig(os.path.join(out_path, 'train_heatmap.png'))\n\n fig, ax = plt.subplots(figsize=(3, 2.5))\n sns.heatmap(gen_table, annot=False, cmap='Greens', vmin=0, vmax=vmax, ax=ax)\n ax.set_title('Gen Substitution Freq', weight='bold', fontsize=14)\n fig.savefig(os.path.join(out_path, 'gen_heatmap.svg'))\n fig.savefig(os.path.join(out_path, 'gen_heatmap.png'))\n\n # Plot substitution parity per AA\n fig, axes = plt.subplots(6, 5, figsize=(12, 15))\n for i, ax in enumerate(axes.ravel()[:len(alphabet)]):\n r_squared = stats.pearsonr(train_vals[i, :], gen_vals[i, :]).statistic\n label = \"$R$=%.2f\" % (r_squared)\n # mse = mean_squared_error(train_vals[i,:], gen_vals[i,:])\n # label = \"$mse$=%0.2f\"%(mse)\n ax.set_title(alphabet[i], fontsize=14, weight='bold')\n ax.plot([0, vmax], [0, vmax], linewidth=1, color='black', linestyle=\"--\")\n ax.scatter(train_vals[i, :], gen_vals[i, :], color='blue',\n linewidth=0, alpha=1)\n ax.scatter(train_vals[i, i], gen_vals[i, i], color='red',\n linewidth=0, alpha=1)\n # plt.scatter(train_diag_vals, gen_diag_vals, color='red', s=8, linewidth=0, label=\"Same AA\", alpha=0.5)\n ax.set_xlabel(\"True AA Substitution Rate\")\n ax.set_ylabel(\"Gen AA Substitution Rate\")\n # ax.legend(loc='upper left', frameon=False, handlelength=0, handletextpad=0)\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n subplots = 6 * 5\n for j in range(subplots - len(alphabet)):\n fig.delaxes(axes.ravel()[subplots - (j + 1)])\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'substitution_per_AA.svg'))\n fig.savefig(os.path.join(out_path, 'substitution_per_AA.png'))\n\n\n # Plot for all data\n fig, ax = plt.subplots(figsize=(3, 2.5))\n r_squared = stats.pearsonr(train_vals.flatten(), gen_vals.flatten()).statistic\n label = \"$R$=%.2f\" % (r_squared)\n plt.scatter(train_vals, gen_vals, color='blue', linewidth=0, label=\"$R^2$=%.2f\" % (r_squared), alpha=0.5)\n plt.plot([0, vmax], [0, vmax], 
linewidth=1, color='black', linestyle=\"--\")\n plt.xlabel(\"True AA Substitution Rate\")\n plt.ylabel(\"Gen AA Substitution Rate\")\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'substitution_nondiag.svg'))\n fig.savefig(os.path.join(out_path, 'substitution_nondiag.png'))\n\n # Plot only same AA substitutions\n fig, ax = plt.subplots(figsize=(3, 2.5))\n r_squared = stats.pearsonr(train_diag_vals, gen_diag_vals).statistic\n label = \"$R$=%.2f\" % (r_squared)\n plt.scatter(train_diag_vals, gen_diag_vals, color='red', linewidth=0, label=\"$R^2$=%.2f\" % (r_squared), alpha=1)\n plt.plot([0, vmax], [0, vmax], linewidth=1, color='black', linestyle=\"--\")\n plt.xlabel(\"True AA Substitution Rate\")\n plt.ylabel(\"Gen AA Substitution Rate\")\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'substitution_diag.svg'))\n fig.savefig(os.path.join(out_path, 'substitution_diag.png'))\n\ndef msa_pairwise_interactions(generated_msa, train_msa, all_aa, out_path): # Look at AA pairwise interactions within each MSA within each sample\n \"Pairwise plots for MSAs\"\n all_aa_pairs = list(itertools.product(all_aa, all_aa))\n all_aa_dict = {''.join(k): 1 for k in all_aa_pairs}\n all_aa_dict = {k: all_aa_dict[k] for k in sorted(all_aa_dict.keys())}\n\n all_pairs_train = get_pairwise(train_msa, all_aa)\n\n count_map_train = {}\n for i in all_pairs_train:\n i = ''.join(i)\n count_map_train[i] = count_map_train.get(i, 0) + 1\n\n for aa_pair in all_aa_dict.keys():\n if aa_pair not in count_map_train.keys():\n count_map_train[aa_pair] = 0\n\n train_dict = {k: count_map_train[k] for k in sorted(count_map_train.keys())}\n total_train = sum(train_dict.values())\n for k in train_dict.keys():\n train_dict[k] = train_dict[k] / total_train\n\n all_pairs_gen = get_pairwise(generated_msa, all_aa)\n\n count_map_gen = {}\n for i in all_pairs_gen:\n i = ''.join(i)\n count_map_gen[i] = count_map_gen.get(i, 0) + 1\n\n for aa_pair in all_aa_dict.keys():\n if aa_pair not in count_map_gen.keys():\n count_map_gen[aa_pair] = 0\n\n gen_dict = {k: count_map_gen[k] for k in sorted(count_map_gen.keys())}\n total_gen = sum(gen_dict.values())\n for k in gen_dict.keys():\n gen_dict[k] = gen_dict[k] / total_gen\n\n train_vals = list(train_dict.values())\n gen_vals = list(gen_dict.values())\n\n r_squared = stats.pearsonr(train_vals, gen_vals).statistic\n\n fig, ax = plt.subplots(figsize=(3, 2.5))\n label = \"$R$=%.2f\" % (r_squared)\n plt.plot([0, 0.02], [0, 0.02], linewidth=1, color='black', linestyle=\"--\")\n plt.scatter(train_vals, gen_vals, color='blue', linewidth=0, alpha=0.5) # marker = alpha\n plt.xlabel(\"True Parwise Interactions\")\n plt.ylabel(\"Gen Parwise Interactions\")\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'pairwise.svg'))\n fig.savefig(os.path.join(out_path, 'pairwise.png'))\n\ndef plot_tmscores(tmscore_path, out_path, y_min=0, y_max=30):\n \"TMscores for conditionally generated sequences, given MSAs\"\n tmscores = pd.read_csv(tmscore_path, names=['scores'])\n fig, ax = plt.subplots(figsize=(3, 2.5))\n sns.histplot(tmscores['scores'], color='blue')\n plt.xlabel('TM Scores')\n plt.xlim(0, 1)\n plt.ylim(y_min,y_max)\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'tmscores.svg'))\n 
fig.savefig(os.path.join(out_path, 'tmscores.png'))\n\ndef plot_perp_group_masked(df, save_name, mask='mask'):\n \"Plot perplexity computed from Masked models, binned by % of sequence masked \"\n bins = np.arange(0, 1.1, 0.1)\n df['binned'] = pd.cut(df['time'], bins)\n group = df.groupby(pd.cut(df['time'], bins))\n plot_centers = (bins[:-1] + bins[1:]) / 2\n plot_values = np.exp(group['loss'].sum()/group['tokens'].sum())\n fig, ax = plt.subplots(figsize=(3, 2.5))\n plt.plot(plot_centers*100, plot_values, c='b', marker='o')\n ax.set_xticks([100, 80, 60, 40, 20, 0])\n if mask=='causal-mask':\n plt.gca().invert_xaxis()\n plt.xlabel('% Sequence')\n else:\n ax.set_xticks([0, 20, 40, 60, 80, 100])\n plt.xlabel('% Masked')\n plt.ylabel('Perplexity')\n plt.ylim(0,25)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/perp_'+save_name+'.png'))\n\ndef plot_perp_group_d3pm(df, save_name):\n \"Plot perplexity computed from D3PM models, binned by timestep intervals\"\n bins = np.arange(0, 550, 50)\n df['binned'] = pd.cut(df['time'], bins)\n group = df.groupby(pd.cut(df['time'], bins))\n plot_centers = (bins[:-1] + bins[1:]) / 2\n plot_values = np.exp(group['loss'].sum()/group['tokens'].sum())\n fig, ax = plt.subplots(figsize=(3, 2.5))\n plt.plot(plot_centers, plot_values, c='b', marker='o')\n ax.set_xticks([0, 100, 200, 300, 400, 500])\n plt.xlabel('Timestep')\n plt.ylabel('Perplexity')\n plt.ylim(0, 25)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/perp_' + save_name + '.png'))\n\n\ndef plot_ecdf_bylength(perp_groups, colors, labels, seq_lengths, metric='perp', model='esm-if'):\n \"Plots cumulative density as a function of sequence length\"\n fig, ax = plt.subplots(1,4, figsize=(8.,2.5), sharey=True, sharex=True)\n for j, perp_group in enumerate(perp_groups):\n for i,p in enumerate(perp_group):\n c=colors[j]\n sns.ecdfplot(x=p,\n label=labels[j],\n color=c,\n alpha=1,\n ax=ax[i])\n if metric=='perp':\n ax[i].set_xlabel(model+' Perplexity')\n elif metric=='plddt':\n ax[i].set_xlabel(model+' pLDDT')\n ax[i].set_title(\"seq length=\"+str(seq_lengths[i]))\n ax[i].axvline(x=np.mean(perp_groups[0][i]), c='k', ls='--', lw=0.75)\n ax[-1].legend(fontsize=8, loc='upper left')\n if model == 'ESM-IF':\n plt.xlim(0, 25)\n elif model == 'MPNN':\n plt.xlim(0, 6)\n elif model == 'Omegafold':\n plt.xlim(10, 100)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_'+metric+'_bylength_'+model+'.svg'))\n fig.savefig(os.path.join('plots/sc_'+metric+'_bylength_'+model+'.png'))\n\ndef plot_sc_boxplot(perp_groups, colors, labels, metric='perp', model='ESM-IF', length_model='small', legend=False):\n fig, ax = plt.subplots(1, 1, figsize=(3,3.5), sharey=True, sharex=True)\n all_perp = []\n all_names = []\n all_colors = []\n for i, perp_group in enumerate(perp_groups):\n [all_perp.append(item) for item in list(chain.from_iterable(perp_group))]\n [all_names.append(labels[i]) for _ in range(len(list(chain.from_iterable(perp_group))))]\n all_colors.append(colors[i])\n\n df = pd.DataFrame()\n df['value'] = all_perp\n df['names'] = all_names\n sns.boxplot(data=df, x=\"names\", y=\"value\", ax=ax, palette=all_colors)\n\n ax.axhline(y=np.median(list(chain.from_iterable(perp_groups[0]))), c='k', ls='--', lw=0.75)\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')\n\n if legend:\n ax.legend()\n if model == 'ESM-IF':\n ax.set_ylim(0, 25)\n elif model == 'MPNN':\n ax.set_ylim(0, 6)\n elif model == 'Omegafold':\n ax.set_ylim(10, 100)\n plt.tight_layout()\n 
fig.savefig(os.path.join('plots/sc_' + metric + '_' + model + '_' + length_model + '.svg'))\n fig.savefig(os.path.join('plots/sc_' + metric + '_' + model + '_' + length_model + '.png'))\n\ndef plot_ecdf(perp_groups, colors, labels, metric='perp', model='ESM-IF', length_model='small', legend=False):\n \"Plot cumulative density plot of plddt, or perp scores for each set of gen sequences\"\n fig, ax = plt.subplots(1,1, figsize=(2.5,2.5), sharey=True, sharex=True)\n for i, perp_group in enumerate(perp_groups):\n c = colors[i]\n all_perp = list(chain.from_iterable(perp_group))\n sns.ecdfplot(x=all_perp,\n label=labels[i],\n color=c,\n alpha=1,\n ax=ax)\n if metric == 'perp':\n ax.set_xlabel(model + ' Perplexity')\n elif metric == 'plddt':\n ax.set_xlabel(model + ' pLDDT')\n ax.set_title(\"all sequences\")\n ax.axvline(x=np.mean(list(chain.from_iterable(perp_groups[0]))), c='k', ls='--', lw=0.75)\n if legend:\n ax.legend()\n if model=='ESM-IF':\n ax.set_xlim(0,25)\n elif model == 'MPNN':\n ax.set_xlim(0,6)\n elif model == 'Omegafold':\n ax.set_xlim(10, 100)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_'+metric+'_'+model+'_'+length_model+'.svg'))\n fig.savefig(os.path.join('plots/sc_'+metric+'_'+model+'_'+length_model+'.png'))\n\ndef plot_plddt_perp(ordered_plddt_group, ordered_perp_group, idx, colors, labels, perp_model='ESM-IF', length_model='small'):\n \"Plot pLDDT vs Perplexity for each set of generated sequences against train data\"\n fig, ax = plt.subplots(1, 1, figsize=(3, 2.5), sharey=True, sharex=True)\n plt.scatter(ordered_plddt_group[0], ordered_perp_group[0], c=colors[0], s=20, alpha=1, label=labels[0], edgecolors='grey')\n plt.scatter(ordered_plddt_group[idx], ordered_perp_group[idx], c=colors[idx], s=20, alpha=1, label=labels[idx], edgecolors='k')\n plt.ylim(0, 25)\n plt.xticks([25, 50, 75, 100])\n ax.set_ylabel(perp_model + ' Perplexity')\n ax.set_xlabel('pLDDT')\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_plddt_perp_'+labels[idx]+'_'+length_model+'.svg'))\n fig.savefig(os.path.join('plots/sc_plddt_perp_'+labels[idx]+'_'+length_model+'.png'))\n\ndef ss_helix_strand(runs, data, labels, save_name):\n \"2D Probability Density plots for DSSP 3-state predictions of % Helix and % Sheet\"\n fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(10, 7), constrained_layout=True, sharex=False, sharey=False)\n ax = ax.ravel()\n for i, run in enumerate(runs):\n helix = data[data['type'] == run]['helix_percent']\n strand = data[data['type'] == run]['strand_percent']\n\n plt.rcParams['axes.titley'] = 1.0 # y is in axes-relative coordinates.\n plt.rcParams['axes.titlepad'] = -14\n ax[i].set_title(labels[i])\n\n sns.kdeplot(x=helix, y=strand,\n fill=True, thresh=0.001, levels=10,\n cmap='Greys', ax=ax[i], cbar=False, common_norm=True)\n ax[i].set_xlabel('% Helix per Seq')\n ax[i].set_ylabel('% Strand per Seq')\n ax[i].set_xlim(-0.05, 1)\n ax[i].set_ylim(-0.05, 1)\n #plt.tight_layout()\n fig.savefig(os.path.join('plots/helix_strand_' + save_name + '.svg'))\n fig.savefig(os.path.join('plots/helix_strand_' + save_name + '.png'))\n\ndef ss_box_whisker(data, colors, save_name):\n \"Create box and whisker plot for DSSP 3-state secondary structure predictions\"\n fig, ax = plt.subplots(1, 3, figsize=(7, 3.5), sharex=True, sharey=True)\n sns.boxplot(data=data, x=\"helix_percent\", y=\"type\", ax=ax[0], palette=colors)\n sns.boxplot(data=data, x=\"strand_percent\", y=\"type\", ax=ax[1], palette=colors)\n sns.boxplot(data=data, x=\"other_percent\", y=\"type\", ax=ax[2], 
palette=colors)\n ax[0].set_xlabel('% Helix per Sequence')\n ax[1].set_xlabel('% Strand per Sequence')\n ax[2].set_xlabel('% Loop per Sequence')\n [ax[i].set_ylabel(None) for i in range(len(ax))]\n plt.tight_layout()\n fig.savefig(os.path.join('plots/' + save_name + '_structure_box.svg'))\n fig.savefig(os.path.join('plots/' + save_name + '_structure_box.png'))\n\ndef plot_embedding(train_emb, run_emb, colors, i, runs, project_run):\n \"Plot embedding space of sequences as 2D TSNE \"\n fig, ax = plt.subplots(figsize=(5, 5))\n # Plot test\n plt.scatter(train_emb[:, 0][::10], train_emb[:, 1][::10], s=20, alpha=1, c=colors[0],\n edgecolors='grey')\n # Plot run\n plt.scatter(run_emb[:, 0], run_emb[:, 1], s=20, alpha=0.95,\n c=colors[i+1], edgecolors='k')\n ax.axis('off')\n fig.savefig(os.path.join('plots/fid_' + runs[i+1] + '_' + project_run + '.svg'))\n fig.savefig(os.path.join('plots/fid_' + runs[i+1] + '_' + project_run + '.png'))\n\ndef clean_list(list):\n cleanedList = [x for x in list if x ==x]\n return cleanedList\n\ndef plot_percent_similarity(all_df, colors, legend=False):\n fig, ax = plt.subplots(1, 1, figsize=(2.5, 2.5), sharey=True, sharex=True)\n #sns.set_palette(sns.color_palette(\"viridis\", len(runs)))\n sns.ecdfplot(all_df, ax=ax, legend=legend, palette=colors)\n #f = sns.boxplot([all_df['Valid MSA'].dropna(), all_df['Cond Max'].dropna(), all_df['Cond Rand'].dropna()],\n # ax=ax, palette=colors)\n #f.set(xticklabels=['Valid MSA', 'Cond Max', 'Cond Rand'])\n ax.set_xlabel('% Similarity to Original MSA')\n ax.axvline(x=25, c='k', ls='--', lw=0.75)\n ax.set_title(\"% Sim\")\n plt.tight_layout()\n fig.savefig(os.path.join('plots/simmsa.svg'))\n fig.savefig(os.path.join('plots/simmsa.png'))\n\ndef plot_conditional_tmscores(tm_df, palette, legend=False, save_path='plots/'):\n fig, ax = plt.subplots(1, 1, figsize=(2.5, 2.5), sharey=True, sharex=True)\n sns.ecdfplot(tm_df, palette=palette, ax=ax, legend=legend)\n ax.set_title(\" \")\n ax.axvline(x=0.5, c='k', ls='--', lw=0.75)\n plt.xlim(0,1)\n ax.set_ylabel('CDF')\n ax.set_xlabel('TM Score')\n plt.tight_layout()\n fig.savefig(os.path.join(save_path+'_tmscore.svg'))\n fig.savefig(os.path.join(save_path+'_tmscore.png'))\n\ndef plot_conditional_rmsd(pdb, motif_df, out_path='plots/'):\n fig, ax = plt.subplots(1, 3, figsize=(7.5, 2.5))\n ax[0].scatter(motif_df['scaffold_lengths'], motif_df['rmsd'], edgecolors='grey', c='#D0D0D0')\n ax[0].set_xlabel('Scaffold Lengths')\n ax[0].set_ylabel(r'Motif RMSD ($\\AA$)')\n ax[1].scatter(motif_df['scores'], motif_df['rmsd'], edgecolors='grey', c='#D0D0D0')\n ax[1].set_xlabel('pLDDT entire sequence')\n ax[1].set_ylabel(r'Motif RMSD ($\\AA$)')\n ax[2].scatter(motif_df['scores_fixed'], motif_df['rmsd'], edgecolors='grey', c='#527d99')\n ax[2].set_xlabel('pLDDT fixed region')\n ax[2].set_ylabel(r'Motif RMSD ($\\AA$)')\n ax[0].axhline(y=1, c='k', ls='--', lw=0.75)\n ax[1].axhline(y=1, c='k', ls='--', lw=0.75)\n ax[2].axhline(y=1, c='k', ls='--', lw=0.75)\n plt.title(\" \")\n ax[1].set_xlim(0, 100)\n ax[2].set_xlim(0, 100)\n plt.tight_layout()\n fig.savefig(os.path.join(out_path + pdb + '.png'))\n\ndef plot_conditional_sim(sim, out_path='plots/'):\n fig, ax = plt.subplots(figsize=(2.5, 2.5))\n sns.histplot(sim, color='grey', bins=10, ax=ax)\n plt.xlabel('% Seq similarity (Fixed)')\n plt.title(\" \")\n plt.xlim(0, 100)\n plt.tight_layout()\n fig.savefig(out_path + '_similarity.png')\n\ndef idr_parity_plot(mean_og_score, mean_gen_score, out_path):\n fig, ax = plt.subplots(figsize=(6, 2.5))\n r_squared = 
stats.pearsonr(mean_og_score, mean_gen_score).statistic\n label = \"$R$=%.2f\" % (r_squared)\n plt.axline([0, 0], [1, 1], c='k', linestyle='dotted', alpha=0.75)\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.scatter(mean_og_score, mean_gen_score, c='grey', edgecolors='k')\n plt.xlabel(\"Per-Res Score True\", fontweight='bold')\n plt.ylabel(\"Per-Res Score Gen\", fontweight='bold')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'idr_parity_scatter.svg'))\n fig.savefig(os.path.join(out_path, 'idr_parity_scatter.png'))\n plt.close()\n\ndef plot_idr(out_fpath, df, start, end, save_iter):\n fig, ax = plt.subplots(figsize=(6,3))\n plt.plot(df['resid'], df['score'], c='b')\n plt.axhline(y=0.5, c='k', ls='--')\n #plt.axvline(x=end, c='k', ls='--')\n plt.axvspan(start, end, alpha=0.1, color='b')\n plt.ylabel('score')\n plt.xlabel('residue')\n plt.tight_layout()\n fig.savefig(out_fpath+'idr_'+str(save_iter)+'.svg')\n fig.savefig(out_fpath+'idr_'+str(save_iter)+'.png')\n\ndef plot_idr_drbert(out_fpath, prefix, df, start, end, save_iter):\n fig, ax = plt.subplots(figsize=(6,3))\n x = np.arange(0,len(df['score'][save_iter]))\n plt.plot(x, df['score'][save_iter], c='b')\n #plt.axhline(y=0.5, c='k', ls='--')\n #plt.axvline(x=end, c='k', ls='--')\n plt.axvspan(start, end, alpha=0.1, color='b')\n plt.ylabel('score')\n plt.xlabel('residue')\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+'svg/'+prefix+str(save_iter)+'.svg')\n #fig.savefig(out_fpath+prefix+str(save_iter)+'.png')\n\n\ndef plot_idr_drbert_multiple(out_fpath, prefix, df, start, end, df2, start2, end2, save_iter):\n fig, ax = plt.subplots(figsize=(4,1.5))\n x = np.arange(0,len(df['score'][save_iter]))\n x2 = np.arange(0,len(df2['score'][save_iter]))\n plt.plot(x, df['score'][save_iter], c='#1E9AC7')\n plt.plot(x2, df2['score'][save_iter], c='grey')\n plt.axvspan(start, end, alpha=0.25, color='#1E9AC7')\n plt.ylabel('score')\n plt.xlabel('residue')\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+'svg/'+prefix+str(save_iter)+'.svg')\n\ndef idr_boxplot(gen_disorder_percent, gen_order_percent, out_fpath, save_name):\n fig, ax = plt.subplots(figsize=(3,3))\n f = sns.boxplot([gen_disorder_percent, gen_order_percent], ax=ax)\n f.set(xticklabels=['Disorder', 'Non-Disordered'])\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+save_name+'idr_box.svg')\n fig.savefig(out_fpath+save_name+'idr_box.png')\n\ndef idr_boxplot_all(df, out_fpath, save_name):\n print(df)\n fig, ax = plt.subplots(figsize=(3,3))\n f = sns.boxplot(data=df, x=\"region\", y=\"score\", hue='type', ax=ax)\n f.set(xticklabels=['Disorder', 'Non-Disordered'])\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+save_name+'idr_box.svg')\n fig.savefig(out_fpath+save_name+'idr_box.png')","repo_name":"microsoft/evodiff","sub_path":"evodiff/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":26006,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"48"}
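The pairwise-interaction plot in the record above leans on the repo's get_pairwise helper, which is not shown in this file. As a rough, self-contained sketch of the statistic being plotted (toy_get_pairwise and pair_frequencies are invented stand-ins, not EvoDiff's implementation), the following counts co-occurring residue pairs per sequence and compares two toy MSAs with the same pearsonr(...).statistic call the plotting code uses:

import itertools
from scipy import stats

def toy_get_pairwise(msa):
    # Yield every ordered-by-position residue pair within each sequence.
    for seq in msa:
        yield from itertools.combinations(seq, 2)

def pair_frequencies(msa, alphabet):
    # One slot per ordered pair over the alphabet, zero-filled like the original.
    counts = {''.join(p): 0 for p in itertools.product(alphabet, repeat=2)}
    total = 0
    for a, b in toy_get_pairwise(msa):
        counts[a + b] += 1
        total += 1
    return [counts[k] / total for k in sorted(counts)]

alphabet = 'ACDE'
true_msa = ['ACDE', 'ACCE', 'ADDE']
gen_msa = ['ACDE', 'AACE', 'ACDE']
r = stats.pearsonr(pair_frequencies(true_msa, alphabet),
                   pair_frequencies(gen_msa, alphabet)).statistic
print('R = %.2f' % r)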
+{"seq_id":"15114314860","text":"alcool = 0\r\ngasol = 0\r\ndiesel = 0\r\nwhile True:\r\n x = int(input())\r\n if x == 4:\r\n break\r\n elif x == 1:\r\n alcool += 1\r\n elif x == 2:\r\n gasol += 1\r\n elif x == 3:\r\n diesel += 1\r\n\r\nprint(\"MUITO OBRIGADO\")\r\nprint(f\"Alcool: {alcool}\")\r\nprint(f\"Gasolina: {gasol}\")\r\nprint(f\"Diesel: {diesel}\")\r\n","repo_name":"GersonRS/beecrowd","sub_path":"Iniciante/python-solution/1134-tipo-de-combustivel/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"13267183320","text":"import csv\nimport urllib.request\n\nfrom functools import wraps\nfrom pytrivia import Category, Diffculty, Type, Trivia\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom cs50 import SQL\nfrom passlib.apps import custom_app_context as pwd_context\nfrom tempfile import mkdtemp\n\n\n\n# configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\ndef apology(message, code=400):\n \"\"\"Renders message as an apology to user.\"\"\"\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code\n\ndef succes(message, code=200):\n \"\"\"Renders message as an apology to user.\"\"\"\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"succes.html\", top=code, bottom=escape(message)), code\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function\n\ndef score(answer):\n \"\"\"Adds points for correct answer\"\"\"\n # select table\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n # check if answer is correct\n user_answer = answer\n real_answer = portfolio[-1][\"answer\"]\n if user_answer == real_answer:\n db.execute(\"UPDATE score set total_score=total_score+1 WHERE id=:id\", \\\n id=session[\"user_id\"])\n db.execute(\"UPDATE score set session_score=session_score+1 WHERE id=:id\", \\\n id=session[\"user_id\"])\n\ndef qinit():\n \"\"\"Initializes all but the first question\"\"\"\n # select table\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n # setup question config\n cat = portfolio[-1][\"category\"]\n dif = portfolio[-1][\"difficulty\"]\n questiontype = portfolio[-1][\"qtype\"]\n qnumber = int(portfolio[-1][\"qnumber\"]) - 1\n config = [cat, dif, questiontype, qnumber]\n return (config)\n\ndef outofq():\n \"\"\"Checks if out of questions\"\"\"\n\n # delete session from portfolio and return total score\n delete_portfolio = db.execute(\"DELETE FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n u_score = db.execute(\"SELECT total_score FROM score WHERE id = :id\", id=session[\"user_id\"])\n s_score = db.execute(\"SELECT session_score FROM score WHERE id = :id\", id=session[\"user_id\"])\n return [u_score, s_score]\n\ndef sconfigmulti(answers, cat, questiontype, dif, qnumber, correct_answer):\n # insert data into portfolio for respective questiontypes\n asked = db.execute(\"INSERT INTO portfolio (id, answer, category, qtype, difficulty, qnumber) \\\n VALUES(:id, :answers, :category, :qtype, :difficulty, :qnumber)\", \\\n answers = correct_answer, category = cat, qtype = 
questiontype, \\\n difficulty = dif, qnumber = qnumber, id=session[\"user_id\"])\n\ndef sconfigtf(answers, cat, questiontype, dif, qnumber, correct_answer):\n # insert data into portfolio for respective questiontypes\n asked = db.execute(\"INSERT INTO portfolio (id, answer, category, qtype, difficulty, qnumber) \\\n VALUES(:id, :answers, :category, :qtype, :difficulty, :qnumber)\", \\\n answers = correct_answer, category = cat, qtype =questiontype, \\\n difficulty = dif, qnumber = qnumber, id=session[\"user_id\"] )\n\ndef delsession():\n # delete session from portfolio\n db.execute(\"DELETE FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\ndef session_score():\n # set user id into score table\n score = db.execute(\"INSERT INTO score (id) VALUES (:id)\", id=session[\"user_id\"])\n\ndef q_score():\n # return session score\n score = db.execute(\"SELECT session_score FROM score WHERE id = :id\", id=session[\"user_id\"])\n return score[0][\"session_score\"]\n\ndef reset_score():\n # reset session score\n db.execute(\"UPDATE score set session_score=0 WHERE id=:id\", \\\n id=session[\"user_id\"])\n\ndef leaders():\n # lookup top 5 scores\n one = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n two = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n three = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n four = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n five = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n\n return [one, two, three, four, five]\n\ndef leader_names(top):\n # set id's of top 5 scores\n row_1 = top[1][0][\"id\"]\n row_2 = top[1][1][\"id\"]\n row_3 = top[1][2][\"id\"]\n row_4 = top[1][3][\"id\"]\n row_5 = top[1][4][\"id\"]\n\n # lookup names associated with top 5 scores\n name_1 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_1)\n name_2 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_2)\n name_3 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_3)\n name_4 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_4)\n name_5 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_5)\n\n return [name_1, name_2, name_3, name_4, name_5]\n","repo_name":"IIVolumeII/webik-14","sub_path":"trivia/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
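A small refactor sketch for the record above: leaders() issues the identical top-5 query five times and leader_names() then makes five more single-row lookups. One JOIN returns the same information in a single round trip. Table and column names are taken from the original file; the JOIN itself is an assumption about the schema.

def leaders_joined(db):
    # Top 5 scores with their usernames in one query (cs50.SQL returns dicts)
    return db.execute(
        "SELECT users.username, score.total_score "
        "FROM score JOIN users ON users.id = score.id "
        "ORDER BY score.total_score DESC LIMIT 5"
    )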
+{"seq_id":"31100390432","text":"import os\nimport random\nfrom time import sleep\nfrom gpiozero import MotionSensor\n\n#Clear console and turn off blinking cursor\n#This makes for a completely black screen behind the mirror\nos.system(\"clear\")\nos.system(\"setterm --cursor off\")\n\n#Set motion Sensor GPIO 17\npir = MotionSensor(17)\n\n#path where videos are located\nfolderPath = \"/home/pi/Videos/\"\n\n#List of File Names of Videos in folder\nvideos = [\"BC_FearTheReaper_Holl_V.mp4\",\n \"BC_GatheringGhouls_Holl_V.mp4\", \n \"PP_StartleScare1_Wall_Spotlight_V.mp4\",\n \"PP_StartleScare2_Wall_Spotlight_V.mp4\",\n \"PP_StartleScare3_Wall_Spotlight_V.mp4\"]\n\n#VLC Command for starting the video with options\n# \"--quiet\" Turn off all messages on the console.\n# \"--no-osd\" No on-screen display (disables title of video from displaying)\n# \"-f\" fullscreen\n# \"--autoscale\" Let the video scale to fit a given window or fullscreen.\n# https://wiki.videolan.org/VLC_command-line_help/\nvlcCommandStart = \"vlc --quiet --no-osd -f --autoscale file://\"\n\n#End of the VLC Command after the file being played.\n# \"vlc://quit\" Close VLC after video is done\n# \">/dev/null 2>&1\" redirect all console output to null\nvlcCommandEnd = \" vlc://quit >/dev/null 2>&1\"\n\ntry: \n while True: # this will carry on until you hit CTRL+C \n os.system(\"clear\")\n \n #Wait for motion sensor\n pir.wait_for_motion()\n \n #Give time to look at mirror reflection\n # wait 3 seconds \n sleep(3)\n\n #Get video at random from list\n video = random.choice(videos) \n #Create command to play video by concatenating command variables.\n videoCommand = vlcCommandStart + folderPath + video + vlcCommandEnd \n #Run VLC command from BASH Shell/Terminal\n os.system(videoCommand)\n \n #After video plays let relfection \n #show before playing another video\n #wait 3 seconds \n sleep(3)\n\n #wait for motion sensor to deactivate\n #pir.wait_for_no_motion() \n\n# this block will run no matter how the try block exits\nfinally:\n # clean up after yourself\n GPIO.cleanup()","repo_name":"andyrblank/HalloweenMirror","sub_path":"mirrorScript.py","file_name":"mirrorScript.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71640823186","text":"import MLfunctions as mlf\r\nimport gensim\r\nfrom gensim import corpora\r\nfrom gensim.models import Doc2Vec,Word2Vec,TfidfModel\r\nfrom gensim.models.doc2vec import TaggedDocument\r\nfrom gensim.utils import simple_preprocess\r\nfrom gensim.models import CoherenceModel\r\nfrom nltk.corpus import stopwords\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport sys\r\nimport seaborn as sns\r\nimport matplotlib.colors as mcolors\r\n\r\nsw=stopwords.words('spanish')\r\npathtohere=os.getcwd()\r\n\r\n\r\ndef main():\r\n print('LDA model with gensim')\r\n print('1) 1 gram, 2) 2 gram, 3) 3 gram, 4) Ranking of coherence')\r\n op=input()\r\n op=int(op)\r\n #28 topics, optimum result (27 topics are really 28, 0 to 27)\r\n numberTopic=5\r\n lsReturn=[]\r\n lsDocuments=[]\r\n lsSubject=[]\r\n #lsNoThesis=[]\r\n #Get the the information into a list of documents\r\n lsReturn=mlf.getRawTextToList()\r\n lsDocuments=lsReturn[0]\r\n lsSubject=lsReturn[1]\r\n #lsNoThesis=lsReturn[2]\r\n #Read the unwanted words and then add them up to stopwords\r\n lsUnWantedWords=[]\r\n lsUnWantedWords=mlf.readFile('removed_words.txt')\r\n for word in lsUnWantedWords:\r\n sw.append(word.strip())\r\n \r\n #Read the Notsure words and then add them up to stopwords\r\n lsNotSureWords=[]\r\n lsNotSureWords=mlf.readFile('notsure_words.txt')\r\n for word in lsNotSureWords:\r\n sw.append(word.strip())\r\n \r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocuments]\r\n\r\n\r\n if(op==1):\r\n print('LDA model with gensim for 1 gram')\r\n \r\n if(op==2):\r\n print('LDA model with gensim for 2 gram')\r\n bigram = gensim.models.Phrases(lsDocuments_NoSW, min_count=5, threshold=100)\r\n bigram_mod = gensim.models.phrases.Phraser(bigram)\r\n lsDocBiGram = [bigram_mod[doc] for doc in lsDocuments_NoSW]\r\n lsDocuments_NoSW.clear()\r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocBiGram]\r\n \r\n\r\n if(op==3):\r\n print('LDA model with gensim for 3 gram')\r\n bigram = gensim.models.Phrases(lsDocuments_NoSW, min_count=5, threshold=100)\r\n bigram_mod = gensim.models.phrases.Phraser(bigram)\r\n trigram = gensim.models.Phrases(bigram[lsDocuments_NoSW], threshold=100)\r\n trigram_mod = gensim.models.phrases.Phraser(trigram)\r\n lsDocTrigram = [trigram_mod[doc] for doc in lsDocuments_NoSW]\r\n lsDocuments_NoSW.clear()\r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocTrigram]\r\n \r\n \"\"\"\r\n print('Getting bigrams list...')\r\n for doc in lsDocuments_NoSW:\r\n for word in doc:\r\n mlf.appendInfoToFile(pathtohere,'\\\\trigrams.txt',word+'\\n')\r\n\r\n \"\"\" \r\n\r\n if (op==4):\r\n print('Starting coherence ranking with 2 gram...') \r\n #Generate best coherence ranking\r\n # Create Dictionary\r\n id2word = corpora.Dictionary(lsDocuments_NoSW)\r\n # Create Corpus: Term Document Frequency\r\n corpus = [id2word.doc2bow(text) for text in lsDocuments_NoSW]\r\n bigram = gensim.models.Phrases(lsDocuments_NoSW, min_count=5, threshold=100)\r\n bigram_mod = gensim.models.phrases.Phraser(bigram)\r\n lsDocBiGram = [bigram_mod[doc] for doc in lsDocuments_NoSW]\r\n lsDocuments_NoSW.clear()\r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocBiGram]\r\n limit=51; start=2; step=1;\r\n model_list, coherence_values = mlf.compute_coherence_values(dictionary=id2word, 
corpus=corpus, texts=lsDocuments_NoSW, start=start, limit=limit, step=step)\r\n        print('Plotting ranking...')\r\n        # Show graph\r\n        \r\n        x = range(start, limit, step)\r\n        plt.plot(x, coherence_values)\r\n        plt.xlabel(\"Num Topics\")\r\n        plt.ylabel(\"Coherence score\")\r\n        plt.legend([\"coherence_values\"], loc='best')\r\n        plt.show() \r\n        sys.exit()\r\n\r\n    # id2word :Create Dictionary, this dictionary has the id and word\r\n    \r\n    id2word = corpora.Dictionary(lsDocuments_NoSW)\r\n\r\n    # Term Document Frequency\r\n    #Gensim creates a unique id for each word in the document. \r\n    #The produced corpus shown above is a mapping of (word_id, word_frequency).\r\n    \r\n    corpus = [id2word.doc2bow(text) for text in lsDocuments_NoSW]\r\n    #Example: it has 37, 342 indexes, so 0 to 37, 341 \r\n    columns=len(id2word) \r\n    #Generate list of columns\r\n    lsColumn=[]\r\n    for i in range(0,columns):\r\n        lsColumn.append(str(i));\r\n    #Generate the indexes (id_thesis)\r\n    lsIndex=[] \r\n    lsIndex=mlf.readFile('lsThesis.txt')\r\n    term_matrix=[]\r\n    lim=columns-1\r\n    countDoc=0\r\n    for doc in corpus:\r\n        strdoc=''\r\n        for i in range(0,columns): \r\n            bFound=False \r\n            for index_word,value in doc: \r\n                #Case: When the document has that index word \r\n                if int(i)!=lim:\r\n                    if i==index_word: \r\n                        bFound=True \r\n                        if i==0:\r\n                            strdoc='('\r\n                        if int(index_word)==i:\r\n                            strdoc=strdoc+str(value)+',' \r\n                        break \r\n                #Case: End of columns then add value and ')' \r\n                else: \r\n                    if i==index_word: \r\n                        bFound=True \r\n                        if int(index_word)==i:\r\n                            strdoc=strdoc+str(value)+')' \r\n                        break \r\n            if bFound==False:\r\n                strdoc=strdoc+'0,' \r\n            if bFound==False and i==lim:\r\n                strdoc=strdoc+'0)'\r\n\r\n        #mlf.appendInfoToFile(pathtohere+'\\\\','vectors.txt',strdoc) \r\n        term_matrix.append(strdoc)\r\n        countDoc=countDoc+1\r\n        print('Doc:',str(countDoc)) \r\n    \r\n\r\n    dataFrame = pd.DataFrame(term_matrix) \r\n    for row in dataFrame.iterrows():\r\n        mlf.appendInfoToFile(pathtohere+'\\\\','dataFrameContent.txt',str(row))\r\n\r\n    #sys.exit() # keep commented so the LDA model below runs\r\n\r\n    #Print the id and word \r\n    \"\"\"\r\n    for element in lsSubject:\r\n        mlf.appendInfoToFile(pathtohere+'\\\\','lsSubject.txt',str(element)+'\\n') \r\n    \r\n    #Get the word and its ID.\r\n    for key,value in id2word.token2id.items():\r\n        mlf.appendInfoToFile(pathtohere+'\\\\','id2word.txt',str(key)+';'+str(value)+'\\n') \r\n\r\n    \"\"\" \r\n    \r\n    print('LDA Model starting...')\r\n    # Build LDA model\r\n    lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\r\n                                           id2word=id2word,\r\n                                           num_topics=numberTopic)\r\n\r\n    \"\"\"\r\n    print('Printing topics')\r\n    lda_topics=lda_model.print_topics()\r\n    for topic in lda_topics:\r\n        mlf.appendInfoToFile(pathtohere,'\\\\list_of_topics_lda.txt',str(topic)+'\\n')\r\n    \"\"\"\r\n    \r\n    df=pd.DataFrame()\r\n    df=mlf.getDominantTopicDataFrame(lda_model,corpus,lsDocuments_NoSW,lsSubject) \r\n    mlf.generateFileSeparatedBySemicolon(df,str(op)+'gram_csv_'+str(numberTopic)+'_withoutCompleteList.txt') \r\n    \r\n    mlf.generatePyLDAVis(lda_model,corpus,'vis_'+str(op)+'gram_'+str(numberTopic)+'_withoutCompleteList.html')\r\n    \r\n    \"\"\"\r\n    lda_cm=CoherenceModel(model=lda_model,corpus=corpus,dictionary=id2word,texts=lsDocuments_NoSW)\r\n    print('LDA Coherence:',lda_cm.get_coherence()) \r\n    \"\"\"\r\n\r\n\r\nif __name__=='__main__':\r\n    main() 
\r\n","repo_name":"ulysesrico33/appPythonML","sub_path":"topicModeling_gensim_LDA.py","file_name":"topicModeling_gensim_LDA.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"25811417423","text":"# 模拟30个请求\n# 发给 http://127.0.0.1:8000/test\n# 发给 http://127.0.0.1:8001/test\n\nimport random\nfrom threading import Thread\nimport requests\n\n# 随机向8000或8001发请求\ndef get_request():\n url = 'http://127.0.0.1:8000/test'\n url2 = 'http://127.0.0.1:8000/test'\n get_url = random.choice([url,url2])\n res = requests.get(get_url)\n print('request OK')\n\nt_list = []\n\nfor i in range(30):\n t = Thread(target=get_request)\n t_list.append(t)\n t.start()\n print(i)\n\nfor t in t_list:\n t.join()\n\n\n","repo_name":"tomcatcn/wiki","sub_path":"tools/test_lock.py","file_name":"test_lock.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71092651987","text":"# Tocando um arquivo mp3.\n\n# Import the modules\n\nimport pygame\nfrom pygame import mixer\n\n# Instancia mixer\nmixer.init()\n\n# Carrega um arquivo de música\nmixer.music.load('ex021.mp3')\n\n# Dá um play na música\nmixer.music.play()\nprint(\"Tocando a música...\")\n\nwhile True:\n print(\"-----------------------------------------------\")\n print(\"Pressione P para Pausar.\")\n print(\"Pressione R para dar Play.\")\n print(\"Pressione E para sair.\")\n\n userInput = input(\" \")\n\n if userInput == 'p':\n\n mixer.music.pause()\n print('Música pausada.')\n elif userInput == 'r':\n mixer.music.unpause()\n print(\"Música está tocando.\")\n elif userInput == 'e':\n print(\"Você saiu do player\")\n break\n","repo_name":"GabrielVictorino8266/python","sub_path":"cursoemvideo/Mundo1/exercises/ex021.py","file_name":"ex021.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15531369915","text":"import whois\nimport logging\nimport json\nimport dns.resolver\n\nunique_domains = ['youtu.be']\n\n\ndef get_nslookup(domain):\n\n # records = ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT']\n records = {\n 'A': [],\n 'AAAA': [],\n 'CNAME': [],\n 'MX': [],\n 'NS': [],\n 'SOA': [],\n 'TXT': []\n }\n for record in records.keys():\n try:\n nslookup = dns.resolver.query(domain, record)\n # print('nslookup:', nslookup)\n for server in nslookup:\n # print(record + ': ' + str(server))\n records[record].append(str(server))\n except Exception as e:\n print(e)\n # submitError(link=link)\n logging.info(\"NS Lookup fetched... ✅\")\n print(\"NS Lookup: \" + json.dumps(records, indent=4, sort_keys=True))\n return records\n\n\ndef getWhois(parsed_url):\n try:\n whois_data = whois.whois(parsed_url['domain'])\n\n if ('domain_name' in whois_data.keys()):\n domain_name = whois_data['domain_name'][0] if type(\n whois_data['domain_name']) is list else whois_data['domain_name']\n\n else:\n if (parsed_url['domain'] in unique_domains):\n domain_name = parsed_url['domain']\n else:\n # print('domain_name not found')\n logging.error(\"domain_name not found... 👻\")\n\n return\n\n # convert domain_name to lowercase\n domain_name = domain_name.lower()\n\n # print('domain_name: ', domain_name)\n\n if ('registrar' in whois_data.keys()):\n registrar = whois_data['registrar']\n else:\n registrar = None\n\n whoisServer = whois_data['whois_server'] if 'whois_server' in whois_data.keys(\n ) else None\n\n if 'updatedDate' in whois_data.keys():\n updatedDate = whois_data['updated_date'] if type(\n whois_data['updated_date']) is list else [whois_data['updated_date']]\n else:\n updatedDate = None\n\n if 'creation_date' in whois_data.keys():\n creationDate = whois_data['creation_date'][0] if type(\n whois_data['creation_date']) is list else whois_data['creation_date']\n else:\n creationDate = None\n\n if 'expiration_date' in whois_data.keys():\n expirationDate = whois_data['expiration_date'][0] if type(\n whois_data['expiration_date']) is list else whois_data['expiration_date']\n else:\n expirationDate = None\n\n if 'name_servers' in whois_data.keys():\n nameServers = whois_data['name_servers']\n else:\n nameServers = None\n\n if 'status' in whois_data.keys():\n status = whois_data['status'] if type(whois_data['status']) is list else [\n whois_data['status']]\n else:\n status = None\n\n if 'emails' in whois_data.keys():\n emails = whois_data['emails'] if type(whois_data['emails']) is list else [\n whois_data['emails']]\n else:\n emails = None\n\n if 'dnssec' in whois_data.keys():\n dnssec = whois_data['dnssec'] if type(whois_data['dnssec']) is list else [\n whois_data['dnssec']]\n else:\n dnssec = None\n\n if 'name' in whois_data.keys():\n name = whois_data['name'] if whois_data['name'] else None\n else:\n name = None\n\n if 'org' in whois_data.keys():\n org = whois_data['org'] if whois_data['org'] else None\n else:\n org = None\n\n if 'address' in whois_data.keys():\n address = whois_data['address'] if whois_data['address'] else None\n else:\n address = None\n\n if 'city' in whois_data.keys():\n city = whois_data['city'] if whois_data['city'] else None\n else:\n city = None\n\n if 'country' in whois_data.keys():\n country = whois_data['country'] if whois_data['country'] else None\n else:\n country = None\n\n if 'state' in whois_data.keys():\n state = whois_data['state'] if whois_data['state'] else None\n else:\n state = None\n\n if 'address' in whois_data.keys():\n address = address if 
type(address) is list else [address]\n else:\n address = None\n\n if address and address[0] is None:\n address = None\n if updatedDate and updatedDate[0] is None:\n updatedDate = None\n if status and status[0] is None:\n status = None\n if emails and emails[0] is None:\n emails = None\n if dnssec and dnssec[0] is None:\n dnssec = None\n\n logging.info(\"whois data collected... 📝\")\n\n print(\n 'domain:', domain_name,\n '\\nregistrar:', registrar,\n '\\nwhoisServer:', whoisServer,\n '\\nupdatedDate:', updatedDate,\n '\\ncreationDate:', creationDate,\n '\\nexpirationDate:', expirationDate,\n '\\nnameServers:', nameServers,\n '\\nstatus:', status,\n '\\nemails:', emails,\n '\\ndnssec:', dnssec,\n '\\nname:', name,\n '\\norg:', org,\n '\\naddress:', address,\n '\\ncity:', city,\n '\\ncountry:', country,\n '\\nstate:', state\n )\n\n except Exception as e:\n print(e)\n # submitError(link=link)\n\n\nif __name__ == '__main__':\n getWhois({'domain': 'google.com'})\n get_nslookup('google.com')\n","repo_name":"Amansinghtech/python-whois","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
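The long run of near-identical if/else blocks in getWhois above can be collapsed into one table-driven helper. This is a sketch under the assumption that whois_data behaves like a dict (python-whois entries are dict-like); the field names come from the original, and the as_list flags are guesses about which fields should stay lists:

def field(data, key, as_list=False):
    # Normalize a whois field: None if absent, list or scalar as requested.
    value = data.get(key)
    if value is None:
        return None
    if as_list:
        return value if isinstance(value, list) else [value]
    return value[0] if isinstance(value, list) else value

# e.g. registrar = field(whois_data, 'registrar')
#      updatedDate = field(whois_data, 'updated_date', as_list=True)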
+{"seq_id":"9104975971","text":"class Solution(object):\n def change(self, amount, coins):\n \"\"\"\n :type amount: int\n :type coins: List[int]\n :rtype: int\n \"\"\"\n change = [0] * (amount+1)\n change[0] = 1 # can always make 0 change\n for c in coins:\n for i in range(c,amount+1):\n change[i] = change[i] + change[i-c]\n return change[amount]","repo_name":"BlakeBrown/LeetCode-Solutions","sub_path":"518 - Coin Change 2.py","file_name":"518 - Coin Change 2.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"}
+{"seq_id":"1986974977","text":"import random\nfrom collections import defaultdict\nfrom PyQt6.QtWidgets import QLabel\nfrom backend.player import player\n\n\nclass Game:\n def __init__(self):\n self.deck = [\n f\"{i}{j}\"\n for i in list(\"KQJA\") + list(map(str, range(2, 11)))\n for j in \"SHCD\"\n ] * 2\n\n self.board = [\n [\"XX\", \"6D\", \"7D\", \"8D\", \"9D\", \"10D\", \"QD\", \"KD\", \"AD\", \"XX\"],\n [\"5D\", \"3H\", \"2H\", \"2S\", \"3S\", \"4S\", \"5S\", \"6S\", \"7S\", \"AC\"],\n [\"4D\", \"4H\", \"KD\", \"AD\", \"AC\", \"KC\", \"QC\", \"10C\", \"8S\", \"KC\"],\n [\"3D\", \"5H\", \"QD\", \"QH\", \"10H\", \"9H\", \"8H\", \"9C\", \"9S\", \"QC\"],\n [\"2D\", \"6H\", \"10D\", \"KH\", \"3H\", \"2H\", \"7H\", \"8C\", \"10S\", \"10C\"],\n [\"AS\", \"7H\", \"9D\", \"AH\", \"4H\", \"5H\", \"6H\", \"7C\", \"QS\", \"9C\"],\n [\"KS\", \"8H\", \"8D\", \"2C\", \"3C\", \"4C\", \"5C\", \"6C\", \"KS\", \"8C\"],\n [\"QS\", \"9H\", \"7D\", \"6D\", \"6D\", \"4D\", \"QD\", \"2D\", \"AS\", \"7C\"],\n [\"10S\", \"10H\", \"QH\", \"KH\", \"AH\", \"2C\", \"3C\", \"4C\", \"5C\", \"6C\"],\n [\"XX\", \"9S\", \"8S\", \"7S\", \"6S\", \"5S\", \"4S\", \"3S\", \"2S\", \"XX\"],\n ]\n\n random.shuffle(self.deck)\n self.pos = defaultdict(list)\n self.used = defaultdict(int)\n self.coins = defaultdict(QLabel)\n self.filled = [[0] * 10 for _ in \" \" * 10]\n self.winner = False\n\n def storeLocations(self):\n for i in range(10):\n for j in range(10):\n self.pos[self.board[i][j]] += ((i, j),)\n\n def distribute(self, player: player):\n for _ in range(5):\n player.addCard(self.getNewCard())\n\n def getNewCard(self):\n while self.deck:\n newCard = self.deck.pop()\n if not self.used[newCard] == 2:\n self.used[newCard] += 1\n return newCard\n return False\n\n def checkSequence(self, x, y, obj):\n # check up - down\n\n total = 0\n b = d = y\n while b >= 0:\n if obj.playerBox[x][b]:\n total += 1\n else:\n break\n b -= 1\n\n while d < 10:\n if obj.playerBox[x][d]:\n total += 1\n else:\n break\n d += 1\n\n obj.playerScore += total >= 6\n\n # check left - right\n\n total = 0\n a = c = x\n while a:\n if obj.playerBox[a][y]:\n total += 1\n else:\n break\n a -= 1\n\n while c < 10:\n if obj.playerBox[c][y]:\n total += 1\n else:\n break\n c += 1\n\n obj.playerScore += total >= 6\n\n # check left - diagonal\n\n total = 0\n a = c = x\n b = d = y\n while a and b:\n if obj.playerBox[a][b]:\n total += 1\n else:\n break\n a -= 1\n b -= 1\n\n while c < 10 and d < 10:\n if obj.playerBox[c][d]:\n total += 1\n else:\n break\n c += 1\n d += 1\n\n obj.playerScore += total >= 6\n\n # check right - diagonal\n\n total = 0\n a = c = x\n b = d = y\n while a and b < 9:\n if obj.playerBox[a][b]:\n total += 1\n else:\n break\n a -= 1\n b += 1\n\n while c < 9 and d:\n if obj.playerBox[c][d] == obj.playerBox[c + 1][d - 1]:\n total += 1\n else:\n break\n c += 1\n d -= 1\n\n obj.playerScore += total >= 6\n if obj.playerScore > 0:\n self.winner = True\n\n def setBox(self, player: player, opponent, x, y):\n if self.board[x][y] == \"XX\":\n return False\n\n ok = player.hasChosenValid(x, y, opponent, self.board[x][y])\n if ok == 0:\n print(\"NOT VALID\", self.board[x][y], player.playerCards)\n\n elif ok == 1:\n player.playerBox[x][y] = 1\n self.checkSequence(x, y, player)\n self.filled[x][y] = 1\n return ok\n\n else:\n self.filled[x][y] = 0\n opponent[x][y] = 0\n\n return ok\n\n def makeRandomMove(self, player: player, opponent: player):\n while True:\n card = random.choice(player.playerCards) # one eye jack\n if card in (\"JH\", \"JS\"):\n for i in 
range(10):\n for j in range(10):\n if self.board[i][j] == \"XX\":\n continue\n if opponent.playerBox[i][j]:\n opponent.playerBox[i][j] = 0\n player.playerCards.remove(card)\n self.filled[i][j] = 0\n player.addCard(self.getNewCard())\n return (i, j, 0)\n return False\n\n elif card in (\"JD\", \"JC\"): # two eye jack\n for i in range(10):\n for j in range(10):\n if self.board[i][j] == \"XX\":\n continue\n if (\n player.playerBox[i][j] == 0\n and opponent.playerBox[i][j] == 0\n ):\n player.playerBox[i][j] = 1\n self.filled[i][j] = 1\n self.checkSequence(i, j, player)\n player.playerCards.remove(card)\n player.addCard(self.getNewCard())\n return (i, j, 1)\n return False\n\n else: # normal card\n for i in range(10):\n for j in range(10):\n if self.board[i][j] == \"XX\":\n continue\n if (\n self.board[i][j] == card\n and player.playerBox[i][j] == opponent.playerBox[i][j] == 0\n ):\n self.filled[i][j] = 1\n player.playerBox[i][j] = 1\n self.checkSequence(i, j, player)\n player.playerCards.remove(card)\n player.addCard(self.getNewCard())\n return (i, j, 1)\n\n player.playerCards.remove(card)\n player.addCard(self.getNewCard())\n return False\n","repo_name":"heksadecimal/sequence","sub_path":"src/backend/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"}
+{"seq_id":"7716174783","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType\n\n\"\"\"\n Dataframe from array\n\"\"\"\n\n__all__ = [\"smartphones_dataframe\"]\n\n\ndef smartphones_dataframe(spark: SparkSession):\n data_array = [[\"Apple\", \"iPhone X\", \"IOS\", 46], [\"Xiaomi\", \"Mi 9\", \"Android\", 54]]\n schema = StructType([StructField('make', StringType(), True),\n StructField('model', StringType(), True),\n StructField('platform', StringType(), True),\n StructField('camera_megapixels', IntegerType(), True)])\n df_from_array = spark.createDataFrame(data_array, schema)\n print(\"Smartphone DataFrame: \")\n df_from_array.show(5)\n\n return df_from_array\n","repo_name":"thaapontes/pyspark-etl","sub_path":"ingested_dataframes/smartphones_df.py","file_name":"smartphones_df.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"31593026269","text":"##To run the code type in terminal: python3 180123057_MOHAMMAD_HUMAM_KHAN.py\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom matplotlib import cm\nfrom scipy.stats import multivariate_normal\nimport random\nimport math\nimport numpy as np\nimport statistics\n\n\n\n\n\n#Function to calculate Normal density at a point x\ndef normal_density(mu, sigma, x):\n\ty = (1/math.sqrt(2 * math.pi * sigma)) * math.exp(-0.5 * ((x-mu)/sigma)**2)\n\treturn y\n\n\n\n#Calculating Frequencies for different intervals\ndef calculate_frequency(results, intervals, freq, sigma,rounds):\n\tfor i in results:\n\t\tfor j in range(len(intervals)):\n\t\t\tif (i <= intervals[j]):\n\t\t\t\tfreq[j] = freq[j] + 1\n\t\t\t\tbreak\n\tfor i in range(len(intervals)):\n\t\tfreq[i] = freq[i]*math.sqrt(sigma)/(2*rounds/10)\n\n\n\n##Plotting the results\ndef plot_result(intervals,freq,mu,sigma,a,var,name):\n\t\n\tplt.figure(figsize=(20,12))\n\tplt.title(\"Marginal Density Plot of %s \\n a = %s\" % (var,a),fontsize=20)\n\tplt.ylabel(\"Scaled frequency Values\", fontsize=15)\n\tplt.xlabel(\"Intervals\", fontsize=15)\n\tplt.plot(intervals[1:], freq[1:])\n\tplt.scatter(intervals[1:], freq[1:])\n\n\tr = np.linspace(mu-4*sigma,mu+4*sigma,5000,endpoint=True)\n\ty = []\n\tfor i in range(5000):\n\t\ty.append(normal_density(mu, sigma, r[i]))\n\tplt.plot(r, y, color='r')\n\tplt.savefig(name)\n\tplt.clf()\n\n\n\n#Function to Plot 3D density for different values of a\ndef plot3D_density(X1,X2,mu1,mu2,sigma1,sigma2,a,rounds,name):\n\t\n\tX_start = mu1 - 4*sigma1\n\tX_end = mu1 + 4*sigma1\n\tY_start = mu2 - 4*sigma2\n\tY_end = mu2 + 4*sigma2\n\tX_intervals = np.linspace(X_start,X_end,100,endpoint=True)\n\tY_intervals = np.linspace(Y_start,Y_end,100,endpoint=True)\n\tXY_freq = np.array([[0]*100]*100)\n\n\n\tX_interval_size = (X_end-X_start)/100\n\tY_interval_size = (Y_end-Y_start)/100\n\n\t#Calculating frequency of intervals\n\tfor i in range(rounds):\n\t\tj = math.floor((X1[i]-X_start)/X_interval_size)\n\t\tk = math.floor((X2[i]-Y_start)/Y_interval_size)\n\t\tif j>=100 or k>=100:\n\t\t\tcontinue\n\t\tXY_freq[j][k] = XY_freq[j][k] + 1\n\n\n\t#Plotting Simulated Density \n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\tax.set_title(\"Bivariate Density of Simulated Points (X) in 3D \\n a = %s\" % a)\n\tax.set_xlabel('X axis')\n\tax.set_ylabel('Y axis')\n\tax.set_zlabel('Z axis')\n\n\tX_intervals, Y_intervals = np.meshgrid(X_intervals, Y_intervals)\n\tax.plot_surface(X_intervals, Y_intervals, XY_freq,cmap=cm.coolwarm)\n\tplt.savefig(name+\"_3\")\n\tplt.clf()\n\n\n\t##Plotting Actual Bivariate Normal density \n\n\tX = np.linspace(X_start,X_end, 1000,endpoint=True)\n\tY = np.linspace(Y_start,Y_end, 1000,endpoint=True)\n\tX, Y = np.meshgrid(X, Y)\n\n\tmu = np.array([5, 8])\n\n\t#Handling Corner Case for a = 1\n\tif a!=1:\n\t\tSigma = np.array([[ 1. , 2.0*a], [2.0*a, 4.]])\n\t\n\tif a==1:\n\t\tSigma = np.array([[ 1. 
, 2.0*0.99999], [2.0*0.99999, 4.]])\n\n\tpos = np.empty(X.shape + (2,))\n\tpos[:, :, 0] = X\n\tpos[:, :, 1] = Y\n\n\tF = multivariate_normal(mu, Sigma)\n\tZ = F.pdf(pos)\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\tax.set_title(\"Actual Bivariate Density of X in 3D \\n a = %s\" % a)\n\tax.set_xlabel('X axis')\n\tax.set_ylabel('Y axis')\n\tax.set_zlabel('Z axis')\n\tax.plot_surface(X,Y,Z,cmap=cm.coolwarm,linewidth=0, antialiased=True)\n\tplt.savefig(name+\"_4\")\n\tplt.clf()\n\n\n\n\n\n#Function to Simulate Bivariate Normal\ndef Simulate(a,name):\n\n\tmu1 = 5\n\tmu2 = 8\n\n\tsigma1 = 1\n\tsigma2 = 2\n\n\trho = a\n\n\trounds = 1000\n\tZ1 = np.random.normal(0,1,rounds)\n\tZ2 = np.random.normal(0,1,rounds)\n\n\tX1 = mu1 + sigma1*Z1\n\tX2 = mu2 + (rho * sigma2 * Z1) + (math.sqrt(1 - rho**2) * sigma2 * Z2) \n\n\n\tintervals = []\n\tval = mu1-5\n\tfor i in range(50):\n\t\tintervals.append(round(val,2))\n\t\tval += 0.2\t\n\n\n\tfreq = [0]*50\n\tcalculate_frequency(X1, intervals, freq, sigma1,rounds)\n\tplot_result(intervals, freq, mu1, sigma1,a,\"X1\",name+\"_1\")\n\n\n\tintervals.clear()\n\tval = mu2-8\n\tfor i in range(80):\n\t\tintervals.append(round(val,2))\n\t\tval += 0.2\t\n\n\n\tfreq = [0]*80\n\tcalculate_frequency(X2, intervals, freq, sigma2,rounds)\n\tplot_result(intervals, freq, mu2, sigma2,a,\"X2\",name+\"_2\")\n\n\n\tplot3D_density(X1,X2,mu1,mu2,sigma1,sigma2,a,rounds,name)\n\n\n\n\n\n\n\n\n#Calling function to simulate for different values of a\nSimulate(-0.5,\"plot1\")\nSimulate(0,\"plot2\")\nSimulate(0.5,\"plot3\")\nSimulate(1,\"plot4\")\n\n\n","repo_name":"humamkhan2k/MA-323-Monte-Carlo-Simulation","sub_path":"LAB 6/180123057_MOHAMMAD_HUMAM_KHAN.py","file_name":"180123057_MOHAMMAD_HUMAM_KHAN.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
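The conditional construction in Simulate() above, X2 = mu2 + rho*sigma2*Z1 + sqrt(1-rho^2)*sigma2*Z2, is equivalent to sampling straight from the joint bivariate normal. A quick numpy cross-check sketch with the same parameters (means 5 and 8, standard deviations 1 and 2, rho = 0.5):

import numpy as np

rho = 0.5
mean = [5, 8]
cov = [[1.0, rho * 1 * 2],      # Cov(X1, X2) = rho * sigma1 * sigma2
       [rho * 1 * 2, 4.0]]
pts = np.random.multivariate_normal(mean, cov, size=100_000)
print(pts.mean(axis=0))          # ~ [5, 8]
print(np.corrcoef(pts.T)[0, 1])  # ~ 0.5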
+{"seq_id":"30858742349","text":"from __future__ import print_function, unicode_literals\n\nfrom PyInquirer import style_from_dict, Token, prompt, Separator\nfrom pprint import pprint\n\n\ndef present_menu(suggestions):\n\n style = style_from_dict(\n {\n Token.Separator: \"#cc5454\",\n Token.QuestionMark: \"#673ab7 bold\",\n Token.Selected: \"#cc5454\", # default\n Token.Pointer: \"#673ab7 bold\",\n Token.Instruction: \"\", # default\n Token.Answer: \"#f44336 bold\",\n Token.Question: \"\",\n }\n )\n\n questions = [\n {\n \"type\": \"checkbox\",\n \"message\": \"Select which jobs to share on LinkedIn:\",\n \"name\": \"posts\",\n \"choices\": [],\n \"validate\": lambda answer: \"You must choose at least one option.\"\n if len(answer) == 0\n else True,\n }\n ]\n\n # Load menu options.\n questions[0][\"choices\"].append(Separator(\"{:=^40}\".format(\"OPTIONS\")))\n for job in suggestions:\n questions[0][\"choices\"].append(\n {\"name\": \"{:<32}: {:>32}\".format(job[\"title\"], job[\"guid\"])}\n )\n questions[0][\"choices\"].append({\"name\": \"None\"})\n\n answers = prompt(questions, style=style)\n if \"None\" in answers[\"posts\"]:\n return None\n return answers\n","repo_name":"jtroussard/linkedin_assist","sub_path":"linkedin_assist/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33256694371","text":"# Hayden Feddock\r\n# 3/30/2023\r\n\r\nimport numpy as np\r\nimport sigmoid\r\n\r\n# Function that performs feed-forward propagation algorithm\r\ndef predict(Theta1, Theta2, X):\r\n \r\n # Create the bias term a^1_0\r\n a1_0 = np.ones([X.shape[0], 1])\r\n \r\n # Add the bias term to the inputs a^1\r\n a1 = np.hstack([a1_0, X])\r\n \r\n # Create the hidden layer a^2 by computing the sigmoid function of the dot product of theta1 and a^1\r\n a2 = sigmoid.sigmoid(a1 @ Theta1.T)\r\n \r\n # Create the bias term a^2_0\r\n a2_0 = np.ones([a1.shape[0], 1])\r\n \r\n # Add the bias term to the inputs a^2\r\n a2 = np.hstack([a2_0, a2])\r\n \r\n # Create the output layer h_x by computing the sigmoid function of the dot product of theta2 and a^2\r\n h_x = sigmoid.sigmoid(a2 @ Theta2.T)\r\n \r\n # Predict the label for each class as the output with the highest probability (add 1 for proper class)\r\n p = np.argmax(h_x, axis=1) + 1\r\n \r\n # Return the predicted class label and the array of output probabilities\r\n return [p, h_x]","repo_name":"Feddockh/Learning_ML","sub_path":"HW_7/ps7_python_Feddock_Hayden/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21007486226","text":"\"\"\"\nsortedcontainers.sortedlist source code:\nhttps://grantjenks.com/docs/sortedcontainers/sortedlist.html\n\"\"\"\nimport sortedcontainers.sortedlist as sortedlist\nimport time\nimport random\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef measure_times(datastructure, dataset):\n \"\"\"\n Measures the run time of a function for different sizes of input and stores the results in a dictionary.\n :param datastructure: Data structure to measure\n :param dataset: Dictionary mapping size of input to list of run times \n \"\"\"\n \n start = 0\n end = 0\n random.seed(0)\n n = 10\n while end - start < 3:\n # Increase size of input\n n = n*10\n # Generate random array of size n\n arr = [random.randint(0, n) for _ in range(n)]\n \n if dataset.get(n) == None:\n dataset[n] = []\n # Measure run time if data structure is a multiset\n if type(datastructure) == sortedlist.SortedList:\n start = time.time()\n for i in arr:\n datastructure.add(i)\n end = time.time()\n # Store run time\n dataset[n].append((end - start)/n)\n\n # Measure run time if data structure is a vector\n else:\n start = time.time()\n for i in arr:\n # Run binary search to find index to insert element\n left = 0\n right = len(datastructure) - 1\n mid = 0\n while left <= right:\n mid = (left + right) // 2\n if datastructure[mid] < i:\n left = mid + 1\n elif datastructure[mid] > i:\n right = mid - 1\n else:\n break\n # Insert element\n datastructure.insert(mid, i)\n end = time.time()\n # Store run time\n dataset[n].append((end - start)/n)\n\n\n print(type(datastructure),\" n: \",n)\n \n\nif __name__ == \"__main__\":\n # Data structures\n multiset = sortedlist.SortedList()\n vector = []\n\n # Dictionary mapping size of input to list of run times\n vector_times = {}\n multiset_times = {}\n\n for i in range(10):\n # Measure run times\n measure_times(vector, vector_times)\n measure_times(multiset, multiset_times)\n\n # Reset data structures\n vector = []\n multiset = sortedlist.SortedList()\n\n # Plot results\n df = pd.DataFrame.from_dict(vector_times, orient='index')\n df = df.transpose()\n df = df.melt(var_name='n', value_name='time')\n df['n'] = df['n'].astype(int)\n df['time'] = df['time'].astype(float)\n sns.lineplot(x='n', y='time', data=df)\n plt.title('Vector with Binary Search Insertion Time')\n plt.ylabel('Time (s)')\n plt.savefig('vector_insertion.png')\n plt.clf()\n\n df = pd.DataFrame.from_dict(multiset_times, orient='index')\n df = df.transpose()\n df = df.melt(var_name='n', value_name='time')\n df['n'] = df['n'].astype(int)\n df['time'] = df['time'].astype(float)\n sns.lineplot(x='n', y='time', data=df)\n plt.title('Binary Search Tree Insertion Time ')\n plt.savefig('bst_insertion.png')\n plt.clf()\n","repo_name":"sidb70/Algorithm-Engineering-Course-Project","sub_path":"Vector vs BST/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27219651253","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import regularizers\n\nclass myModel(tf.keras.Model):\n def __init__(self, hparams):\n super(myModel, self).__init__()\n self.hparams = hparams\n\n # Define layers here\n self.Message = tf.keras.models.Sequential()\n self.Message.add(keras.layers.Dense(self.hparams['link_state_dim'],\n activation=tf.nn.selu, name=\"FirstLayer\"))\n\n self.Update = tf.keras.layers.GRUCell(self.hparams['link_state_dim'], dtype=tf.float32)\n\n self.Readout = tf.keras.models.Sequential()\n self.Readout.add(keras.layers.Dense(self.hparams['readout_units'],\n activation=tf.nn.selu,\n kernel_regularizer=regularizers.l2(hparams['l2']),\n name=\"Readout1\"))\n self.Readout.add(keras.layers.Dense(self.hparams['readout_units'],\n activation=tf.nn.selu,\n kernel_regularizer=regularizers.l2(hparams['l2']),\n name=\"Readout2\"))\n self.Readout.add(keras.layers.Dense(1, kernel_regularizer=regularizers.l2(hparams['l2']),\n name=\"Readout3\"))\n\n def build(self, input_shape=None):\n # Create the weights of the layer\n self.Message.build(input_shape=tf.TensorShape([None, self.hparams['link_state_dim']*2]))\n self.Update.build(input_shape=tf.TensorShape([None,self.hparams['link_state_dim']]))\n self.Readout.build(input_shape=[None, self.hparams['link_state_dim']])\n self.built = True\n\n #@tf.function\n def call(self, link_state, first_critic, second_critic, num_edges_critic, training=False):\n\n # Execute T times\n for _ in range(self.hparams['T']):\n # We have the combination of the hidden states of the main nodes with the neighbours\n mainNodes = tf.gather(link_state, first_critic)\n neighNodes = tf.gather(link_state, second_critic)\n\n nodesConcat = tf.concat([mainNodes, neighNodes], axis=1)\n\n ### 1.a Message passing for node link with all it's neighbours\n outputs = self.Message(nodesConcat)\n\n ### 1.b Sum of output values according to link id index\n edges_inputs = tf.math.unsorted_segment_sum(data=outputs, segment_ids=second_critic, num_segments=num_edges_critic)\n\n ### 2. Update for each link\n # GRUcell needs a 3D tensor as state because there is a matmul: Wrap the link state\n outputs, links_state_list = self.Update(edges_inputs, [link_state])\n\n link_state = links_state_list[0]\n\n # Perform sum of all hidden states\n edges_combi_outputs = tf.math.reduce_sum(links_state_list, axis=1)\n\n r = self.Readout(edges_combi_outputs, training=training)\n return r","repo_name":"paulalmasan/DRL-GNN-PPO","sub_path":"PPO/criticPPO.py","file_name":"criticPPO.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"}
+{"seq_id":"30115683653","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('crm', '0001_initial'),\n ('dm', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('code', models.CharField(max_length=128, verbose_name='code')),\n ('description', models.CharField(max_length=255, verbose_name='description')),\n ('measure_unit', models.CharField(max_length=10, null=True, verbose_name='measure unit', blank=True)),\n ('packaging', models.PositiveIntegerField(default=1, verbose_name='standard packaging')),\n ('enabled', models.BooleanField(default=True, verbose_name='enabled')),\n ('control_stock', models.BooleanField(default=False, verbose_name='control stock')),\n ('stock', models.PositiveIntegerField(default=0, verbose_name='stock')),\n ('stock_alert', models.PositiveIntegerField(default=0, verbose_name='stock alert')),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'article',\n 'verbose_name_plural': 'articles',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Brand',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50, verbose_name='name')),\n ],\n options={\n 'ordering': ['name'],\n 'verbose_name': 'brand',\n 'verbose_name_plural': 'brands',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50, verbose_name='name')),\n ('lft', models.PositiveIntegerField(editable=False, db_index=True)),\n ('rght', models.PositiveIntegerField(editable=False, db_index=True)),\n ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),\n ('level', models.PositiveIntegerField(editable=False, db_index=True)),\n ('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='wm.Group', null=True)),\n ],\n options={\n 'verbose_name': 'group',\n 'verbose_name_plural': 'groups',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SupplierCode',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('code', models.CharField(max_length=50, verbose_name='code')),\n ('article', models.ForeignKey(verbose_name='article', to='wm.Article')),\n ('company', models.ForeignKey(verbose_name='company', to='crm.Company')),\n ],\n options={\n 'verbose_name': 'supplier code',\n 'verbose_name_plural': 'supplier codes',\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='suppliercode',\n unique_together=set([('article', 'company')]),\n ),\n migrations.AddField(\n model_name='article',\n name='brand',\n field=models.ForeignKey(verbose_name='brand', blank=True, to='wm.Brand', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='documents',\n field=models.ManyToManyField(related_name='articles', null=True, verbose_name='documents', to='dm.Document', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='group',\n field=models.ForeignKey(verbose_name='group', to='wm.Group'),\n preserve_default=True,\n ),\n 
migrations.AlterUniqueTogether(\n name='article',\n unique_together=set([('code', 'brand')]),\n ),\n ]\n","repo_name":"jantoniomartin/django-machinery","sub_path":"wm/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4477452299","text":"# Given an array of integers, find the length of the longest sub-array with a sum that equals 0.\n\n# Examples: \n\n# Input: arr[] = {15, -2, 2, -8, 1, 7, 10, 23};\n# Output: 5\n# Explanation: The longest sub-array with \n# elements summing up-to 0 is {-2, 2, -8, 1, 7}\n\n# BruteForce:- This involves the use of brute force where two nested loops are used. \n# The outer loop is used to fix the starting position of the sub-array, and the inner loop is used for\n# the ending position of the sub-array and if the sum of elements is equal to zero, then increase the count\n\n# Efficient Approach: The brute force solution is calculating the sum of each and every sub-array and checking whether \n# the sum is zero or not. Let’s now try to improve the time complexity by taking an extra space of ‘n’ length. \n# The new array will store the sum of all the elements up to that index. The sum-index pair will be stored in a hash-map. \n# A Hash map allows insertion and deletion of key-value pair in constant time. Therefore, the time complexity remains unaffected.\n# So, if the same value appears twice in the array, it will be guaranteed that the particular array will be a zero-sum sub-array. \n\ndef findLongestSubarrayZero(nums):\n ans=0\n hmp={}\n s=0\n for i in range(len(nums)):\n s+=nums[i]\n if s== 0:\n ans=i+1\n else:\n if s not in hmp:\n hmp[s]=i\n else:\n ans=max(ans,i-hmp[s])\n return ans\n\nnums=[15, -2, 2, -8, 1, 7, 10, 23]\nprint(findLongestSubarrayZero(nums))\n \n \n ","repo_name":"thekuldeep07/SDE-SHEET","sub_path":"longestSubarraywithSum Zero.py","file_name":"longestSubarraywithSum Zero.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41789718690","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('freelance_utils', '0003_auto_20150805_1907'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='services',\n field=models.ManyToManyField(to='freelance_utils.Service'),\n ),\n ]\n","repo_name":"PyUnchained/freelance_utils","sub_path":"migrations/0004_auto_20150805_1907.py","file_name":"0004_auto_20150805_1907.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"38581759329","text":"from django.shortcuts import render\nfrom .exchanger import Exchanger\nfrom .forms import ExchangeForm, HistoryForm\nfrom .currencies import Currencies\nfrom django.http import HttpResponseRedirect\nimport arrow\n\n\nexch = Exchanger()\ncurrencies = Currencies()\n\ntry: \n exch.currency_live\nexcept AttributeError:\n exch.get_live_currency()\n\ndef index(request):\n return HttpResponseRedirect('/exchange/?from=eur&to=kzt&amount=1')\n\ndef exchanger_form(request):\n if request.method == 'POST':\n form = ExchangeForm(request.POST)\n if form.is_valid():\n from_curr = form.cleaned_data[\"from_currency\"]\n to_curr = form.cleaned_data[\"to_currency\"]\n amt = form.cleaned_data[\"amount\"]\n return HttpResponseRedirect('/exchange/?from={}&to={}&amount={}'.format(from_curr, to_curr, amt))\n\n else:\n from_curr, to_curr, amt = validate_url_params(request)\n form = ExchangeForm(initial={\n 'from_currency': from_curr,\n 'to_currency': to_curr,\n 'amount': amt\n })\n \n result = exch.exchange(from_curr, to_curr, float(amt))\n\n return render(request, 'exchangeapp/exchanger_form.html', {\n 'form': form, \n 'exchange_result': round(result, 3), \n 'from': from_curr.upper(),\n 'to': to_curr.upper(),\n 'amt': amt\n })\n\ndef history(request):\n try:\n exch.two_week_history\n except AttributeError:\n exch.get_two_week_history()\n\n if request.method == 'POST':\n form = HistoryForm(request.POST)\n if form.is_valid():\n from_curr = form.cleaned_data[\"from_currency\"]\n to_curr = form.cleaned_data[\"to_currency\"]\n return HttpResponseRedirect('/history/?from={}&to={}'.format(from_curr, to_curr))\n\n else:\n from_curr, to_curr, _ = validate_url_params(request)\n form = HistoryForm(initial={\n 'from_currency': from_curr,\n 'to_currency': to_curr\n })\n changes, current = cross_rate_changes(from_curr, to_curr)\n\n return render(request, 'exchangeapp/history.html', {\n 'form': form, \n 'changes': changes, \n 'current': round(current, 5)\n })\n\ndef cross_rate_changes(from_curr, to_curr):\n changes = {}\n for i in range(1, 15):\n arrow_obj = arrow.now().shift(days=-i)\n rate = exch.exchange_past(from_curr, to_curr, 1, arrow_obj.format('MMM DD, YYYY'))\n changes[arrow_obj.format('MMM DD, YYYY')] = round(rate, 5)\n return [changes, exch.exchange(from_curr, to_curr, 1)]\n\ndef validate_url_params(request):\n from_curr = request.GET.get('from', 'eur')\n from_curr = from_curr if from_curr.lower() in currencies.as_list() else 'eur'\n to_curr = request.GET.get('to', 'kzt')\n to_curr = to_curr if to_curr.lower() in currencies.as_list() else 'kzt'\n amt = request.GET.get('amount', 1)\n try:\n amt = abs(float(amt))\n except ValueError:\n amt = 1\n return [from_curr, to_curr, amt]","repo_name":"andrijasinski/exchanger","sub_path":"exchangeapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30947100860","text":"import requests\nimport json\nimport time\n\n# api = \"http://103.247.219.34/api/v2\"\napi = \"http://172.10.0.52:8000/api/v2\"\n\ndef get_identity(uid):\n try:\n r = requests.get(api+\"/user/\"+uid)\n y = json.loads(r.content)\n data = {\"status\":y[\"status\"],\"name\":y[\"name\"]}\n x = json.dumps(data)\n return x\n except Exception as e:\n print(e)\n\ndef post_attendance(json,image):\n try:\n r = requests.post(api+\"/attendance/\",files=image,data=json,headers={\"Accept\":\"application/json\"})\n print(r.content)\n except Exception as e:\n print(e)\n\ndef post_register(image,json):\n try:\n r = requests.post(api+\"/register/verify\",files=image,data=json,headers={\"Accept\":\"application/json\"})\n print(r.content)\n except Exception as e:\n print(e)\ndef track(jsons):\n try:\n time.sleep(3)\n r = requests.post(api+\"/detect/\",data=jsons,headers={\"Accept\":\"application/json\"})\n print(r.code)\n # y = json.loads(r.content)\n # if y[\"status\"] == \"true\": \n # print(\"Deteksi Berhasil\")\n except Exception as e:\n print(e)","repo_name":"AthanatiusC/V-CORE","sub_path":"Face/LBPH/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"45178508206","text":"#!../venv/bin/python\nimport unittest\n\nfrom flask import Flask\nfrom flask.ext.testing import TestCase\n\nimport fixtures\nimport models\n\nclass ShiftPersonTestCase(TestCase):\n database_uri = \"sqlite:///shiftperson_unittest.db\"\n app = Flask(__name__)\n app.config['SQLALCHEMY_DATABASE_URI'] = database_uri\n\n\n def create_app(self):\n app = Flask(__name__)\n app.config['SQLALCHEMY_DATABASE_URI'] = self.database_uri\n return app\n\n @classmethod\n def setUpClass(self):\n models.create_tables(self.app)\n fixtures.install(self.app, *fixtures.shift_test_data)\n self.db = models.init_app(self.app)\n\n @classmethod\n def tearDownClass(self):\n self.db.session.remove()\n self.db.drop_all()\n\n def resetDB(self):\n self.db.session.remove()\n self.db.drop_all()\n models.create_tables(self.app)\n fixtures.install(self.app, *fixtures.shift_test_data)\n self.db = models.init_app(self.app)\n\n\n \"\"\" Test that shift_person relationships are defined and the model represents them correctly. \"\"\"\n def test_shiftperson_model(self):\n current = models.ShiftPerson.query.filter_by(pk=1).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 1)\n self.assertEqual(current.personFK, 3)\n \n current = models.ShiftPerson.query.filter_by(pk=2).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 1)\n self.assertEqual(current.personFK, 4)\n \n current = models.ShiftPerson.query.filter_by(pk=3).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 2)\n self.assertEqual(current.personFK, 4)\n \n current = models.ShiftPerson.query.filter_by(pk=4).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 2)\n self.assertEqual(current.personFK, 5)\n \n current = models.ShiftPerson.query.filter_by(pk=5).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 3)\n self.assertEqual(current.personFK, 3)\n \n current = models.ShiftPerson.query.filter_by(pk=6).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 3)\n self.assertEqual(current.personFK, 5)\n \n current = models.ShiftPerson.query.filter_by(pk=7).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 4)\n self.assertEqual(current.personFK, 3)\n \n current = models.ShiftPerson.query.filter_by(pk=8).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 4)\n self.assertEqual(current.personFK, 4)\n \n current = models.ShiftPerson.query.filter_by(pk=9).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 4)\n self.assertEqual(current.personFK, 5)\n\n\n \"\"\" Test that we can retieve the shift from the shift-person assignment. 
\"\"\"\n def test_shiftperson_shift_relationship(self):\n # Define prerequisite data.\n key = 8\n personKey = 4\n # Retrieve the target object directly.\n direct = models.Person.query.filter_by(entityFK=personKey).first()\n self.assertIsNotNone(direct)\n self.assertEqual(direct.entityFK, personKey)\n # Retrieve the containing object.\n host = models.ShiftPerson.query.filter_by(pk=key).first()\n self.assertIsNotNone(host)\n self.assertEqual(host.personFK, direct.entityFK)\n # Retrieve the target object through the containing object.\n target = host.person\n self.assertIsNotNone(target)\n self.assertEqual(direct.__repr__(), target.__repr__())\n\n\n \"\"\" Test adding a Shift-Person assignment to the database \"\"\"\n def test_shiftperson_add(self):\n # Verify state of related tables before operation.\n shiftPersonCount = models.ShiftPerson.query.count()\n shiftCount = models.Shift.query.count()\n personCount = models.Person.query.count()\n \n # Define prerequisite data.\n shiftKey=1\n personKey=5\n target = models.ShiftPerson(shiftFK=shiftKey, personFK=personKey)\n\n # Verify that the data does not already exist.\n fetched = models.ShiftPerson.query.filter_by(shiftFK=shiftKey, personFK=personKey).first()\n self.assertIsNone(fetched)\n \n # Perform the operation.\n self.db.session.add(target)\n self.db.session.commit()\n\n # Verify that the data was added, and only added once.\n fetchedList = models.ShiftPerson.query.filter_by(shiftFK=shiftKey, personFK=personKey)\n self.assertIsNotNone(fetchedList)\n count = 0\n for item in fetchedList:\n self.assertEqual(item.shiftFK, shiftKey)\n self.assertEqual(item.personFK, personKey)\n count += 1\n self.assertEqual(count, 1)\n \n # Verify state of related tables before operation.\n shiftPersonCountAfter = models.ShiftPerson.query.count()\n shiftCountAfter = models.Shift.query.count()\n personCountAfter = models.Person.query.count()\n self.assertTrue(shiftPersonCountAfter == shiftPersonCount + 1) \n self.assertTrue(shiftCountAfter == shiftCount)\n self.assertTrue(personCountAfter == personCount)\n\n\n \"\"\" Test deleting a shift-person assignment. 
\"\"\"\n def test_shiftperson_delete(self):\n # Verify state of related tables before operation.\n shiftPersonCount = models.ShiftPerson.query.count()\n shiftCount = models.Shift.query.count()\n personCount = models.Person.query.count()\n \n # Define required test data.\n key = 9\n\n # Verify that prerequisite data exists.\n target = models.ShiftPerson.query.filter_by(pk=key).first()\n self.assertIsNotNone(target)\n\n # Perform the operation.\n self.db.session.delete(target)\n self.db.session.commit()\n\n # Verify that the record has been removed.\n target = models.ShiftPerson.query.filter_by(pk=key).first()\n self.assertIsNone(target)\n \n # Verify state of related tables before operation.\n shiftPersonCountAfter = models.ShiftPerson.query.count()\n shiftCountAfter = models.Shift.query.count()\n personCountAfter = models.Person.query.count()\n self.assertTrue(shiftPersonCountAfter == shiftPersonCount - 1) \n self.assertTrue(shiftCountAfter == shiftCount)\n self.assertTrue(personCountAfter == personCount)\n\n\ndef suite():\n # Define the container for this module's tests.\n suite = unittest.TestSuite()\n\n # Add tests to suite.\n suite.addTest(ShiftPersonTestCase('test_shiftperson_model'))\n suite.addTest(ShiftPersonTestCase('test_shiftperson_shift_relationship'))\n suite.addTest(ShiftPersonTestCase('test_shiftperson_add'))\n suite.addTest(ShiftPersonTestCase('test_shiftperson_delete'))\n \n return suite\n \n\nif __name__ == \"__main__\":\n unittest.TextTestRunner().run(suite())\n","repo_name":"umworkma/Comp4350","sub_path":"ESA/unit_tests_shiftperson.py","file_name":"unit_tests_shiftperson.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30242700069","text":"#Program to illustrate simple for loop\r\nnumbers = [1, 10, 20, 30, 40, 50]\r\nsum = 0\r\n# Find sum of all the numbers using for loop \r\n\r\nfor i in range(0,len(numbers)):\r\n\tsum=sum+numbers[i]\r\nprint (\"The sum of numbers is\", sum ) # print sum here\r\n\r\n\r\ncolors = ['red', 'orange', 'green', 'yellow', 'white', 'violet']\r\nfor j in (colors):\r\n\tprint(j)\r\n# Similarly ierate over the given colors and print the colors\r\n","repo_name":"garladinne/python_codetantra","sub_path":"forloop1_list.py","file_name":"forloop1_list.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11656914494","text":"\"\"\"\nurl : \nGiven two numbers 'N' and 'S' , find the largest number that can be formed with 'N' digits and whose sum of digits should be equals to 'S'.\n\nInput\n\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. The first line of each test case contains\ntwo space separated integers N and S, where N is the number of digits and S is the sum.\n\nOutput\n\nPrint the largest number that is possible.\nIf their is no such number, then print -1\n\nConstraints:\n\n1 <= T <= 30\n1 <= N <= 50\n0 <= S <= 500\n\nExample\n\nInput\n2\n2 9\n3 20\n\nOutput\n\n90\n992\nExpected Time Complexity: O(n)\n\n\"\"\"\n\ndef largest_number(n,s):\n if s==0:\n return -1\n digit = 9\n number = \"\"\n while(s>=0 and digit >=0 and len(number)= 0:\n s = s - digit\n number = number + str(digit)\n else:\n digit = digit - 1\n if s>0:\n return -1\n else:\n return number\n \n\ndef main():\n t = int(input().strip())\n for i in range(0,t):\n numbers = input().strip().split(\" \")\n n = int(numbers[0])\n k = int(numbers[1])\n print(largest_number(n,k))\n\nif __name__ == '__main__':\n main()","repo_name":"amitkmr/coding-questions","sub_path":"Greedy/largest_number_possible.py","file_name":"largest_number_possible.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
+{"seq_id":"38786234301","text":"counter = 100 #İnteger değer\nmiles = 1000.0 #Float Değer \nname = \"John\" #String Değer\n\n#İnt değer. Tam sayı değerleri tutar. \"\" içine yazılan her şey string ifadedir. Pythonda değişkenlerin tipi belirtilmez. \nx = 10\ny = 20\nz = \"Yaren\" #String değer\ntcNo = \"45485418465146\" #Tc no genelde string olarak tutulur. Çünkü toplamıyoruz. Telefon numaralarıda öyledir. Çarpmayız,toplamayız o yüzden çift tırnak içinde tutulur.Eğer ihtiyacın olursa tip dönüşümü yapabilirsin.\n\nfloatNo = 4595.45644 #Float ondalıklı sayılardır.\n\nprint(x + 10) #Print ile ekrana yazdırırız.\nprint(\"Hello\",z)\nprint(floatNo)\nprint(type(floatNo)) #Type ile veri tipini görebiliriz. İnt mi,float mı,string mi gibi.\n\n\n\n","repo_name":"yarenahlatci/PythonNotebook","sub_path":"PYTHON-NOTEBOOK/BÖLÜM1/DEĞİŞKENLER.py","file_name":"DEĞİŞKENLER.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1509319748","text":"'''16. Escribir un programa que pida al usuario un número entero y muestre por pantalla si es\npar o impar'''\n\nentero = int(input(\"Por favor ingrese un número: \\n\")) \n\n#funcion que define si es par o impar\ndef ParImpar():\n num = entero%2\n if num == 0:\n print(\"El numero \", entero, \"es par\" )\n else:\n print(\"El numero \", entero, \"es impar\" )\n\n#llamado a la función\nParImpar()","repo_name":"angelagn/ApuntesPython","sub_path":"e005_pdf5/e016_ParImpar.py","file_name":"e016_ParImpar.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10983389457","text":"import cv2 as cv\nimport numpy as np\n\nim= cv.imread('mor_teams5.jpg')\n\n\ngray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)\ncv.imshow('Gray Image', gray)\n\nhaar_cascade_modele = cv.CascadeClassifier('haar_face.xml') #for run this app you should to download this file ('haar_face.xml')\n\nfaces_r = haar_cascade_modele.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=11)\n\nprint(f'Number of faces found = {len(faces_r)}')\n\nfor (x,y,w,h) in faces_r:\n cv.rectangle(im, (x,y), (x+w,y+h), (0,255,0), thickness=2)\n\ncv.imshow('Detected ', im)\n\n\n\ncv.waitKey(0)\n","repo_name":"kalil75/python-project-ML-haar-cascade-detection","sub_path":"haar.py","file_name":"haar.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"25847441759","text":"import time as tm\nimport random\ndictionary = []\ndef poems():\n global dictionary\n stressed_letters = ['А', 'Я', 'О', 'Ё', 'У', 'Ю', 'Э', 'Е', 'И', 'Ы']\n vowel_letters = ['а', 'я', 'о', 'ё', 'у', 'ю', 'э', 'е', 'и', 'ы']\n\n #message = 'грЕчка скОро бУдет по сОрок'\n with open('message.txt', 'r', encoding='utf-8') as mes_file:\n message = mes_file.readline()\n print(message)\n\n final_dict = {'word': '', 'letters': ''}\n\n # Получение unix-времени и времени в формате гггг-мм-дд чч:мм:сс\n unix_time = int(tm.time())\n # print(unix_time)\n str_date = tm.strftime('%Y-%m-%d %H:%M:%S', tm.localtime(unix_time))\n # print(type(unix_time))\n # print(str_date)\n # Получение времени в формате гггг-мм-дд чч:мм:сс из unix\n t = tm.strptime(str_date, '%Y-%m-%d %H:%M:%S')\n # print(int(tm.mktime(t)))\n key_list = []\n key = \"\".join(str(unix_time))\n\n # print('key=', key)\n\n def dictionary():\n data = []\n\n with open('dataset.txt', 'r', encoding='utf-8') as dict:\n for row in dict:\n data.append(row)\n\n # print(data)\n dictionary = []\n for i in data:\n current_word = ''\n for j in i:\n if j != '\\n':\n current_word += j\n else:\n break\n dictionary.append(current_word)\n\n for i in dictionary:\n dictionary.remove('')\n\n return dictionary\n\n dictionary = dictionary()\n\n # print(dictionary)\n\n def get_stressed_syllable(word):\n # получаем номер уданого слога\n # print(word)\n count = 0\n stressed_letter_num = 0\n # print(count)\n for i in word:\n if i in vowel_letters:\n count += 1\n # print(count, i)\n if i in stressed_letters:\n count += 1\n stressed_letter_num = count\n # print(\"stressed_letter\")\n final_dict.update({'word': word, 'stressed': [count, stressed_letter_num]})\n return count, stressed_letter_num\n\n def message_moving(message, key):\n message = message.split()\n print(message)\n empty_word = '*****'\n poem_first_stage = [i for i in range(34)]\n # print(len(poem_first_stage))\n current_number = 0\n summ_num = current_number\n for i in range(len(message)):\n current_number = int(key[i])\n # print(current_number)\n summ_num += current_number\n # print(summ_num)\n poem_first_stage[summ_num] = message[i]\n poem_first_stage.pop(0)\n for i in poem_first_stage:\n if i not in message:\n poem_first_stage[poem_first_stage.index(i)] = empty_word\n return poem_first_stage\n\n def delete_words_from_dictionary():\n #global dictionary\n for i in dictionary:\n if get_stressed_syllable(i)[0] > 2:\n dictionary.remove(i)\n\n def poem_generate(poem):\n\n ending = ['на', 'го', 'ия']\n variant = 0 # Для случайного выбора одного из вариантов окончания слов в коцне строки для рифмы\n #global dictionary\n not_this_words = [] # чтобы слова не повторялись, будем добавлять их сюда\n for j in dictionary:\n if len(j) > 1:\n if (j[-2] + j[-1] == ending[variant]):\n word = j + '8'\n not_this_words.append(j)\n # print(not_this_words)\n local_stessed = 0\n\n if get_stressed_syllable(poem[0])[1] == 1:\n ending_str = [3, 7, 11, 15, 19, 23, 27]\n global_stressed = [3, 5, 7, 9, 11] # номера ударных слогов\n current_stressed = 0 # Текущая сумма слогов в ударении\n for i in range(len(poem)):\n\n if poem[i] == '*****':\n stress = get_stressed_syllable(poem[i])[0]\n current_stressed += stress\n flag = False\n\n while flag == False:\n\n # print(i, end = '')\n if i in ending_str:\n word = not_this_words[random.randint(0, len(not_this_words) - 1)]\n else:\n\n word = dictionary[random.randint(0, len(dictionary) - 1)]\n if get_stressed_syllable(word)[1] == 2 and word not in poem:\n flag = 
True\n # print(flag)\n poem[i] = word\n\n print(not_this_words)\n\n # return poem2\n\n # 1 7 9 12 17\n\n '''\n Описание алгоритма:\n 1. Сумма слогов первой строки = 9\n 2. Сумма слогов второй строки = 8\n 3. Ударные слоги - чётные\n\n 4. Берём слово, определяем его номер.\n 5. Определяем номер этого слова в строке\n 6. Если номер слова в строке - пятый, то запоминаем последние две буквы \n 7. Заполняем все слова ДО рассматриваемого:\n 9. Нужно, чтобы на каждой строчке было 5 слов\n\n\n 5. повторяем так со всеми словами\n 6. \n 7. \n '''\n\n form = \"\"\"_*_*_*_*_\n _*_*_*_*\"\"\"\n\n def print_poem(poem):\n data = ''\n try:\n num = 0\n for i in range(7):\n for j in range(4):\n print(poem[num], end=' ')\n data += poem[num]\n data += ' '\n num += 1\n print()\n data += '\\n'\n\n except IndexError:\n pass\n return data\n def save_poem(poem):\n # Получение unix-времени и времени в формате гггг-мм-дд чч:мм:сс\n unix_time = int(tm.time())\n # print(unix_time)\n str_date = tm.strftime('%Y-%m-%d %H:%M:%S', tm.localtime(unix_time))\n # print(type(unix_time))\n with open('result.txt', 'w', encoding='utf-8') as poem_file:\n poem_file.writelines(poem)\n #poem_file.writelines(str_date)\n\n # print(poem1)\n delete_words_from_dictionary()\n poem1 = message_moving(message, key)\n\n poem_generate(poem1)\n\n\n save_poem(print_poem(poem1))\n\n\n\nif __name__ == '__main__':\n poems()","repo_name":"ilikecinepol/the_poem_encoder","sub_path":"poem.py","file_name":"poem.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18848470702","text":"#Write a program to use the loop to find the factorial of a given number.\n\nno = int(input(\"Enter Number : \"))\n\nf = 1\n\nfor i in range(no,0,-1):\n f *= i\nelse:\n print(\"Factorial of\",no,\"is : \",f)\n","repo_name":"KRUTIKHIRAPARA/Python","sub_path":"Python_Exercise-1/PY_E1_13.py","file_name":"PY_E1_13.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2593276772","text":"if __name__ == \"__main__\":\n with open('day08/input') as input_file:\n input = [\n line.strip().split(\" | \") for line in input_file.readlines()\n ]\n input_signals, output_signals = [], []\n for line in input:\n input_signals.append(\n [\"\".join(sorted(signal)) for signal in line[0].split()]\n )\n output_signals.append(\n [\"\".join(sorted(signal)) for signal in line[1].split()]\n )\n\n first_solution = 0\n for output_signal in output_signals:\n first_solution += len([\n output for output in output_signal\n if len(output) in [2, 3, 4, 7]\n ])\n\n second_solution = 0\n for i in range(len(input)):\n input_line = input_signals[i]\n output_line = output_signals[i]\n signal_patterns = {\n \"1\": next(signal for signal in input_line if len(signal) == 2),\n \"7\": next(signal for signal in input_line if len(signal) == 3),\n \"4\": next(signal for signal in input_line if len(signal) == 4),\n \"8\": next(signal for signal in input_line if len(signal) == 7)\n }\n five_length_signals = [\n signal for signal in input_line if len(signal) == 5\n ]\n six_length_signals = [\n signal for signal in input_line if len(signal) == 6\n ]\n\n for signal in six_length_signals:\n if all(\n char in signal\n for char in signal_patterns[\"4\"] + signal_patterns[\"7\"]\n ):\n signal_patterns[\"9\"] = signal\n elif all(char in signal for char in signal_patterns[\"7\"]):\n signal_patterns[\"0\"] = signal\n else:\n signal_patterns[\"6\"] = signal\n\n for signal in five_length_signals:\n missing_from_six = \"\".join(\n char for char in \"abcdefg\" if char not in signal_patterns[\"6\"]\n )\n if all(char in signal for char in signal_patterns[\"1\"]):\n signal_patterns[\"3\"] = signal\n elif missing_from_six in signal:\n signal_patterns[\"2\"] = signal\n else:\n signal_patterns[\"5\"] = signal\n value_for_signal = {\n signal: number for number, signal in signal_patterns.items()\n }\n second_solution += int(\n \"\".join([value_for_signal[output] for output in output_line])\n )\n\n print(\n f\"\"\"Day 8:\n first solution: {first_solution}\n second solution: {second_solution}\"\"\"\n )\n","repo_name":"DanielElisenberg/aoc2021","sub_path":"day08/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"43422729063","text":"from copy import copy\nfrom datetime import datetime\n\nfrom django.conf import settings\n\nimport requests\n\n\ndef get_idservice(test=False):\n if test or settings.DEBUG:\n kwargs = copy(settings.TEST_IDSERVICE) \n else:\n kwargs = copy(settings.IDSERVICE)\n return IDService(**kwargs)\n\n\ndef mintandbind(objtype, objurl='', description=''):\n idservice = get_idservice()\n data = idservice.mint(1)\n id = data['identifier']\n idservice.bind(id=id, objurl=objurl, objtype=objtype, desc=description)\n return id\n\n\nclass IDService():\n\n def __init__(self, requester, minter, url, port=80):\n self.minter = minter\n self.url = url if url.startswith('http') else 'http://%s' % url\n self.port = port\n if port != 80:\n self.baseurl = '%s:%s' % (url, port)\n else:\n self.baseurl = url\n\n def __str__(self):\n return '' % (self.minter, self.url)\n\n def mint(self, quantity=1):\n url = '%s/mint/%s/%s' % (self.baseurl, self.minter, quantity)\n response = requests.get(url)\n if response.status_code != 200:\n raise self.IDServiceError(response.text)\n data = response.json()\n if quantity==1: data = data[0]\n return data\n\n def bind(self, id, objurl, objtype='', desc=''):\n url = '%s/bind/%s' % (self.baseurl, id)\n params = {'object_url': objurl, 'object_type': objtype,\n 'description': desc}\n response = requests.get(url, params=params)\n if response.status_code != 200:\n raise self.IDServiceError(response.text)\n return response.json()[0]\n\n def lookup(self, id):\n url = '%s/lookup/%s' % (self.baseurl, id)\n response = requests.get(url)\n if response.status_code != 200:\n raise self.IDServiceError(response.text)\n return response.json()[0]\n\n class IDServiceError(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __repr__(self):\n return self.msg\n","repo_name":"gwu-libraries/inventory_old","sub_path":"inv/invapp/idservice.py","file_name":"idservice.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"13158364893","text":"# jsonData.py\n__version__ = \"v20200801\"\n# Built-In Libraries\nimport json\nimport os\nimport glob\nfrom datetime import datetime\n# Downloaded Libraries\nimport PyPDF2\n# Local Files\nimport files\nimport PostScript\nimport order as o\nimport log\n\n\ndef json_data(order):\n \"\"\"\n Generates the JSON/DICT for the current order.\n\n Parameters: \n order (object): The object containing all the information for the current order.\n\n Returns: \n json/dict: The JSON/DICT for the order.\n \"\"\"\n json_Data = {'Account ID': 'CHANGE ME'}\n json_Data[\"Order Number\"] = order.NUMBER\n json_Data[\"Order Subject\"] = order.SUBJECT\n FILES = files.file_list(order)\n # Imports the Email contents line by line.\n email = []\n with open(\"\".join([order.OD, '/', order.NAME, '/', order.NAME, \".txt\"]), \"r\") as f:\n for line in f.readlines():\n email.append(line.rstrip('\\n'))\n json_Data[\"Email ID\"] = email[0][2:]\n json_Data[\"Files\"] = {}\n # This gets the number of pages for every pdf file for the job.\n for i in range(len(FILES)):\n try:\n f = open('/'.join([order.OD, order.NAME, FILES[i]]), \"rb\")\n pdf = PyPDF2.PdfFileReader(f)\n json_Data[\"Files\"][\"\".join([\"File \", str(\n i+1)])] = {\"File Name\": FILES[i], \"Page Count\": str(pdf.getNumPages())}\n f.close()\n except:\n log.logger.exception(\"Using Alternative Page Count Source\")\n pdf = files.page_count(\n '/'.join([order.OD, order.NAME, FILES[i]]))\n json_Data[\"Files\"][\"\".join([\"File \", str(\n i+1)])] = {\"File Name\": FILES[i], \"Page Count\": str(pdf)}\n # Removes the duplicate portion of the email that contains html (form) code.\n for i in range(len(email)):\n if \"IF YOU HAVE ANY QUESTIONS\" in email[i]:\n email = email[8:-(len(email)-i)]\n break\n # Searchs for required elements from the form for the JSON file.\n for i in range(len(email)):\n test_string = \"Timestamp\"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Date Ordered\"] = line[1]\n test_string = \"*Timestamp: *\"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Date Ordered\"] = line[1]\n test_string = \"Email address \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Email\"] = line[1]\n test_string = \"Your Last Name \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Last Name\"] = line[1]\n test_string = \"Your First Name \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"First Name\"] = line[1]\n test_string = \"Your Call Back Number \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Phone Number\"] = line[1]\n test_string = \"Your building \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Building\"] = line[1]\n test_string = \"Number of Copies Needed per File \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Copies\"] = line[1]\n test_string = \"Printing Setup \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Duplex\"] = line[1]\n test_string = \"Collated or Uncollated \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Collation\"] = line[1]\n test_string = \"Paper Size, Type, and Color \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Paper\"] = line[1].replace(\"=E2=80=93 \", \"\")\n test_string = \"Stapling \"\n if test_string in email[i]:\n line = 
email[i].split(test_string)\n json_Data[\"Stapling\"] = line[1]\n test_string = \"Drilling - Three Hole Punch \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Drilling\"] = line[1]\n test_string = \"Folding \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Folding\"] = line[1]\n test_string = \"Cutting \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Cutting\"] = line[1]\n test_string = \"Slip Sheets and/or Shrink Wrap \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n extra = \"\"\n j = 1\n while(not (\"Special Instructions \" in email[i+j] or \"Deliver to: \" in email[i+j])):\n extra = \"\".join([\" \", extra, \" \", email[i+j]])\n j += 1\n json_Data[\"Slip Sheets / Shrink Wrap\"] = \"\".join(\n [line[1], extra])\n test_string = \"Special Instructions \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n extra = \"\"\n j = 1\n while(not(\"Deliver to: \" in email[i+j])):\n extra = \"\".join([\" \", extra, \" \", email[i+j]])\n j += 1\n json_Data[\"Special Instructions\"] = \"\".join([line[1], extra])\n test_string = \"Booklet Fold and Staple \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Booklets\"] = line[1]\n test_string = \"Front Cover \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Front Cover\"] = line[1].replace(\"=E2=80=93 \", \"\")\n test_string = \"Back Cover \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Back Cover\"] = line[1].replace(\"=E2=80=93 \", \"\")\n test_string = \"Deliver to: (Staff Member's Name) \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Deliver To Name\"] = line[1]\n test_string = \"Deliver To:\"\n if test_string in email[i]:\n line = email[i].split(test_string)\n line2 = \"\"\n if (\"@\" not in email[i+1]):\n if len(email[i+1]) == 5:\n line2 = \" \" + email[i+1]\n else:\n line2 = email[i+1]\n json_Data[\"Deliver To Address\"] = (line[1].replace(\n \"=\", \"\").strip() + line2).strip()\n json_Data[\"Status\"] = order.status = \"NotStarted\"\n json_Data[\"Cost\"] = order.COST = 0\n # Creates the JSON file\n with open(\"\".join([order.OD, '/', order.NAME, '/', order.NAME, '.json']), 'w') as outfile:\n json.dump(json_Data, outfile, indent=4, separators=(',', ': '))\n return json_Data\n\n\ndef orderStatusExport(order, STATUS, DATE):\n \"\"\"\n Exports the Status of the order with the date time.\n\n Puts in the JSON file wether the Ticker or the Order has been printed.\n\n Parameters: \n order (object): The object containing all the information for the current order.\n STATUS (str) : The status of the order.\n DATE (str) : The Date which it was modified.\n\n Returns: \n void: Unused Return\n \"\"\"\n JSON_PATH = \"\".join(\n [order.OD, '/', order.NAME, '/', order.NAME, '.json'])\n with open(JSON_PATH) as json_file:\n JOB_INFO = json.load(json_file)\n now = datetime.now()\n current_time = \"\"\n if(DATE):\n current_time = \"_\" + now.strftime(\"%Y%m%d:%H%M\")\n order.status = STATUS + current_time\n JOB_INFO[\"Status\"] = order.status\n with open(JSON_PATH, 'w') as outfile:\n json.dump(JOB_INFO, outfile, indent=4, separators=(',', ': '))\n\n\ndef main(OUTPUT_DIRECTORY):\n log.logInit(\"JSON\")\n print = log.Print\n input = log.Input\n Start = str(input(\"Start #: \"))\n End = str(input(\"End #: \"))\n folders = files.folder_list(OUTPUT_DIRECTORY)\n ORDER_NAMES = []\n for 
ORDER_NUMBER in range(int(Start), int(End)+1):\n ORDER_NUMBER = str(ORDER_NUMBER).zfill(5)\n for i in folders: # Searchs for Requested Order Number from list of currently downloaded orders\n if ORDER_NUMBER in i:\n ORDER_NAMES.append(i)\n for ORDER_NAME in ORDER_NAMES:\n print(ORDER_NAME)\n order = o.Order()\n order.NAME = ORDER_NAME\n order.NUMBER = ORDER_NAME[:10]\n order.SUBJECT = ORDER_NAME[11:]\n order.OD = OUTPUT_DIRECTORY\n json_data(order)\n\n\nif __name__ == \"__main__\":\n main(\"SO/\")\n","repo_name":"ArthurVardevanyan/CPD_SO_Automated_Printing","sub_path":"jsonData.py","file_name":"jsonData.py","file_ext":"py","file_size_in_byte":8678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
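Most of json_data's long if-chain does the same thing: find a form label, split on it, and store the remainder under a JSON key. A table-driven sketch for the single-line fields (the multi-line cases such as Special Instructions and Slip Sheets would still need their dedicated loops); the labels are the ones matched above, and extract_simple_fields is a hypothetical helper name:

SIMPLE_FIELDS = {
    "Email address ": "Email",
    "Your Last Name ": "Last Name",
    "Your First Name ": "First Name",
    "Your Call Back Number ": "Phone Number",
    "Your building ": "Building",
    "Number of Copies Needed per File ": "Copies",
    "Printing Setup ": "Duplex",
    "Collated or Uncollated ": "Collation",
    "Stapling ": "Stapling",
    "Folding ": "Folding",
    "Cutting ": "Cutting",
}

def extract_simple_fields(email, json_Data):
    # one pass over the email lines instead of one if-block per label
    for line in email:
        for label, key in SIMPLE_FIELDS.items():
            if label in line:
                json_Data[key] = line.split(label)[1]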
+{"seq_id":"15975615340","text":"# -*- coding: utf-8 -*-\nimport time,datetime, json, requests,pymysql\nimport pandas as pd\nimport traceback\nfrom selenium.webdriver import Chrome, ChromeOptions\nimport sys\n\n# ----------------数据库连接、关闭------------------------\n#连接数据库\ndef get_conn():\n #建立连接\n connect = pymysql.Connect(\n host='localhost',\n port=3306,\n user='root',\n passwd='123456',\n db='cov',\n charset='utf8'\n )\n #获取游标\n cursor = connect.cursor()\n return connect,cursor\n\n#关闭连接\ndef close_conn(connect,cursor):\n if connect:\n connect.close()\n if cursor:\n cursor.close()\n\n# ----------------爬取数据------------------------\n\n# 抓取腾讯疫情国内每日实时详细各省市和中国每日历史数据\ndef get_tencent_data():\n url1 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=&_=%d'%int(time.time()*1000)\n url2 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=&_=%d'%int(time.time()*1000)\n headers = {\n 'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n r1 = requests.get(url1, headers)\n r2 = requests.get(url2, headers)\n\n #json字符串转字典\n res1 = json.loads(r1.text)\n res2 = json.loads(r2.text)\n\n data_all1 = json.loads(res1[\"data\"])\n data_all2 = json.loads(res2[\"data\"])\n\n #当日详细数据\n details = []\n update_time = data_all1[\"lastUpdateTime\"]\n data_country = data_all1[\"areaTree\"]\n data_province = data_country[0][\"children\"]\n for pro_infos in data_province:\n province = pro_infos[\"name\"]\n for city_infos in pro_infos[\"children\"]:\n city = city_infos[\"name\"]\n confirm = city_infos[\"total\"][\"confirm\"]\n confirm_add = city_infos[\"today\"][\"confirm\"]\n nowConfirm = city_infos['total']['nowConfirm']\n suspect = city_infos[\"total\"][\"suspect\"]\n heal = city_infos[\"total\"][\"heal\"]\n dead = city_infos[\"total\"][\"dead\"]\n dead_rate = city_infos['total']['deadRate']\n heal_rate = city_infos['total']['healRate']\n details.append([update_time, province, city,nowConfirm, confirm, confirm_add, suspect,heal, dead,dead_rate,heal_rate])\n\n #历史数据\n history = {}\n for day_infos in data_all2[\"chinaDayList\"]:\n ds = day_infos[\"y\"]+\".\"+day_infos[\"date\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup) #改变时间输入格式,不然插入数据库会报错,数据库是datatime格式\n confirm = day_infos[\"confirm\"]\n suspect = day_infos[\"suspect\"]\n heal = day_infos[\"heal\"]\n dead = day_infos[\"dead\"]\n nowConfirm = day_infos[\"nowConfirm\"]\n nowSevere = day_infos[\"nowSevere\"]\n importedCase = day_infos[\"importedCase\"]\n noInfect = day_infos[\"noInfect\"]\n localConfirm = day_infos[\"localConfirm\"]\n dead_rate = day_infos[\"deadRate\"]\n heal_rate = day_infos[\"healRate\"]\n history[ds] = {\"confirm\":confirm, \"suspect\":suspect, \"heal\":heal, \"dead\":dead,\n \"importedCase\": importedCase, \"noInfect\": noInfect, \"localConfirm\":localConfirm, \"nowConfirm\":nowConfirm,\n \"nowSevere\":nowSevere, \"dead_rate\":dead_rate, \"heal_rate\":heal_rate}\n for day_infos in data_all2[\"chinaDayAddList\"]:\n ds = day_infos[\"y\"]+\".\"+day_infos[\"date\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup) #改变时间输入格式,不然插入数据库会报错,数据库是datatime格式\n confirm = day_infos[\"confirm\"]\n suspect = day_infos[\"suspect\"]\n heal = day_infos[\"heal\"]\n dead = day_infos[\"dead\"]\n importedCase = day_infos[\"importedCase\"]\n noInfect = day_infos[\"infect\"]\n dead_rate = day_infos[\"deadRate\"]\n heal_rate = day_infos[\"healRate\"]\n localConfirm = 
day_infos[\"localConfirmadd\"]\n history[ds].update({\"confirm_add\":confirm, \"suspect_add\":suspect, \"heal_add\":heal, \"dead_add\":dead,\n \"importedCase_add\": importedCase, \"noInfect_add\": noInfect, \"localConfirm_add\":localConfirm,\n \"dead_rate_add\":dead_rate, \"heal_rate_add\":heal_rate})\n return history,details\n\n# 抓取各省从2020到2021的每日历史数据(无市区)\ndef get_province_history_data():\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36\"\n }\n url = \"http://111.231.75.86:8000/api/provinces/CHN/daily/\"\n\n response = requests.get(url=url, headers=headers)\n res = json.loads(response.text)\n details = []\n for infos in res:\n ds = datetime.datetime.strptime(str(infos[\"dateId\"]), '%Y%m%d').strftime('%Y-%m-%d')\n province = infos[\"provinceName\"]\n province_code = infos[\"provinceCode\"]\n nowConfirm = infos[\"currentConfirmedCount\"]\n nowConfirm_add = infos[\"currentConfirmedIncr\"]\n confirm = infos[\"confirmedCount\"]\n confirm_add = infos[\"confirmedIncr\"]\n suspect = infos[\"suspectedCount\"]\n suspect_add = infos[\"suspectedCountIncr\"]\n heal = infos[\"curedCount\"]\n dead = infos[\"deadCount\"]\n heal_add = infos[\"curedIncr\"]\n dead_add = infos[\"deadIncr\"]\n nowSevere = infos[\"highDangerCount\"]\n nowMidSevere = infos[\"midDangerCount\"]\n details.append(\n [ds, province,province_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add, nowSevere, nowMidSevere])\n\n return details\n\n\n# 抓取本土风险划分数据\ndef get_localrisk_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=&_=%d' % int(time.time() * 1000)\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n r = requests.get(url, headers)\n res = json.loads(r.text)\n data_all = json.loads(res[\"data\"])\n locallist = []\n for local in data_all[\"statisGradeCityDetail\"]:\n ds = str(local[\"syear\"]) + \"/\" + local[\"date\"]\n tup = time.strptime(ds, \"%Y/%m/%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup) # 改变时间输入格式,不然插入数据库会报错,数据库是datatime格式\n province = local[\"province\"]\n city = local[\"city\"]\n nowConfirm = local[\"nowConfirm\"]\n confirm = local[\"confirm\"]\n confirm_add = local[\"confirmAdd\"]\n heal = local[\"heal\"]\n dead = local[\"dead\"]\n grade = local[\"grade\"]\n locallist.append([ds, province,city,nowConfirm,confirm,confirm_add,heal,dead,grade])\n return locallist\n\n#抓取全球各国以及美国各洲最新的数据\ndef get_global_country_latest_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_foreign&callback=&_=%d' % int(time.time() * 1000)\n url2 = \"https://api.inews.qq.com/newsqa/v1/automation/foreign/country/ranklist\"\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n\n s = requests.session()\n s.keep_alive = False\n requests.DEFAULT_RETRIES = 5\n\n # 各国各城市数据\n details = []\n america = []\n\n #获取美国数据\n r1 = requests.get(url, headers)\n res1 = json.loads(r1.text)\n data_all = json.loads(res1[\"data\"])\n\n # 获取全球数据\n r2 = requests.post(url=url2, headers=headers)\n res2 = json.loads(r2.text)\n # print(res[\"data\"])\n for infos in res2[\"data\"]:\n ds = infos[\"y\"] + \".\" + infos[\"date\"]\n country = infos[\"name\"]\n continent = infos[\"continent\"]\n nowConfirm = infos[\"nowConfirm\"]\n nowConfirm_add = 
infos[\"nowConfirmCompare\"]\n confirm = infos[\"confirm\"]\n confirm_add = infos[\"confirmCompare\"]\n suspect = infos[\"suspect\"]\n heal = infos[\"heal\"]\n dead = infos[\"dead\"]\n heal_add = infos[\"healCompare\"]\n dead_add = infos[\"deadCompare\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup)\n details.append([ds,country, continent, confirm, confirm_add,nowConfirm,nowConfirm_add, suspect,\n heal, heal_add, dead, dead_add])\n\n # 美国数据\n for infos in data_all[\"foreignList\"]:\n name = infos[\"name\"]\n continent = infos[\"continent\"]\n nowConfirm = infos[\"nowConfirm\"]\n confirm = infos[\"confirm\"]\n confirm_add = infos[\"confirmAdd\"]\n suspect = infos[\"suspect\"]\n heal = infos[\"heal\"]\n dead = infos[\"dead\"]\n confirm_cmp = infos[\"confirmCompare\"]\n nowConfirm_cmp = infos[\"nowConfirmCompare\"]\n heal_cmp = infos[\"healCompare\"]\n dead_cmp = infos[\"deadCompare\"]\n if (infos[\"name\"] == \"美国\"):\n ds = infos[\"y\"] +\".\" + infos[\"date\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup)\n for i in infos[\"children\"]:\n city = i[\"name\"]\n citymap = i[\"nameMap\"]\n cconfirm = i[\"confirm\"]\n cconfirm_add = i[\"confirmAdd\"]\n csuspect = i[\"suspect\"]\n cheal = i[\"heal\"]\n cdead = i[\"dead\"]\n america.append([ds, name, city, citymap, cconfirm, cconfirm_add, csuspect, cheal, cdead])\n break\n\n return america,details\n\n#抓取全球各国历史数据(文件)\ndef get_global_country_history_data():\n try:\n details = []\n with open('./static/json/world-country-history.json','rb') as f:\n jsonStr = json.load(f)\n for infos in jsonStr:\n ds = datetime.datetime.strptime(str(infos[\"dateId\"]), '%Y%m%d').strftime('%Y-%m-%d')\n country = infos[\"countryName\"]\n country_code = infos[\"countryCode\"]\n nowConfirm = infos[\"currentConfirmedCount\"]\n nowConfirm_add = infos[\"currentConfirmedIncr\"]\n confirm = infos[\"confirmedCount\"]\n confirm_add = infos[\"confirmedIncr\"]\n suspect = infos[\"suspectedCount\"]\n suspect_add = infos[\"suspectedCountIncr\"]\n heal = infos[\"curedCount\"]\n dead = infos[\"deadCount\"]\n heal_add = infos[\"curedIncr\"]\n dead_add = infos[\"deadIncr\"]\n details.append(\n [ds, country,country_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add,\n dead, dead_add])\n f.close()\n return details\n except Exception as e:\n print(e)\n\n# 获取美国各州历史数据\ndef get_america_state_history_data():\n try:\n details = []\n with open('./static/json/america-provinces-history.json','rb') as f:\n jsonStr = json.load(f)\n name = '美国'\n for infos in jsonStr:\n ds = datetime.datetime.strptime(str(infos[\"dateId\"]), '%Y%m%d').strftime('%Y-%m-%d')\n city = infos[\"provinceName\"]\n citymap = infos[\"provinceCode\"]\n confirm = infos[\"confirmedCount\"]\n confirm_add = infos[\"confirmedIncr\"]\n suspect = infos[\"suspectedCount\"]\n heal = infos[\"curedCount\"]\n dead = infos[\"deadCount\"]\n if(suspect == None): suspect = 0\n if(heal == None): heal = 0\n if(dead == None): dead = 0\n if(confirm == None): confirm =0\n if (confirm_add == None): confirm_add = 0\n details.append([ds, name, city, citymap, confirm, confirm_add, suspect, heal, dead])\n f.close()\n return details\n except Exception as e:\n print(e)\n\n# 获取美国历史总体数据\ndef get_america_history_data():\n try:\n details = []\n with open('./static/json/america-history.json','rb') as f:\n jsonStr = json.load(f)\n name = '美国'\n for infos in jsonStr:\n ds = datetime.datetime.strptime(str(infos[\"date\"]), 
'%Y%m%d').strftime('%Y-%m-%d')\n confirm = infos[\"positive\"] # 阳性累计\n confirm_add = infos[\"positiveIncrease\"] # 阳性新增\n suspect = 0\n heal = infos[\"recovered\"]\n dead = infos[\"death\"]\n dead_add = infos[\"deathIncrease\"]\n hospitalized = infos[\"hospitalized\"] #住院累计\n nowHospitalized = infos[\"hospitalizedCurrently\"] #当前住院 现有住院\n hospitalized_add = infos[\"hospitalizedIncrease\"] #住院新增\n nowInIcu = infos[\"inIcuCurrently\"] # 当前ICU\n inIcu = infos[\"inIcuCumulative\"] # 累计ICU\n negative = infos[\"negative\"] # 阴性检测累计\n negative_add = infos['negativeIncrease']\n onVentilator = infos['onVentilatorCumulative'] #使用呼吸机累计\n nowOnVentilator = infos['onVentilatorCurrently'] #当前使用呼吸机\n totalTestResults = infos['totalTestResults'] #累计监测\n totalTestResults_add = infos['totalTestResultsIncrease']\n if(onVentilator == None): onVentilator = 0\n if(nowOnVentilator == None): nowOnVentilator = 0\n if(nowHospitalized == None): nowHospitalized = 0\n if(hospitalized == None): hospitalized = 0\n if(hospitalized_add == None): hospitalized_add = 0\n if(heal == None): heal = 0\n if(dead == None): dead = 0\n if(confirm == None): confirm =0\n if (confirm_add == None): confirm_add = 0\n if (nowInIcu == None): nowInIcu = 0\n if (inIcu == None): inIcu = 0\n if (negative == None): negative = 0\n details.append([ds, confirm, confirm_add, suspect, heal, dead, dead_add, negative,negative_add,hospitalized, hospitalized_add, nowHospitalized,\n inIcu, nowInIcu, onVentilator,nowOnVentilator,totalTestResults,totalTestResults_add])\n f.close()\n return details\n except Exception as e:\n print(e)\n\n#抓取全球历史总体数据\ndef get_global_histroy_data():\n url = \"https://api.inews.qq.com/newsqa/v1/automation/modules/list?modules=FAutoGlobalStatis,FAutoGlobalDailyList\"\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36\"\n }\n response = requests.get(url=url, headers=headers)\n res = json.loads(response.text)\n data = res[\"data\"]\n\n global_day_list = data['FAutoGlobalDailyList']\n global_statis = data['FAutoGlobalStatis']\n global_history = {}\n for global_day in global_day_list:\n ds = global_day[\"y\"] + \".\" + global_day[\"date\"]\n confirm = global_day[\"all\"][\"confirm\"]\n dead = global_day[\"all\"][\"dead\"]\n heal = global_day[\"all\"][\"heal\"]\n confirm_add = global_day[\"all\"][\"newAddConfirm\"]\n dead_rate = global_day[\"all\"][\"deadRate\"]\n heal_rate = global_day[\"all\"][\"healRate\"]\n global_history[ds] = {\"confirm\": confirm, \"confirm_add\": confirm_add, \"dead\": dead, \"heal\": heal,\n \"dead_rate\": dead_rate, \"heal_rate\": heal_rate}\n\n ds = global_statis[\"lastUpdateTime\"]\n confirm = int(global_statis[\"confirm\"])\n dead = int(global_statis[\"dead\"])\n heal = int(global_statis[\"heal\"])\n dead_rate = round(dead / confirm, 4) if confirm > 0 else 0.00\n heal_rate = round(heal / confirm, 4) if confirm > 0 else 0.00\n global_history[ds] = {\"confirm\": confirm,\n \"confirm_add\": global_statis[\"nowConfirmAdd\"],\n \"dead\": dead,\n \"heal\": heal,\n \"dead_rate\": dead_rate,\n \"heal_rate\": heal_rate}\n global_statis_list = [global_statis[key] for key in global_statis]\n\n return global_history, global_statis_list\n\n# 热搜新闻数据\ndef get_hotnews_data():\n url = \"https://voice.baidu.com/act/newpneumonia/newpneumonia/\"\n # 无头模式,无需打开浏览器,效率快\n option = ChromeOptions()\n # 隐藏浏览器\n option.add_argument(\"--headless\")\n # linux部署\n option.add_argument(\"--no-sandbox\")\n browser = Chrome(options = 
option)\n browser.get(url)\n # 整个网站的源码\n # print(browser.page_source)\n # 模拟按钮模仿人浏览网站点击展开\n but = browser.find_element_by_xpath('//*[@id=\"ptab-1\"]/div[3]/div[11]/span')\n # but = browser.find_element_by_css_selector('#ptab-1 > div.Virus_1-1-304_2SKAfr > div.Common_1-1-304_3lDRV2 > span')\n\n # 模拟点击按钮,点击展开\n but.click()\n # 等待1秒\n time.sleep(1)\n #获取热搜头条信息\n content = []\n link = []\n news = browser.find_elements_by_xpath('//*[@id=\"ptab-1\"]/div[3]/div/div[2]/a/div')\n a = browser.find_elements_by_xpath('//*[@id=\"ptab-1\"]/div[3]/div/div[2]/a')\n\n for i, j in zip(news, a):\n # 热搜头条标题\n content.append(i.text)\n link.append(j.get_attribute(\"href\"))\n\n # 关闭浏览\n browser.close()\n return content,link\n\n\n# ----------------爬取数据结束------------------------\n\n# ----------------更新国内数据start------------------------\n#更新国内疫情详细数据\ndef update_details():\n cursor = None\n connect = None\n try:\n li = get_tencent_data()[1] #0历史,1当前数据\n connect,cursor = get_conn()\n sql = \"INSERT INTO details (update_time, province, city, nowConfirm, confirm, confirm_add,suspect, heal, dead, dead_rate, heal_rate) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select %s = (select update_time from details order by id desc limit 1)\"\n # 对比当前最大时间戳,相同不更新,不相同则更新\n cursor.execute(sql_query, li[0][0])\n if not cursor.fetchone()[0]:\n #time.asctime() 接受时间元组并返回一个可读的形式为\"Tue Dec 11 18:07:14 2008\"\n # 以 f开头表示在字符串内支持大括号内的python 表达式\n print(f\"{time.asctime()}开始更新数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() #提交事务\n print(f\"{time.asctime()}更新到最新数据\")\n else:\n print(f\"{time.asctime()}已是最新数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect,cursor)\n\n#更新国内各省疫情历史每日数据\ndef update_province_history_data():\n cursor = None\n connect = None\n try:\n li = get_province_history_data() #0历史,1当前数据\n connect,cursor = get_conn()\n sql = \"INSERT INTO province_history (ds, province,province_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add, nowSevere, nowMidSevere) \" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select confirm from province_history where ds = %s and province = %s\"\n # sql_query = \"select %s = (select ds from province_history order by id desc limit 1)\"\n # 对比当前最大时间戳,相同不更新,不相同则更新\n print(f\"{time.asctime()}开始更新各省历史数据\")\n for item in li:\n if not cursor.execute(sql_query, [item[0],item[1]]):\n print(item)\n cursor.execute(sql, item)\n connect.commit() #提交事务\n print(f\"{time.asctime()}已更新到最新各省历史数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect,cursor)\n\n\n# 插入国内history数据\ndef insert_china_history():\n cursor = None\n connect = None\n try:\n dic = get_tencent_data()[0] #0历史数据\n print(f\"{time.asctime()}开始插入历史数据\")\n connect,cursor = get_conn()\n sql = \"insert into china_history values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n for k, v in dic.items():\n print(\"\\n\")\n print(k)\n print(\"----\")\n print(v)\n print(\"\\n\")\n cursor.execute(sql, [k,v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"suspect\"), v.get(\"suspect_add\"),\n v.get(\"heal\"), v.get(\"heal_add\"), v.get(\"dead\"), v.get(\"dead_add\"), v.get(\"importedCase\"),\n v.get(\"importedCase_add\"), v.get(\"noInfect\"), v.get(\"noInfect_add\"), v.get(\"localConfirm\"),\n v.get(\"localConfirm_add\"), v.get(\"nowConfirm\"), v.get(\"nowSevere\"),v.get(\"dead_rate\"),\n v.get(\"heal_rate\"),v.get(\"dead_rate_add\"),v.get(\"heal_rate_add\")])\n 
connect.commit()\n print(f\"{time.asctime()}插入历史数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect,cursor)\n\n#更新国内历史数据\ndef update_china_history_data():\n cursor = None\n connect = None\n try:\n dic = get_tencent_data()[0] # 0历史数据\n print(f\"{time.asctime()}开始更新历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into china_history values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n sql_query = \"select confirm from china_history where ds = %s\"\n #查询数据是否存在数据库里,不存在则插入\n for k,v in dic.items():\n if not cursor.execute(sql_query, k):\n print(k, v)\n cursor.execute(sql, [k,v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"suspect\"), v.get(\"suspect_add\"),\n v.get(\"heal\"), v.get(\"heal_add\"), v.get(\"dead\"), v.get(\"dead_add\"), v.get(\"importedCase\"),\n v.get(\"importedCase_add\"), v.get(\"noInfect\"), v.get(\"noInfect_add\"), v.get(\"localConfirm\"),\n v.get(\"localConfirm_add\"), v.get(\"nowConfirm\"), v.get(\"nowSevere\"),v.get(\"dead_rate\"),\n v.get(\"heal_rate\"),v.get(\"dead_rate_add\"),v.get(\"heal_rate_add\")])\n connect.commit()\n print(f\"{time.asctime()}历史数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n#插入地区风险数据\ndef insert_localrisk():\n cursor = None\n connect = None\n try:\n li = get_localrisk_data() # 0历史数据\n print(li)\n print(f\"{time.asctime()}开始插入风险数据\")\n connect, cursor = get_conn()\n sql = \"insert into localrisk(ds, province, city, nowConfirm, confirm, confirm_add, heal,dead, grade) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n for i in li:\n cursor.execute(sql, i)\n connect.commit()\n print(f\"{time.asctime()}插入风险数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n#更新地区风险数据\ndef update_localrisk():\n cursor = None\n connect = None\n try:\n li = get_localrisk_data() # 0历史数据\n\n print(f\"{time.asctime()}开始插入风险数据\")\n connect, cursor = get_conn()\n sql = \"insert into localrisk(ds, province, city, nowConfirm, confirm, confirm_add, heal,dead, grade) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n sql_query = \"select %s = (select ds from localrisk where city = %s order by ds desc limit 1)\"\n # 对比当前最大时间戳,相同不更新,不相同则更新\n for i in li:\n cursor.execute(sql_query, (i[0], i[2]))\n if not cursor.fetchone()[0]:\n print(i[0]+i[1]+i[2])\n cursor.execute(sql, i)\n connect.commit()\n print(f\"{time.asctime()}插入风险数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# 更新热搜数据\ndef update_hotsearch():\n cursor = None\n connect = None\n try:\n context,link = get_hotnews_data()\n print(f\"{time.asctime()}开始更新热搜数据\")\n connect, cursor = get_conn()\n sql = \"insert into hotsearch(dt,content,link) values(%s,%s,%s)\"\n sql_query = \"select * from hotsearch where content = %s\"\n ts = time.strftime(\"%Y-%m-%d %X\")\n for i,j in zip(context,link):\n if not cursor.execute(sql_query, i):\n print(ts, i, j)\n cursor.execute(sql, (ts, i, j))\n connect.commit() # 提交事务\n print(f\"{time.asctime()}热搜数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# ----------------更新国内数据end------------------------\n\n# ----------------更新全球数据start------------------------\n\n# 更新全球各国最新数据\ndef update_global_country_latest_data():\n cursor = None\n connect = None\n try:\n li = get_global_country_latest_data()[1] #0美国,1全球\n connect, cursor = get_conn()\n sql = \"INSERT INTO global_country_latest(ds, country, continent,confirm, confirm_add,nowConfirm, nowConfirm_add,suspect, heal,heal_add, dead, dead_add) \" \\\n 
\"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select %s = (select ds from global_country_latest order by id desc limit 1)\"\n cursor.execute(sql_query, li[0][0])\n if not cursor.fetchone()[0]:\n print(f\"{time.asctime()}开始更新全球数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新全球数据\")\n else:\n print(f\"{time.asctime()}已是最新全球数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 插入全球各国历史数据(没有更新操作)\ndef insert_global_country_history_data():\n cursor = None\n connect = None\n try:\n li = get_global_country_history_data()\n print(f\"{time.asctime()}开始更新全球各国历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into global_country_history (ds, country,country_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add) \" \\\n \"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n print(f\"{time.asctime()}开始更新数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit()\n print(f\"{time.asctime()}全球各国历史数据更新完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 更新美国各州最新数据\ndef update_america_state_latest():\n cursor = None\n connect = None\n try:\n li = get_global_country_latest_data()[0] # 0美国,1全球\n connect, cursor = get_conn()\n sql = \"INSERT INTO america_state (ds, name, city, cityMap, confirm, confirm_add, suspect, heal, dead)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select %s = (select ds from america_state order by id desc limit 1)\"\n cursor.execute(sql_query, li[0][0])\n if not cursor.fetchone()[0]:\n print(f\"{time.asctime()}开始更新美国各州最新数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新美国各州最新数据\")\n else:\n print(f\"{time.asctime()}已是最新美国数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 插入美国各州历史数据(无更新)\ndef insert_america_state_history():\n cursor = None\n connect = None\n try:\n li = get_america_state_history_data()\n connect, cursor = get_conn()\n sql = \"INSERT INTO america_state (ds, name, city, cityMap, confirm, confirm_add, suspect, heal, dead)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n print(f\"{time.asctime()}开始更新美国各州历史数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新美国各洲历史数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 插入美国历史数据(无更新)\ndef insert_america_history():\n cursor = None\n connect = None\n try:\n li = get_america_history_data()\n connect, cursor = get_conn()\n sql = \"INSERT INTO america_history (ds, confirm, confirm_add, suspect, heal, dead, dead_add, negative,negative_add,hospitalized, hospitalized_add, nowHospitalized, \" \\\n \"inIcu, nowInIcu, onVentilator,nowOnVentilator,totalTestResults,totalTestResults_add)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n print(f\"{time.asctime()}开始更新美国历史数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新美国历史数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n\n#插入全球历史总体数据\ndef insert_global_history():\n cursor = None\n connect = None\n try:\n dic = get_global_histroy_data()[0] # 0历史数据,1最新日期的总体数据\n print(f\"{time.asctime()}开始插入全球历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into global_history 
values(%s,%s,%s,%s,%s,%s,%s)\"\n for k, v in dic.items():\n print(\"\\n\")\n print(k)\n print(\"----\")\n print(v)\n print(\"\\n\")\n cursor.execute(sql, [k, v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"dead\"), v.get(\"heal\"),\n v.get(\"dead_rate\"), v.get(\"heal_rate\")])\n connect.commit()\n print(f\"{time.asctime()}插入全球历史数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 更新全球历史数据\ndef update_global_history_data():\n cursor = None\n connect = None\n try:\n dic = get_global_histroy_data()[0] # 0历史数据\n print(f\"{time.asctime()}开始更新全球历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into global_history values(%s,%s,%s,%s,%s,%s,%s)\"\n sql_query = \"select confirm from global_history where ds = %s\"\n # 查询数据是否存在数据库里,不存在则插入\n for k, v in dic.items():\n if not cursor.execute(sql_query, k):\n print(k, v)\n cursor.execute(sql, [k, v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"dead\"), v.get(\"heal\"),\n v.get(\"dead_rate\"), v.get(\"heal_rate\")])\n connect.commit()\n print(f\"{time.asctime()}全球历史数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# ----------------国外全球处理end------------------------\n\n\n\n\n# ----------------爬取疫苗数据start--------------------------\n\n# 获取全球疫苗每百人接种数据和全球疫苗累计接种数据\ndef get_total_vaccinations_data():\n # 读取数据\n per_data = pd.read_csv(r'./static/csv/covid-vaccination-doses-per-capita.csv')\n total_data = pd.read_csv(r'./static/csv/cumulative-covid-vaccinations.csv')\n per_data.head()\n total_data.head()\n\n vaccine_list = []\n for i in range(0, per_data.shape[0]): # 利用shape的第一个元素来获取数据的数量\n per_row_data = per_data.iloc[i] # 获取第每行数据\n total_row_data = total_data.iloc[i]\n per_row_data.fillna('null', inplace=True) # 找出每行的nan(NAN)值以null填充\n value = [per_row_data[2], str(per_row_data[0]), str(per_row_data[1]), total_row_data[3],\n per_row_data[3]] # 读取第每行中每列数据,由于数据库添加使用的都是字符串形式添加故都取str\n vaccine_list.append(value)\n\n return vaccine_list\n\n# 获取完全接种COVID-19疫苗的总人口中的份额和总人数\ndef get_people_fully_vaccinated_data():\n per_data = pd.read_csv(r'./static/csv/share-people-fully-vaccinated-covid.csv')\n total_data = pd.read_csv(r'./static/csv/people-fully-vaccinated-covid.csv')\n per_data.head()\n total_data.head()\n\n vaccine_list = []\n for i in range(0, per_data.shape[0]): # 利用shape的第一个元素来获取数据的数量\n per_row_data = per_data.iloc[i] # 获取第每行数据\n total_row_data = total_data.iloc[i]\n per_row_data.fillna('null', inplace=True) # 找出每行的nan(NAN)值以null填充\n value = [per_row_data[2], str(per_row_data[0]), str(per_row_data[1]), total_row_data[3],\n per_row_data[3]] # 读取第每行中每列数据,由于数据库添加使用的都是字符串形式添加故都取str\n vaccine_list.append(value)\n return vaccine_list\n\n# 获取已接受至少一剂COVID-19疫苗的总人口中的份额和总人数\ndef get_people_vaccinated_data():\n per_data = pd.read_csv(r'./static/csv/share-people-vaccinated-covid.csv')\n total_data = pd.read_csv(r'./static/csv/people-vaccinated-covid.csv')\n per_data.head()\n total_data.head()\n\n vaccine_list = []\n for i in range(0, per_data.shape[0]): # 利用shape的第一个元素来获取数据的数量\n per_row_data = per_data.iloc[i] # 获取第每行数据\n total_row_data = total_data.iloc[i]\n per_row_data.fillna('null', inplace=True) # 找出每行的nan(NAN)值以null填充\n value = [per_row_data[2], str(per_row_data[0]), str(per_row_data[1]), total_row_data[3],\n per_row_data[3]] # 读取第每行中每列数据,由于数据库添加使用的都是字符串形式添加故都取str\n vaccine_list.append(value)\n return vaccine_list\n\n\n# ----------------爬取疫苗数据end----------------------------\n\n# ----------------手动更新疫苗数据start--------------------------\n\n# 更新全球疫苗每百人接种数据和全球疫苗累计接种数据\ndef 
update_total_vaccinations_data():\n #连接数据库\n cursor = None\n connect = None\n try:\n connect, cursor = get_conn()\n vaccine_list = get_total_vaccinations_data()\n print(f\"{time.asctime()}全球疫苗每百人接种和累计接种数据开始更新\")\n sql = \"insert into `global_total_vaccinations`(ds,country,code,total_vaccine,per_hundred) values(%s,%s,%s,%s,%s)\"\n for item in vaccine_list: # 利用shape的第一个元素来获取数据的数量\n print(item)\n cursor.execute(sql, item)\n connect.commit()\n print(f\"{time.asctime()}全球疫苗每百人接种和累计接种数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 更新完全接种COVID-19疫苗的总人口中的份额和总人数\ndef update_people_fully_vaccinated_data():\n #连接数据库\n cursor = None\n connect = None\n try:\n connect, cursor = get_conn()\n vaccine_list = get_people_fully_vaccinated_data()\n print(f\"{time.asctime()}全球疫苗完全接种COVID-19疫苗的总人口中的份额和总人数数据开始更新\")\n sql = \"insert into `global_people_fully_vaccinated`(ds,country,code,people_fully_vaccinated,people_fully_vaccinated_per_hundred) values(%s,%s,%s,%s,%s)\"\n for item in vaccine_list: # 利用shape的第一个元素来获取数据的数量\n print(item)\n cursor.execute(sql, item)\n connect.commit()\n print(f\"{time.asctime()}全球疫苗完全接种COVID-19疫苗的总人口中的份额和总人数数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 更新已接受至少一剂COVID-19疫苗的总人口中的份额和总人数\ndef update_people_vaccinated_data():\n #连接数据库\n cursor = None\n connect = None\n try:\n connect, cursor = get_conn()\n vaccine_list = get_people_vaccinated_data()\n print(f\"{time.asctime()}全球疫苗已接受至少一剂COVID-19疫苗的总人口中的份额和总人数数据开始更新\")\n sql = \"insert into `global_people_vaccinated`(ds,country,code,people_vaccinated,people_vaccinated_per_hundred) values(%s,%s,%s,%s,%s)\"\n for item in vaccine_list: # 利用shape的第一个元素来获取数据的数量\n print(item)\n cursor.execute(sql, item)\n connect.commit()\n print(f\"{time.asctime()}全球疫苗已接受至少一剂COVID-19疫苗的总人口中的份额和总人数数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# ----------------手动更新疫苗数据处理end--------------------------\n\n\n\nif __name__ == \"__main__\":\n # update_hotsearch()\n # print(get_america_history_data())\n # # 参数列表长度\n argc = len(sys.argv)\n if(argc == 1):\n s = \"\"\"\n 请输入参数\n 参数说明:\n up_his 更新全球和中国的历史数据\n up_hot 更新实时热搜\n up_det 更新全球和中国疫情详细数据\n up_risk 更新地区风险\n \"\"\"\n print(s)\n else:\n # 0表示文件路径,1开始是参数\n order = sys.argv[1]\n if order == 'up_his':\n update_china_history_data()\n update_global_history_data()\n update_province_history_data()\n elif order == 'up_hot':\n update_hotsearch()\n elif order == 'up_risk':\n update_localrisk()\n elif order == 'up_det':\n update_details()\n update_global_country_latest_data()\n update_america_state_latest()\n # insert_america_state_history()\n # insert_america_history()\n","repo_name":"crverr/covid19-system","sub_path":"spider-yiqing-data.py","file_name":"spider-yiqing-data.py","file_ext":"py","file_size_in_byte":38930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
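Each loader in the record above guards its INSERTs with the same check-then-insert pass: run a SELECT for the row key and insert only when the cursor reports zero hits (pymysql-style cursors return the matched row count from execute(), which is what the code relies on). A minimal sketch of that idiom, reusing the china_history table and its ds key from the code above:

def insert_if_absent(cursor, sql_insert, row_key, row_values):
    # execute() returns the number of rows the SELECT matched,
    # so a falsy result means this key has not been loaded yet
    if not cursor.execute("select 1 from china_history where ds = %s", (row_key,)):
        cursor.execute(sql_insert, row_values)
        return True
    return False

On MySQL the same effect fits in one statement, INSERT ... ON DUPLICATE KEY UPDATE, at the cost of requiring a unique index on ds.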
+{"seq_id":"12446658063","text":"#coding=utf-8;\n\n\nimport urllib.request\n\nimport ssl\nimport json\n\nssl._create_default_https_context = ssl._create_unverified_context;\ntarget_url = \"https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=2017-12-03&leftTicketDTO.from_station=SHH&leftTicketDTO.to_station=PEN&purpose_codes=ADULT\"\nuser_agent = \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Mobile Safari/537.36\"\n\n# 获取车辆信息\n\"\"\"\n https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=2017-12-03&leftTicketDTO.from_station=SHH&leftTicketDTO.to_station=PEN&purpose_codes=ADULT\n\"\"\"\n\n\ndef getTrainList():\n req = urllib.request.Request(target_url);\n req.add_header(\"User-Agent\",user_agent);\n rsp = urllib.request.urlopen(req);\n html = rsp.read();\n # 将截取到的json 数据转换成字典\n dic = json.loads(html);\n # 获取到车次信息 进行返回\n return dic[\"data\"][\"result\"];\n\n# 处理返回的车次信息\n\"\"\"\n 每一条车次信息 字段对应的位置\n 索引为23 对应的软卧\n 28 对应的是硬卧\n 29 对应的是硬座\n 26 对应的是无座\n 3 对应的车次\n\"\"\"\ntest_flag = 0;\ndef dealWithTrainInfo(dict):\n # 每一条车次信息以及字段\n for item in dict:\n temp_list = item.split('|');\n # 以下代码是为了知道字段对应的索引\n # global test_flag;\n # for i in temp_list:\n # print('%s--%s',(test_flag,i));\n # test_flag += 1;\n try:\n # 此处 要进��类型转换 字符串与0作比较,永远都是大于0的\n if int(temp_list[23]) > 0:\n print (temp_list[3]+'--'+'有票');\n except:\n # 在中文前面加上U 表示字符串是unicode 编码\n if temp_list[23] == u'有':\n print(temp_list[3] +'--'+ '有票');\n else:\n print(temp_list[3] +'--'+ \"无票\");\n\nif __name__ == '__main__':\n\n dict = getTrainList();\n dealWithTrainInfo(dict);","repo_name":"AlexanderYeah/SKPy12306Demo","sub_path":"Lession2/12306.py","file_name":"12306.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"72095957267","text":"#\n# @lc app=leetcode.cn id=345 lang=python3\n#\n# [345] 反转字符串中的元音字母\n#\n# https://leetcode-cn.com/problems/reverse-vowels-of-a-string/description/\n#\n# algorithms\n# Easy (46.11%)\n# Total Accepted: 6.7K\n# Total Submissions: 14.5K\n# Testcase Example: '\"hello\"'\n#\n# 编写一个函数,以字符串作为输入,反转该字符串中的元音字母。\n#\n# 示例 1:\n#\n# 输入: \"hello\"\n# 输出: \"holle\"\n#\n#\n# 示例 2:\n#\n# 输入: \"leetcode\"\n# 输出: \"leotcede\"\n#\n# 说明:\n# 元音字母不包含字母\"y\"。\n#\n#\n\n\nclass Solution:\n def reverseVowels(self, s: str) -> str: \n i = 0\n j = len(s) - 1\n yuan = 'aeiouAEIOU'\n s = [x for x in s]\n while i < j:\n while i < j and s[i] not in yuan:\n i += 1\n while i < j and s[j] not in yuan:\n j -= 1\n if i < j:\n t = s[i]\n s[i] = s[j]\n s[j] = t\n i += 1\n j -= 1\n return \"\".join(s)\n","repo_name":"fhyPayaso/fhyPayaso.github.io","sub_path":"code/Algorithm/leetcode/u999/345.反转字符串中的元音字母.py","file_name":"345.反转字符串中的元音字母.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1226296925","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport roslib\n#roslib.load_manifest('my_package')\nimport cv2\nimport rospy\nimport sys\nimport imutils\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom itertools import islice\nfrom cv_bridge import CvBridge, CvBridgeError\n\nbridge = CvBridge()\n\ndepth_img = []\nimg_arr = np.array([[0],[0]])\n\ndef depthCallback(data):\n\tglobal depth_img\n\tglobal img_arr\n\timg_arr = np.fromstring(data.data, np.uint8)\n\t#depth_ball = data.data[int(img_ballx),int(img_bally)]\n\timg_arr = img_arr[2::4].copy()\n\timg_arr.resize(480,640)\n\tprint(img_arr[240,320])\n\ndef main():\n\tprint(\"hello\")\n\tglobal depth_img\n\tglobal img_arr\n\trospy.init_node(\"depthTest\")\n\tdepth_sub = rospy.Subscriber(\"/camera/depth_registered/image_raw\",Image,depthCallback)\n\trate = rospy.Rate(10)\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tplt.ion()\n\tplt.plot()\n\n\twhile not rospy.is_shutdown():\n\t\trospy.wait_for_message(\"/camera/depth_registered/image_raw\",Image)\n\t\tprint(\"yo\")\n\t\t#depth_img = np.array(depth_img)\n\t\t#depth_img.shape = (depth_img.size//640*2,640*2)\n\t\t#print(type(depth_img))\n\t\tpos = plt.imshow(img_arr[:,:])\n\t\tnp.savetxt(\"foo.csv\",img_arr[240-20:240+20,320-20:320+20],delimiter=',')\n\t\t#fig.colorbar(pos)\n\t\tplt.pause(0.2)\n\t\tplt.show()\n\t\trate.sleep()\n\nif __name__=='__main__':\n\tmain()\n","repo_name":"johnhanckel/AMRClass","sub_path":"Robotics All Code/depthcloud_play.py","file_name":"depthcloud_play.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"11516641671","text":"from django.shortcuts import render, redirect, get_object_or_404,get_list_or_404\nfrom .models import *\nfrom django.views.generic import ListView\nfrom .forms import *\n\n# Create your views here.\ndef home(request):\n data ={\n 'products' : Product.objects.all(),\n }\n\n return render(request, 'Home.html', data)\n\n\ndef createProduct(request):\n data = {\n 'form': ProductForm()\n }\n if request.method == 'POST':\n formulario = ProductForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n data['mensaje'] = \"Guardado Correctamente\"\n else:\n data[\"form\"] = formulario\n return render(request, 'product/createProduct.html', data)\n\ndef editProduct(request, id_product):\n products= get_object_or_404(Product, id_product=id_product)\n data = {\n 'form': ProductForm(instance=products)\n }\n\n if request.method == 'POST':\n formulario=ProductForm(data=request.POST, instance= products,)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='/')\n data['form'] = formulario\n return render(request,'product/editProduct.html', data )\n\ndef deleteProduct(request,id_product):\n product=Product.objects.get(id_product=id_product)\n product.delete()\n return redirect('/')\n\ndef listTypeProduct(request):\n productTypeList=ProductType.objects.all()\n return render(request, 'TypeProduct/listTypeProduct.html', {'productTypeList': productTypeList})\n\ndef createProductType(request):\n data = {\n 'form': ProductTypeForm()\n }\n if request.method == 'POST':\n formulario = ProductTypeForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listTypeProduct')\n else:\n data[\"form\"] = formulario\n return render(request, 'TypeProduct/createProductType.html', data )\n\ndef editProductType(request, id_product_type):\n products_type= get_object_or_404(ProductType, id_product_type=id_product_type)\n data = {\n 'form': ProductTypeForm(instance=products_type)\n }\n\n if request.method == 'POST':\n formulario=ProductTypeForm(data=request.POST, instance= products_type)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listTypeProduct')\n data['form'] = formulario\n return render(request,'TypeProduct/editProductType.html', data )\n\ndef deleteProductType(request,id_product_type):\n productType=ProductType.objects.get(id_product_type=id_product_type)\n productType.delete()\n return redirect(to='listTypeProduct')\n\ndef listProvider(request):\n provider=ProductProvider.objects.all()\n return render(request, 'Provider/listProvider.html', {'provider': provider})\n\ndef providerData(request,id_provider):\n providers = get_object_or_404(Provider, id_provider=id_provider)\n data = {\n 'form': ProviderForm(instance=providers)\n }\n\n if request.method == 'POST':\n formulario = ProviderForm(data=request.POST, instance=providers, )\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listProvider')\n data['form'] = formulario\n return render(request, 'Provider/providerData.html', data)\n\ndef list2Provider(request):\n provider=Provider.objects.all()\n return render(request, 'Provider/gestionProvider.html', {'provider': provider})\n\ndef createProvider(request):\n data = {\n 'form': ProviderForm()\n }\n if request.method == 'POST':\n formulario = ProviderForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='list2Provider')\n else:\n data[\"form\"] = formulario\n return render(request, 'Provider/createProvider.html', data)\n\ndef 
deleteProvider(request,id_provider):\n provider = Provider.objects.get(id_provider=id_provider)\n provider.delete()\n return redirect(to='list2Provider')\n\n\ndef createProductProvider(request):\n data = {\n 'form': ProductProviderForm()\n }\n if request.method == 'POST':\n formulario = ProductProviderForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listProvider')\n else:\n data[\"form\"] = formulario\n return render(request, 'Provider/createProductProvider.html', data)","repo_name":"SergioSm12/SupermarketDjangoDos","sub_path":"project/xyz/Apps/Sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
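One sharp edge in the views above: the delete views fetch with Model.objects.get, so a stale or mistyped id raises an unhandled DoesNotExist (a 500), while the edit views already use the get_object_or_404 shortcut. The same shortcut fits the deletes; sketched for deleteProduct (Product is the model these views already import):

from django.shortcuts import get_object_or_404, redirect

def deleteProduct(request, id_product):
    # returns a 404 on a missing id instead of an unhandled DoesNotExist
    product = get_object_or_404(Product, id_product=id_product)
    product.delete()
    return redirect('/')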
+{"seq_id":"22439012699","text":"# 이진 파일\n\n# 1. 이진 파일에서 바이트 읽기\nfilename = \"cat.jpg\"\ninfile = open(filename, \"rb\") #이진파일 읽기\nbytesArray = infile.read(8) #입력파일에서 8바이트 읽기\nbyte1 = bytesArray[0] # 첫번째 바이트 꺼내기\ninfile.close()\n\n\n# 2. 이진 파일에 바이트 저장하기\nfilename = \"out.aaa\"\noutfile = open(filename, \"wb\")\nbytesArray = bytes([255, 128, 0, 1])\noutfile.write(bytesArray)\noutfile.close()\n\n\n# 3. 이진 파일 복사하기\ninfile = open(\"123.png\", \"rb\")\noutfile = open(\"kkk.png\", \"wb\")\n\n\n# 4. 입력 파일에서 1024 바이트씩 읽어서 출력 파일에 쓴다. \nwhile True:\n copy_buffer = infile.read(1024)\n if not copy_buffer:\n break\n outfile.write(copy_buffer)\n\ninfile.close()\noutfile.close()\nprint(str(infile)+\"를 \" +str(outfile)+\"로 복사하였습니다. \")\n\n","repo_name":"peterchokr/python","sub_path":"src/chap10/p355_binary.py","file_name":"p355_binary.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"16769735480","text":"from django.urls import path\n\nfrom apps.customer.views import (\n OrderFormView, CartView, ProductCustomerView, ShopCustomerView,\n IndexCustomerView, ProductAddToCartView, ProductIncreaseView,\n ProductDecreaseView, ProductRemoveFromCartView, GetPaymentResponse\n)\n\n\nurlpatterns = [\n path('buy-product/', OrderFormView.as_view(), name='buy-product'),\n path('paybox-order/', GetPaymentResponse.as_view(), name='get_payment_response'),\n path('cart/', CartView.as_view(), name='cart'),\n path('add//', ProductAddToCartView.as_view(), name='cart_add'),\n path('remove//', ProductRemoveFromCartView.as_view(), name='cart_remove'),\n path('decrease//', ProductDecreaseView.as_view(), name='product_decrease'),\n path('increase//', ProductIncreaseView.as_view(), name='product_increase'),\n path('shop//', ShopCustomerView.as_view(), name='shop-customer'),\n path('/', IndexCustomerView.as_view(), name='index-customer'),\n path('shop///', ProductCustomerView.as_view(), name='product-customer'),\n]\n","repo_name":"nbdbkv/taplink","sub_path":"apps/customer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21415475916","text":"from typing import List\nimport pytest\n\nfrom ..app import app, client\nfrom ..models.groups import GroupDetailed\n\n\nclass TestGroups:\n # @pytest.mark.get_groups\n def test_route_exist(self) -> None:\n res = client.get(\n app.url_path_for('get groups'),\n json={},\n )\n assert res.status_code == 200\n\n # @pytest.mark.get_groups\n def test_invalid_ouput_raise_error(self) -> None:\n res = client.get(\n app.url_path_for(\"get groups\"),\n )\n assert res.json() == List[GroupDetailed]\n","repo_name":"pvenv/swimmy","sub_path":"docker/fastapi/swimmy/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30173863051","text":"import statistics\n\nfrom f1predict.race.EloModel import EloModel, EloDriver, EloConstructor, EloEngine\nfrom f1predict.race.retirementBlame import getRetirementBlame\n\nRETIREMENT_PENALTY = -1.8\nFINISHING_BONUS = 0.1\nBASE_RETIREMENT_PROBABILITY = 0.1\nRETIREMENT_PROBABILITY_CHANGE_TRACK = 0.33\nRETIREMENT_PROBABILITY_CHANGE_DRIVER = 0.10\nROOKIE_DRIVER_RATING = 1820\n\nclass DataProcessor:\n def __init__(self, seasonsData, raceResultsData, driversData, constructorsData, enginesData):\n self.seasonsData = seasonsData\n self.raceResultsData = raceResultsData\n self.driversData = driversData\n self.constructorsData = constructorsData\n self.enginesData = enginesData\n self.model = None\n\n def processDataset(self):\n self.model = EloModel({}, {}, {}, {})\n self.predictions = []\n for year, season in self.seasonsData.items(): # Read every season:\n self._updateModelsForYear(season)\n racesAsList = list(season.races.items())\n racesAsList.sort(key=lambda x: x[1].round)\n\n for raceId, data in racesAsList:\n if raceId in self.raceResultsData and self.raceResultsData[raceId]:\n results = self.raceResultsData[raceId]\n self._addNewDriversAndConstructors(results, year)\n self.model.addNewCircuit(data.circuitId)\n\n gaElos = {}\n classified = []\n retired = []\n for index, res in enumerate(results):\n self.model.addNewCircuitToParticipant(res[\"driverId\"], data.circuitId)\n gaElos[res[\"driverId\"]] = self.model.getGaElo(\n res[\"driverId\"], res[\"grid\"], data.circuitId)\n if res[\"position\"] is None:\n retired.append((res[\"driverId\"], res[\"status\"]))\n else:\n classified.append(res[\"driverId\"])\n\n # Generate predictions:\n sortedGaElos = [(driverId, gaElo) for (driverId, gaElo) in gaElos.items()]\n sortedGaElos.sort(key=lambda x: x[1], reverse=True)\n if sortedGaElos: # TODO is this if-check necessary?\n self.predictions.append([x[0] for x in sortedGaElos])\n\n # Adjust models based on race results\n eloAdjustments, alphaAdjustment = self._calculateTrackAlphaAdjustmentAndBestEloAdjustments(\n classified, results, data.circuitId)\n self._adjustEloRatings(classified, retired, eloAdjustments, data.circuitId)\n self._adjustRetirementFactors(retired, classified, data.circuitId)\n self.model.adjustCircuitAplha(\n alphaAdjustment, data.circuitId)\n\n\n # Returns the generated EloModel from the last processing, or an empty model if the function was not called yet\n def getModel(self):\n return self.model\n\n # Returns a list of all generated predictions from the last processing\n # Throws an exception if called before processing a dataset\n def getPredictions(self):\n if self.predictions == None:\n raise AssertionError(\n \"Predictions not generated yet! 
Call before calling me.\")\n return self.predictions\n\n def _updateModelsForYear(self, season):\n '''Resolves team name changes'''\n # Updating list of engines and constructors:\n for new, old in season.teamChanges.items():\n self.model.constructors[new] = self.model.constructors[old]\n self.model.constructors[new].name = self.constructorsData[new]\n\n for cId, engineId in season.constructorEngines.items():\n # Check that the constructor and engine exist\n if engineId not in self.model.engines:\n self.model.engines[engineId] = EloEngine(\n self.enginesData[engineId])\n if cId not in self.model.constructors:\n self.model.constructors[cId] = EloConstructor(\n self.constructorsData[cId], None)\n # Assign it its engine\n self.model.constructors[cId].engine = self.model.engines[engineId]\n\n def _updateModelsAtEndOfYear(self, season):\n # Delete old, unused constructors\n for new, old in season.teamChanges.items():\n del self.model.constructors[old]\n\n # Regress all powers towards the mean\n # TODO\n\n def _addNewDriversAndConstructors(self, resultsForRace, year):\n for res in resultsForRace:\n if res[\"driverId\"] not in self.model.drivers:\n self.model.drivers[res[\"driverId\"]] = EloDriver(\n self.driversData[res[\"driverId\"]], res[\"constructorId\"])\n if year > 2003:\n self.model.drivers[res[\"driverId\"]\n ].rating = ROOKIE_DRIVER_RATING\n if self.model.drivers[res[\"driverId\"]].constructor is not self.model.constructors[res[\"constructorId\"]]:\n self.model.drivers[res[\"driverId\"]\n ].constructor = self.model.constructors[res[\"constructorId\"]]\n\n def _calculateTrackAlphaAdjustmentAndBestEloAdjustments(self, driverIDs, resultsForRace, circuitId):\n eloAdjustments = ()\n eloAdjustmentsSum = None\n bestAdjustment = 0\n adjustments = [0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98,\n 0.99, 1, 1.01, 1.02, 1.03, 1.04, 1.05, 1.06, 1.07, 1.08, 1.09, 1.1]\n for alphaAdjustment in adjustments:\n results = {}\n gaElos = {}\n for index, res in enumerate(resultsForRace):\n results[res[\"driverId\"]] = res[\"position\"]\n gaElos[res[\"driverId\"]] = self.model.getGaEloWithTrackAlpha(\n res[\"driverId\"], res[\"grid\"], circuitId, alphaAdjustment)\n curEloAdjustments = self._calculateEloAdjustments(driverIDs, gaElos, results)\n curEloAdjustmentsSum = 0\n curEloAdjustmentsSum += statistics.mean(map(abs, curEloAdjustments[0].values()))\n curEloAdjustmentsSum += statistics.mean(map(abs, curEloAdjustments[1].values()))\n curEloAdjustmentsSum += statistics.mean(map(abs, curEloAdjustments[2].values()))\n\n if not eloAdjustmentsSum or curEloAdjustmentsSum < eloAdjustmentsSum:\n eloAdjustmentsSum = curEloAdjustmentsSum\n eloAdjustments = curEloAdjustments\n bestAdjustment = alphaAdjustment\n return eloAdjustments, bestAdjustment\n\n def _calculateEloAdjustments(self, driverIDs, gaElos, results):\n driverAdjustments = {}\n engineAdjustments = {}\n constructorAdjustments = {}\n for i in range(len(driverIDs)):\n for k in range(i+1, len(driverIDs)):\n if driverIDs[i] not in driverAdjustments:\n driverAdjustments[driverIDs[i]] = 0\n if driverIDs[k] not in driverAdjustments:\n driverAdjustments[driverIDs[k]] = 0\n\n if self.model.drivers[driverIDs[i]].constructor not in constructorAdjustments:\n constructorAdjustments[self.model.drivers[driverIDs[i]].constructor] = 0\n if self.model.drivers[driverIDs[k]].constructor not in constructorAdjustments:\n constructorAdjustments[self.model.drivers[driverIDs[k]].constructor] = 0\n\n if self.model.drivers[driverIDs[i]].constructor.engine not in 
engineAdjustments:\n engineAdjustments[self.model.drivers[driverIDs[i]\n ].constructor.engine] = 0\n if self.model.drivers[driverIDs[k]].constructor.engine not in engineAdjustments:\n engineAdjustments[self.model.drivers[driverIDs[k]\n ].constructor.engine] = 0\n\n headToHeadResult = 1 if results[driverIDs[i]] < results[driverIDs[k]] else 0\n expectedScore = self.model.getExpectedScore(\n gaElos[driverIDs[i]], gaElos[driverIDs[k]])\n driverAdjustments[driverIDs[i]] += headToHeadResult - expectedScore\n driverAdjustments[driverIDs[k]] += expectedScore - headToHeadResult\n\n constructorAdjustments[self.model.drivers[driverIDs[i]\n ].constructor] += headToHeadResult - expectedScore\n constructorAdjustments[self.model.drivers[driverIDs[k]\n ].constructor] += expectedScore - headToHeadResult\n\n engineAdjustments[self.model.drivers[driverIDs[i]\n ].constructor.engine] += headToHeadResult - expectedScore\n engineAdjustments[self.model.drivers[driverIDs[k]\n ].constructor.engine] += expectedScore - headToHeadResult\n\n return (driverAdjustments, constructorAdjustments, engineAdjustments)\n\n def _adjustEloRatings(self, classified, retired, eloAdjustments, circuitId):\n for driverId in classified:\n self.model.adjustEloRating(\n driverId, eloAdjustments[0][driverId] + FINISHING_BONUS, circuitId)\n for (driverId, _) in retired:\n self.model.adjustEloRating(\n driverId, RETIREMENT_PENALTY, circuitId)\n\n for constructor in eloAdjustments[1]:\n self.model.adjustEloRatingConstructor(\n constructor, eloAdjustments[1][constructor], circuitId)\n\n for engine in eloAdjustments[2]:\n self.model.adjustEloRatingEngine(\n engine, eloAdjustments[2][engine], circuitId)\n\n def _adjustRetirementFactors(self, retired, classified, circuitID):\n const_retirements = {}\n eng_retirements = {}\n all_retirements = []\n \n # Process drivers who were classified in the race\n for driverID in classified:\n if self.model.drivers[driverID].constructor not in const_retirements:\n const_retirements[self.model.drivers[driverID].constructor] = []\n if self.model.drivers[driverID].constructor.engine not in eng_retirements:\n eng_retirements[self.model.drivers[driverID].constructor.engine] = []\n\n all_retirements.append(0)\n self.model.drivers[driverID].retirementProbability *= 1-RETIREMENT_PROBABILITY_CHANGE_DRIVER\n const_retirements[self.model.drivers[driverID].constructor].append(0)\n eng_retirements[self.model.drivers[driverID].constructor.engine].append(0)\n\n # Process drivers who retired from the race \n for (driverID, retirementReason) in retired:\n if self.model.drivers[driverID].constructor not in const_retirements:\n const_retirements[self.model.drivers[driverID].constructor] = []\n if self.model.drivers[driverID].constructor.engine not in eng_retirements:\n eng_retirements[self.model.drivers[driverID].constructor.engine] = []\n\n all_retirements.append(1)\n blame = getRetirementBlame(retirementReason)\n self.model.drivers[driverID].retirementProbability = (3 * blame[0] * RETIREMENT_PROBABILITY_CHANGE_DRIVER) + \\\n (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) * self.model.drivers[driverID].retirementProbability\n const_retirements[self.model.drivers[driverID].constructor].append(blame[1])\n eng_retirements[self.model.drivers[driverID].constructor.engine].append(blame[2])\n\n # Adjust overall retirement factor \n self.model.overallRetirementProbability = statistics.mean(all_retirements) * \\\n RETIREMENT_PROBABILITY_CHANGE_DRIVER + (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) \\\n * self.model.overallRetirementProbability\n 
\n # Adjust track retirement factors\n if circuitID not in self.model.tracksRetirementFactor:\n self.model.tracksRetirementFactor[circuitID] = BASE_RETIREMENT_PROBABILITY\n oldValue = self.model.tracksRetirementFactor[circuitID]\n self.model.tracksRetirementFactor[circuitID] += (statistics.mean(all_retirements) -\n oldValue) * RETIREMENT_PROBABILITY_CHANGE_TRACK\n \n # Adjust constructor factors\n for constructor, blames in const_retirements.items():\n newValue = statistics.mean(blames)\n constructor.retirementProbability = (3 * newValue * RETIREMENT_PROBABILITY_CHANGE_DRIVER) + \\\n (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) * constructor.retirementProbability\n\n # Adjust engine factors\n for engine, blames in eng_retirements.items():\n newValue = statistics.mean(blames)\n engine.retirementProbability = (3 * newValue * RETIREMENT_PROBABILITY_CHANGE_DRIVER) + \\\n (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) * engine.retirementProbability\n","repo_name":"villekuosmanen/F1Predict","sub_path":"f1predict/race/DataProcessor.py","file_name":"DataProcessor.py","file_ext":"py","file_size_in_byte":13072,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"48"}
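The pairwise adjustment loop above hinges on model.getExpectedScore(gaEloA, gaEloB). The source does not show that method, but the conventional logistic Elo expectation it presumably implements (the 400 scale factor is the standard chess value, assumed here, not confirmed by the record) is:

def expected_score(rating_a: float, rating_b: float) -> float:
    # probability that A finishes ahead of B under the logistic Elo model
    return 1.0 / (1.0 + 10.0 ** ((rating_b - rating_a) / 400.0))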
+{"seq_id":"41849213485","text":"Max=int(input('输入斐波那契数列的最大项数:'))\n\ndef fab(Max):\n n,a,b=0,0,1\n while n 0:\n# vertices.append((x, y, 50))\n# inh = True\n# inv = True\n# else:\n# vertices.append((x, y, 0))\n# if inv:\n# inv = False\n# edgeVertices.append(vertCount)\n# elif inh:\n# inh = False\n# edgeVertices.append(vertCount)\n# vertCount += 1\n\n# faces = []\n# for y in range(image.height - 1):\n# for x in range(image.width - 1):\n# offset = y * image.width + 1 # vertex index in obj-format starts at 1\n# # relative vertex positions: [u]pper, [l]ower / [l]eft, [r]ight\n# ul, ur, ll, lr = (offset + x, offset + x + 1,\n# offset + image.width + x, offset + image.width + x + 1)\n# # two triangles per square: upper left and lower right\n# faces.append((ul, ur, ll))\n# faces.append((ll, ur, lr))\n\n# verticesToRemove = []\n# for i, v in enumerate(vertices):\n# if v[2] == 0:\n# verticesToRemove.append(i)\n# verticesToRemove = set(verticesToRemove) - set(edgeVertices)\n\n# facesToRemove = []\n# for i, f in enumerate(faces):\n# if set(f).issubset(verticesToRemove):\n# facesToRemove.append(i)\n \n# # verticesToKeep = set(range(len(vertices))) - set(verticesToRemove)\n# # newVertices = [vertices[i] for i in verticesToKeep]\n \n# facesToKeep = set(range(len(faces))) - set(facesToRemove)\n# newFaces = [faces[i] for i in facesToKeep]\n\n# return vertices, newFaces\n\n# def get_back(image, vertices, offset):\n# faces = []\n# for y in range(image.height - 1):\n# for x in range(image.width - 1):\n# offset = y * image.width + 1 # vertex index in obj-format starts at 1\n# # relative vertex positions: [u]pper, [l]ower / [l]eft, [r]ight\n# ul, ur, ll, lr = (offset + x, offset + x + 1,\n# offset + image.width + x, offset + image.width + x + 1)\n# # two triangles per square: upper left and lower right\n# faces.append((ul, ur, ll))\n# faces.append((ll, ur, lr))\n\n# verticesToRemove = []\n# for i, v in enumerate(vertices):\n# if v[2] > 0:\n# verticesToRemove.append(i)\n# verticesToRemove = set(verticesToRemove) - set(edgeVertices)\n\n# facesToRemove = []\n# for i, f in enumerate(faces):\n# if not set(f).isdisjoint(verticesToRemove):\n# facesToRemove.append(i)\n \n# # verticesToKeep = set(range(len(vertices))) - set(verticesToRemove)\n# # newVertices = [vertices[i] for i in verticesToKeep]\n \n# facesToKeep = set(range(len(faces))) - set(facesToRemove)\n# newFaces = [faces[i] for i in facesToKeep]\n\n# newNewFaces = []\n# for f in newFaces:\n# newF = (f[0] + offset, f[1] + offset, f[2] + offset)\n# newNewFaces.append(newF)\n\n# return vertices, newFaces\n\n# def main(im, outpath, scale):\n# im = Image.open(im)#.resize((64,64))\n \n# scale = float(scale)\n# print('Image: size={}x{}, mode={}'.format(*im.size, im.mode))\n# print('Polygon count: {}'.format(2 * (im.width -1) * (im.height - 1)))\n# # vertices = create_vertices(im, scale)\n# # faces = create_faces(im, *im.size)\n# vertices, faces = get_front_and_sides(im)\n# back_v, back_f = get_back(im, len(vertices))\n# vertices = vertices + back_v\n# faces = faces + back_f\n# with open(outpath, 'w') as outfile:\n# for v in vertices:\n# outfile.write('v {} {} {}\\n'.format(*v))\n# for f in faces:\n# outfile.write('f {} {} {}\\n'.format(*f))\n\n# main('data/aloi/sil_grey_256/1/1_r0.png', 'testmesh/out.obj', 1.)\n\nimport numpy as np\nimport torch\n\nclass Initialiser:\n def __init__(self, image, im_dim=256):\n self.visible = (image[0] > 0).astype(int)\n self.objheight, self.top = self.vert()\n self.objwidth, self.left = self.horiz()\n self.im_dim = im_dim\n\n def 
vert(self):\n rows = []\n for i, row in enumerate(self.visible):\n if np.isin(1, row):\n rows.append(i)\n return rows[len(rows)-1] - rows[0], rows[0]\n\n def horiz(self):\n cols = []\n for i, col in enumerate(np.transpose(self.visible)):\n if np.isin(1, col):\n cols.append(i)\n return cols[len(cols)-1] - cols[0], cols[0]\n\n def initialise(self, mesh, orig_rad = 90):\n width_scale = orig_rad / (self.objwidth/2)\n height_scale = orig_rad / (self.objheight/2)\n\n self.horizontal_scale(mesh, width_scale)\n self.vertical_scale(mesh, height_scale)\n \n orig_top = (self.im_dim/2) - orig_rad/height_scale\n orig_left = (self.im_dim/2) - orig_rad/width_scale\n shift_per_pixel = 1/orig_rad\n horiz_shift = (orig_left - self.left) * shift_per_pixel\n vert_shift = (orig_top - self.top) * shift_per_pixel\n\n self.horizontal_shift(mesh, horiz_shift)\n self.vertical_shift(mesh, vert_shift)\n\n print(\"\\n\")\n print(\"w scale: \", width_scale)\n print(\"orig_rad: \", orig_rad)\n print(\"img left: \", self.left)\n print(\"sphere left: \", orig_left)\n print(\"horiz shift: \", horiz_shift)\n print(\"width per pixel: \", shift_per_pixel)\n print(\"\\n\")\n print(\"h scale: \", height_scale)\n print(\"orig_rad: \", orig_rad)\n print(\"img top: \", self.top)\n print(\"sphere top: \", orig_top)\n print(\"vert shift: \", vert_shift)\n print(\"height per pixel: \", shift_per_pixel)\n print(\"\\n\")\n\n def horizontal_scale(self, mesh, scale):\n x = torch.transpose(mesh.vertices[0], 0, 1)[0]\n x = torch.unsqueeze((x / scale), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,0] = x\n\n def vertical_scale(self, mesh, scale):\n y = torch.transpose(mesh.vertices[0], 0, 1)[1]\n y = torch.unsqueeze((y / scale), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,1] = y\n\n def horizontal_shift(self, mesh, shift):\n x = torch.transpose(mesh.vertices[0], 0, 1)[0]\n x = torch.unsqueeze((x - shift), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,0] = x\n\n def vertical_shift(self, mesh, shift):\n y = torch.transpose(mesh.vertices[0], 0, 1)[1]\n y = torch.unsqueeze((y + shift), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,1] = y","repo_name":"realdingke/L335_project","sub_path":"examples/init_mesh.py","file_name":"init_mesh.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
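Initialiser.vert() and .horiz() scan the mask row by row and column by column for the object's extent; the same bounding box falls out of two vectorised reductions. A sketch returning the same (height, top) and (width, left) convention:

import numpy as np

def bounding_box(mask: np.ndarray):
    rows = np.flatnonzero(mask.any(axis=1))  # rows containing any foreground pixel
    cols = np.flatnonzero(mask.any(axis=0))  # columns containing any foreground pixel
    return (rows[-1] - rows[0], rows[0]), (cols[-1] - cols[0], cols[0])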
+{"seq_id":"40311972566","text":"#coding=utf-8\n\nimport pandas as pd\n\ndef findlastindexof(ticker,allgp,indexlist):\n for ind in indexlist:\n if allgp['ticker'][ind] == ticker:\n break\n return ind\n\ndef zfrankin(timeperiod,begindate,_tradedate,gc,x=0.5,howlong=90):\n \"\"\"\n 计算股票涨幅排名\n\n Parameters\n ----------\n timeperiod : 排名时间段\n begindate : 交易数据开始时间,最好取早于timeperiod的时间,给非交易日留点缓冲\n _tradedate : 统计结束时间\n gc : 要统计的股票列表\n x : 涨幅标准\n howlong : 次新股标准 \n Examples\n --------\n\n Returns\n -------\n list : 涨幅大于X,上市时间超过howlong的股票列表\n \"\"\"\n allgp = DataAPI.MktEqudAdjGet(beginDate=begindate,endDate=_tradedate,secID=gc,isOpen='1',pandas='1')\n _highest = _turnrate = _ticker = 0\n _lowest = 99999999\n _zfdit ={}\n _ticker = allgp['ticker'].iloc[0]\n _indexlist = sorted(allgp['ticker'].index,reverse=True)\n _tickerlastindex = findlastindexof(_ticker,allgp,_indexlist)\n _tickertime = _tickerlastindex+1\n #print _ticker,_tickerlastindex,len(_indexlist)\n for _r in allgp.iterrows():\n if _ticker != _r[1]['ticker']:\n if(_turnrate > 1):\n _zfdit[_ticker]=[_ticker,_highest/_lowest-1.,_turnrate]\n _ticker = _r[1]['ticker']\n _highest=_turnrate=0\n _lowest=99999999\n _tickerlastindex = findlastindexof(_ticker,allgp,_indexlist)\n _tickertime = _tickerlastindex - _r[0]+1\n if _tickerlastindex - _r[0] > timeperiod:\n continue\n _highest = max(_highest,_r[1]['highestPrice'])\n _lowest = min(_lowest,_r[1]['lowestPrice'])\n _turnrate = _turnrate + _r[1]['turnoverRate']\n zfranklist = [ v for v in sorted(_zfdit.values(),key=lambda x:x[1],reverse=True)]\n zfranklist = [j for (i,j) in enumerate(zfranklist) if j[1] >= x and len(DataAPI.MktEqudAdjGet(endDate=_tradedate,ticker=j[0],isOpen='1',pandas='1'))>howlong]\n return zfranklist\n#zfrankin(10,'20170101','20170208',['000001.XSHE','000002.XSHE'])","repo_name":"fswzb/MT","sub_path":"lib.zfrank.py","file_name":"lib.zfrank.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2784985067","text":"'''\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n\n'''\ndef checkequality(arr,index):\n ch = arr[0][index]\n for i in arr:\n if i[index] != ch:\n return 1\n return 0\n\ndef longestCommonPrefix(strs):\n flag = 0\n prefix = \"\"\n if len(strs) == 0:\n return \"\"\n min_length = len(strs[0])\n for i in strs:\n if len(i) 1:\n q.enqueue(int(line[1]))\n else:\n out.write(str(q.dequeue()) + '\\n')\n\nout.close()\n","repo_name":"shivammehta25/Fun-Coding","sub_path":"EDXCourseITMO/Week2/queue_collections.py","file_name":"queue_collections.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"811345591","text":"import json\nimport time\nimport datetime\nimport os\nfrom termcolor import colored\nimport pandas as pd\n\ndef print_exception(e, addicional_info, logger = None):\n error_type = type(e).__name__\n error_message = e.args[0]\n msg = colored(f'{addicional_info} >> {error_type}: {error_message}', 'red', attrs = ['bold'])\n if logger:\n logger.exception(msg)\n else:\n print(f'[EXCEPTION] {msg}')\n\ndef print_error(message, logger = None):\n msg = colored(message, 'red')\n if logger:\n logger.error(msg)\n else:\n print(f'[ERROR] {msg}')\n\ndef print_info(message, logger = None, color = 'green'):\n msg = colored(message, color)\n if logger:\n logger.info(msg)\n else:\n print(f'[INFO] {msg}')\n\ndef wait_for_file(file_path, timeout = 3600, interval = 1):\n start_time = time.time()\n while not os.path.exists(file_path):\n if time.time() - start_time > timeout:\n raise TimeoutError(f'File {file_path} Not Found After Timeout')\n time.sleep(interval)\n\ndef load_csv_file(file_path, cols = list()):\n if os.path.exists(file_path):\n if cols:\n df = pd.read_csv(file_path, header = None, names = cols)\n else:\n df = pd.read_csv(file_path)\n return df\n return pd.DataFrame()\n\ndef load_file(file_path):\n with open(file_path, 'r') as text_file:\n lines = text_file.readlines()\n lines = [line.rstrip('\\n') for line in lines]\n return lines\n\ndef epoch_to_human_date(epoch_time):\n date = datetime.datetime.fromtimestamp(epoch_time)\n human_readable_date = date.strftime('%Y-%m-%d %H:%M:%S')\n return human_readable_date\n\ndef save_as_json(data, json_file):\n with open(str(json_file), 'w') as fp:\n json.dump(data, fp, indent = 4)\n","repo_name":"Malware-Hunter/SF23-AMGenerator","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15216588801","text":"#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nDESCRIP = 'Evalaute, write html for `.ipynb` notebook file'\nEPILOG = \\\n\"\"\"\nOpens given NBFILE as notebook. Evaluates, writing output notebook to OUTDIR.\nWrites HTML to OUTDIR.\n\"\"\"\nfrom os.path import join as pjoin, splitext, isdir, split as psplit\n\nimport io\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\n# IPython before and after the big split\ntry:\n from nbformat import read as nb_read, write as nb_write, convert as nb_convert\nexcept ImportError:\n from IPython.nbformat import read as nb_read, write as nb_write, convert as nb_convert\ntry:\n from nbconvert import html\nexcept ImportError:\n from IPython.nbconvert import html\n\nfrom runipy.notebook_runner import NotebookRunner\n\nDEFAULT_TEMPLATE = 'perrinate.tpl'\nDEFAULT_READ_FORMAT = 3\nDEFAULT_WRITE_FORMAT = 3\nHTML_FORMAT = 4\n\n\ndef evaluate_notebook(nb, working_dir=None):\n # Create evaluated version and save it to the dest path.\n nb_runner = NotebookRunner(nb=nb, working_dir=working_dir)\n nb_runner.run_notebook()\n return nb_runner.nb\n\n\ndef nb_to_html(nb, template_name=DEFAULT_TEMPLATE, resources=None):\n \"\"\"convert notebook to html\n \"\"\"\n exporter = html.HTMLExporter(template_file=template_name)\n full_resources = dict(metadata = nb.metadata)\n if resources is not None:\n full_resources.update(resources)\n output, resources = exporter.from_notebook_node(\n nb, resources=full_resources)\n return output\n\n\ndef main():\n parser = ArgumentParser(description=DESCRIP,\n epilog=EPILOG,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('nbfile', type=str,\n help='notebook file')\n parser.add_argument('outdir', type=str,\n help='directory to output files')\n parser.add_argument('--template', type=str,\n default=DEFAULT_TEMPLATE,\n help='html template name')\n args = parser.parse_args()\n if not isdir(args.outdir):\n raise RuntimeError('{} is not a directory'.format(args.outdir))\n write_ipynb(args.nbfile, args.outdir, template_name=args.template)\n\n\ndef write_ipynb(nb_path, out_dir, template_name=DEFAULT_TEMPLATE):\n fpath, fname = psplit(nb_path)\n froot, ext = splitext(fname)\n with io.open(nb_path, 'rt') as f:\n nb = nb_read(f, DEFAULT_READ_FORMAT)\n nb.metadata['name'] = froot\n nb_evaluated = evaluate_notebook(nb, working_dir=fpath)\n with io.open(pjoin(out_dir, fname), 'wt') as f:\n nb_write(nb, f, DEFAULT_WRITE_FORMAT)\n nb_html = nb_to_html(nb_convert(nb_evaluated, HTML_FORMAT),\n template_name=template_name,\n resources=dict(nb_fname=fname))\n with io.open(pjoin(out_dir, froot + '.html'), 'wb') as f:\n f.write(nb_html.encode('utf-8'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"matthew-brett/perrin-academy","sub_path":"tools/write_ipynb.py","file_name":"write_ipynb.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6050112378","text":"from test.utils import with_random, BaseTest\nimport tri_image.utils as utils\n\n\n#######################################################################\ndef slow(f):\n f.slow = True\n return f\n\n\n#######################################################################\nclass TestEvolver(BaseTest):\n ###################################################################\n def test_constructor(self):\n self.get_evolver()\n\n ###################################################################\n @with_random\n def test_randomlyMoveTriangle(self):\n e = self.get_evolver()\n tri = utils.create_random_triangles(e.size, 1, utils.RGB)[0]\n self.assertEqual(tri.coordinates, [1, 2, 3, 4, 5, 6])\n e.randomly_move_triangle(tri, variance=20)\n\n # the triangle should now have had the center of the triangle\n # moved by some amount, limited to be between -variance, +variance\n self.assertEqual(tri.coordinates, [12, 14, 14, 16, 16, 18])\n","repo_name":"tobynance/tri_image","sub_path":"test/test_evolver.py","file_name":"test_evolver.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"14081161246","text":"\"\"\"\nImplements class containing the stochastic simulation logic outlined in the\nfollowing doc: \nhttps://docs.google.com/document/d/18wv_2vcH9tKx1OJ0PpJoI8QZMTSutzX9f44mNKgVS1g/edit#\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass SIRStochasticSimulation:\n def __init__(self, params):\n\n # Meta-parameters governing the maximum number of days an\n # individual spends in each 'infection' state\n self.max_time_ID = params[\"max_time_ID\"]\n\n # parameters governing distribution over time spent in each\n # of these infection states:\n # Assumptions about the sample_X_times variables:\n # sample_X_times(n) returns a numpy array times of length max_time_X\n # such that times[k] is the number of people who stay in state X\n # for k time periods, and sum(times) == n.\n self.sample_ID_times = params[\"ID_time_function\"]\n\n # assumption: sample_QI_exit_count(n) returns a number m <= n\n # indicating the number of people in the state QI\n # who exit quarantine, given than n people initially\n # start there\n self.sample_QI_exit_count = params[\"sample_QI_exit_function\"]\n self.sample_QS_exit_count = params[\"sample_QS_exit_function\"]\n\n # parameters governing distribution over transition out of\n # each infection state\n self.exposed_infection_p = params[\"exposed_infection_p\"]\n self.contacts_lambda = params[\"expected_contacts_per_day\"]\n\n # parameters governing test protocol\n self.days_between_tests = params[\"days_between_tests\"]\n self.test_pop_fraction = params[\"test_population_fraction\"]\n self.test_QFNR = params[\"test_protocol_QFNR\"]\n self.test_QFPR = params[\"test_protocol_QFPR\"]\n\n # parameters governing contact tracing\n self.perform_contact_tracing = params[\"perform_contact_tracing\"]\n self.contact_tracing_c = params[\"contact_tracing_constant\"]\n\n # flag governing meaning of the pre-ID state\n\n # parameters governing initial state of simulation\n self.pop_size = params[\"population_size\"]\n self.init_ID_count = params[\"initial_ID_count\"]\n\n self.init_S_count = self.pop_size - self.init_ID_count\n assert self.init_S_count >= 0\n\n # instantiate state variables and relevant simulation variables\n self.reset_initial_state()\n\n def reset_initial_state(self):\n self.S = self.init_S_count\n self.ID = self.sample_ID_times(self.init_ID_count)\n self.QS = 0\n self.QI = 0\n self.R = 0\n\n var_labels = self.get_state_vector_labels()\n self.sim_df = pd.DataFrame(columns=var_labels)\n self._append_sim_df()\n self.current_day = 0\n self.last_test_day = -1\n\n def run_new_trajectory(self, T):\n self.reset_initial_state()\n for _ in range(T):\n self.step()\n return self.sim_df\n\n def run_contact_trace(self, new_QI):\n raise (Exception(\"not supported\"))\n leave_E = min(sum(self.E), new_QI * self.contact_tracing_c)\n new_QI = int(self.exposed_infection_p * leave_E)\n new_QS = leave_E - new_QI\n self.QS = self.QS + new_QS\n self.QI = self.QI + new_QI\n\n idx = self.max_time_E - 1\n while leave_E > 0:\n leave_E_idx = min(self.E[idx], leave_E)\n self.E[idx] -= leave_E_idx\n leave_E -= leave_E_idx\n idx -= 1\n\n def run_test(self):\n \"\"\" execute one step of the testing logic \"\"\"\n # infectious_test_pop = free_infectious * self.test_pop_fraction\n # fluid_new_QI = infectious_test_pop * (1 - self.test_QFNR)\n\n # the probability that a free infected individual is quarantined\n # on this round of testing\n new_QI_p = self.test_pop_fraction * (1 - self.test_QFNR)\n\n # sample the number of free infected people who end up 
quarantined\n # first from the exposed state -- multiply by exposed_infection_p to account for uncertain\n # nature of infection status in the E group\n new_QI_from_ID = np.random.binomial(self.ID, new_QI_p)\n\n # probability a free-susceptible person becomes quarantined\n new_QS_p = self.test_pop_fraction * self.test_QFPR\n # sample number of free susceptible people who become quarantined\n new_QS_from_S = np.random.binomial(self.S, new_QS_p)\n\n self.ID = self.ID - new_QI_from_ID\n self.S = self.S - new_QS_from_S\n\n new_QI = sum(new_QI_from_ID)\n self.QI = self.QI + new_QI\n\n new_QS = new_QS_from_S\n self.QS = self.QS + new_QS\n\n if self.perform_contact_tracing:\n self.run_contact_trace(new_QI)\n\n def step(self):\n \"\"\" simulate a single day in the progression of the disease \"\"\"\n\n # do testing logic first\n if (\n self.current_day - self.last_test_day >= self.days_between_tests\n or self.last_test_day == -1\n ):\n self.last_test_day = self.current_day\n self.run_test()\n\n free_infectious = 0\n free_infectious += sum(self.ID)\n # free_infectious += sum(self.E) * self.exposed_infection_p\n\n free_susceptible = self.S\n\n # simulate new exposures between free infectious & free susceptible:\n free_tot = free_infectious + free_susceptible + self.R\n\n poisson_param = (\n free_infectious * self.contacts_lambda * free_susceptible / free_tot\n )\n\n n_contacts = min(np.random.poisson(poisson_param), self.S)\n new_ID = np.random.binomial(n_contacts, self.exposed_infection_p)\n\n # resolve ID queue\n new_R = self.ID[0]\n\n # sample number of people who leave quarantine\n leave_QI = self.sample_QI_exit_count(self.QI)\n new_R += leave_QI\n\n new_S = self.sample_QS_exit_count(self.QS)\n\n # update relevant state variables:\n self.S = self.S + new_S - new_ID\n self.R += new_R\n\n self.QI -= leave_QI\n self.QS -= new_S\n\n # update array-based state variables\n self._shift_array_state_variables()\n self.ID = self.ID + self.sample_ID_times(new_ID)\n\n self._append_sim_df()\n\n self.current_day += 1\n\n def _append_sim_df(self):\n data = self.get_current_state_vector()\n labels = self.get_state_vector_labels()\n new_row_df = pd.DataFrame([data], columns=labels)\n self.sim_df = self.sim_df.append(new_row_df, ignore_index=True)\n if sum(data) != self.pop_size:\n raise (Exception(\"population has shrunk\"))\n\n def _shift_array_state_variables(self):\n idx = 0\n while idx <= self.max_time_ID - 2:\n self.ID[idx] = self.ID[idx + 1]\n idx += 1\n self.ID[self.max_time_ID - 1] = 0\n\n def get_current_state_vector(self):\n return np.concatenate([[self.S], [self.QS], [self.QI], [self.R], self.ID])\n\n def get_state_vector_labels(self):\n return [\"S\", \"QS\", \"QI\", \"R\"] + [\n \"ID_{}\".format(x) for x in range(self.max_time_ID)\n ]\n","repo_name":"saitcakmak/BoRisk","sub_path":"BoRisk/test_functions/covid_simulators/sir_stochastic_sim.py","file_name":"sir_stochastic_sim.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"}
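The simulator above documents a contract for sample_ID_times(n): return an array of length max_time_ID whose entries count people per duration and sum to n. A multinomial draw satisfies that contract directly; a minimal sketch, with a hypothetical uniform duration distribution standing in for whatever the real parameterisation is:

import numpy as np

def make_sample_id_times(max_time_id: int):
    # placeholder: uniform over durations; the real model would supply its own pmf
    probs = np.full(max_time_id, 1.0 / max_time_id)

    def sample_id_times(n: int) -> np.ndarray:
        return np.random.multinomial(n, probs)  # length max_time_id, sums to n

    return sample_id_times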
+{"seq_id":"10508308625","text":"import re\n\nwith open(\"day2.input\") as f:\n data = f.readlines()\n\n# \"7-10 m: qmpgmmsmmmmkmmkj\"\npat = re.compile(\"^([0-9]+)-([0-9]+) (\\S): (.*)$\")\n\n# Part 1\nvalid_count = 0\nfor line in data:\n m = pat.match(line)\n if not m:\n raise Exception(\"Bad match\", line)\n occurrences = m.group(4).count(m.group(3))\n if int(m.group(1)) <= occurrences <= int(m.group(2)):\n valid_count += 1\nprint(valid_count)\n\n# Part 2\nvalid_count = 0\nfor line in data:\n m = pat.match(line)\n if not m:\n raise Exception(\"Bad match\", line)\n p1, p2, ch, pw = int(m.group(1)), int(m.group(2)), m.group(3), m.group(4)\n if (pw[p1 - 1] == ch) != (pw[p2 - 1] == ch):\n valid_count += 1\nprint(valid_count)\n","repo_name":"Jemgoss/adventofcode","sub_path":"2020/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21034052744","text":"from setuptools import setup\nimport subprocess\nfrom typing import List\n\n\ndef find_version(filename: str = \"version\") -> str:\n \"\"\"Parse the version and build details stored in the 'version' file.\"\"\"\n try:\n cmd: List[str] = [\"git\", \"describe\", \"--tags\", \"--always\", \"HEAD\"]\n gitversion: str = subprocess.check_output(\n cmd, stderr=subprocess.DEVNULL\n ).decode().strip()\n build: List[str] = gitversion.split(\"-\")\n # -- (e.g. 0.2-8-adfebee)\n if len(build) > 1:\n return \"{}.post{}\".format(build[0], build[1])\n\n # tagged commit\n return gitversion\n except subprocess.CalledProcessError:\n # If .git does not exist, default to an old dev version\n return \"0.1.dev0\"\n\n\nsetup(version=find_version())\n","repo_name":"rgildein/juju-verify","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"33562205802","text":"# https://www.codechef.com/AUG20B/problems/CRDGAME3\n\ntest = int(input())\n\ndef ceil(n):\n if(n > int(n)):\n return int(n) + 1\n return int(n)\n\nfor _ in range(test):\n ab = input().split()\n a = int(ab[0])/9\n b = int(ab[1])/9\n\n a = ceil(a)\n b = ceil(b)\n\n if(a < b):\n print(\"0\",a)\n else:\n print(\"1\",b)\n","repo_name":"NikithKS/Days-of-Coding","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9091832568","text":"import pytest\n\nfrom app import db, create_app\nfrom tests.utils import register, login\nfrom app.controllers import populate_db_by_test_data, bid_generation\n\n\n@pytest.fixture\ndef client():\n app = create_app(environment=\"testing\")\n app.config[\"TESTING\"] = True\n\n with app.test_client() as client:\n app_ctx = app.app_context()\n app_ctx.push()\n db.drop_all()\n db.create_all()\n populate_db_by_test_data()\n bid_generation()\n register(\"sam\")\n login(client, \"sam\")\n yield client\n db.session.remove()\n db.drop_all()\n app_ctx.pop()\n\n\ndef test_edited_bids(client):\n response = client.post(\"/archive_or_export\", data={'1': 'on', '2': 'on'}, follow_redirects=True)\n assert response.status_code == 200\n assert b'Archived' in response.data\n\n\ndef test_biddings(client):\n response = client.get(\"/biddings\")\n assert response.status_code == 200\n assert b\"Client\" in response.data\n","repo_name":"Simple2B/flora","sub_path":"tests/test_biddings.py","file_name":"test_biddings.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32878329797","text":"\n\ndef find_three_words(text: str):\n\n def symbols_of_ascii():\n '''\n Return list of ASCII characters in the ranges\n 33 - 47 | 58 - 64 | 91 - 96 | 123 - 126\n '.', ',', ':', '!', '\"', \"'\", '[', ']', '-', '(', ')', etc.\n '''\n list = []\n list.extend([chr(i) for i in range(33, 48)])\n list.extend([chr(i) for i in range(58, 65)])\n list.extend([chr(i) for i in range(91, 97)])\n list.extend([chr(i) for i in range(123, 127)])\n return list\n\n lst_no = symbols_of_ascii()\n lst = []\n\n for word in text.lower().split():\n if not word in lst_no:\n _word = word\n if word[-1] in lst_no:\n _word = _word[:-1]\n if word[0] in lst_no:\n _word = _word[1:]\n lst.append(_word)\n\n if len(lst) < 3:\n return []\n\n _dict = dict()\n for word in lst:\n _dict[word] = _dict.get(word, 0) + 1\n\n _list = []\n for key, value in _dict.items():\n _list.append((value, key))\n _list.sort(reverse=True)\n\n result = []\n for freq, word in _list[0:3]:\n result.append(word)\n\n return result\n\n\ntext = input('Type your text: ')\nprint(find_three_words(text))\n","repo_name":"Roninon/Test-Task","sub_path":"find-three-words.py","file_name":"find-three-words.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72526788627","text":"import unittest\nfrom mock import Mock\nimport yangvoodoo\nimport subprocess\nimport time\nimport yangvoodoo.sysrepodal\n\n\nclass test_sysrepodal(unittest.TestCase):\n def setUp(self):\n self.subject = yangvoodoo.sysrepodal.SysrepoDataAbstractionLayer()\n self.subject.session = Mock()\n\n def test_handle_error_no_subscribers(self):\n error_mock = Mock()\n error_mock.xpath.return_value = \"/path\"\n error_mock.message.return_value = \"The node is not enabled in running datastore\"\n errors_mock = Mock()\n errors_mock.error_cnt.return_value = 1\n errors_mock.error.return_value = error_mock\n self.subject.session.get_last_errors = Mock(return_value=errors_mock)\n\n with self.assertRaises(\n yangvoodoo.Errors.SubscriberNotEnabledOnBackendDatastore\n ) as context:\n self.subject._handle_error(\"/path\", \"err\")\n self.assertEqual(\n str(context.exception),\n \"There is no subscriber connected able to process data for the following path.\\n /path\",\n )\n\n def test_handle_error_no_other_backend_error(self):\n error_mock = Mock()\n error_mock.xpath.return_value = \"/path\"\n error_mock.message.return_value = \"Someother stuff went wrong\"\n errors_mock = Mock()\n errors_mock.error_cnt.return_value = 1\n errors_mock.error.return_value = error_mock\n self.subject.session.get_last_errors = Mock(return_value=errors_mock)\n\n with self.assertRaises(yangvoodoo.Errors.BackendDatastoreError) as context:\n self.subject._handle_error(\"/path\", \"err\")\n self.assertEqual(\n str(context.exception),\n \"1 Errors occured\\nError 0: Someother stuff went wrong (Path: /path)\\n\",\n )\n","repo_name":"anter74/python-yang-voodoo","sub_path":"test/integration/test_sysrepodal.py","file_name":"test_sysrepodal.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27204382812","text":"import unittest\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom angorapy.common.policies import BetaPolicyDistribution\nfrom angorapy.models import get_model_builder\nfrom angorapy.utilities.model_utils import reset_states_masked, build_sub_model_from, get_layers_by_names\n\n\nclass UtilTest(unittest.TestCase):\n\n def test_masked_state_reset(self):\n model = tf.keras.Sequential((\n tf.keras.layers.Dense(2, batch_input_shape=(7, None, 2)),\n tf.keras.layers.LSTM(5, stateful=True, name=\"larry\", return_sequences=True),\n tf.keras.layers.LSTM(5, stateful=True, name=\"harry\"))\n )\n\n l_layer = model.get_layer(\"larry\")\n h_layer = model.get_layer(\"harry\")\n l_layer.reset_states([s.numpy() + 9 for s in l_layer.states])\n h_layer.reset_states([s.numpy() + 9 for s in h_layer.states])\n reset_states_masked(model, [True, False, False, True, False, False, True])\n\n self.assertTrue(np.allclose([s.numpy() for s in model.get_layer(\"larry\").states],\n [s.numpy() for s in model.get_layer(\"harry\").states]))\n self.assertTrue(np.allclose([s.numpy() for s in model.get_layer(\"larry\").states], [\n [0, 0, 0, 0, 0],\n [9, 9, 9, 9, 9],\n [9, 9, 9, 9, 9],\n [0, 0, 0, 0, 0],\n [9, 9, 9, 9, 9],\n [9, 9, 9, 9, 9],\n [0, 0, 0, 0, 0],\n ]))\n\n # def test_submodeling_from(self):\n # env = gym.make(\"LunarLanderContinuous-v2\")\n # full_model, _, _ = get_model_builder(\"simple\", \"gru\", shared=False)(env, BetaPolicyDistribution(env))\n # sub_model_from_a = build_sub_model_from(full_model, \"beta_action_head\")\n # sub_model_from_b = build_sub_model_from(full_model, \"policy_recurrent_layer\")\n #\n # for sub_model_from in [sub_model_from_a, sub_model_from_b]:\n # layer = get_layers_by_names(sub_model_from, [\"beta_action_head\"])[0]\n #\n # input_shape_raw = layer.get_input_shape_at(1)\n # input_shape_replaced = tuple(v if v is not None else 1 for v in input_shape_raw)\n #\n # out = sub_model_from(tf.random.normal(input_shape_replaced))","repo_name":"ccnmaastricht/angorapy","sub_path":"tests/test_model_utils.py","file_name":"test_model_utils.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"}
+{"seq_id":"38914235263","text":"import requests\nimport urllib\n\nsession = requests.Session()\nsession.max_redirects = 10000\nrequest = session.get(\"http://localhost\")\n\nchars = \"\"\nfor response in request.history:\n chars += urllib.parse.unquote(response.url.split(\"=\")[-1])\n\nprint(chars)\n\n# Take base64 and decode it for the flag\n","repo_name":"wmgcyber/intakectf-2021-public","sub_path":"Miscellaneous/redirects/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10656915065","text":"import copy\nimport matplotlib.pyplot as plt\nimport parameters_MF_MB as PRM\nimport figures_final as GRAPH\nimport matplotlib_latex_bridge as mlb\n\nmlb.setup_page(textwidth=6.97522, columnwidth=3.36305, fontsize=10)\n\n\ndef main():\n\n\t##################################################\n\t# LEARNING PLOT AND VIOLIN STATISTICAL ANALYSIS FIGURE ##########################\n\tparams = copy.deepcopy(PRM.params)\n\tparams['replay_refs'] = [0,1,2,4]\n\tfig_det = GRAPH.figure_learning_curves_violin_plots(det=True, params=params, thres=0.05)\n\tfig_nondet = GRAPH.figure_learning_curves_violin_plots(det=False, params=params, thres=0.05)\n\tfig_det.savefig(\"Saved_figures/learning_plots_det_1200.jpg\", format='jpg', dpi=1200)\n\tfig_nondet.savefig(\"Saved_figures/learning_plots_nodet_1200.jpg\", format='jpg', dpi=1200)\n\t# plt.show()\n\n\t##################################################\n\n\t# # Q-VALUES AND REPLAYS ANALYSIS FIGURE ##########################\n\tparams = copy.deepcopy(PRM.params)\n\tparams['replay_refs'] = [0,1,2,4]\n\tfig_det = GRAPH.figure_Qvalues(det=True, params=params, legends=False)\n\tfig_nondet = GRAPH.figure_Qvalues(det=False, params=params, legends=True)\n\tfig_det.savefig(\"Saved_figures/qvalues_det.pdf\")\n\tfig_nondet.savefig(\"Saved_figures/qvalues_nodet.jpg\", format='jpg', dpi=300)\n\t# plt.show()\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"esther-poniatowski/Massi2022","sub_path":"data+code_2generate_the_paper_figures/learning_performance_figure/figure_learning_and_qvalues.py","file_name":"figure_learning_and_qvalues.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"13955930982","text":"'''\nA pangram is a string that contains every letter of the alphabet. Given a sentence determine whether it is a pangram in the English alphabet. Ignore case. Return either pangram or not pangram as appropriate.\n'''\n\ndef pangrams(s):\n alphabet = {\n 'a': 0,\n 'b': 0,\n 'c': 0,\n 'd': 0,\n 'e': 0,\n 'f': 0,\n 'g': 0,\n 'h': 0,\n 'i': 0,\n 'j': 0,\n 'k': 0,\n 'l': 0,\n 'm': 0,\n 'n': 0,\n 'o': 0,\n 'p': 0,\n 'q': 0,\n 'r': 0,\n 's': 0,\n 't': 0,\n 'u': 0,\n 'v': 0,\n 'w': 0,\n 'x': 0,\n 'y': 0,\n 'z': 0\n }\n s = s.lower()\n split = [*s]\n for letter in split:\n if letter in alphabet:\n alphabet[letter] += 1\n\n print(alphabet)\n counts = alphabet.values()\n if 0 in counts:\n print('not pangram')\n return 'not pangram'\n else: \n print('pangram')\n return 'pangram'\n\npangrams('We promptly judged antique ivory buckles for the next prize')\npangrams('We promptly judged antique ivory buckles for the prize')","repo_name":"scottydphillips/hackerRankPracticeAlgos","sub_path":"python/pangrams.py","file_name":"pangrams.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"22116255884","text":"'''\n\n Rotina que lê o banco de dados das UCDS de referência de cada bacia\n e plota gráficos de barra com limites de \"atenção\" e \"alerta\", assim como\n gera tabela excel com estas informações.\n\n Autor: Francisco Thiago Franca Parente (BHYK)\n Criação: 11/03/2020\n\n Edições:\n + 27/08/2020:\n Modificado o gráfico dos últimos dois anos. Foi acrescentado dois\n xticks aos gráfico. Um referente à média ponderada entre janeiro e\n o mês de interesse e outro com o percentual anual do ano anterior.\n\n Ainda, foi adicionado ao código uma nova forma de exportar os\n resultados já exatamente como exposto na tabela do relatório.\n\n'''\n\n# _____________________________________________________________________________\n# Modificar aqui\n# _____________________________________________________________________________\nimport os\n# Diretório onde serão salvos os outputs\nPATH = os.path.normpath(\"XXXXXXXXXXXXXXXX\")\n\n# Intervalo de data para busca no banco de dados\nDATEMIN = u\"01/01/2010 00:00:00\"\nDATEMAX = u\"31/01/2022 23:00:00\"\n\n# Bacias de interesse\nBACIAS = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\n# Definindo os limites de atenção e alerta de vento e onda\nwind_lim = [20., 28.]\nwave_lim = [1.5, 2., 2.5]\n\n# NÚMERO do mês de interesse (1 -jan, 2-fev, ...)\nm_interesse = [1]\n\n# Unidade de medida para vento\nunidademedida = 'nós'\n\n# _____________________________________________________________________________\n\nfrom warnings import filterwarnings\n# Desativação de alertas minimizando mensagens no console\nfilterwarnings(\"ignore\")\n\nfrom sys import path\nfrom datetime import timedelta\nfrom datetime import datetime as dt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.patheffects as PathEffects\nimport time\npth1 = 'XXXXXXXXXXXXXXX'\ndirs = ['data', 'math', 'settings', 'graph']\nfor d in dirs:\n pth2 = pth1 + d\n path.append(pth2)\n\nimport definitions as mopdef\nimport statistic as stc\nimport OCNdb as ocn\nimport histograma as htg\nfrom calendar import monthrange\n# _____________________________________________________________________________\n\n# Variável de escrita excel\nwave_writer = pd.ExcelWriter(PATH + '\\\\wave_todas_bacias.xlsx')\nwind_writer = pd.ExcelWriter(PATH + '\\\\wind_todas_bacias.xlsx')\n\n# Labels dos meses do ano\nMNTHLBL = ('Jan', 'Fev', 'Mar', 'Abr', 'Mai', 'Jun',\n 'Jul', 'Ago', 'Set', 'Out', 'Nov', 'Dez')\n\n# datetime para reindexação\ndti = dt.strptime(DATEMIN, '%d/%m/%Y %H:%M:%S')\ndtf = dt.strptime(DATEMAX, '%d/%m/%Y %H:%M:%S')\n\n# Carregando quais são as unidades unidades representativas das bacias\nUCDS = mopdef.get_ucds_bacias()\n\n# Verificando os anos que serão avaliados\n# ano atual\nYEAR = int(DATEMAX[6:10])\n\nstart = time.time()\n_wind, _wave = pd.DataFrame(), pd.DataFrame()\nfor bx, bacia in enumerate(BACIAS):\n # Pegando as ucds de referência de cada bacia\n ucds_wind = list(filter(None,\n [item for ucd in UCDS.loc[bacia].VENTO.values\n for item in ucd]))\n ucds_wave = list(filter(None,\n [item for ucd in UCDS.loc[bacia].ONDA.values\n for item in ucd]))\n # Lendo dados do banco\n crono = time.time()\n print(\"{} // {}\".format('Lendo vento de', bacia))\n wind = ocn.get_BDs(ucds_wind, [DATEMIN, DATEMAX], 'meteo')\n print(\"{} // {:.2f} min\".format(\n 'Tempo de leitura do vento',\n (time.time() - crono) / 60))\n print(\"{} // {}\".format('Lendo onda de', bacia))\n crono = time.time()\n wave = ocn.get_BDs(ucds_wave, 
[DATEMIN, DATEMAX], 'wave')\n print(\"{} // {}\".format('Finalizada consulta de dados de', bacia))\n print(\"{} // {:.2f} min\".format(\n 'Tempo de leitura do onda',\n (time.time() - crono) / 60))\n # Verificando unidade de medidia\n if unidademedida == 'nós':\n wind.WSPD = wind.WSPD * 1.94384449\n\n # # Plotando para verificação da série que será analisada\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # wind.groupby(level=[2]).median().WSPD.plot(ax=ax)\n # ax.set_title(\"Vento {}\".format(bacia))\n # fig.savefig(\n # '{}\\\\Vento_{}.png'.format(PATH, bacia),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # wave.groupby(level=[2]).median().VAVH.plot(ax=ax)\n # ax.set_title(\"Onda {}\".format(bacia))\n # fig.savefig(\n # '{}\\\\Onda_{}.png'.format(PATH, bacia),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n\n # Calculando percentual de dados.\n p_wind = stc.percentual(\n wind.groupby(level=[2]).median().WSPD,\n wind_lim,\n '.1f',\n 'Int.',\n atype='anual')\n p_wave = stc.percentual(\n wave.groupby(level=[2]).median().VAVH,\n wave_lim,\n '.1f',\n 'Hs',\n atype='anual')\n _wind = _wind.append(pd.concat([p_wind], keys=[bacia], names=['Bacia']))\n _wave = _wave.append(pd.concat([p_wave], keys=[bacia], names=['Bacia']))\n print('[{}: Ok]'.format(bacia))\n\n_wind.to_excel(wind_writer)\n_wave.to_excel(wave_writer)\nwind_writer.close()\nwave_writer.close()\n\nprint(\"{} // {:.2f} min\".format(\n 'Tempo Total de leitura dos dados',\n (time.time() - start) / 60))\n# _____________________________________________________________________________\n# Plotando\n# _____________________________________________________________________________\n\nwidth = .45\n\n# PLOTA DADO DE TODOS OS ANOS DO MÊS DE INTERESSE\nfor name, param in zip(['wind', 'wave'], [_wind, _wave]):\n for m in m_interesse:\n fig = plt.figure(figsize=(12, 9))\n\n ordem = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\n for splt, bc in enumerate(ordem):\n\n xlbl = [str(x) for x in param.index.levels[1]]\n xtik = np.arange(0, len(xlbl), 1)\n\n if param.loc[bc].xs(MNTHLBL[m - 1], level=1).shape[0] != len(xlbl):\n ckdata = param.loc[bc].xs(MNTHLBL[m - 1], level=1)\n for ey in xlbl:\n if int(ey) not in ckdata.index:\n ckdata = ckdata.append(\n pd.DataFrame(\n data=[],\n index=[int(ey)],\n columns=ckdata.columns))\n ckdata = ckdata.sort_index()\n bars = {x: ckdata[x].values for x in ckdata.columns}\n else:\n ckdata = param.loc[bc].xs(MNTHLBL[m - 1], level=1)\n lmts = list(ckdata.columns)\n lmts.reverse()\n bars = {x: ckdata[x].values for x in lmts}\n\n ax = fig.add_subplot(\n int('{}1{}'.format(len(param.index.levels[0]), splt + 1)))\n\n if len(bars) > 2:\n colors = ['#E24A33', '#FBC15E', '#27AE60']\n else:\n colors = ['#E24A33', '#FBC15E']\n\n bottom = np.zeros(len(xtik))\n for n, bar in enumerate(bars.keys()):\n rects2 = ax.bar(\n xtik,\n bars[bar],\n width, color=colors[n],\n bottom=np.nan_to_num(bottom),\n align='center', alpha=.7, edgecolor='k',\n label=bar)\n bottom += bars[bar]\n\n\n if name == 'wind':\n ax.set_ylim(0., 40)\n dy, legx = 6, .738\n else:\n ax.set_ylim(0., 120)\n dy, legx = 10, .868\n ax.set_xlim(0 - width, xtik[-1] + width)\n ax.set_ylabel('Registros (%)', fontsize=14)\n \n ax.text(-0.4, ax.get_ylim()[1] - dy, bc, fontsize=14, weight='bold')\n\n plt.xticks(xtik, xlbl, fontsize=14)\n for label in ax.xaxis.get_majorticklabels():\n label.set_fontsize(14)\n for label in ax.yaxis.get_majorticklabels():\n label.set_fontsize(14)\n # 
COLOCANDO VALORES NAS BARRAS\n texto = [(bars[x]) for x in bars.keys()]\n\n bottom = np.zeros(len(xtik))\n for tx in texto:\n strnumb = ['{0:2.1f}%'.format(round(x, 2)).replace('.', ',') for x in tx]\n for _, x in enumerate(range(len(xtik))):\n txt = ax.text(\n xtik[x],\n tx[x] + bottom[x],\n strnumb[x],\n horizontalalignment='center',\n fontsize=11)\n plt.setp(\n txt,\n path_effects=[\n PathEffects.withStroke(linewidth=3, foreground=\"w\")])\n bottom += tx\n\n ax.legend(\n prop={'size': 14},\n bbox_to_anchor=(legx, -.2),\n ncol=len(bars))\n\n fig.savefig(\n '{}\\\\{}_anual.png'.format(PATH, name),\n format='png',\n bbox_inches='tight')\n\n\n# _____________________________________________________________________________\n# PLOTA DADO DOS ÚLTIMOS DOIS ANOS\n# _____________________________________________________________________________\nnewlabel = MNTHLBL.__add__(tuple(\n ['Média \\n ponderada \\n (Jan - {})'.format(MNTHLBL[m_interesse[0]-1]),\n 'Total \\n de {}\\n e {}'.format(param.index.levels[1][-2], param.index.levels[1][-3])]\n))\n\nxax2 = np.append(np.arange(1, 13), 14)*1.3\nwidth = 0.3\n\nfor name, param in zip(['wind', 'wave'], [_wind, _wave]):\n fig = plt.figure(figsize=(12, 9))\n yr = list(param.index.levels[1][-3:])\n\n ordem = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\n for splt, bc in enumerate(ordem):\n\n ax = fig.add_subplot(int('{}1{}'.format(\n len(param.index.levels[0]),\n splt + 1)))\n for i, y in enumerate(yr):\n if i == 0:\n dx = -.3\n sb = .3\n linewidth = 1\n elif i == 1:\n dx = .0\n sb = .7\n linewidth = 1\n else:\n dx = .3\n sb = 1.\n linewidth = 2\n\n # Calculado o peso de cada mês para média ponderada\n peso = []\n for m in np.arange(1, 13, 1):\n peso.append(np.mean([\n monthrange(y, m)[1]\n for y in np.unique(param.loc[bc].index.get_level_values(0))\n ]) / 31)\n try:\n # pegando somente os percentuais dos meses até o mês de interesse\n sqz = param.loc[bc].loc[y].drop('Total', axis=0)\n # pegando percentuais até o mes de interesse para acumulado\n avg = param.loc[bc].loc[y].drop('Total', axis=0)[:m_interesse[0]]\n\n lmts = list(param.columns)\n lmts.reverse()\n bars = {}\n for cll in lmts:\n bars[cll] = np.nan_to_num(np.append(\n sqz[cll].values,\n round(np.average(\n avg[cll].values,\n weights=peso[:m_interesse[0]]), 2)))\n\n # the bars\n if len(bars) > 2:\n colors = ['#E24A33', '#FBC15E', '#27AE60']\n else:\n colors = ['#E24A33', '#FBC15E']\n\n bottom = np.zeros(len(xax2))\n for n, bar in enumerate(bars.keys()):\n rects2 = ax.bar(\n xax2 + dx,\n bars[bar],\n width, color=colors[n],\n bottom=np.nan_to_num(bottom),\n align='center', alpha=sb, edgecolor='k',\n label=bar, linewidth=linewidth)\n bottom += np.nan_to_num(bars[bar])\n\n for label in ax.xaxis.get_majorticklabels():\n label.set_fontsize(14)\n for label in ax.yaxis.get_majorticklabels():\n label.set_fontsize(14)\n except Exception:\n continue\n # texto = [(bars[x]) for x in bars.keys()]\n\n # bottom = np.zeros(len(xax2))\n # for tx in texto:\n # strnumb = ['{0:2.1f}%'.format(round(x, 2)).replace('.', ',') for x in tx]\n # for _, x in enumerate(range(len(xax2))):\n # txt = ax.text(\n # xax2[x] + dx,\n # tx[x] + bottom[x],\n # strnumb[x],\n # horizontalalignment='center',\n # fontsize=11)\n # plt.setp(\n # txt,\n # path_effects=[\n # PathEffects.withStroke(linewidth=3, foreground=\"w\")])\n # bottom += tx\n\n # ax.set_xlim(0, 17)\n if name == 'wind':\n ax.set_ylim(0., 40)\n ax.text(0.2, ax.get_ylim()[1] - 6, bc, fontsize=14, weight='bold')\n\n else:\n ax.set_ylim(0., 110)\n 
ax.text(0.2, ax.get_ylim()[1] - 10, bc, fontsize=14, weight='bold')\n ax.set_ylabel('Registros (%)', fontsize=14)\n\n sb, dx = [.3, .7], [-.3, .3]\n for m, yss in enumerate(yr[:-1]):\n btm = 0\n for n, cll in enumerate(lmts):\n acmval = param.loc[bc].loc[yss].loc['Total'][cll]\n acum = ax.bar(\n 16*1.3 + dx[m], acmval,\n width + .2, color=colors[n],\n align='center', alpha=sb[m], bottom=btm, edgecolor='k')\n btm += acmval\n if splt == len(param.index.levels[0]) - 1:\n plt.xticks(np.append(xax2, 16 * 1.3), newlabel,\n fontsize=14, rotation=0)\n else:\n plt.xticks([])\n\n if name == 'wind':\n bboxx = (.86, -.40)\n xx0, yy0 = -1., -23\n xx1, yy1 = 5.6, -23\n xx2, yy2 = 12.6, -23\n if name == 'wave':\n bboxx = (.82, -.4)\n xx0, yy0 = .03, -60\n xx1, yy1 = 6.0, -60\n xx2, yy2 = 12.5, -60\n ax.text(xx2, yy2, str(yr[2]), weight='bold')\n ax.text(xx1, yy1, str(yr[1]), weight='bold')\n ax.text(xx0, yy0, str(yr[0]), weight='bold')\n\n ax.legend(\n prop={'size': 12},\n bbox_to_anchor=bboxx,\n frameon=False,\n ncol=3,\n columnspacing=5.5)\n\n fig.savefig(\n '{}\\\\{}_compara_{}_{}_{}.png'.format(PATH, name, yr[0], yr[1], yr[2]),\n format='png',\n bbox_inches='tight')\n\n# _____________________________________________________________________________\n# EXPORTANDO TABELA UTILIZADA NO RELATÓRIO \n# _____________________________________________________________________________\nwindtable = _wind.xs(MNTHLBL[m_interesse[0] - 1], level=2)\nwavetable = _wave.xs(MNTHLBL[m_interesse[0] - 1], level=2)\nindyrs = np.arange(2018, windtable.index.levels[1][-1] + 1, 1)\n\ntwd, twv = pd.DataFrame(), pd.DataFrame()\nfor bc in windtable.index.levels[0]:\n wd = windtable.loc[bc].loc[indyrs]\n mnyrs = windtable.loc[bc][:-1].mean().to_frame().T\n mnyrs.index = ['{} a {}'.format(windtable.loc[bc].index[0],\n windtable.loc[bc].index[-2])]\n wd = wd.append(mnyrs)\n wd[\"Total\"] = wd.sum(axis=1)\n wd = wd.round(1)\n\n twd = twd.append(pd.concat([wd], keys=[bc], names=['Bacia']))\n\n wv = wavetable.loc[bc].loc[indyrs]\n mnyrs = wavetable.loc[bc][:-1].mean().to_frame().T\n mnyrs.index = ['{} a {}'.format(wavetable.loc[bc].index[0],\n wavetable.loc[bc].index[-2])]\n wv = wv.append(mnyrs)\n wv[\"Total\"] = wv.sum(axis=1)\n wv = wv.round(1)\n\n twv = twv.append(pd.concat([wv], keys=[bc], names=['Bacia']))\n\nbcorder = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\nexcel = pd.ExcelWriter('{}\\\\Tabela_relatorio.xlsx'.format(PATH))\nfor sheet, parm in zip(('vento', 'onda'), (twd, twv)):\n parm.T[bcorder].T.to_excel(excel, sheet_name=sheet)\nexcel.close()\n\n#_________________________________________________________________\n# Linhas para plot de comparação entre UCDs, média e mediana\n\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # for ucd in wind.index.levels[0]:\n # wind.loc[ucd].loc['YOUNG'].WSPD.plot(\n # ax=ax,\n # alpha=1,\n # linewidth=0,\n # marker='o',\n # markersize=4)\n # wind.groupby(level=[2]).median().WSPD.plot(\n # ax=ax,\n # linestyle='-',\n # alpha=1,\n # linewidth=2,\n # color='k')\n # wind.groupby(level=[2]).mean().WSPD.plot(\n # ax=ax,\n # linestyle='-',\n # alpha=1,\n # linewidth=2,\n # color='r')\n # fig.savefig(\n # '{}\\\\teste1.png'.format(PATH),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # for ucd in wind.index.levels[0]:\n # wind.loc[ucd].loc['YOUNG'].WSPD[:200].plot(\n # ax=ax,\n # alpha=1,\n # linewidth=0,\n # marker='o',\n # markersize=4)\n # wind.groupby(level=[2]).median().WSPD[:200].plot(\n # ax=ax,\n # 
linestyle='-',\n # alpha=.7,\n # linewidth=3,\n # color='k')\n # wind.groupby(level=[2]).mean().WSPD[:200].plot(\n # ax=ax,\n # linestyle='-',\n # alpha=.7,\n # linewidth=3,\n # color='r')\n # fig.savefig(\n # '{}\\\\teste2.png'.format(PATH),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n","repo_name":"thiag0p/BHYK_scripts_ocn","sub_path":"scripts/05.Demandas_diferenciadas/Analise_desempenho.py","file_name":"Analise_desempenho.py","file_ext":"py","file_size_in_byte":17778,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"421752812","text":"import json\n\n# data = '{\"aa\":\"北京\"}'\n# dict_data = json.loads(data)\n# print(data,type(data))\n# print(dict_data,type(dict_data))\n# str_data = json.dumps(dict_data)\n# print(str_data,type(str_data)),\n#\n# with open('temp.json','w') as f:\n# f.write(str_data)\n\nwith open('temp.json','r') as f:\n data = json.load(f)\nprint(data)\n\nwith open('temp2.json','w') as g:\n json.dump(data,g)","repo_name":"ioscarry/JXWY_PLUS","sub_path":"dump_load.py","file_name":"dump_load.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73554576786","text":"import json\nimport logging\nfrom logging import FileHandler\nfrom logging import Formatter\nimport os\nimport time\nimport numpy as np\nfrom pacemaker.v00 import Pacemaker\n\nCLOCK_FREQ_HZ = 48\nN_ACTIONS = 2\nN_POSITIONS = 5\n\n# valid levels are {DEBUG, INFO, WARNING, ERROR, CRITICAL}\nLOGGING_LEVEL = logging.INFO\n\n\nclass World:\n \"\"\"\n In this world, the agent can occupy one of N_POSITIONS on a line.\n It has two actions available, move right and move left by one position.\n Attempts to move past the last position have no effect.\n\n action[0] indicates a move to the left\n action[1] indicates a move to the right\n \"\"\"\n\n def __init__(self):\n self.pacemaker = Pacemaker(CLOCK_FREQ_HZ)\n self.n_actions = N_ACTIONS\n self.n_positions = N_POSITIONS\n\n # Initialize the world\n self.position = np.random.randint(self.n_positions)\n\n # Set up logging\n os.makedirs(\"log\", exist_ok=True)\n log_name = f\"{int(time.time())}\"\n self.logger = logging.getLogger(\"world\")\n self.logger.setLevel(LOGGING_LEVEL)\n logger_file_handler = FileHandler(\n os.path.join(\"log\", f\"{log_name}_world.log\"))\n logger_file_handler.setLevel(LOGGING_LEVEL)\n logger_file_handler.setFormatter(Formatter(\"%(message)s\"))\n self.logger.addHandler(logger_file_handler)\n\n def run(self, model_action_q, model_sensor_q, animation_sensor_q):\n while True:\n self.pacemaker.beat()\n # The combined effect of all actions issued.\n # Positive values are steps to the right.\n # Negative values are steps to the left.\n # Zero means no action.\n net_action = 0\n while not model_action_q.empty():\n timestamp, actions = model_action_q.get()\n self.logger.debug(\n json.dumps(\n {\n \"level\": \"DEBUG\",\n \"ts\": timestamp,\n \"action_received\": list(actions),\n }\n )\n )\n net_action = net_action - actions[0] + actions[1]\n\n # Apply the actions\n self.position += net_action\n # Enforce lower and upper limits\n self.position = np.maximum(\n 0, np.minimum(self.n_positions - 1, self.position)\n )\n acted_time = time.time()\n self.logger.debug(\n json.dumps(\n {\n \"level\": \"DEBUG\",\n \"ts\": acted_time,\n \"new_position\": int(self.position),\n }\n )\n )\n\n # Communicate the new position back to the model\n sensors = np.zeros(self.n_positions)\n try:\n sensors[int(self.position)] = 1\n except IndexError:\n self.logger.debug(\n json.dumps(\n {\n \"level\": \"ERROR\",\n \"ts\": acted_time,\n \"msg\": (\n f\"IndexError assigning position {self.position}\"\n + f\"to sensor array of size {sensors.size}\"\n ),\n }\n )\n )\n\n model_sensor_q.put((acted_time, sensors))\n animation_sensor_q.put((acted_time, sensors))\n","repo_name":"brohrer/robot-training-game","sub_path":"world/v03.py","file_name":"v03.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"3130781873","text":"import json\nfrom typing import Callable\n\nfrom eth_account.signers.local import (\n LocalAccount,\n)\nfrom web3 import Web3, HTTPProvider\n\nfrom scripts_py.libs.accounts import AccountsHandler\n\n\nclass W3Obj:\n\n def __init__(self, url: str = \"http://localhost:8545\", *args, **kwargs):\n self.w3 = Web3(HTTPProvider(url))\n self.accounts_handler = AccountsHandler()\n self.set_signer(self.accounts_handler.accounts[0])\n print(\"Connected? \", self.w3.isConnected())\n\n def get_w3(self) -> Web3:\n return self.w3\n\n def get_eoa(self, key: str) -> LocalAccount:\n if not self.accounts_handler.check_pk(key):\n raise ValueError(f\"{key} does not exist\")\n eoa = self.w3.eth.account.privateKeyToAccount(key)\n return eoa\n\n def set_signer(self, key):\n eoa = self.get_eoa(key)\n self.signer = eoa\n\n def _get_nonce(self, signer):\n return self.w3.eth.get_transaction_count(signer.address)\n\n def send_transaction(self, function: Callable, tx_kwargs: list, signer=None):\n signer = signer or self.signer\n try:\n tx = function(*tx_kwargs).buildTransaction({\n 'from': signer.address,\n 'gas': 30000000,\n 'maxFeePerGas': self.w3.toWei('2', 'gwei'),\n 'maxPriorityFeePerGas': self.w3.toWei('1', 'gwei'),\n 'nonce': self._get_nonce(signer),\n })\n signed_tx = signer.sign_transaction(tx)\n tx_hash = self.w3.eth.send_raw_transaction(signed_tx.rawTransaction)\n tx_receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash)\n return tx_receipt\n except Exception as e:\n try:\n print(\"Error:\", e.args[0][\"data\"][\"message\"])\n except:\n print(\"Error: \", str(e))\n return None\n\n\n\n\n","repo_name":"ReadMost/voyage_rauan_hw","sub_path":"scripts_py/libs/w3_basic.py","file_name":"w3_basic.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35325949965","text":"import re\nimport json\n\nENTITY_PATTERN = re.compile('Q[0-9]+')\nPREDICATE_PATTERN = re.compile('P[0-9]+')\n\nwith open(\"../data/labels_dict.json\") as labelFile:\n labels_dict = json.load(labelFile)\n\ndef is_timestamp(timestamp):\n pattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')\n if not(pattern.match(timestamp)):\n return False\n else:\n return True\n\n\ndef convertTimestamp(timestamp):\n yearPattern = re.compile('^[0-9][0-9][0-9][0-9]-00-00T00:00:00Z')\n monthPattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-00T00:00:00Z')\n dayPattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')\n timesplits = timestamp.split(\"-\")\n year = timesplits[0]\n if yearPattern.match(timestamp):\n return year\n month = convertMonth(timesplits[1])\n if monthPattern.match(timestamp):\n return month + \" \" + year\n elif dayPattern.match(timestamp):\n day = timesplits[2].rsplit(\"T\")[0]\n return day + \" \" + month + \" \" + year\n\n return timestamp\n\ndef convertMonth(month):\n return{\n \"01\": \"january\",\n \"02\": \"february\",\n \"03\": \"march\",\n \"04\": \"april\",\n \"05\": \"may\",\n \"06\": \"june\",\n \"07\": \"july\",\n \"08\": \"august\",\n \"09\": \"september\",\n \"10\": \"october\",\n \"11\": \"november\",\n \"12\": \"december\"\n }[month]\n\n\ndef get_label(entity):\n label = \"\"\n if entity.startswith(\"Q\") or entity.startswith(\"P\"):\n #for predicates: P10-23, split away counting\n if \"-\" in entity:\n e = entity.split(\"-\") [0]\n else:\n e = entity\n if e in labels_dict.keys():\n label = labels_dict[e]\n else:\n if is_timestamp(entity):\n label = convertTimestamp(entity)\n elif entity.startswith(\"+\"):\n label = entity.split(\"+\")[1]\n else:\n label = entity\n\n return label\n\n\ndef fill_missing_prefixes(prefixes, sparql):\n new_sparql = sparql\n for alias, uri in prefixes.items():\n if sparql.find(alias) != -1 and sparql.find(uri) == -1:\n new_sparql = uri + \" \" + new_sparql\n return new_sparql\n\n# if __name__ == '__main__':\n# sparql = \"SELECT ?obj WHERE { wd:Q567 p:P39 ?s . ?s ps:P39 ?obj . ?s pq:P580 ?x filter(contains(YEAR(?x),'1994')) }\"\n# PREFIXES_WIKIDATA = {\n# \" p:\": \"PREFIX p: \",\n# \"wdt:\": \"PREFIX wdt: \",\n# \"wd:\": \"PREFIX wd: \",\n# \"xsd:\": \"PREFIX xsd: \",\n# \"pq:\": \"PREFIX pq: \",\n# \"ps:\": \"PREFIX ps: \",\n# \"rdfs:\": \"PREFIX rdfs: \"\n# }\n# print(fill_missing_prefixes(PREFIXES_WIKIDATA, sparql))","repo_name":"semantic-systems/seq2sparql-rl","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20691400395","text":"import torch \nimport numpy as np\nimport json\nfrom bert_seq2seq import Tokenizer, load_chinese_base_vocab\nfrom bert_seq2seq import load_bert\n\n\npredicate2id, id2predicate = {}, {}\nwith open('../state_dict/all_50_schemas', encoding=\"utf-8\") as f:\n for l in f:\n l = json.loads(l)\n if l['predicate'] not in predicate2id:\n id2predicate[len(predicate2id)] = l['predicate']\n predicate2id[l['predicate']] = len(predicate2id)\n\ndef search(pattern, sequence):\n \"\"\"从sequence中寻找子串pattern\n 如果找到,返回第一个下标;否则返回-1。\n \"\"\"\n n = len(pattern)\n for i in range(len(sequence)):\n if sequence[i:i + n] == pattern:\n return i\n return -1\n\ndef search_subject(token_ids, subject_labels, idx2word):\n # subject_labels: (lens, 2)\n if type(subject_labels) is torch.Tensor:\n subject_labels = subject_labels.numpy()\n if type(token_ids) is torch.Tensor:\n token_ids = token_ids.cpu().numpy()\n subjects = []\n subject_ids = []\n start = -1\n end = -1\n for i in range(len(token_ids)):\n if subject_labels[i, 0] > 0.5:\n start = i\n for j in range(len(token_ids)):\n if subject_labels[j, 1] > 0.5:\n subject_labels[j, 1] = 0\n end = j\n break\n if start == -1 or end == -1:\n continue\n subject = \"\"\n for k in range(start, end + 1):\n subject += idx2word[token_ids[k]]\n # print(subject)\n subject_ids.append([start, end])\n start = -1\n end = -1\n subjects.append(subject)\n\n return subjects, subject_ids\n\ndef search_object(token_ids, object_labels, idx2word):\n objects = []\n if type(object_labels) is torch.Tensor:\n object_labels = object_labels.numpy()\n if type(token_ids) is torch.Tensor:\n token_ids = token_ids.cpu().numpy()\n start = np.where(object_labels[:, :, 0] > 0.5)\n end = np.where(object_labels[:, :, 1] > 0.5)\n for _start, predicate1 in zip(*start):\n for _end, predicate2 in zip(*end):\n if _start <= _end and predicate1 == predicate2:\n object_text = \"\"\n for k in range(_start, _end + 1):\n # print(token_ids(k))\n object_text += idx2word[token_ids[k]]\n objects.append(\n (id2predicate[predicate1], object_text)\n )\n break \n \n return objects\n\ndef relation_extract(model, text, word2idx, tokenizer, device=\"cpu\"):\n idx2word = {v: k for k , v in word2idx.items()}\n with torch.no_grad():\n token_ids_test, segment_ids = tokenizer.encode(text, max_length=256)\n token_ids_test = torch.tensor(token_ids_test, device=device).view(1, -1)\n # 先预测subject\n pred_subject = model.predict_subject(token_ids_test)\n pred_subject = pred_subject.squeeze(0)\n subject_texts, subject_idss = search_subject(token_ids_test[0], pred_subject.cpu(), idx2word)\n if len(subject_texts) == 0:\n return \"没有预测出任何信息\"\n result_info = \"\"\n for sub_text, sub_ids in zip(subject_texts, subject_idss):\n result_info += \"s is \" + str(sub_text) + \"\\n\"\n sub_ids = torch.tensor(sub_ids, device=device).view(1, -1)\n # print(\"sub_ids shape is \" + str(sub_ids))\n object_p_pred = model.predict_object_predicate(token_ids_test, sub_ids)\n res = search_object(token_ids_test[0], object_p_pred.squeeze(0).cpu(), idx2word)\n result_info += \"p and o is \" + str(res) + \"\\n\"\n return result_info\n\n\n\n\n\n","repo_name":"920232796/NLP_flask","sub_path":"nlp_api/test/relation_extract_test.py","file_name":"relation_extract_test.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"}
+{"seq_id":"8662664651","text":"import unittest.mock\n\nimport pytest\n\nimport automation.package_test.test\nimport tests.conftest\nfrom mlrun.utils import logger\n\n\ndef test_test_requirements_vulnerabilities():\n package_tester = automation.package_test.test.PackageTester()\n cases = [\n {\n \"output\": \"\"\"\n[\n [\n \"fastapi\",\n \"<0.75.2\",\n \"0.67.0\",\n \"Fastapi 0.75.2 updates its dependency 'ujson' ranges to include a security fix.\",\n \"48159\",\n null,\n null\n ]\n]\"\"\",\n \"expected_to_fail\": True,\n },\n {\n \"output_file\": tests.conftest.tests_root_directory\n / \"automation\"\n / \"package_test\"\n / \"assets\"\n / \"ignored_vulnerabilities.json\",\n },\n {\n \"output\": \"\",\n },\n ]\n for case in cases:\n logger.info(\"Testing case\", case=case)\n\n def _run_command_mock(command, *args, **kwargs):\n # _test_requirements_vulnerabilities flow is running two commands:\n # 1. pip install safety - we don't care about it, so simply return success\n # 2. safety check --json - this is the actual one we want to mock the output for\n if command == \"pip install safety\":\n return 0, \"\", \"\"\n elif command == \"safety check --json\":\n if case.get(\"output_file\"):\n with open(case[\"output_file\"]) as file:\n output = file.readlines()\n output = \"\".join(output)\n else:\n output = case.get(\"output\")\n code = 255 if output else 0\n return code, output, \"\"\n else:\n raise NotImplementedError(f\"Got unexpected command: {command}\")\n\n package_tester._run_command = unittest.mock.Mock(side_effect=_run_command_mock)\n if case.get(\"expected_to_fail\"):\n with pytest.raises(AssertionError, match=\"Found vulnerable requirements\"):\n package_tester._test_requirements_vulnerabilities(\"some-extra\")\n else:\n package_tester._test_requirements_vulnerabilities(\"some-extra\")\n","repo_name":"Hedingber/mlrun","sub_path":"tests/automation/package_test/test_package_test.py","file_name":"test_package_test.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"22228447713","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport uuid\nimport re\nimport json\nimport time\nimport random\nimport calendar\nimport execjs\nimport scrapy\nimport logging\nimport requests\n\nfrom scrapy import signals\nfrom twisted.internet import reactor, defer, task\nfrom twisted.names import client\nfrom twisted.internet import task\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.web._newclient import ResponseNeverReceived\nfrom twisted.internet.error import DNSLookupError, ConnectionLost, TimeoutError, TCPTimedOutError, ConnectionRefusedError\nfrom scrapy.http import Response, TextResponse\n\nfrom wenshu import jshelper\nfrom wenshu.items import TaskItem, DocItem\nfrom wenshu.exceptions import WafVerifyError, RemindKeyError, RemindError, Vl5xTimeoutError\n\nfrom netifaces import AF_INET\nimport netifaces as ni\n\nlogger = logging.getLogger(__name__)\n\nclass ListSpider(scrapy.Spider):\n\tname = 'list'\n\tallowed_domains = ['court.gov.cn']\n\tcustom_settings = {\n\t\t'CONCURRENT_REQUESTS': 1024,\n\t\t'RETRY_ENABLED': True,\n\t\t'MAX_RETRY_TIMES': 3,\n\t\t'DOWNLOAD_TIMEOUT': 30,\n\t\t'DOWNLOAD_DELAY': 0,\n\t\t'CONCURRENT_REQUESTS_PER_DOMAIN': 256,\n\t\t'CONCURRENT_REQUESTS_PER_IP': 256\n\t}\n\tstart_urls = []\n\n\thandle_httpstatus_all = True\n\n\tHOME_URL = 'http://wenshu.court.gov.cn'\n\tSTART_URL = 'http://wenshu.court.gov.cn/List/List?sorttype=1&conditions=searchWord+2+AJLX++案件类型:民事案件'\n\tLIST_CONTENT_URL = 'http://wenshu.court.gov.cn/List/ListContent'\n\tCODE_URL = 'http://wenshu.court.gov.cn/ValiCode/GetCode'\n\n\tCAPTCHA_URL = 'http://wenshu.court.gov.cn/User/ValidateCode/'\n\tCAPTCHA_VALIDATE_URL = 'http://wenshu.court.gov.cn/Content/CheckVisitCode'\n\n\tCAPTCHA_SOLVE_URL = 'http://localhost:5000/solve'\n\tF80COOKIES_URL = 'http://localhost:3000/f80Cookies'\n\n\tCONCURRENT_SESSIONS_PER_IP = 64\n\n\tWAF_DELAY = 310\n\n\tCHANGE_IP_ENABLED = False\n\n\tSTATS_INTERVAL = 60\n\n\tDEBUG_TASK_IDS = []\n\n\tlast_task = {}\n\n\t# available_proxies = {}\n\t# used_proxies = {}\n\twenshu_servers = ['61.160.224.60']\n\n\tstats = {\n\t\t'start': time.time(),\n\t\t'speed': 0,\n\t\t'total': 0,\n\t\t'_count_queue': [],\n\t\t'_last_scraped_count': {'time': time.time(), 'count': 0}\n\t}\n\n\tdef start_requests(self):\n\t\tself._init_stats_task()\n\n\t\tself.wenshu_servers = self.crawler.settings.get('WENSHU_SERVERS', [])\n\t\tfor request in self.CdnRequests():\n\t\t\tyield request\n\n\tdef CdnRequests(self):\n\t\trequests = []\n\t\tfor i in range(0, len(self.wenshu_servers) if self.wenshu_servers else 1):\n\t\t\tfor count in range(0, self.CONCURRENT_SESSIONS_PER_IP):\n\t\t\t\trequest = self.ListRequest()\n\t\t\t\trequest.meta['delay_request'] = random.random() * self.CONCURRENT_SESSIONS_PER_IP\n\t\t\t\tif self.wenshu_servers:\n\t\t\t\t\trequest.meta['ip_addr'] = self.wenshu_servers[i]\n\t\t\t\trequests.append(request)\n\t\treturn requests\n\n\tdef ListRequest(self):\n\t\tf80s = jshelper.f80sCookie()\n\t\tf80t = jshelper.f80tCookie()\n\t\treturn scrapy.Request(url = self.START_URL, headers = {'Referer': 'http://wenshu.court.gov.cn'}, cookies = {'FSSBBIl1UgzbN7N80T': f80t, 'FSSBBIl1UgzbN7N80S': f80s}, callback = self.parse_list, errback = self.other_error, dont_filter = True, meta = {'dont_delay': True})\n\n\tdef NumberRequest(self):\n\t\tpost_data = {'guid': self.create_guid()}\n\t\treturn scrapy.FormRequest(url = self.CODE_URL, formdata = post_data, headers = {'Referer': 'http://wenshu.court.gov.cn'}, callback = self.parse_number, errback 
= self.other_error)\n\n\tdef parse(self, response):\n\t\traise Exception('Unkown request!')\n\n\tdef ListContentRequest(self, response, task = None):\n\t\tsession = response.request.meta.get('session', None)\n\n\t\tif task is None:\n\t\t\ttask = self.last_task = self.task_pipeline.next_list_task()\n\n\t\tif not task:\n\t\t\tlogger.info('No more task')\n\t\t\treturn\n\n\t\tpage = task.get('page', 1)\n\t\tpost_data = {\n\t\t\t'Param': self._task_to_post_param(task),\n\t\t\t'Index': str(page if page else 1),\n\t\t\t'Page': \"10\",\n\t\t\t'Order': '法院层级',\n\t\t\t'Direction': 'asc',\n\t\t\t'vl5x': session.get('vl5x', ''),\n\t\t\t'number': session.get('number', ''),\n\t\t\t'guid': self.create_guid()\n\t\t}\n\n\t\tf80s = jshelper.f80sCookie()\n\t\tf80t = jshelper.f80tCookie()\n\n\t\trequest = scrapy.FormRequest(url = self.LIST_CONTENT_URL, formdata = post_data, headers = {'Referer': 'http://wenshu.court.gov.cn'}, cookies = {'FSSBBIl1UgzbN7N80T': f80t, 'FSSBBIl1UgzbN7N80S': f80s}, callback = self.list_request_loop, errback = self.other_error, meta = {'task': task, 'param': post_data})\n\t\tlogger.debug('Processing task: Param:{};Index:{}'.format(post_data['Param'], post_data['Index']))\n\n\t\tif task.get('task_id') == -1:\n\t\t\trequest.meta['delay_request'] = 10\n\t\t\tsession['no_task_sleep_count'] = session.get('no_task_sleep_count', 0) + 1\n\t\t\tif session.get('no_task_sleep_count', 0) > 5:\n\t\t\t\traise scrapy.exceptions.CloseSpider('Finished because no more tasks!')\n\t\t\telse:\n\t\t\t\tlogger.info('Wait 10 secs for new task...')\n\n\t\treturn request\n\n\tdef list_request_loop(self, response):\n\t\ttask = response.request.meta.get('task', None)\n\t\tif len(response.text) == 0: #retry\n\t\t\tlogger.debug('response is empty, retry for task ' + str(task.get('task_id')))\n\t\t\tyield self.ListContentRequest(response, task)\n\t\t\treturn\n\t\ttry:\n\t\t\tjson_string = eval(response.text)\n\t\t\tjson_string = json_string.replace('\\r', '').replace('\\n', '').replace('\\t', '').replace('\\\\\\\",\\\"案件类型\\\"', '\\\",\\\"案件类型\\\"').replace('0\\\"},]', '0\\\"}]')\n\n\t\t\tdata = json.loads(json_string)\n\n\t\t\trunEval = data[0].get('RunEval', '')\n\n\t\t\tdoc_count = int(data[0].get('Count', '0'))\n\t\t\ttask['doc_count'] = doc_count\n\t\t\t\n\t\t\tlogger.debug('Scraped task id = {}, total count: {}'.format(task.get('task_id', 0), doc_count))\n\n\t\t\tdocs = []\n\t\t\tif len(data) > 1:\n\t\t\t\tfor item in data[1:]:\n\t\t\t\t\tdoc = DocItem()\n\t\t\t\t\tdoc_id = item.get('文书ID', '')\n\t\t\t\t\t# doc_id = jshelper.decryptDocID(runEval, doc_id)\n\t\t\t\t\tdoc['doc_id'] = doc_id\n\t\t\t\t\tdoc['status'] = 0\n\t\t\t\t\tdoc['case_name'] = item.get('案件名称', '')\n\t\t\t\t\tdoc['case_no'] = item.get('案号', '')\n\t\t\t\t\tdoc['case_type'] = item.get('案件类型', '')\n\t\t\t\t\tdoc['court_name'] = item.get('法院名称', '')\n\t\t\t\t\tdoc['trial_date'] = item.get('裁判日期', '')\n\t\t\t\t\tdoc['trial_summary'] = item.get('裁判要旨段原文', '')\n\t\t\t\t\tdocs.append(doc)\n\n\t\t\t\tdoc_ids = jshelper.decryptDocIDs(runEval, list(map(lambda doc: doc['doc_id'], docs)))\n\t\t\t\tif not (len(doc_ids) == len(docs)):\n\t\t\t\t\traise Exception('Error: doc_ids length not equals to docs length')\n\t\t\t\tfor i in range(0, len(docs)):\n\t\t\t\t\tdocs[i]['doc_id'] = doc_ids[i]\n\t\t\t\t\t\n\t\t\tif len(docs) > 1:\n\t\t\t\tself.doc_pipeline.save_docs(docs)\n\t\t\t\n\t\t\ttask['fails'] = 0\n\n\t\t\tself.crawler.stats.inc_value('docid_scraped_count', count = len(docs) - 1, spider=self)\n\t\t\t\t\t\n\t\texcept Exception as 
e:\n\n\t\t\ttask['fails'] = task.get('fails', 0) + 1\n\t\t\tlogger.error('Parse list response error\\n%(error)s\\nrepsone code:%(status)d\\nrequest task:\\n%(task)s\\nrequest param:\\n%(param)s\\nresponse text:\\n%(text)s', {'error': e, 'status': response.status, 'task': task, 'text': response.text, 'param': response.request.meta.get('param')}, exc_info = True, extra = {'response': response})\n\n\t\tfinally:\n\n\t\t\tfails = task.get('fails', 0) > 0\n\t\t\tif fails:\n\t\t\t\ttask['status'] = 0\n\t\t\telif task.get('status', 0) == -1:\n\t\t\t\ttask['status'] = 1\n\n\t\t\tself.task_pipeline.update(task)\n\t\t\tyield self.ListContentRequest(response)\n\n\tdef parse_list(self, response):\n\t\tsession = response.request.meta.get('session', None)\n\t\tcookies = session.get('cookies', None)\n\n\t\tset_cookies = response.headers.getlist('Set-Cookie')\n\t\tif set_cookies and len(set_cookies) > 0:\n\t\t\tfor cookie in set_cookies:\n\t\t\t\tname_value = cookie.decode().split(';')[0].split('=')\n\t\t\t\tif name_value[0] == 'vjkl5':\n\t\t\t\t\tvjkl5 = name_value[1]\n\t\t\t\t\tvl5x = jshelper.getKey(vjkl5)\n\t\t\t\t\tsession['vjkl5'] = vjkl5\n\t\t\t\t\tsession['vl5x'] = vl5x\n\t\t\t\t\tsession['vl5x_time'] = time.time()\n\n\t\tvl5x = session.get('vl5x', None)\n\t\tif vl5x:\n\t\t\tyield self.NumberRequest()\n\t\telse:\n\t\t\tyield self.ListRequest()\n\n\tdef parse_number(self, response):\n\t\tif len(response.text) >= 4 and len(response.text) < 40:\n\t\t\tself.last_number_time = time.time()\n\t\t\tnumber = response.text\n\t\t\tsession = response.request.meta.get('session', None)\n\t\t\tsession['number'] = number\n\t\t\tsession['number_time'] = time.time()\n\t\t\tlogger.debug('Success get code: {}'.format(response.text))\n\t\t\tyield self.ListContentRequest(response)\n\t\telse:\n\t\t\tlogger.debug('Failed get code, retrying...')\n\t\t\tyield self.NumberRequest()\n\n\tdef other_error(self, failure):\n\t\ttask = failure.request.meta.get('task', None)\n\t\tif task:\n\t\t\tif task.get('status', 0) == -1:\n\t\t\t\ttask['status'] = 0\n\t\t\tself.task_pipeline.update(task)\n\n\t\tif not (failure.check(Vl5xTimeoutError) or task):\n\t\t\tlogger.error('%s:%s', repr(failure), failure.request.url)\n\n\t\t#middleware will not handle errback output, so call middleware method here, ref:\n\t\t# FIXME: don't ignore errors in spider middleware, in scraper.py\n\t\trequest = self.ListRequest()\n\t\trequest = self.session_ware.process_output_request(request, failure, self)\n\t\trequest.meta['delay_request'] = 0.5\n\n\t\tif failure.check(TimeoutError, ResponseNeverReceived, ConnectionRefusedError, WafVerifyError):\n\t\t\tlogger.error('%s:%s', repr(failure), failure.request.url)\n\t\t\t# request.meta['delay_request'] = self.WAF_DELAY\n\n\t\tyield request\n\t\n\n\tdef _init_stats_task(self):\n\t\tself.prev_docid_scraped_count = 0\n\t\tself.multiplier = 60.0 / self.STATS_INTERVAL\n\t\tself.stats_task = task.LoopingCall(self._log)\n\t\tself.stats_task.start(self.STATS_INTERVAL)\n\n\tdef _log(self):\n\t\tdocid_scraped_count = self.crawler.stats.get_value('docid_scraped_count', 0)\n\t\tdocrate = (docid_scraped_count - self.prev_docid_scraped_count) * self.multiplier\n\t\tself.prev_docid_scraped_count = docid_scraped_count\n\t\tmsg = 'Last task date: %(year)s-%(month)s-%(day)s, Crawled %(docs)d docs (at %(docrate)d docs/min)'\n\t\tlog_args = {'year': self.last_task.get('year', 0), 'month': self.last_task.get('month', 0), 'day': self.last_task.get('day', 0), 'docs': docid_scraped_count, 'docrate': docrate}\n\t\tlogger.info(msg, 
log_args)\n\n\t@classmethod\n\tdef from_crawler(cls, crawler, *args, **kwargs):\n\t\tspider = super(ListSpider, cls).from_crawler(crawler, *args, **kwargs)\n\t\tcrawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)\n\t\treturn spider\n\n\tdef spider_closed(self, spider):\n\t\tjshelper.free()\n\n\tdef create_guid(self):\n\t\tc = lambda: format(int((1 + random.random()) * 65536), 'x')[1:]\n\t\treturn c() + c() + '-' + c() + '-' + c() + c() + '-' + c() + c() + c()\n\n\t#Param: 法院层级:基层法院,案件类型:民事案件,审判程序:一审,文书类型:判决书,裁判日期:2018-12-11 TO 2018-12-11,法院地域:北京市\n\tdef _task_to_post_param(self, task):\n\t\tself.TASK_PARAM_KEY_MAP = {\n\t\t\t'trial_date': '裁判日期',\n\t\t\t'court_area': '法院地域',\n\t\t\t'middle_court': '中级法院',\n\t\t\t'basic_court': '基层法院',\n\t\t\t'doc_type': '文书类型',\n\t\t}\n\t\t\n\t\tday = task.get('day', None)\n\t\tmonth = task.get('month', None)\n\t\tyear = task.get('year', None)\n\t\tstart_day = day\n\t\tend_day = day\n\t\tstart_month = month\n\t\tend_month = month\n\n\t\tif day is None:\n\t\t\tstart_day = 1\n\t\t\tif month is None:\n\t\t\t\tstart_month = 1\n\t\t\t\tend_month = 12\n\t\t\t\tend_day = 31\n\t\t\telse:\n\t\t\t\tend_day = calendar.monthrange(year, end_month)[1]\n\n\t\td = {}\n\n\t\tfor key in self.TASK_PARAM_KEY_MAP.keys():\n\t\t\tvalue = task.get(key, None)\n\t\t\tif key == 'trial_date':\n\t\t\t\td[self.TASK_PARAM_KEY_MAP[key]] = '{}-{}-{} TO {}-{}-{}'.format(year, '{:02d}'.format(start_month), '{:02d}'.format(start_day), year, '{:02d}'.format(end_month), '{:02d}'.format(end_day))\n\t\t\telif key == 'court_area' and value == '最高人民法院':\n\t\t\t\td['法院层级'] = '最高法院'\n\t\t\telif value is None:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif len(value) > 0:\n\t\t\t\t\td[self.TASK_PARAM_KEY_MAP[key]] = value\n\n\t\tcourt_level = task.get('court_level', None)\n\t\tif not (court_level is None):\n\t\t\td['法院层级'] = court_level\n\n\t\tparam = []\n\t\tfor key in d.keys():\n\t\t\tparam.append('{}:{}'.format(key, d[key]))\n\n\t\treturn ','.join(param)\n\n\t# def CaptChaRequest(self):\n\t# \treturn scrapy.Request(url = self.CAPTCHA_URL + str(random.randint(1, 9999)), headers = {'Referer': 'http://wenshu.court.gov.cn'}, callback = self.parse_captcha, errback = self.other_error, meta = {'dont_delay': True})\n\n\t# def CaptChaValidateRequest(self, captcha_code):\n\t# \treturn scrapy.FormRequest(url = self.CAPTCHA_VALIDATE_URL, formdata = {'ValidateCode': captcha_code}, headers = {'Referer': 'http://wenshu.court.gov.cn'}, callback = self.parse_captcha_validate, errback = self.other_error, meta = {'dont_delay': True})\n\n\t# def CaptChaSovleRequest(self, prepped):\n\t# \treturn scrapy.Request(url = self.CAPTCHA_SOLVE_URL, method = 'POST', headers = prepped.headers, body = prepped.body, callback = self.parse_captcha_solve, errback = self.other_error, meta = {'dont_delay': True})\n\n\n\t# def parse_captcha(self, response):\n\t# \tif isinstance(response, Response) and response.status == 200:\n\t# \t\tfiles = {'captcha_image': response.body}\n\t# \t\tprepped = requests.Request('POST', self.CAPTCHA_SOLVE_URL, files=files).prepare()\n\t# \t\tyield self.CaptChaSovleRequest(prepped)\n\t# \telse:\n\t# \t\tlogger.info('Failed to get captcha, retrying...')\n\t# \t\tyield self.CaptChaRequest()\n\n\t# def parse_captcha_solve(self, response):\n\t# \tif len(response.text) > 0:\n\t# \t\tyield self.CaptChaValidateRequest(response.text)\n\t# \telse:\n\t# \t\tlogger.error('Not recognize the captcha')\n\t# \t\tyield self.CaptChaRequest()\n\n\t# def parse_captcha_validate(self, response):\n\t# \tif 
response.text == '1':\n\t# \t\tlogger.info('Success solve captcha!')\n\t# \t\tsession = response.request.meta.get('session', None)\n\t# \t\tyield self.ListRequest()\n\t# \telse:\n\t# \t\tlogger.info('Retry solve captcha...')\n\t# \t\tyield self.CaptChaRequest()\n\n\t# def F80ForListRequest(self):\n\t# \treturn scrapy.Request(url = self.F80COOKIES_URL, callback = self.parse_f80_for_list, errback = self.other_error, meta = {'dont_delay': True})\n\n\t# def F80ForListContentRequest(self):\n\t# \treturn scrapy.Request(url = self.F80COOKIES_URL, callback = self.parse_f80_for_list_content, errback = self.other_error, meta = {'dont_delay': True})\n\n\n\t# def parse_f80_for_list(self, response):\n\t# \tsession = response.request.meta.get('session', None)\n\t# \tf80cookies = json.loads(response.text)\n\t# \tsession['f80s'] = f80cookies['f80s']\n\t# \tsession['f80t'] = f80cookies['f80t']\n\t# \tyield self.ListRequest(response)\n\n\t# def parse_f80_for_list_content(self, response):\n\t# \tsession = response.request.meta.get('session', None)\n\t# \tf80cookies = json.loads(response.text)\n\t# \tsession['f80s'] = f80cookies['f80s']\n\t# \tsession['f80t'] = f80cookies['f80t']\n\t# \tyield self.ListContentRequest(response)\n\n\n\t# def HomeRequest(self):\n\t# \treturn scrapy.Request(url = self.HOME_URL, callback = self.parse_home, errback = self.other_error, dont_filter = True, meta = {'dont_delay': True})\n\n\t# def parse_home(self, response):\n\t# \tsession = response.request.meta.get('session', None)\n\t# \tmeta = response.css('#9DhefwqGPrzGxEp9hPaoag::attr(content)').extract_first()\n\t# \tsession['meta'] = meta\n\n\t# \tywtu = jshelper.getYWTU(meta)\n\t# \tsession['ywtu'] = ywtu\n\n\t# \tset_cookies = response.headers.getlist('Set-Cookie')\n\t# \tif set_cookies and len(set_cookies) > 0:\n\t# \t\tfor cookie in set_cookies:\n\t# \t\t\tname_value = cookie.decode().split(';')[0].split('=')\n\t# \t\t\tif name_value[0] in ['FSSBBIl1UgzbN7N80S', 'FSSBBIl1UgzbN7N80T']:\n\t# \t\t\t\tsession[name_value[0]] = name_value[1]\n\n\t# \t\tyield self.ListRequest(response)\n\n\t# def parse_server_list(self, response):\n\t# \tif isinstance(response, Response) and response.status == 200:\n\t# \t\tprint(response.text)\n\t# \t\tresult = json.loads(response.text)\n\t# \t\tif result and result.get('status', False):\n\t# \t\t\tservers = result.get('data', {})\n\t# \t\t\tself.wenshu_servers = [s.get('ip') for s in servers]\n\t# \t\t\tlogger.debug('Success to get server list: {}'.format(self.wenshu_servers))\n\t# \t\t\treturn self.ListRequests(response)\n\t# \telse:\n\t# \t\tlogger.error('%s:%s', repr(response), response.request.url)\n\n\t# \traise scrapy.exceptions.CloseSpider('Error: can not get wenshu server ips')\n\n\n\t# def change_ip_address(self):\n\t# \tlogger.info('Now will renew pppoe ip addr, please wait...')\n\n\t# \twhile True:\n\t# \t\tlast_ip_addr = ni.ifaddresses('ppp0')[AF_INET][0]['addr']\n\t# \t\tos.system('osascript ' + self.settings.get('PROJECT_ROOT') + '/scripts/renewip.scpt')\n\t# \t\ttime.sleep(0.5)\n\t# \t\tnew_ip_addr = ni.ifaddresses('ppp0')[AF_INET][0]['addr']\n\t# \t\tif new_ip_addr != last_ip_addr:\n\t# \t\t\tbreak\n\t# \tlogger.info('Successed change ip!')\n\n\n\t# def load_available_proxies(self):\n\t# \tproxies = self.proxy_pipline.available_proxies()\n\t# \tfor proxy in proxies:\n\t# \t\tif (not proxy['ip'] in self.available_proxies.keys()) and (not proxy['ip'] in self.used_proxies.keys()):\n\t# \t\t\tself.available_proxies[proxy['ip']] = proxy\n\n\t# def next_available_proxy(self):\n\t# \tfor 
ip in self.available_proxies.keys():\n\t# \t\tif not ip in self.used_proxies.keys():\n\t# \t\t\tproxy = self.available_proxies.pop(ip)\n\t# \t\t\tself.used_proxies[proxy['ip']] = proxy\n\t# \t\t\treturn proxy","repo_name":"sparkwj/wenshu","sub_path":"wenshu/spiders/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":16790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"74943893265","text":"from copy import deepcopy\nfrom typing import List\n\n\ndef add_padding(img: List[List[str]], size: int, val: str):\n for _ in range(size):\n img.insert(0, [val] * len(img[0]))\n img.append([val] * len(img[0]))\n for i in range(len(img)):\n img[i].insert(0, val)\n img[i].append(val)\n\n\ndef solve(enh: str, img: List[List[str]], steps: int):\n add_padding(img, 2, '.')\n for _ in range(steps):\n border = img[0][0]\n add_padding(img, 1, border)\n new_img = deepcopy(img)\n for i in range(len(new_img[0])):\n if border == '.':\n new_img[0][i] = enh[0]\n new_img[-1][i] = enh[0]\n else:\n new_img[0][i] = enh[-1]\n new_img[-1][i] = enh[-1]\n\n for i in range(len(new_img)):\n if border == '.':\n new_img[i][0] = enh[0]\n new_img[i][-1] = enh[0]\n else:\n new_img[i][0] = enh[-1]\n new_img[i][-1] = enh[-1]\n\n for i in range(1, len(new_img) - 1):\n for j in range(1, len(new_img[0]) - 1):\n square = img[i - 1][j - 1:j + 2] + img[i][j - 1:j + 2] + img[i + 1][j - 1:j + 2]\n bin_str = ''\n for e in square:\n bin_str += '0' if e == '.' else '1'\n idx = int(bin_str, 2)\n new_img[i][j] = enh[idx]\n img = new_img\n\n count = 0\n for row in img:\n count += row.count('#')\n print(count)\n\n\nif __name__ == '__main__':\n with open('test.txt', 'r') as file:\n input_lines = file.readlines()\n input_lines = [line.replace('\\r\\n', '').replace('\\n', '') for line in input_lines]\n\n enhancer = input_lines[0]\n\n image = []\n for ii in range(2, len(input_lines)):\n image.append(list(input_lines[ii]))\n\n solve(enhancer, image, 50)\n","repo_name":"gumbernator/Advent-of-Code-2021","sub_path":"day20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"13616302427","text":"\"\"\"The manifold_utils module provides interfaces for manifold learning and dimensionality reduction.\n \n\"\"\"\n__author__ = (\"Bernhard Lehner \")\n\n\nfrom sklearn.manifold import TSNE\n\n\ndef tsne_embedding(X, n_dim=2, perplexity=3):\n tsne = TSNE(n_components=n_dim,\n init='random',\n perplexity=perplexity,\n learning_rate='auto')\n X_embedded = tsne.fit_transform(X)\n \n return X_embedded","repo_name":"berni-lehner/structural_health_monitoring","sub_path":"src/manifold_utils.py","file_name":"manifold_utils.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"14102457909","text":"__author__ = \"Frank Shen\"\n\n\n# 递归实现\ndef quick_sort(seq):\n if len(seq) < 2:\n return seq\n else:\n pivot = seq[0]\n left = [elem for elem in seq[1:] if elem <= pivot]\n right = [elem for elem in seq[1:] if elem > pivot]\n return quick_sort(left) + [pivot] + quick_sort(right)\n\n\ndef test_quick_sort():\n import random\n ll = list(range(10))\n for i in range(10):\n random.shuffle(ll)\n assert quick_sort(ll) == list(range(10))\n\n\ntest_quick_sort()\n\n\n# def test_quick_sort():\n# import random\n# seq = list(range(10))\n# random.shuffle(seq)\n# print(quick_sort(seq))\n#\n#\n# test_quick_sort()\n#\n# def partition(array, beg, end):\n# pivot_index = beg\n# pivot = array[pivot_index]\n# left = pivot_index + 1\n# right = end - 1\n# while True:\n# while left <= right and array[left] < pivot:\n# left += 1\n#\n# while right >= left and array[right] >= pivot:\n# right -= 1\n# if left > right:\n# break\n# else:\n# array[left], array[right] = array[right], array[left]\n# array[pivot_index], array[right] = array[right], array[pivot_index]\n# return right\n#\n#\n# def quick_sort_inplace(array, beg, end):\n# if beg < end:\n# pivot = par\n","repo_name":"Frankssss/DataStructure-Algorithm","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10727955124","text":"from django.http import JsonResponse\nfrom django.views.generic import DetailView,TemplateView\nfrom plan.views import PlanDetailView,PlanView\nfrom django.views.generic.list import ListView\nfrom django.views.generic import DetailView\nfrom django.db.models import Q,Max\n\nfrom homepage.utils import get_context_obj\nfrom .models import FixedInternet\nfrom .forms import FixedInternetForm\nfrom .utils import donut_calculation\n\n\nclass FixedInternetHome(TemplateView):\n template_name = 'fixed_internet/fixed_internet_home.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form_class'] = FixedInternetForm()\n con = get_context_obj(self.request)\n context['obj'] = con['obj']\n context['country'] = con['country']\n return context\n\n\nclass FixedInternetList(PlanView):\n model = FixedInternet\n context_object_name = 'plans'\n template_name = 'fixed_internet/fixed_internet_listing.html'\n page_template = 'fixed_internet/fixed_internet_listing_template.html'\n\n def __init__(self):\n self.form_class = None\n\n super(FixedInternetList, self).__init__()\n\n def get_queryset(self):\n sort = self.request.GET.get(\"order_by\")\n\n data_range = self.request.GET.get('data')\n price_range = self.request.GET.get('price_range')\n selected_network = self.request.GET.get('selected_network')\n try:\n country = self.request.session.get('country')\n except:\n country = \"SA\"\n #\n qs = FixedInternet.active_fixed_internet.all()\n qs = qs.filter(country__country_code=country)\n\n\n if data_range:\n qs = qs.filter(Q(upload_speed__gte=int(data_range)))\n # #\n if price_range:\n qs = qs.filter(Q(monthly_fee__gte=int(price_range)))\n # #\n # if selected_network:\n #\n # qs = qs.filter(operator_id__operator=selected_network)\n # #\n # if sort:\n # qs = qs.order_by(sort)\n\n self.form_class = FixedInternetForm(self.request.GET, qs=qs,session = self.request.session)\n return qs\n\n\n def get(self, request, *args, **kwagrs):\n\n return super(FixedInternetList, self).get(request, *args, **kwagrs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n con = get_context_obj(self.request)\n context['obj'] = con['obj']\n context['country'] = con['country']\n context['form_class'] = self.form_class\n return donut_calculation(context)\n\n\nclass FixedInternetDetail(DetailView):\n template_name = 'fixed_internet/fixed_internet_detail.html'\n model = FixedInternet\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n con = get_context_obj(self.request)\n context['obj'] = con['obj']\n context['country'] = con['country']\n return context\n\n\ndef fixed_count(request):\n \"\"\"\n :param request: only_sim=on&phone_name=samsung1&phone_media=&phone_slug=&data=0&minutes=21&messages=0&\n selected_network=\n :return: count of plan\n \"\"\"\n\n data = request.GET.get('data', None)\n\n selected_network = request.GET.get('selected_network', None)\n try:\n country = request.session.get('country')\n except:\n country = \"SA\"\n\n qs = FixedInternet.active_fixed_internet.all()\n qs = qs.filter(country__country_code=country)\n\n if selected_network:\n qs = qs.filter(operator_id__operator=selected_network)\n\n if data:\n qs = qs.filter(Q(download_speed__gte=int(data)))\n\n return JsonResponse(qs.count(), status=200, 
safe=False)","repo_name":"Aravindhan-M/first_project","sub_path":"fixed_internet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"13755101045","text":"import sys\nimport heapq\nfrom collections import deque\n\n\ndef solve():\n    input = sys.stdin.readline\n\n    N, K = map(int, input().split())\n\n    jewel = [list(map(int, input().split())) for _ in range(N)]\n    bag = [int(input()) for _ in range(K)]\n\n    # 1. Sort the jewels by weight\n    jewel = deque(sorted(jewel))\n    # 2. Sort the bags by weight\n    bag.sort()\n    # 3. Keep the values already confirmed to fit in a heap, so the maximum is always available\n    max_heap = []\n\n    ans = 0\n    for i in range(K):\n        while jewel:\n            jewel_weight, jewel_value = jewel[0]\n            if bag[i] >= jewel_weight:\n                heapq.heappush(max_heap, -jewel_value)  # store negated so the min-heap acts as a max-heap\n                jewel.popleft()\n            else:\n                break\n        if max_heap:\n            ans += -(heapq.heappop(max_heap))  # take the most valuable jewel that fits\n    print(ans)\n\n\nsolve()\n","repo_name":"Daejjyu/Algorithm","sub_path":"Jungle/Week4_Dp, Greedy/00_exam_3_1202_보석 도둑.py","file_name":"00_exam_3_1202_보석 도둑.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
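The record above relies on heapq being a min-heap only: pushing negated values makes the smallest stored item correspond to the largest original one. A minimal standalone illustration (the values are made up):

    import heapq

    values = [30, 10, 99, 42]
    max_heap = []
    for v in values:
        heapq.heappush(max_heap, -v)  # negate on the way in

    assert -heapq.heappop(max_heap) == 99  # negate again on the way out
    assert -heapq.heappop(max_heap) == 42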
+{"seq_id":"38296148441","text":"from tkinter import *\nfrom tkinter import ttk\nimport time\nfrom tkinter import filedialog\nfrom typing import Literal\nimport json\n\n# from Tkinter import Widget\n# Widget._nametowidget(parent)\n\nfile = open('E:\\\\1.Python_f\\\\Glossory\\\\Tools_main_file\\\\tdlist\\\\info.json', 'a')\nroot = Tk()\nroot.geometry(\"500x500\")\n#Collapsible pane\nclass Cp(ttk.Frame):\n    \"\"\"\n    -----USAGE-----\n    collapsiblePane = CollapsiblePane(parent,\n                      expanded_text =[string],\n                      collapsed_text =[string])\n\n    collapsiblePane.pack()\n    button = Button(collapsiblePane.frame).pack()\n    \"\"\"\n\n    def __init__(self, parent, expanded_text =\"Collapse <<\",\n                           collapsed_text =\"Expand >>\", expanding = False, name='cp'):\n\n        ttk.Frame.__init__(self, parent, name=name)\n\n        # These are the class variable\n        # see a underscore in expanded_text and _collapsed_text\n        # this means these are private to class\n        self.parent = parent\n        self.expanded_text = expanded_text\n        self.collapsed_text = collapsed_text\n\n        # Here weight implies that it can grow it's\n        # size if extra space is available\n        # default weight is 0\n        self.columnconfigure(1, weight = 1)\n\n        # Tkinter variable storing integer value\n        self.variable = IntVar()\n\n        # Checkbutton is created but will behave as Button\n        # cause in style, Button is passed\n        # main reason to do this is Button do not support\n        # variable option but checkbutton do\n        self.button = ttk.Checkbutton(self, variable = self.variable,\n                            command = self.activate, style =\"TButton\")\n        self.button.grid(row = 0, column = 0)\n\n        # This wil create a separator\n        # A separator is a line, we can also set thickness\n        self.separator = ttk.Separator(self, orient =\"horizontal\")\n        self.separator.grid(row = 0, column = 1, sticky =\"we\")\n\n        self.frame = ttk.Frame(self, name='special')\n\n        # This will call activate function of class\n        self.activate()\n\n\n        if expanding:\n            self.toggle()\n\n    def activate(self):\n        if not self.variable.get():\n\n            # As soon as button is pressed it removes this widget\n            # but is not destroyed means can be displayed again\n            self.frame.grid_forget()\n\n            # This will change the text of the checkbutton\n            self.button.configure(text = self.collapsed_text)\n\n        elif self.variable.get():\n            # increasing the frame area so new widgets\n            # could reside in this container\n            self.frame.grid(row = 1, column = 0, columnspan = 2)\n            self.button.configure(text = self.expanded_text)\n\n    def toggle(self, _state = 'default'):\n        \"\"\"Switches the label frame to the opposite state.\"\"\"\n        self.variable.set(not self.variable.get())\n        if _state != 'default':\n            self.variable.set(_state)\n        self.activate()\n\ndic = {}\n\nclass autoE(Entry):\n    \"\"\"please don't make fontcolor the same as placecolor\"\"\"\n    def __init__(self, parent, placeholder=None, placecolor='gray', fontcolor='black', only: Literal['None', 'Num', 'Text'] = 'None', limit=None, space=True, quote=True, **arg):\n        Entry.__init__(self, parent, fg=placecolor, **arg)\n        self.bind(\"<FocusIn>\", self.click)\n        self.bind(\"<FocusOut>\", self.out)\n        self.bind(\"<KeyRelease>\", self.check)\n        self.ph = placeholder\n        self.fontc = fontcolor\n        self.only = only\n        self.limit = limit\n        \n        self.num = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n        self.text = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n        self.quote = \"\"\"[ ! @ # $ % ^ & * ( ) , . / < > ? 
; ' \\ : ` ~ - = _ +\"\"\"\n if space:\n self.num.append(' ')\n self.text.append(' ')\n if quote:\n self.num.extend(self.quote.split(' '))\n self.text.extend(self.quote.split(' ')) \n self.placecolor = placecolor\n self.insert(END, self.ph)\n\n def click(self, e):\n if self.get() == self.ph and self['fg'] == self.placecolor:\n self.delete(0, END)\n self['fg'] = self.fontc\n def out(self, e):\n if not self.get():\n self['fg'] = self.placecolor\n self.insert(END, self.ph)\n def check(self, e):\n s = ''\n if self.only == 'Num':\n \n for w in self.get():\n if w in self.num:\n s += w\n try:\n s = s[:self.limit]\n except TypeError:\n pass\n self.delete(0, END)\n self.insert(END, s)\n elif self.only == 'Text':\n for w in self.get():\n if w in self.text:\n s += w\n try:\n s = s[:self.limit]\n except TypeError:\n pass\n self.delete(0, END)\n self.insert(END, s)\n return\n\n##### ENDED ON trying to change the task from doing to finished. Since the master cannnot be changed, working on\n##### to collect all the info of each widget in the task and moving it to the opposite pane. The get funtion will\n##### trying to get the parents, the widget info and everything. Trying to work on that. Haven't finished yet.\n\ndef get(e):\n n = e.widget.winfo_parent()\n \n # n = n[0:-1]\n # n = '.'.join(n)\n print(n)\n get = [x for x in e.widget.config()]\n dic[e.widget] = {}\n for x in get:\n d = e.widget.cget(x)\n dic[e.widget][x] = d\n \n # s = \"\"\n n = root.nametowidget(n)\n n.destroy()\n # \n # n['parent'] = Fini_cf\n return\ncount = 0\n\ndef add_task(e):\n global count\n task = mainE.get()\n if task:\n tf = Frame(Undo_cf, name=task.lower()+'-frame'+str(count))\n tf.pack(anchor='w')\n # var = StringVar()\n # for x in range(0, 10):\n check = ttk.Checkbutton(tf, text=task, variable=StringVar(), name=task.lower()+'-check'+str(count))\n check.state(['!alternate'])\n check.pack(padx=20)\n count += 1\n check.bind('', get)\n \n \n on_configure(None)\n return\n\ndef on_configure(event):\n # update scrollregion after starting 'mainloop'\n # when all widgets are in canvas\n canvas.configure(scrollregion=canvas.bbox('all'))\n Undo_c.configure(scrollregion=Undo_c.bbox('all'))\n Fini_c.configure(scrollregion=Fini_c.bbox('all'))\n return\n\ndef _on_mousewheel(event):\n print(event, event.delta)\n print(event.widget)\n sx, sy = scrollbar.get()\n if 'undo_cf' in str(event.widget):\n Undo_c.yview_scroll(-1*int((event.delta/120)), \"units\")\n if 1.0 in Undo_s.get() and event.delta == -120 or 0.0 in Undo_s.get() and event.delta == 120:\n canvas.yview_scroll(-1*int((event.delta/120)), \"units\")\n return\n elif 'fini_cf' in str(event.widget):\n Fini_c.yview_scroll(-1*int((event.delta/120)), \"units\")\n if 1.0 in Fini_s.get() and event.delta == -120 or 0.0 in Fini_s.get() and event.delta == 120:\n Fini_c.yview_scroll(-1*int((event.delta/120)), \"units\")\n return\n if sx == 0.0 and sy == 1.0:\n return\n canvas.yview_scroll(-1*int((event.delta/120)), \"units\")\n return\n\ndef FrameWidth(event):\n canvas_width = event.width\n canvas.itemconfig(canva_frame, width = canvas_width)\n Undo_c['width'] = canvas_width -50\n Undo_c.itemconfig(Undo_cff, width=canvas_width-50)\n Undo_c['height'] = root.winfo_height()-200\n Fini_c['width'] = canvas_width -50\n Fini_c.itemconfig(Fini_cff, width=canvas_width-50)\n Fini_c['height'] = root.winfo_height()-200\n return\n\nFone = Frame(root, name='fone')\nFone.pack(anchor='center')\n\ncanvas = Canvas(root, name='main_canvas')\ncanvas.pack(side=LEFT, fill=BOTH, expand=True)\n\nscrollbar = 
Scrollbar(root, command=canvas.yview, name='main_scrollbar')\nscrollbar.pack(side=RIGHT, fill=Y)\n\n# scrollbar.bind('<Configure>', on_configure)\n# canvas.bind('<Configure>', on_configure)\n\ncanvas.configure(yscrollcommand = scrollbar.set)\n\ncanvas.bind('<Configure>', on_configure)\n\nTaskF = Frame(canvas, name='taskf')\n# TaskF.pack(expand=True, fill=BOTH)\ncanva_frame = canvas.create_window((0,0), window=TaskF, anchor='nw')\ncanvas.bind('<Configure>', FrameWidth)\ncanvas.bind_all(\"<MouseWheel>\", _on_mousewheel)\n# TaskF = Frame(root)\n# TaskF.pack(anchor='w', padx=50, pady=50)\n\nmainE = Entry(Fone, width=50, name='maine')\nmainE.grid(row=0, column=0)\nmainE.bind(\"<FocusIn>\", lambda x: maincp.toggle(_state=1))\nmainE.bind(\"<Return>\", add_task)\n\naddB = Button(Fone, text='+', font='times, 20', name='addb')\naddB.grid(row=0, column=1, padx=10)\n# addB.bind(\"<Button-1>\", add_task)\n\nmaincp = Cp(Fone, expanded_text='Description____________', collapsed_text='Description------------------', expanding=False, name='maincp')\nmainT = Text(maincp.frame, height=5, width=50, name='maint')\nmainT.grid(row=10, column=0, sticky='w')\nmaincp.grid(row=3, columnspan=2, sticky='w')\n\ndue = Frame(maincp.frame)\ndue.grid(row=1, columnspan=2)\n\nminute_e = autoE(due, 'min', only='Num', limit=2, space=False, quote=False, width=4)\nminute_e.grid(row=0, column=2)\n\nlab = Label(due, text=':')\nlab.grid(row=0, column=1)\n\nhour_e = autoE(due, 'hour', only='Num', limit=2, space=False, quote=False, width=4)\nhour_e.grid(row=0, column=0)\n\nday_e = autoE(due, 'day', only='Num', space=False, quote=False, limit=2, width=4)\nday_e.grid(row=0, column=5, pady=10, padx=20)\n\nmonth_e = autoE(due, placeholder='month', only='Num', space=False, quote=False, limit=2, width=6)\nmonth_e.grid(row=0, column=4)\n\nyear_e = autoE(due, placeholder='year', only='Num', limit=4, space=False, quote=False, width=4)\nyear_e.grid(row=0, column=3, padx=20)\n\npriF = Frame(maincp.frame)\npriF.grid(row=2, columnspan=2)\n\ndef fore(e):\n    \n    for b in radio:\n        b['foreground'] = 'white'\n    if e == \"de\":\n        pri.set(0)\n        return\n    e.widget['foreground'] = 'black'\n\npri = IntVar()\ncolor = ['None', 'red', 'blue', 'green', 'brown', 'gray']\nradio = []\nfor x in range(1, 6):\n    pri_O = Radiobutton(priF, variable=pri, text='priority ' + str(x), indicatoron=0, background=color[x], value=x, foreground='white', selectcolor='yellow')\n    pri_O.bind('<Button-1>', fore)\n    pri_O.grid(row=0, column=x)\n    radio.append(pri_O)\npri_Od = Button(priF, text='No priority', command= lambda: fore(\"de\"), bg='pink', fg='black')\npri_Od.grid(row=0, column=6, padx=5, pady=5)\n\n\n\nUNDO = Cp(TaskF, expanded_text =\">>Unfinished Works\", collapsed_text =\"<<Unfinished Works\", name='undo')\nUNDO.grid(row=0, column=0, sticky='we')\n\nUndo_f = Frame(UNDO.frame, name='undo_f')\nUndo_f.pack(fill=BOTH, expand=True)\n\nUndo_c = Canvas(Undo_f, name='undo_c')\nUndo_c.pack(side=LEFT, fill=BOTH, expand=True)\nUndo_c.bind('<Configure>', FrameWidth)\n\nUndo_s = Scrollbar(Undo_f, command=Undo_c.yview, name='undo_s')\nUndo_s.pack(side=RIGHT, fill='y')\n\nUndo_cf = Frame(Undo_c, name='undo_cf', bg='white')\nUndo_cff = Undo_c.create_window((0,0), window=Undo_cf, anchor='nw')\n\nUndo_c.configure(yscrollcommand=Undo_s.set)\n\n\nUndo_c.bind('<Configure>', on_configure)\n\n\n\n\n\nFINI = Cp(TaskF, expanded_text='>>Finished', collapsed_text='<<Finished', name='fini')\nFINI.grid(row=1, column=0, sticky='we')\n\nFini_f = Frame(FINI.frame, name='fini_f')\nFini_f.pack(fill=BOTH, expand=True)\n\nFini_c = Canvas(Fini_f, name='fini_c')\nFini_c.pack(side=LEFT, fill=BOTH, expand=True)\nFini_c.bind('<Configure>', FrameWidth)\n\nFini_s = Scrollbar(Fini_f, command=Fini_c.yview, name='fini_s')\nFini_s.pack(side=RIGHT, fill='y')\n\nFini_cf = Frame(Fini_c, name='fini_cf', bg='white')\nFini_cff = Fini_c.create_window((0,0), window=Fini_cf, anchor='nw')\n\nFini_c.configure(yscrollcommand=Fini_s.set)\n\n\nFini_c.bind('<Configure>', on_configure)\n\n\n\n\n\n\n\n\n\n\n\n\n\nUndo_f.bind(\"<Configure>\", on_configure)\nFini_f.bind(\"<Configure>\", on_configure)\nUNDO.bind(\"<Configure>\", on_configure)\nFINI.bind(\"<Configure>\", 
on_configure)\n\n\n\n\n\n\n\n\n\n\n\n\nroot.mainloop()","repo_name":"Today100/Glossary","sub_path":"Tools_main_file/Pending/tdlist(pending)/trial2.py","file_name":"trial2.py","file_ext":"py","file_size_in_byte":12343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27017269023","text":"'''\nA BT is one of\nBT(Number, BT, BT)\nNone\n'''\nclass InvalidBinTreeError(Exception):\n    pass\n\n\nclass BT:\n    def __init__(self, num, left=None, right=None):\n        # input validation\n        if not (isinstance(left, BT) or left is None):\n            raise InvalidBinTreeError(\"left tree is not a valid BT: {0}\".format(left))\n        if not (isinstance(right, BT) or right is None):\n            raise InvalidBinTreeError(\"right tree is not a valid BT: {0}\".format(right))\n\n        self.num = num\n        self.left = left\n        self.right = right\n\n    def flatten(self):\n        '''returns a list with all nodes, using in-order traversal'''\n        if self.left is None and self.right is None:\n            return [self.num]\n        elif self.left is None and isinstance(self.right, BT):\n            return [self.num] + self.right.flatten()\n        elif isinstance(self.left, BT) and self.right is None:\n            return self.left.flatten() + [self.num]\n        else:\n            return self.left.flatten() + [self.num] + self.right.flatten()\n\n    def __str__(self):\n        if self.left is None and self.right is None:\n            return \"({0}, (), ())\".format(self.num)\n        elif self.left is None:\n            return \"({0}, (), {1})\".format(self.num, str(self.right))\n        elif self.right is None:\n            return \"({0}, {1}, ())\".format(self.num, str(self.left))\n        else:\n            return \"({0}, {1}, {2})\".format(self.num, str(self.left), str(self.right))\n\n    def __eq__(self, other):\n        if other is None:\n            return False\n        else:\n            return self.num == other.num and self.left == other.left and self.right == other.right\n\n    def clone(self):\n        if self.left is None and self.right is None:\n            return BT(self.num)\n        elif self.left is None:\n            return BT(self.num, None, self.right.clone())\n        elif self.right is None:\n            return BT(self.num, self.left.clone(), None)\n        else:\n            # both branches are trees\n            return BT(self.num, self.left.clone(), self.right.clone())\n\n\nclass UnorderedBSTError(Exception):\n    pass\n\n\nclass BST(BT):\n    def __init__(self, num, left=None, right=None):\n        super().__init__(num, left, right)\n        # validate tree structure\n        if not self.isValid():\n            raise UnorderedBSTError(\"BST is not properly ordered\")\n\n    def isValid(self):\n        '''is the tree a valid binary search tree?'''\n        flattened = self.flatten()\n        for i in range(0, len(flattened) - 1):\n            if not flattened[i] < flattened[i + 1]:\n                return False\n        return True\n\n    def __contains__(self, ele):\n        if ele == self.num:\n            return True\n        elif ele < self.num:\n            if self.left is None:\n                return False\n            else:\n                # there's a left tree and it might be there\n                return ele in self.left\n        elif ele > self.num:\n            if self.right is None:\n                return False\n            else:\n                # there's a right tree and it might be there\n                return ele in self.right\n        else:\n            return False\n\n    def getPath(self, ele):\n        # assume it's in the tree\n        if ele == self.num:\n            return \"/{0}\".format(self.num)\n        elif ele < self.num:\n            return \"/{0}{1}\".format(self.num, self.left.getPath(ele))\n        elif ele > self.num:\n            return \"/{0}{1}\".format(self.num, self.right.getPath(ele))\n","repo_name":"quasarbright/quasarbright.github.io","sub_path":"python/binTree/binTree.py","file_name":"binTree.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
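A short usage sketch for the BT/BST classes above (it assumes the record's file is importable as binTree; the tree shape is made up):

    from binTree import BST

    # children must already satisfy the ordering, since BST validates on construction
    t = BST(5, BST(3), BST(8))

    assert t.flatten() == [3, 5, 8]
    assert 8 in t and 4 not in t
    assert t.getPath(8) == "/5/8"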
+{"seq_id":"4544750715","text":"import binascii\nimport hashlib\nimport hmac\nimport json\nimport time\nfrom typing import Dict\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nfrom django.test import Client, RequestFactory\nfrom django.urls import reverse\nfrom pytest_django.fixtures import SettingsWrapper\n\nfrom blossom.api.slack import client as slack_client\nfrom blossom.api.slack.actions import is_valid_github_request, process_action\nfrom blossom.api.views.slack import github_sponsors_endpoint\n\n# TODO: There is a way to mock decorators, but I can't figure it out.\n# There's a lot of testing that needs to happen for this module, but I can't\n# get past the threading decorator and the patch calls don't seem to work.\n# Resources:\n# https://stackoverflow.com/questions/7667567/can-i-patch-a-python-decorator-before-it-wraps-a-function # noqa: E501\n# http://alexmarandon.com/articles/python_mock_gotchas/\n# https://stackoverflow.com/questions/36812830/mocking-decorators-in-python-with-mock-and-pytest # noqa: E501\n\n# NOTE: In order to test slack, you must add the `settings` hook and set\n# `settings.ENABLE_SLACK = True`. MAKE SURE that if you're writing a new\n# test that uses ENABLE_SLACK that you patch `requests.post` or it will\n# try and ping modchat (if you're running locally) or explode if this is\n# running in the github actions pipeline.\n\nSLACK_SIGNING_SECRET = \"12345\"\n\n\ndef get_slack_headers(body: dict, settings: SettingsWrapper) -> dict:\n \"\"\"Mock the headers required by slack validation.\"\"\"\n create_time = str(int(time.time()))\n\n body = json.dumps(body)\n sig_basestring = \"v0:\" + create_time + \":\" + body\n signature = (\n \"v0=\"\n + hmac.new(\n bytes(settings.SLACK_SIGNING_SECRET, \"latin-1\"),\n msg=bytes(sig_basestring, \"latin-1\"),\n digestmod=hashlib.sha256,\n ).hexdigest()\n )\n\n return {\n \"HTTP_X-Slack-Signature\": signature,\n \"HTTP_X-Slack-Request-Timestamp\": create_time,\n }\n\n\ndef test_challenge_request(client: Client, settings: SettingsWrapper) -> None:\n \"\"\"Test handling of Slack's new endpoint challenge message.\"\"\"\n settings.SLACK_SIGNING_SECRET = SLACK_SIGNING_SECRET\n data = {\"challenge\": \"asdfasdfasdf\"}\n headers = get_slack_headers(data, settings)\n result = client.post(\n reverse(\"slack\"), json.dumps(data), content_type=\"application/json\", **headers\n )\n assert result.content == b\"asdfasdfasdf\"\n\n\n@pytest.mark.parametrize(\n \"test_data\",\n [\n {\"data\": {\"aaa\": \"bbb\"}, \"signature\": \"nope\", \"result\": False},\n {\n \"data\": {\"bbb\": \"ccc\"},\n \"signature\": \"sha1=757fc3cb2f66db92a1d164c116358660e4e7656e\",\n \"result\": True,\n },\n {\"data\": {\"aaa\": \"bbb\"}, \"signature\": \"sha1=ttthhhbbbttt\", \"result\": False},\n {\"data\": {\"aaa\": \"bbb\"}, \"signature\": None, \"result\": True},\n ],\n)\ndef test_is_github_valid_request(\n rf: RequestFactory, settings: SettingsWrapper, test_data: Dict\n) -> None:\n \"\"\"Test to ensure that a webhook from GitHub Sponsors is valid.\"\"\"\n request = rf.post(\n \"slack/github/sponsors/\",\n data=test_data[\"data\"],\n content_type=\"application/json\",\n )\n\n settings.GITHUB_SPONSORS_SECRET_KEY = \"shhh, it's a secret\"\n\n if not test_data[\"signature\"]:\n test_data[\"signature\"] = \"sha1={}\".format(\n binascii.hexlify(\n hmac.digest(\n msg=request.body,\n key=settings.GITHUB_SPONSORS_SECRET_KEY.encode(),\n digest=\"sha1\",\n )\n ).decode()\n )\n\n request.headers = {\"x-hub-signature\": test_data[\"signature\"]}\n assert 
is_valid_github_request(request) is test_data[\"result\"]\n\n\ndef test_github_missing_signature(rf: RequestFactory) -> None:\n    \"\"\"Test to ensure a request that is missing the signature is marked invalid.\"\"\"\n    request = rf.post(\n        \"slack/github/sponsors/\", data={\"aaa\": \"bbb\"}, content_type=\"application/json\"\n    )\n    assert is_valid_github_request(request) is False\n\n\n@pytest.mark.parametrize(\n    \"test_data\",\n    [\n        {\n            \"username\": \"bob\",\n            \"tier\": \"A\",\n            \"action\": \"created\",\n            \"result\": \":tada: GitHub Sponsors: [created] - bob | A :tada:\",\n            \"status_code\": 200,\n        },\n        {\n            \"username\": \"bobbert\",\n            \"tier\": \"B\",\n            \"action\": \"cancelled\",\n            \"result\": \":sob: GitHub Sponsors: [cancelled] - bobbert | B :sob:\",\n            \"status_code\": 200,\n        },\n        {\n            \"username\": \"bobby\",\n            \"tier\": \"C\",\n            \"action\": \"edited\",\n            \"result\": (\":rotating_light: GitHub Sponsors: [edited] - bobby | C :rotating_light:\"),\n            \"status_code\": 200,\n        },\n    ],\n)\ndef test_github_sponsor_slack_message(\n    rf: RequestFactory, settings: SettingsWrapper, test_data: Dict\n) -> None:\n    \"\"\"Test to ensure webhooks from GitHub Sponsors trigger appropriate slack pings.\"\"\"\n    slack_client.chat_postMessage = MagicMock()\n    request = rf.post(\n        \"slack/github/sponsors/\",\n        data={\n            \"action\": test_data[\"action\"],\n            \"sponsorship\": {\n                \"sponsor\": {\"login\": test_data[\"username\"]},\n                \"tier\": {\"name\": test_data[\"tier\"]},\n            },\n        },\n        content_type=\"application/json\",\n    )\n    request.headers = {\n        \"x-hub-signature\": \"sha1={}\".format(\n            binascii.hexlify(\n                hmac.digest(\n                    msg=request.body,\n                    key=settings.GITHUB_SPONSORS_SECRET_KEY.encode(),\n                    digest=\"sha1\",\n                )\n            ).decode()\n        )\n    }\n    response = github_sponsors_endpoint(request)\n\n    assert slack_client.chat_postMessage.call_args[1][\"text\"] == test_data[\"result\"]\n    assert response.status_code == test_data[\"status_code\"]\n\n\ndef test_process_action_check() -> None:\n    \"\"\"Test that a check action is routed correctly.\"\"\"\n    data = {\n        \"channel\": {\"id\": \"C065W1189\", \"name\": \"forgotten-works\"},\n        \"actions\": [{\"name\": \"Approve\", \"value\": \"check_approved_1\", \"type\": \"button\"}],\n        \"user\": {\"id\": \"U045VRZFT\", \"name\": \"Modulo\"},\n        \"message_ts\": \"1458170866.000004\",\n    }\n\n    with patch(\n        \"blossom.api.slack.actions.process_check_action\", return_value=None\n    ) as check_mock, patch(\n        \"blossom.api.slack.actions.process_submission_report_update\"\n    ) as report_mock, patch(\n        \"blossom.api.slack.actions.client.chat_postMessage\"\n    ) as message_mock:\n        process_action(data)\n\n        assert check_mock.call_count == 1\n        assert report_mock.call_count == 0\n        assert message_mock.call_count == 0\n\n\ndef test_process_action_report() -> None:\n    \"\"\"Test that a report action is routed correctly.\"\"\"\n    data = {\n        \"channel\": {\"id\": \"C065W1189\", \"name\": \"forgotten-works\"},\n        \"actions\": [{\"name\": \"Approve\", \"value\": \"approve_submission_3\", \"type\": \"button\"}],\n        \"user\": {\"id\": \"U045VRZFT\", \"name\": \"Modulo\"},\n        \"message_ts\": \"1458170866.000004\",\n    }\n\n    with patch(\n        \"blossom.api.slack.actions.process_check_action\", return_value=None\n    ) as check_mock, patch(\n        \"blossom.api.slack.actions.process_submission_report_update\"\n    ) as report_mock, patch(\n        \"blossom.api.slack.actions.client.chat_postMessage\"\n    ) as message_mock:\n        process_action(data)\n\n    assert check_mock.call_count == 0\n    
assert report_mock.call_count == 1\n assert message_mock.call_count == 0\n\n\ndef test_process_action_unknown() -> None:\n \"\"\"Test that an error message is sent for an unknown action.\"\"\"\n data = {\n \"channel\": {\"id\": \"C065W1189\", \"name\": \"forgotten-works\"},\n \"actions\": [{\"name\": \"Approve\", \"value\": \"asdas\", \"type\": \"button\"}],\n \"user\": {\"id\": \"U045VRZFT\", \"name\": \"Modulo\"},\n \"message_ts\": \"1458170866.000004\",\n }\n\n with patch(\n \"blossom.api.slack.actions.process_check_action\", return_value=None\n ) as check_mock, patch(\n \"blossom.api.slack.actions.process_submission_report_update\"\n ) as report_mock, patch(\n \"blossom.api.slack.actions.client.chat_postMessage\"\n ) as message_mock:\n process_action(data)\n\n assert check_mock.call_count == 0\n assert report_mock.call_count == 0\n assert message_mock.call_count == 1\n","repo_name":"GrafeasGroup/blossom","sub_path":"blossom/api/tests/slack/test_actions.py","file_name":"test_actions.py","file_ext":"py","file_size_in_byte":8473,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"16336723552","text":"#! /usr/bin/env python3\n\nimport sys, glob\nfrom qe_tokenization import perform_tokenization\nimport argparse\nimport stanza\nfrom spacy_stanza import StanzaLanguage\nimport pandas as pd\nfrom utils import load_text_file\nfrom spacy_dummy_tokenizer import WhitespaceTokenizer\n\n\ndef processLanguagePair(lgpair, keyfile_prefix, rawtranslations_glob, output_path):\n    \"\"\"\n    params:\n        lgpair: translation language direction, e.g., en-de\n        keyfile_prefix: prefix of the provided key files\n        rawtranslations_glob: raw, detokenized translations\n        output_path: path to store the WSD indices and their correct/incorrect labels. a dataframe with\n        columns ['Sentence', 'Correct WSD output', 'Wrong WSD words indices']\n    \"\"\"\n\n    # load sense keys from file\n    sense_keys = []\n    k = load_text_file(keyfile_prefix + \".key.txt\")\n    for line in k:\n        elements = line.strip().split(\"\\t\")\n        t = (elements[0], elements[1], elements[2], tuple(elements[3].split(\" \")), tuple(elements[4].split(\" \")))\n        sense_keys.append(t)\n\n    # load domain keys from file\n    indomain_keys = set()\n    outdomain_keys = set()\n    d = load_text_file(keyfile_prefix + \".domain.txt\")\n    for line in d:\n        elements = line.strip().split(\"\\t\")\n        if elements[2] == \"in\":\n            indomain_keys.add((elements[0], elements[1]))\n        else:\n            outdomain_keys.add((elements[0], elements[1]))\n\n    # load lemmatizer\n    snlp = stanza.Pipeline(lang=lgpair[-2:])\n    nlp = StanzaLanguage(snlp)\n    # Replace the default tokenizer in the pipeline with the dummy tokenizer, since we will use this on\n    # pre-tokenized text\n    nlp.tokenizer = WhitespaceTokenizer(nlp)\n\n    # load and process submissions\n    results = {}\n    rawsubmissions = sorted(glob.glob(rawtranslations_glob))\n    for rawsubmission in rawsubmissions:\n        # Create the df to store the sentence and word level WSD correct/incorrect result\n        wsd_labels_df = pd.DataFrame(columns=['Sentence', 'Correct WSD output', 'Wrong WSD words indices'])\n\n        counts = {\"pos_in\": 0, \"pos_out\": 0, \"neg_in\": 0, \"neg_out\": 0, \"unk_in\": 0, \"unk_out\": 0}\n        trans_sentences = load_text_file(rawsubmission)\n        trans_sentences_tok = perform_tokenization(lang=lgpair[-2:], inlist=trans_sentences)\n\n        wsd_labels_df['Sentence'] = trans_sentences\n\n        for i, (trans_sentence, trans_sentence_tok, key) in \\\n                enumerate(zip(trans_sentences, trans_sentences_tok, sense_keys)):\n            if (key[2], \" \".join(key[3])) in indomain_keys:\n                suffix = \"_in\"\n            elif (key[2], \" \".join(key[3])) in outdomain_keys:\n                suffix = \"_out\"\n            else:\n                print(\"Domain not found:\", (key[2], \" \".join(key[3])))\n                # without a domain there is no suffix; skip this sentence\n                continue\n\n            # first look in tokenized data\n            tokwords = [x.lower() for x in trans_sentence_tok]\n            posfound = any([posword in tokwords for posword in key[3]])\n            negfound = any([negword in tokwords for negword in key[4]])\n\n            negative_indices = []\n            # Store the indices of the negative words in the tokenized sentence\n            if negfound:\n                for tokword_i, tokword in enumerate(tokwords):\n                    if tokword in key[4]:\n                        negative_indices.append(tokword_i)\n\n            # if not found, look in lemmatized data\n            if (not posfound) and (not negfound):\n                posfound = False\n                negfound = False\n\n                # Perform lemmatization\n                doc = nlp(trans_sentence)\n                for token_i, token in enumerate(doc):\n                    if token.lemma_.lower() in key[3]:\n                        posfound = True\n                    if token.lemma_.lower() in key[4]:\n                        negfound = True\n                        negative_indices.append(token_i)\n\n            if posfound and not negfound:\n                counts[\"pos\" + suffix] += 1\n                wsd_labels_df['Correct WSD output'].iloc[i] = True\n            elif negfound:\n                
counts[\"neg\" + suffix] += 1\n wsd_labels_df['Correct WSD output'].iloc[i] = False\n else:\n counts[\"unk\" + suffix] += 1\n wsd_labels_df['Correct WSD output'].iloc[i] = None\n\n wsd_labels_df['Wrong WSD words indices'].iloc[i] = negative_indices\n\n wsd_labels_df.to_csv(output_path)\n\n counts[\"cov_in\"] = (counts[\"pos_in\"] + counts[\"neg_in\"]) / (\n counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"unk_in\"])\n counts[\"cov_out\"] = (counts[\"pos_out\"] + counts[\"neg_out\"]) / (\n counts[\"pos_out\"] + counts[\"neg_out\"] + counts[\"unk_out\"])\n counts[\"cov_all\"] = (counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"pos_out\"] + counts[\"neg_out\"]) / (\n counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"unk_in\"] + counts[\"pos_out\"] + counts[\"neg_out\"] +\n counts[\"unk_out\"])\n\n # Precision = pos / (pos+neg)\n counts[\"prec_in\"] = 0 if counts[\"pos_in\"] == 0 else counts[\"pos_in\"] / (counts[\"pos_in\"] + counts[\"neg_in\"])\n counts[\"prec_out\"] = 0 if counts[\"pos_out\"] == 0 else counts[\"pos_out\"] / (\n counts[\"pos_out\"] + counts[\"neg_out\"])\n counts[\"prec_all\"] = 0 if (counts[\"pos_in\"] + counts[\"pos_out\"]) == 0 else (counts[\"pos_in\"] + counts[\n \"pos_out\"]) / (counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"pos_out\"] + counts[\"neg_out\"])\n\n # RecallA = pos / (pos+unk)\n # This is the definition of recall that was used to compute the results tables\n # in the papers, but *does not* correspond to the definition given in the papers.\n counts[\"recA_in\"] = 0 if counts[\"pos_in\"] == 0 else counts[\"pos_in\"] / (counts[\"pos_in\"] + counts[\"unk_in\"])\n counts[\"recA_out\"] = 0 if counts[\"pos_out\"] == 0 else counts[\"pos_out\"] / (\n counts[\"pos_out\"] + counts[\"unk_out\"])\n counts[\"recA_all\"] = 0 if (counts[\"pos_in\"] + counts[\"pos_out\"]) == 0 else (counts[\"pos_in\"] + counts[\n \"pos_out\"]) / (counts[\"pos_in\"] + counts[\"unk_in\"] + counts[\"pos_out\"] + counts[\"unk_out\"])\n\n # RecallB = pos / (pos+unk+neg)\n # This formula corresponds to the definition given in the papers,\n # but is *not* the one that was used to compute the results tables.\n counts[\"recB_in\"] = 0 if counts[\"pos_in\"] == 0 else counts[\"pos_in\"] / (\n counts[\"pos_in\"] + counts[\"unk_in\"] + counts[\"neg_in\"])\n counts[\"recB_out\"] = 0 if counts[\"pos_out\"] == 0 else counts[\"pos_out\"] / (\n counts[\"pos_out\"] + counts[\"unk_out\"] + counts[\"neg_out\"])\n counts[\"recB_all\"] = 0 if (counts[\"pos_in\"] + counts[\"pos_out\"]) == 0 else (counts[\"pos_in\"] + counts[\n \"pos_out\"]) / (counts[\"pos_in\"] + counts[\"unk_in\"] + counts[\"neg_in\"] + counts[\"pos_out\"] + counts[\n \"unk_out\"] + counts[\"neg_out\"])\n\n # F1A is based on RecallA\n counts[\"f1A_in\"] = 0 if (counts[\"prec_in\"] + counts[\"recA_in\"]) == 0 else 2 * counts[\"prec_in\"] * counts[\n \"recA_in\"] / (counts[\"prec_in\"] + counts[\"recA_in\"])\n counts[\"f1A_out\"] = 0 if (counts[\"prec_out\"] + counts[\"recA_out\"]) == 0 else 2 * counts[\"prec_out\"] * counts[\n \"recA_out\"] / (counts[\"prec_out\"] + counts[\"recA_out\"])\n counts[\"f1A_all\"] = 0 if (counts[\"prec_all\"] + counts[\"recA_all\"]) == 0 else 2 * counts[\"prec_all\"] * counts[\n \"recA_all\"] / (counts[\"prec_all\"] + counts[\"recA_all\"])\n\n # F1B is based on RecallB\n counts[\"f1B_in\"] = 0 if (counts[\"prec_in\"] + counts[\"recB_in\"]) == 0 else 2 * counts[\"prec_in\"] * counts[\n \"recB_in\"] / (counts[\"prec_in\"] + counts[\"recB_in\"])\n counts[\"f1B_out\"] = 0 if (counts[\"prec_out\"] + 
counts[\"recB_out\"]) == 0 else 2 * counts[\"prec_out\"] * counts[\n \"recB_out\"] / (counts[\"prec_out\"] + counts[\"recB_out\"])\n counts[\"f1B_all\"] = 0 if (counts[\"prec_all\"] + counts[\"recB_all\"]) == 0 else 2 * counts[\"prec_all\"] * counts[\n \"recB_all\"] / (counts[\"prec_all\"] + counts[\"recB_all\"])\n\n submissionName = rawsubmission.split(\"/\")[-1]\n results[submissionName] = counts\n\n print(lgpair.upper())\n print()\n print(\n \"Submission\\t\\tInPos\\tInNeg\\tInUnk\\tInCoverage\\tInPrecision\\tInRecallA\\tInRecallB\\tInFscoreA\\tInFscoreB\\t\"\n \"\\tOutPos\\tOutNeg\\tOutUnk\\tOutCoverage\\tOutPrecision\\tOutRecallA\\tOutRecallB\\tOutFscoreA\\tOutFscoreB\\t\"\n \"\\tAllPos\\tAllNeg\\tAllUnk\\tAllCoverage\\tAllPrecision\\tAllRecallA\\tAllRecallB\\tAllFscoreA\\tAllFscoreB\")\n for submission, result in sorted(results.items(), key=lambda x: x[1][\"f1A_all\"], reverse=True):\n s = submission\n s += \"\\t\\t{}\\t{}\\t{}\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\".format(result[\"pos_in\"],\n result[\"neg_in\"],\n result[\"unk_in\"],\n 100 * result[\"cov_in\"],\n 100 * result[\"prec_in\"],\n 100 * result[\"recA_in\"],\n 100 * result[\"recB_in\"],\n 100 * result[\"f1A_in\"],\n 100 * result[\"f1B_in\"])\n s += \"\\t\\t{}\\t{}\\t{}\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\".format(result[\"pos_out\"],\n result[\"neg_out\"],\n result[\"unk_out\"],\n 100 * result[\"cov_out\"],\n 100 * result[\"prec_out\"],\n 100 * result[\"recA_out\"],\n 100 * result[\"recB_out\"],\n 100 * result[\"f1A_out\"],\n 100 * result[\"f1B_out\"])\n s += \"\\t\\t{}\\t{}\\t{}\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\".format(\n result[\"pos_in\"] + result[\"pos_out\"], result[\"neg_in\"] + result[\"neg_out\"],\n result[\"unk_in\"] + result[\"unk_out\"], 100 * result[\"cov_all\"], 100 * result[\"prec_all\"],\n 100 * result[\"recA_all\"], 100 * result[\"recB_all\"], 100 * result[\"f1A_all\"], 100 * result[\"f1B_all\"])\n print(s)\n print()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--lgpair', type=str, default='en-de')\n parser.add_argument('--keyfileprefix',\n type=str,\n default='txt/en-de',\n help='path of the *.key.txt and *.domain.txt files')\n parser.add_argument('--rawtranslations', type=str,\n help='path of the detokenized translation output')\n parser.add_argument('--output_path', type=str,\n help='path to store the WSD correct/incorrect labels and erroneous token indices')\n\n args = parser.parse_args()\n print(args)\n\n processLanguagePair(args.lgpair, args.keyfileprefix, args.rawtranslations, args.output_path)\n","repo_name":"TuAnh23/MuCoW","sub_path":"WMT2019/translation test suite/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":11986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70990525265","text":"from __future__ import annotations\n\n# System imports\nfrom copy import copy\n\n# Third-party imports\nimport pytest\n\n# Local imports\nfrom openide.nodes.properties_support import GetterSetterProperty\n\n\nclass RWMethods:\n\n def __init__(self) -> None:\n self.__attr = 0\n\n def get_attr(self) -> int:\n return self.__attr\n\n def set_attr(self, value: int) -> None:\n self.__attr = value\n\n not_a_method = 45\n\n\ndef test_read_write() -> None:\n rw = RWMethods()\n\n def check(prop: GetterSetterProperty, init_value: int, set_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is True\n assert prop.can_write is True\n\n assert prop.value == init_value\n assert rw.get_attr() == init_value\n prop.value = set_value\n assert prop.value == set_value\n assert rw.get_attr() == set_value\n\n prop = GetterSetterProperty(rw.get_attr, rw.set_attr)\n check(prop, 0, 12)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 12, 24)\n check(prop, 24, 36)\n\n\nclass ROMethods:\n\n def __init__(self, value: int) -> None:\n self.__attr = value\n\n def get_attr(self) -> int:\n return self.__attr\n\n def set_attr(self, value: int) -> None:\n self.__attr = value\n\n\ndef test_read_only() -> None:\n ro = ROMethods(72)\n\n def check(prop: GetterSetterProperty, init_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is True\n assert prop.can_write is False\n\n assert prop.value == init_value\n\n with pytest.raises(AttributeError):\n prop.value = 12\n\n prop = GetterSetterProperty(ro.get_attr)\n check(prop, 72)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 72)\n\n ro.set_attr(46)\n check(prop, 46)\n check(cloned_prop, 46)\n\n\ndef test_write_only() -> None:\n wo = RWMethods()\n\n def check(prop: GetterSetterProperty, init_value: int, set_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is False\n assert prop.can_write is True\n\n assert wo.get_attr() == init_value\n prop.value = set_value\n assert wo.get_attr() == set_value\n\n with pytest.raises(AttributeError):\n _ = prop.value\n\n prop = GetterSetterProperty(None, wo.set_attr)\n check(prop, 0, 27)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 27, 54)\n check(prop, 54, 81)\n\n\ndef test_not_method() -> None:\n rw = RWMethods()\n\n with pytest.raises(TypeError):\n GetterSetterProperty(rw.not_a_method) # type: ignore\n\n with pytest.raises(TypeError):\n GetterSetterProperty(None, rw.not_a_method) # type: ignore\n\n\ndef test_no_getter_and_setter() -> None:\n with pytest.raises(ValueError):\n GetterSetterProperty()\n\n\nclass NoTypeGetter:\n\n def __init__(self, value: int) -> None:\n self.__attr = value\n\n def get_attr(self): # type: ignore\n return self.__attr\n\n def set_attr(self, value: int) -> None:\n self.__attr = value\n\n\ndef test_no_type_hint() -> None:\n ro = NoTypeGetter(83)\n\n with pytest.raises(ValueError):\n GetterSetterProperty(ro.get_attr)\n\n def check(prop: GetterSetterProperty, init_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is True\n assert prop.can_write is False\n\n assert prop.value == init_value\n\n with pytest.raises(AttributeError):\n prop.value = 12\n\n prop = GetterSetterProperty(ro.get_attr, value_type=int)\n check(prop, 83)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 83)\n\n ro.set_attr(41)\n check(prop, 41)\n check(cloned_prop, 
41)\n","repo_name":"AxelVoitier/openide","sub_path":"tests/nodes/properties/test_getter_setter_property.py","file_name":"test_getter_setter_property.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"15619478174","text":"from bs4 import BeautifulSoup\nimport lxml\nimport requests\n\nclass UrlNotFound(Exception):\n    e_message = \"cannot open url\"\n\ndef get_urls(main_url: str, paths: list[str]) -> list[str]:\n    urls = []\n\n    for path in paths:\n        urls.append(main_url + path)\n\n    return urls\n\n\ndef get_xml(url: str = \"\") -> BeautifulSoup:\n\n    r: requests.Response = requests.get(url)\n\n    if r.status_code == 200:\n        content = r.content\n        return BeautifulSoup(content, \"lxml-xml\")\n    raise UrlNotFound(UrlNotFound.e_message)\n\n\n\nif __name__ == '__main__':\n    print(get_xml(\"https://www.welt.de/feeds/ooh/out-of-home/bundesliga/news\").prettify())\n","repo_name":"Askil61/Bundesliga","sub_path":"app/py/get_xml.py","file_name":"get_xml.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
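A brief usage sketch for the two helpers above (it assumes the record's file is importable as get_xml; the feed path is taken from the record, and get_xml's timeout-free requests.get call is used as-is):

    from get_xml import UrlNotFound, get_urls, get_xml

    urls = get_urls("https://www.welt.de", ["/feeds/ooh/out-of-home/bundesliga/news"])
    for url in urls:
        try:
            print(get_xml(url).find("title"))
        except UrlNotFound:
            print(f"could not fetch {url}")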
+{"seq_id":"33752017661","text":"import PyPDF2 as pdf\r\nimport os\r\n\r\ninputdir = r'C:\\Users\\saadbasheer\\Desktop\\Code\\Python\\Projects\\pdfmerger\\input'\r\noutputdir = r'C:\\Users\\saadbasheer\\Desktop\\Code\\Python\\Projects\\pdfmerger\\output'\r\n\r\ndef pdfmerger(input_dir, output_dir):\r\n    merger = pdf.PdfMerger()\r\n    filename = \"merged_pdf.pdf\"\r\n    # use the parameters rather than the module-level globals, and merge in a stable order\r\n    for files in sorted(os.listdir(input_dir)):\r\n        if files.endswith('.pdf'):\r\n            merger.append(os.path.join(input_dir, files))\r\n\r\n    merger.write(os.path.join(output_dir, filename))\r\n    merger.close()\r\n\r\n\r\npdfmerger(inputdir, outputdir)\r\n","repo_name":"Saadbasheer/pdf-merger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71465020305","text":"# Selection sort\n\ndef selection(arr):\n    for i in range(len(arr)):\n        min = i\n        for j in range(i+1,len(arr)):\n            if arr[j] < arr[min]:\n                min = j\n        arr[i],arr[min]=arr[min],arr[i]\n    return arr\n\ns = [1,5,8,3,21,2,4]\nprint(selection(s))\n\n\nd=[1,1,2,3,4,3,5,2]\nindex = 0\nfor i in range(len(d)):\n    for j in range(i+1,len(d)):\n        if d[i]==d[j]:\n            d.pop(j)\n            break\n\nprint(d)\nf=[]\nfor i in d:\n    if i not in f:\n        f.append(i)\nprint(f)\n\ng=[]\nfor i in range(len(d)):\n    for j in range(i+1,len(d)):\n        if d[i]==d[j] and d[i] not in g:\n            g.append(d[i])\n    \nprint(\"g is \",g)\n\nresult =0\nfor i in range(len(d)):\n    result ^=d[i]\n\nprint(result)\n\nz = [1,2,1,4,5,7,4,5]\n\nclass Solution:\n    def threeSum(self, nums):\n        n = len(nums)\n        nums.sort()\n        l=set()\n        for i in range(n-2):\n            j = i+1\n            k = n-1\n            while(j0:\n            left = generate_balanced_tree_rec(max_depth-1, bitlength, num_attributes, seed)\n            right = generate_balanced_tree_rec(max_depth-1, bitlength, num_attributes, seed)\n            threshold = random.randint(0, 2**bitlength-1)\n            feature = random.randint(0, num_attributes-1)\n            t = Internal(threshold, feature, left, right)\n        else:\n            t = Leaf(random.randint(0, 2**CLASSIFICATION_VALUE_BITLENGTH-1))\n\n        return t    \n\n\ndef generate_balanced_tree(max_depth, path, bitlength, num_attributes, seed=None):\n    if seed is not None:\n        random.seed(seed)\n    with open(path, 'w') as output_file:\n        stack=[0]\n        while len(stack) > 0:\n            current_depth = stack[0]\n            if current_depth == max_depth:\n                threshold = random.randint(0, 2**CLASSIFICATION_VALUE_BITLENGTH-1)\n                attribute_index = -1\n                output_file.write(f'{threshold} {attribute_index}\\n')\n                \n                stack = stack[1:]\n            elif current_depth < max_depth:\n                threshold = random.randint(0, 2**bitlength-1)\n                attribute_index = random.randint(0, num_attributes-1)\n                output_file.write(f'{threshold} {attribute_index}\\n')\n                stack = [current_depth+1, current_depth+1] + stack[1:]\n            else:\n                print(\"This should not happen!\")\n\ndef generate_input(path, bitlength, num_attributes, seed=None):\n    if seed is not None:\n        random.seed(seed)\n    with open(path, 'w') as output_file:\n        for _ in range(num_attributes):\n            attribute_value = random.randint(0, 2**bitlength-1)\n            output_file.write(f'{attribute_value}\\n')\n    \n\ndef generate_balanced_tree_from_args(args):\n    max_depth = args.max_depth\n    path = args.path\n    bitlength=args.bitlength\n    num_attributes = args.num_attributes\n    generate_balanced_tree(max_depth=max_depth,path=path,bitlength=bitlength,num_attributes=num_attributes)\n\n\ndef catalan(n):\n    return comb(2*n, n) / (n+1)\n\ndef recursive_uniform_tree_generator(remaining_inner_nodes):\n    if remaining_inner_nodes == 0:\n        return [True]\n\n    remaining_inner_nodes -= 1\n    weights = [ catalan(i)*catalan(remaining_inner_nodes-i) for i in range(0,remaining_inner_nodes+1) ]\n    \n    left_inner_nodes=random.choices(range(0,remaining_inner_nodes+1), weights=weights)[0]\n    right_inner_nodes=remaining_inner_nodes-left_inner_nodes\n\n    return [False] + recursive_uniform_tree_generator(left_inner_nodes) + recursive_uniform_tree_generator(right_inner_nodes)\n\ndef generate_random_unbalanced_tree(args):\n    number_of_nodes=args.number_of_nodes\n    preorder_representation=recursive_uniform_tree_generator(number_of_nodes)\n    generate_and_write_tree(preorder_representation, args)\n\n# if __name__ == '__main__':\n\n#     parser = argparse.ArgumentParser()\n\n#     # common for both cases    \n#     parser.add_argument('--bitlength', type=int, default=32)\n#     parser.add_argument('--num_attributes', type=int, default=4)\n# 
parser.add_argument('--balanced', const=True, default=False, nargs='?')\n\n# # balanced case\n# parser.add_argument('--max_depth', type=int, default=4)\n# parser.add_argument('--path', type=str, required=True)\n\n# # unbalanced case\n# parser.add_argument('--number_of_nodes', type=int, default=31)\n\n# args = parser.parse_args()\n\n# if args.balanced:\n# print(\"Balanced!\")\n# generate_balanced_tree_from_args(args)\n# else:\n# print(\"Not Balanced!\")\n# generate_random_unbalanced_tree(args)\n \n\nimport os \nimport json\n\nWORKSPACE_DIR='/home/r5akhava/private-decision-tree-evaluation/experiments'\n\nif __name__ == '__main__':\n write_path = 'datasets_synthetic'\n for max_depth in range(2,21):\n for bitlength in [8, 12, 16, 24, 32]:\n for num_attributes in range(2, 200, 10):\n t = generate_balanced_tree_rec(max_depth, bitlength, num_attributes)\n with open(os.path.join(WORKSPACE_DIR, write_path, f'tree_depth_{max_depth}_n_{bitlength}_attr_{num_attributes}.json'), 'w+') as f:\n f.write(json.dumps(t.__dict__))\n\n","repo_name":"RasoulAM/private-decision-tree-evaluation","sub_path":"experiments/generate_random_tree.py","file_name":"generate_random_tree.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
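One way to sanity-check recursive_uniform_tree_generator above: a full binary tree with n internal nodes always has exactly n+1 leaves, so the preorder list must be 2n+1 entries long with that True/False mix. A sketch (it assumes the record's file is importable as generate_random_tree, with the pieces lost from the record, such as the Leaf/Internal classes and an import of math.comb for catalan, restored):

    from generate_random_tree import recursive_uniform_tree_generator

    n = 10  # number of internal nodes, chosen arbitrarily
    pre = recursive_uniform_tree_generator(n)

    assert len(pre) == 2 * n + 1
    assert pre.count(True) == n + 1   # True marks a leaf
    assert pre.count(False) == n      # False marks an internal node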
+{"seq_id":"35325417345","text":"import yaml\nimport sys, os\nimport optparse\nfrom lxml import etree\n\nrpm_ns=\"http://linux.duke.edu/metadata/rpm\"\npattern_ns=\"http://novell.com/package/metadata/suse/pattern\"\nNSMAP = {None : pattern_ns, \"rpm\": rpm_ns}\n\nNSMAP_GROUP = {None : pattern_ns, \"rpm\": rpm_ns, \"patterns\": pattern_ns}\n\ndef process_yaml(stream, version, release, xmlroot, nsmap_name, newobsapi):\n\t\"Process all documents in the yaml stream, appending a <pattern> element to xmlroot for each\"\n\n\tall_docs = yaml.load_all(stream, Loader = yaml.SafeLoader)\n\t\n\tfor y in all_docs:\n\t\t# <pattern>\n\t\tproot = etree.SubElement(xmlroot, \"pattern\", nsmap=nsmap_name)\n\t\t\n\t\t# <name>\n\t\tetree.SubElement(proot, \"name\").text = y['Name']\n\n\t\t# Old OBS isn't able to handle these options.\n\t\tif newobsapi:\n\t\t\t# <version>\n\t\t\tif 'Version' in y or version:\n\t\t\t\tentry = etree.SubElement(proot, \"version\")\n\t\t\t\tver = \"0\"\n\t\t\t\tif version:\n\t\t\t\t\tver = version\n\t\t\t\telse:\n\t\t\t\t\tver = y['Version']\n\n\t\t\t\t# Set to 0 by default as that is what OBS expects.\n\t\t\t\tepoch = \"0\"\n\t\t\t\tif 'Epoch' in y:\n\t\t\t\t\tepoch = y['Epoch']\n\n\t\t\t\t# As above...\n\t\t\t\trel = \"0\"\n\t\t\t\tif release:\n\t\t\t\t\trel = release\n\t\t\t\tif 'Release' in y:\n\t\t\t\t\trel = y['Release']\n\n\t\t\t\tentry.set('ver', \"%s\" % ver)\n\t\t\t\tentry.set('epoch', \"%s\" % epoch)\n\t\t\t\tentry.set('rel', \"%s\" % rel)\n\n\t\t\t# <arch>\n\t\t\tif 'Arch' in y:\n\t\t\t\tetree.SubElement(proot, \"arch\").text = \"%s\" % y['Arch']\n\n\t\t# <summary>\n\t\tetree.SubElement(proot, \"summary\").text = y['Summary']\n\t\t# <description>\n\t\tetree.SubElement(proot, \"description\").text = y['Description']\n\t\t# <uservisible>\n\t\tetree.SubElement(proot, \"uservisible\")\n\t\t# <category>\n\t\tcat = etree.SubElement(proot, \"category\")\n\t\tcat.text = \"Base Group\"\n\t\tcat.set(\"lang\", \"en\")\n\n\t\tpackage_keys = ['Packages','Conflicts', 'Requires', 'Recommends', 'Suggests', 'Provides', 'Obsoletes']\n\t\tfor key in package_keys:\n\t\t\tif key not in y:\n\t\t\t\tcontinue\n\n\t\t\tcollect = y[key]\n\t\t\tif key == \"Packages\":\n\t\t\t\t# Support obsoleted keys, this should be removed in the future\n\t\t\t\tkey = \"Requires\"\n\t\t\t\tprint (\"WARNING: Obsoleted key 'Packages' in .yaml please change to 'Requires'.\")\n\t\t\t\n\t\t\treq = etree.SubElement(proot, \"{%s}%s\" % (rpm_ns,key.lower()))\n\n\t\t\tfor p in collect:\n\t\t\t\tif type(p).__name__=='dict':\n\t\t\t\t\tprint (\"ERROR: Found dict and expected string value. 
'%s'\" % (p))\n\t\t\t\t\tsys.exit(1)\n\t\t\t\tentry = etree.SubElement(req, \"{%s}entry\" %rpm_ns)\n\n\t\t\t\tname = p\n\t\t\t\tver = None\n\t\t\t\top_in = [\">=\", \"<=\", \">\", \"<\", \"=\"]\n\t\t\t\top_out = [\"GE\", \"LE\", \"GT\", \"LT\", \"EQ\"]\n\t\t\t\topc = 0\n\t\t\t\tfor op in op_in:\n\t\t\t\t\tif op in p:\n\t\t\t\t\t\tname, ver = p.split(op)\n\t\t\t\t\t\tbreak\n\t\t\t\t\topc = opc + 1\n\n\t\t\t\tentry.set(\"name\", name.strip())\n\t\t\t\tif ver:\n\t\t\t\t\tentry.set(\"flags\", \"%s\" % (op_out[opc]))\n\t\t\t\t\tentry.set(\"ver\", \"%s\" % (ver.strip()))\n\ndef create_patterns(patterns_dir, version, release, outputdir, newobsapi):\n\tdirlist = os.listdir(patterns_dir)\n\tdirlist.sort()\n\tfor f in dirlist:\n\t\tif not f.endswith('.yaml'):\n\t\t\tcontinue\n\t\t\n\t\tstream = open(\"%s/%s\" %(patterns_dir,f), 'r')\n\t\txmlroot = etree.Element(\"temporary_root\", nsmap=NSMAP)\n\t\t\n\t\tprocess_yaml(stream, version, release, xmlroot, NSMAP, newobsapi)\n\n\t\tfor pattern in xmlroot.findall(\"pattern\"):\n\t\t\t\n\t\t\tname = pattern.find(\"name\")\n\t\t\tif name is None:\n\t\t\t\tprint (\"Pattern didn't have a name, skipping.\")\n\t\t\t\tcontinue\n\t\t\toutput_file = \"%s/%s.xml\" % (outputdir,name.text.lower())\n\t\t\tprint (\"Working on %s\" % (output_file))\n\n\t\t\tetree.ElementTree(pattern).write(output_file, pretty_print=True)\n\ndef merge_patterns(patterns_dir, version, release, outputdir, newobsapi):\n\txmlroot = etree.Element(\"patterns\")\n\toutput_file = \"%s/patterns.xml\" % (outputdir)\n\tdirlist = os.listdir(patterns_dir)\n\tdirlist.sort()\n\n\tfor f in dirlist:\n\t\tif not f.endswith('.yaml'):\n\t\t\tcontinue\n\t\tprint (\"Merging %s to %s.\" % (f,output_file))\n\t\tstream = open(\"%s/%s\" %(patterns_dir,f), 'r')\n\t\tprocess_yaml(stream, version, release, xmlroot, NSMAP_GROUP, newobsapi)\n\n\tpatterns = xmlroot.findall(\"pattern\")\n\txmlroot.set('count', \"%d\" % (len(patterns)))\n\n\tetree.ElementTree(xmlroot).write(output_file, pretty_print=True)\n\nif __name__ == '__main__':\n\tparser = optparse.OptionParser()\n\n\tparser.add_option(\"\", \"--patternxml\", action=\"store_true\", dest=\"patternxml\",\n\t\t\tdefault=False,\n\t\t\thelp=\"Create separated pattern XML file for each pattern.\")\n\tparser.add_option(\"\", \"--patternsxml\", action=\"store_true\", dest=\"patternsxml\",\n\t\t\tdefault=False,\n\t\t\thelp=\"Create merged patterns.xml from all the available patterns.\")\n\tparser.add_option(\"\", \"--groupxml\", action=\"store_true\", dest=\"groupxml\",\n\t\t\tdefault=False,\n\t\t\thelp=\"Create group.xml.\")\n\tparser.add_option(\"-p\", \"--patterndir\", type=\"string\", dest=\"patterndir\",\n\t\t\tdefault=None,\n\t\t\thelp=\"Directory where the pattern .yaml files are located.\")\n\tparser.add_option(\"-o\", \"--outputdir\", type=\"string\", dest=\"outputdir\",\n\t\t\tdefault=\".\",\n\t\t\thelp=\"Output directory where the resulting .xml files are created.\")\n\tparser.add_option(\"\", \"--old-obs-xml-format\", action=\"store_false\", dest=\"newobsapi\",\n\t\t\tdefault=True,\n\t\t\thelp=\"The old OBS api isn't able to handle the newer xml format.\")\n\tparser.add_option(\"--version\", type=\"string\", dest=\"version\", default=None, help=\"Version number\")\n\tparser.add_option(\"--release\", type=\"string\", dest=\"release\", default=None, help=\"Release number\")\n\t\n\t(options, args) = parser.parse_args()\n\t\n\tif (options.groupxml):\n\t\tprint (\"ERROR: Groupxml isn't supported atm.\")\n\t\texit(1)\n\n\tif (not options.patternsxml and not 
options.patternxml):\n\t\t# Default to patternxml.\n\t\toptions.patternxml = True\n\t\n\tif (not options.patterndir or not os.path.exists(options.patterndir)):\n\t\tprint (\"Error: Pattern dir '%s' doesn't exist.\" % (options.patterndir))\n\t\texit(1)\n\t\n\tif options.outputdir and not os.path.exists(options.outputdir):\n\t\tos.makedirs(options.outputdir)\n\t\n\tif options.patternxml:\n\t\tcreate_patterns(options.patterndir, options.version, options.release, options.outputdir, options.newobsapi)\n\n\tif options.patternsxml:\n\t\tmerge_patterns(options.patterndir, options.version, options.release, options.outputdir, options.newobsapi)\n\n","repo_name":"sailfishos/repomd-pattern-builder","sub_path":"repomd-pattern-builder.py","file_name":"repomd-pattern-builder.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26336782497","text":"'''\nReplace U+2019 RIGHT SINGLE QUOTATION MARK with U+02BC MODIFIER LETTER APOSTROPHE in Taos entries.\n'''\n\nimport argparse\nimport itertools\nimport re\n\nimport pywikibot\nimport pywikibot.pagegenerators\nimport wikitextparser\n\nquote_mark = '\\N{RIGHT SINGLE QUOTATION MARK}'\nmod_letter = '\\N{MODIFIER LETTER APOSTROPHE}'\nmove_summary = 'Replace curly quotes (U+2019) with modifier letter apostrophes (U+02BC) per [[Wiktionary:Requests for moves, mergers and splits#Entries in CAT:Taos lemmas with curly apostrophes|discussion]].'\ntext_summary = move_summary\ncategory_names = ['Taos lemmas', 'Taos non-lemma forms', 'Taos noun forms']\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-l', '--limit', default=-1, type=int)\n\tparser.add_argument('-d', '--dry-run', action='store_true')\n\targs = parser.parse_args()\n\n\tsite = pywikibot.Site()\n\tpage_generators = [pywikibot.pagegenerators.CategorizedPageGenerator(pywikibot.Category(site, name)) for name in category_names]\n\twith open('taos_skipped.txt', 'w') as skipped_file:\n\t\tfor i, page in enumerate(itertools.chain.from_iterable(page_generators)):\n\t\t\tif 0 < args.limit <= i:\n\t\t\t\tprint(f'Limit reached.')\n\t\t\t\tbreak\n\n\t\t\tparsed_page = wikitextparser.parse(page.text)\n\t\t\tsections = parsed_page.get_sections(level=2)\n\t\t\tif len(sections) == 1 and sections[0].title.strip() == 'Taos':\n\t\t\t\t# Replace in page title\n\t\t\t\tif quote_mark in page.title():\n\t\t\t\t\tnew_title = page.title().replace(quote_mark, mod_letter)\n\t\t\t\t\tif args.dry_run:\n\t\t\t\t\t\tprint(f'Would move {page.title(as_link=True)} to [[{new_title}]].')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(f'Moving {page.title(as_link=True)} to [[{new_title}]].')\n\t\t\t\t\t\tpage.move(new_title, reason=move_summary)\n\t\t\t\t\t\t# prepare to read through the new page\n\t\t\t\t\t\tpage = pywikibot.page.Page(site, new_title)\n\t\t\t\t\t\tparsed_page = wikitextparser.parse(page.text)\n\t\t\t\t\t\tsections = parsed_page.get_sections(level=2)\n\n\t\t\t\t# Replace in page text\n\t\t\t\tsection_lines = sections[0].contents.splitlines()\n\t\t\t\tsection_sub_count = 0\n\t\t\t\tprint(f'Reading {page.title(as_link=True)}...')\n\t\t\t\tfor j, line in enumerate(section_lines):\n\t\t\t\t\tsection_lines[j], line_sub_count = re.subn(f'(?<=\\\\w){quote_mark}(?=\\\\w)', mod_letter, line)\n\t\t\t\t\tif line_sub_count:\n\t\t\t\t\t\tprint(f'Before: ' + line.encode('unicode-escape').decode())\n\t\t\t\t\t\tprint(f' After: ' + section_lines[j].encode('unicode-escape').decode())\n\t\t\t\t\t\tsection_sub_count += line_sub_count\n\t\t\t\tif section_sub_count:\n\t\t\t\t\tsections[0].contents = '\\n'.join(section_lines)\n\t\t\t\t\tpage.text = str(parsed_page)\n\t\t\t\t\tif args.dry_run:\n\t\t\t\t\t\twith open(f'{i}-{page.title()}.wiki', 'w') as saveFile:\n\t\t\t\t\t\t\tsaveFile.write(page.text)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpage.save(summary=text_summary, botflag=True, quiet=False)\n\t\t\telse:\n\t\t\t\tprint(page.title(), file=skipped_file)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"excarnateSojourner/wiktionary-bot","sub_path":"taos_apostrophes.py","file_name":"taos_apostrophes.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
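The lookbehind/lookahead pair in the record above only replaces a quotation mark flanked by word characters, so genuine quoting punctuation survives. A standalone check of that regex (the sample string is made up):

    import re

    quote_mark = '\N{RIGHT SINGLE QUOTATION MARK}'
    mod_letter = '\N{MODIFIER LETTER APOSTROPHE}'

    line, count = re.subn(f'(?<=\\w){quote_mark}(?=\\w)', mod_letter, 'p’o’one said ’hi’')
    assert count == 2                     # both word-internal marks replaced
    assert line == 'pʼoʼone said ’hi’'    # the quoting marks are untouched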
+{"seq_id":"140677335","text":"from collections import namedtuple\n\nimport sympy as sp\n\nExprResult = namedtuple('ExprResult',\n                        'symbols,expr,domains,state,kwargs')\n\n\nclass MetricResult(object):\n    \"\"\"Serializable result storing metrics\"\"\"\n    __slots__ = 'effect_symbols', 'effect_latex'\n\n    def __init__(self,\n                 symbols,\n                 effect):\n        if not isinstance(symbols, (list, tuple)):\n            symbols = (symbols,)\n\n        symbols = [s.name if hasattr(s, 'name') else s\n                   for s in symbols]\n        self.effect_symbols = symbols\n        self.effect_latex = (effect if isinstance(effect, str) else\n                             sp.latex(effect))\n\n    def as_dict(self):\n        return {\n            'effect_symbols': self.effect_symbols,\n            'effect_latex': self.effect_latex,\n        }\n\n    @classmethod\n    def from_dict(cls, d):\n        # bypass __init__ and restore the serialized slots directly\n        obj = cls.__new__(cls)\n        obj.effect_symbols = d['effect_symbols']\n        obj.effect_latex = d['effect_latex']\n        return obj\n","repo_name":"craymichael/PostHocExplainerEvaluation","sub_path":"posthoceval/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
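A round-trip sketch for the as_dict/from_dict pair above (it assumes sympy is installed and the record's file is importable as results):

    import sympy as sp

    from results import MetricResult

    x = sp.Symbol('x')
    m = MetricResult(x, x**2 + 1)

    m2 = MetricResult.from_dict(m.as_dict())
    assert m2.effect_symbols == ['x']
    assert m2.effect_latex == m.effect_latex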
+{"seq_id":"13624630584","text":"import random\nfrom datetime import datetime\n\nfrom django.views.generic import TemplateView\n\nfrom registration import settings\nfrom users.models import RegistrationCode\n\n\nclass IndexView(TemplateView):\n    template_name = 'base.html'\n\n\nclass BotUrlView(TemplateView):\n    template_name = 'bot.html'\n\n    def get_context_data(self, **kwargs):\n        context = super(BotUrlView, self).get_context_data(**kwargs)\n        context['bot_name'] = settings.TELEGRAM_BOT_NAME\n        if self.request.GET.get('generate') == 'yes':\n            code = random.randint(10_000, 99_999)  # randint is inclusive on both ends, so keep codes at five digits\n            context['reg_code'] = code\n            user = self.request.user\n\n            if RegistrationCode.objects.filter(user__exact=user):\n                user.reg_code.code = code\n                user.reg_code.created_at = datetime.now()\n                user.reg_code.save()\n            else:\n                reg_code = RegistrationCode(code=code)\n                user.reg_code = reg_code\n                reg_code.save()\n        return context\n","repo_name":"sch0nik/registration","sub_path":"registration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11784215976","text":"##################################################\r\n# This file contains classification algorithms.\r\n# Callable function is classification, which returns the true labels followed by one prediction list per entry in the classifiers array.\r\n##################################################\r\n\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn import tree\r\nimport os\r\nimport sys\r\nimport warnings\r\nfrom sklearn import preprocessing\r\nfrom inoutmd import read\r\nimport numpy as np\r\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\r\nimport tensorflow as tf\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\n\r\n\r\n# Principal classification function: given a path where the DB is, an array of classifiers, and an amount of folds, it returns\r\n# the true labels plus one prediction list per classifier\r\n\r\n\r\ndef classification(i, j, k, n, classifiers, folds):\r\n    \"\"\"\r\n    This function takes as input path indices and a list of classifier indices and returns a list holding the true\r\n    labels and the predictions generated from the file represented by the indices and each one of the classifiers.\r\n\r\n    :param i: DB index\r\n    :param j: MDT index\r\n    :param k: Instance index\r\n    :param n: IM index\r\n    :param classifiers: list of classifier indices (usually range(0, 15))\r\n    :param folds: Number of folds. Fixed beforehand.\r\n    :return: List whose first element is the true labels and whose remaining elements are each classifier's predictions\r\n    \"\"\"\r\n    res = None\r\n    try:\r\n        for c in classifiers:  # Foreach classifier,\r\n            if not res:\r\n                res = classify(i, j, k, n, c, folds)\r\n            else:\r\n                res.append(classify(i, j, k, n, c, folds)[1])  # Classify\r\n        res = np.array(res)\r\n        np.savetxt(\"ClassificationsMD/{0}-{1}-{2}-{3}.data\".format(str(i), str(j), str(k), str(n)), res, fmt='%i')\r\n    except Exception:\r\n        print(\"{0}-{1}-{2}-{3}.data\".format(str(i), str(j), str(k), str(n)))\r\n\r\n    return res\r\n\r\n\r\ndef classify(i, j, k, n, c, folds):\r\n    \"\"\"\r\n    This function reads the file represented by i, j, k, and n indices and generates predictions using \"folds\" folds\r\n    and the classifier indexed by c.\r\n    :param i: DB index\r\n    :param j: MDT index\r\n    :param k: Instance index\r\n    :param n: IM index\r\n    :param c: classifier index\r\n    :param folds: Number of folds. 
Fixed beforehand.\r\n :return: Accuracy generated from the use of classifier c on the data described in the file represented by the\r\n indices.\r\n \"\"\"\r\n\r\n # ##############Select classifier ################# #\r\n if c == 0:\r\n clf = LogisticRegression(penalty=\"l1\")\r\n elif c == 1:\r\n clf = LogisticRegression(penalty=\"l2\")\r\n elif c == 2:\r\n clf = LinearDiscriminantAnalysis(solver=\"lsqr\")\r\n elif c == 3:\r\n clf = QuadraticDiscriminantAnalysis(reg_param=0.01)\r\n elif c == 4:\r\n x = read(\"DataMD/{0}-{1}-{2}-{3}.data\".format(str(i), str(j), str(k), \"0\"), delimiter=\",\")\r\n size = x.shape[0]\r\n feature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=x.shape[1]-1)]\r\n clf = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[int(size/3*2), int(size/3)], n_classes=len(set(x[:, -1]))+1)\r\n #elif c == 5:\r\n #clf = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10])\r\n elif c == 6-1:\r\n clf = SVC(kernel='linear', C=1.0, tol=0.001, probability=True)\r\n elif c == 7-1:\r\n clf = SVC(kernel='poly', C=1.0, tol=0.01, probability=False, degree=2, cache_size=20000)\r\n elif c == 8-1:\r\n clf = SVC(kernel='rbf', C=1.0, gamma=0.10000000000000001, coef0=0, shrinking=True, probability=True)#RBFN\r\n elif c == 9-1:\r\n clf = GaussianNB() \r\n elif c == 10-1:\r\n clf = GradientBoostingClassifier(n_estimators=100, max_depth=11, subsample=1.0)\r\n elif c == 11-1:\r\n clf = RandomForestClassifier(n_estimators=10)\r\n elif c == 12-1:\r\n clf = tree.DecisionTreeClassifier() # CART, similar to c4.5\r\n elif c == 13-1:\r\n clf = KNeighborsClassifier(n_neighbors=1)\r\n elif c == 14-1:\r\n clf = KNeighborsClassifier(n_neighbors=3)\r\n #######################################################################\r\n\r\n full_predictions = []\r\n full_y = []\r\n\r\n for fold in range(0, folds):\r\n # For each stratified fold, we have one file. We read it.\r\n path = \"DataMD/\" + str(i) + \"-\" + str(j) + \"-\" + str(k) + \"-\" + str(fold) + \"-\" + str(n) + \".data\"\r\n x = read(path)\r\n x = preprocessing.Imputer().fit_transform(x)\r\n x[x == np.nan] = 0\r\n # Separate class (always in last position, watch impute.py)\r\n y = x[:, len(x[0, :])-1]\r\n x = np.delete(x, len(x[0, :])-1, 1)\r\n\r\n # If class is string, transform to numeric labels\r\n if isinstance(y[0], str):\r\n le = preprocessing.LabelEncoder()\r\n le.fit(y)\r\n y = le.transform(y)\r\n # Set where the limit between the train and testing is. 
Remember that we always write first the training\r\n # part and then the testing part.\r\n\r\n lim = int(x.shape[0] / folds * (folds - 1))\r\n x_train = x[:lim,:]\r\n x_test = x[lim:,:]\r\n y_train = y[:lim].astype(int)\r\n y_test = y[lim:].astype(int)\r\n\r\n def get_train_inputs():\r\n x = tf.constant(x_train)\r\n y = tf.constant(y_train)\r\n\r\n return x, y\r\n\r\n def get_test_inputs():\r\n x = tf.constant(x_test)\r\n y = tf.constant(y_test)\r\n\r\n return x, y\r\n\r\n def get_predict_input():\r\n x = tf.constant(x_test)\r\n\r\n return x\r\n\r\n if c == 4:\r\n model = clf.fit(input_fn=get_train_inputs, max_steps=20000) # Model creation\r\n predictions = model.predict(input_fn=get_predict_input)\r\n # acc = clf.evaluate(input_fn=get_test_inputs, steps=1)[\"accuracy\"]\r\n else:\r\n model = clf.fit(x_train, y_train) # Model creation\r\n predictions = model.predict(x_test)\r\n\r\n full_predictions += list(predictions)\r\n full_y += y_test.tolist()\r\n #print(full_predictions)\r\n # print(accuracy_score(full_y, full_predictions))\r\n return [full_y, full_predictions]\r\n\r\n\r\ndef nn(x_train, y_train, x_test, y_test):\r\n\r\n \"\"\"\r\n A Convolutional Network implementation example using TensorFlow library.\r\n This example is using the MNIST database of handwritten digits\r\n (http://yann.lecun.com/exdb/mnist/)\r\n\r\n Author: Aymeric Damien\r\n Project: https://github.com/aymericdamien/TensorFlow-Examples/\r\n \"\"\"\r\n\r\n # Parameters\r\n learning_rate = 0.001\r\n training_iters = 200000\r\n batch_size = 128\r\n\r\n # Network Parameters\r\n n_input = x_train.shape[1]\r\n n_classes = len(set(y_train)) # MNIST total classes (0-9 digits)\r\n dropout = 0.75 # Dropout, probability to keep units\r\n\r\n\r\n # tf Graph input\r\n x = tf.placeholder(tf.float32, [None, n_input])\r\n y = tf.placeholder(tf.float32, [None, n_classes])\r\n keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)\r\n\r\n\r\n # Create some wrappers for simplicity\r\n def conv2d(x, W, b, strides=1):\r\n # Conv2D wrapper, with bias and relu activation\r\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\r\n x = tf.nn.bias_add(x, b)\r\n return tf.nn.relu(x)\r\n\r\n\r\n def maxpool2d(x, k=2):\r\n # MaxPool2D wrapper\r\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\r\n padding='SAME')\r\n\r\n\r\n # Create model\r\n def conv_net(x, weights, biases, dropout):\r\n\r\n\r\n # Convolution Layer\r\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\r\n # Max Pooling (down-sampling)\r\n conv1 = maxpool2d(conv1, k=2)\r\n\r\n # Convolution Layer\r\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\r\n # Max Pooling (down-sampling)\r\n conv2 = maxpool2d(conv2, k=2)\r\n\r\n # Fully connected layer\r\n # Reshape conv2 output to fit fully connected layer input\r\n fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])\r\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\r\n fc1 = tf.nn.relu(fc1)\r\n # Apply Dropout\r\n fc1 = tf.nn.dropout(fc1, dropout)\r\n\r\n # Output, class prediction\r\n out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\r\n return out\r\n\r\n # Store layers weight & bias\r\n weights = {\r\n # 5x5 conv, 1 input, 32 outputs\r\n 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),\r\n # 5x5 conv, 32 inputs, 64 outputs\r\n 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),\r\n # fully connected, 7*7*64 inputs, 1024 outputs\r\n 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),\r\n # 1024 inputs, n outputs (class 
prediction)\r\n 'out': tf.Variable(tf.random_normal([1024, n_classes]))\r\n }\r\n\r\n biases = {\r\n 'bc1': tf.Variable(tf.random_normal([32])),\r\n 'bc2': tf.Variable(tf.random_normal([64])),\r\n 'bd1': tf.Variable(tf.random_normal([1024])),\r\n 'out': tf.Variable(tf.random_normal([n_classes]))\r\n }\r\n\r\n # Construct model\r\n pred = conv_net(x, weights, biases, keep_prob)\r\n\r\n # Define loss and optimizer\r\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\r\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n # Evaluate model\r\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\r\n # Initializing the variables\r\n init = tf.global_variables_initializer()\r\n\r\n # Launch the graph\r\n with tf.Session() as sess:\r\n sess.run(init)\r\n step = 1\r\n # Keep training until reach max iterations\r\n while step * batch_size < training_iters:\r\n batch_x = x[((step-1) * batch_size):(step * batch_size),:], batch_y = y[((step-1) * batch_size):(step * batch_size)]\r\n # Run optimization op (backprop)\r\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\r\n keep_prob: dropout})\r\n step += 1\r\n print(\"Optimization Finished!\")\r\n\r\n # Calculate accuracy for 256 mnist test images\r\n return \"Testing Accuracy:\", \\\r\n sess.run(accuracy, feed_dict={x: x_test,\r\n y: y_test,\r\n keep_prob: 1.})\r\n\r\n\"\"\"\r\npath = \"Data/\" + str(0) + \"-\" + str(0) + \"-\" + str(0) + \"-\" + str(0) + \"-\" + str(0) + \".data\"\r\nx = read(path)\r\nx = preprocessing.Imputer().fit_transform(x)\r\n# Separate class (always in last position, watch impute.py)\r\ny = x[:, len(x[0, :])-1]\r\nx = np.delete(x, len(x[0, :])-1, 1)\r\n\r\na = nn(x[:int(x.shape[0]/5*4),:], y[:int(y.shape[0]/5*4)], x[int(x.shape[0]/5*4):,:], y[int(y.shape[0]/5*4):])\r\nprint(a)\r\n\"\"\"\r\n","repo_name":"unaigarciarena/Discrete","sub_path":"Escritorio/DiscreteCode/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":11331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7162717903","text":"\nclass Solution:\n def segregate0and1(self, arr, n):\n low = 0\n high = len(arr)-1\n while (low <= high):\n if arr[low] == 0:\n low = low +1\n else:\n arr[low],arr[high]=arr[high],arr[low]\n high = high -1\n return arr\n \n\n\nif __name__ == '__main__':\n tc = int(input())\n while tc > 0:\n n = int(input())\n arr = list(map(int, input().strip().split()))\n ob = Solution()\n ob.segregate0and1(arr, n)\n print(*arr)\n tc -= 1\n\n","repo_name":"zaidjubapu/dsa450problems","sub_path":"companiesproblems/04Segregate0An1.py","file_name":"04Segregate0An1.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71725137426","text":"\"\"\"\n给出一个由无重复的正整数组成的集合,找出其中最大的整除子集,子集中任意一对 (Si,Sj) 都要满足:Si % Sj = 0 或 Sj % Si = 0。\n\n如果有多个目标子集,返回其中任何一个均可。\n\n \n\n示例 1:\n\n输入: [1,2,3]\n输出: [1,2] (当然, [1,3] 也正确)\n示例 2:\n\n输入: [1,2,4,8]\n输出: [1,2,4,8]\n\"\"\"\n\nclass Solution(object):\n def largestDivisibleSubset(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n if not nums and len(nums) == 0:\n return []\n\n nums.sort()\n # 我们假设每次都从当前数字开始,看看能形成的最长方案是多少\n dp = [1] * len(nums)\n\n # k记录着我们能形成的最大子集的下标在nums的哪里\n k = 0\n for i in range(len(nums)):\n for j in range(0, i):\n\n if nums[i] % nums[j] == 0:\n dp[i] = max(dp[i], dp[j] + 1)\n # 我们把能形成的最大子集下标更新到k\n if dp[i] > dp[k]:\n k = i\n\n # 我们先把最大的那一位给加进答案\n res = [nums[k]]\n\n while dp[k] > 1:\n for i in range(k):\n # 我们要把刚刚的递推顺序从后往前找出来\n if nums[k] % nums[i] == 0 and dp[k] == dp[i] + 1:\n # 然后把上一个怎么变过来的\n # 加到res里去\n res.append(nums[i])\n k = i\n break\n\n # res就是最长的递增子集方案了\n return res\n\n# https://www.acwing.com/video/1754/","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/DP/368m. 最大整除子集(同求最大递增集合300).py","file_name":"368m. 最大整除子集(同求最大递增集合300).py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26421154198","text":"from django.shortcuts import render,redirect\r\nfrom .forms import UserCreateForm,UserProfileForm,FriendRequestForm\r\nfrom django.contrib.auth import authenticate,login,logout\r\nfrom django.contrib.auth.models import User\r\nfrom .models import UserProfile,FriendRequest\r\nfrom django.contrib.auth.decorators import login_required\r\n# Create your views here.\r\n\r\n@login_required(login_url='login')\r\ndef HomePage(request):\r\n profile=UserProfile.objects.get(user=request.user)\r\n friends=profile.friends.all()\r\n frs=[]\r\n try :\r\n frr=FriendRequest.objects.filter(receiver=request.user,is_active=True)\r\n frs=[]\r\n for i in frr:\r\n frs.append(i.sender.username)\r\n except:\r\n frs=['No Friends']\r\n context={'friends':friends,'frs':frs}\r\n\r\n return(render(request,'home.html',context))\r\n\r\n\r\n\r\n\r\ndef LoginPage(request):\r\n if(request.user.is_authenticated):\r\n return redirect('home')\r\n if(request.method==\"POST\"):\r\n username=request.POST.get('username')\r\n password=request.POST.get('password')\r\n user=authenticate(request,username=username,password=password)\r\n if user is not None:\r\n login(request,user)\r\n return redirect('home')\r\n return(render(request,'Login.html'))\r\n\r\n\r\n@login_required(login_url='login')\r\ndef SendFrReq(request):\r\n frf = FriendRequestForm()\r\n msg=''\r\n if (request.method == \"POST\"):\r\n fr = FriendRequestForm(request.POST)\r\n if(fr.is_valid()):\r\n freq=fr.save(commit=False)\r\n freq.sender=request.user\r\n freq.save()\r\n msg=\"Friend Request Sent\"\r\n fr.sender = request.user\r\n\r\n return (render(request, 'SendFriendReq.html', {'friendRF': frf,'msg':msg}))\r\n\r\n\r\n\r\ndef RegisterPage(request):\r\n if(request.user.is_authenticated):\r\n return redirect('home')\r\n if(request.method==\"POST\"):\r\n form=UserCreateForm(request.POST)\r\n profile_form=UserProfileForm(request.POST)\r\n\r\n if(form.is_valid() and profile_form.is_valid()):\r\n user=form.save()\r\n profile=profile_form.save(commit=False)\r\n profile.user=user\r\n profile.save()\r\n return redirect('login')\r\n\r\n form = UserCreateForm()\r\n user_profile_form=UserProfileForm()\r\n return(render(request,'Register.html',{'form':form,'userprofile':user_profile_form}))\r\n\r\n@login_required(login_url='login')\r\ndef logoutPage(request):\r\n logout(request)\r\n return redirect('login')\r\n\r\n\r\ndef SearchChat(request):\r\n\r\n return(render(request,'ChatSearch.html'))\r\n\r\n@login_required(login_url='login')\r\ndef AcceptFR(request,username):\r\n usr=User.objects.get(username=username)\r\n frr=FriendRequest.objects.get(sender=usr,receiver=request.user)\r\n frr.accept()\r\n frr.save()\r\n return redirect('SendFR')\r\n","repo_name":"Murgowt/WeConnect","sub_path":"Backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42511747114","text":"input_string = 'dog goat dad duck doodle never'\ninput_string2 = 'racecar'\ninput_string3 = 'A Toyota! Race fast, safe car!'\n\ndef palindrome_search(input_str):\n result = list()\n result = palindromes(input_str, result)\n input_str = input_str.replace(' ', '').lower().replace(',', '').replace('!', '')\n result = palindromes(input_str, result)\n result = sorted(set(result))\n return result\n\ndef palindromes(string, search_result):\n length = 3\n while length <= len(string):\n for i in range(0, len(string) - 1, 1):\n x = string[i:i + length:1]\n if x == x[:: -1]:\n search_result.append(x)\n length += 1\n return search_result\n\n\n\n\nprint(palindrome_search(input_string))\nprint(palindrome_search(input_string2))\nprint(palindrome_search(input_string3))\n","repo_name":"green-fox-academy/FarkasLaszlo","sub_path":"week-02/day-05/03 Palindrome searcher.py","file_name":"03 Palindrome searcher.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71725328146","text":"\"\"\"\n给定一个数组 A[0,1,…,n-1],请构建一个数组 B[0,1,…,n-1],\n其中 B 中的元素 B[i]=A[0]×A[1]×…×A[i-1]×A[i+1]×…×A[n-1]。不能使用除法。\n\n\n示例:\n输入: [1,2,3,4,5]\n输出: [120,60,40,30,24]\n\"\"\"\n\n\nclass Solution(object):\n def constructArr(self, a):\n \"\"\"\n :type a: List[int]\n :rtype: List[int]\n \"\"\"\n\n length = len(a)\n b = [1 for _ in range(length)]\n\n for i in range(1, length):\n b[i] = b[i - 1] * a[i - 1]\n\n temp = 1\n # 因为对于我们来说,最开始的话,是要从最边缘既是a[i+1]开始的\n # b[i] a[i+1], i+1 = length-1\n for i in range(length - 2, -1, -1):\n temp *= a[i + 1]\n b[i] *= temp\n\n return b","repo_name":"Andrewlearning/Leetcoding","sub_path":"剑指offer/面试题66. 构建乘积数组(倒三角相乘238).py","file_name":"面试题66. 构建乘积数组(倒三角相乘238).py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71816851666","text":"limitToResultsContaining = 'NC'\n\nimport rhinoscriptsyntax as rs\nfhand = open('brickstacking/data analysis/yelp_academic_dataset_business.json')\nxCoords = []\nyCoords = []\npoints = []\n\nfor line in fhand:\n if limitToResultsContaining in line:\n line.strip()\n pos = line.find('latitude')\n pos2 = line.find(',',pos)\n pos3 = line.find('longitude')\n pos4 = line.find(',',pos3)\n xCoords.append(float(line[pos+10:pos2]))\n yCoords.append(float(line[pos3+11:pos4]))\n\nfor i in range(len(xCoords)):\n points.append(rs.AddPoint(xCoords[i],yCoords[i],0))\n\nfor i in range(len(points)):\n\tclosest = rs.PointArrayClosestPoint(points,points[i])\n\tdistance = rs.VectorLength(rs.VectorCreate(closest,points[i]))\n\t'''if distance > 1 :\n\t\tpoints.pop(i)'''","repo_name":"rachelalutes/ideas_seminar","sub_path":"brickstacking/data_analysis/Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34796896969","text":"\n\n\nclass FunctionTableEntry:\n def __init__(self,id ,name, type, scope = None, belongsTo = None):\n self.id = id\n self.name = name\n self.type = type\n self.scope = scope\n self.belongsTo = belongsTo\n def __str__(self):\n return '{0} {1} {2} {3} {4}'.format(self.id,self.name, self.type, self.scope, self.belongsTo)\n\nclass FunctionTable:\n def __init__(self):\n self.entries = []\n abort = FunctionTableEntry(1,\"abort\",\"Object\", 1,\"Object\")\n type_name= FunctionTableEntry(2,\"type_name\",\"String\", 1,\"Object\")\n copy = FunctionTableEntry(3,\"copy\",\"OBJECT\", 1,\"Object\")\n out_string = FunctionTableEntry(4,\"out_string\",\"IO\", 1,\"IO\")\n out_int = FunctionTableEntry(5,\"out_int\",\"IO\", 1,\"IO\")\n in_string = FunctionTableEntry(6,\"in_string\",\"String\", 1,\"IO\")\n in_int = FunctionTableEntry(7,\"in_int\",\"Int\", 1,\"IO\")\n length = FunctionTableEntry(8,\"length\",\"Int\", 1,\"String\")\n concat = FunctionTableEntry(9,\"concat\",\"String\", 1,\"String\")\n substr = FunctionTableEntry(10,\"substr\",\"String\", 1,\"String\")\n self.entries.append(abort)\n self.entries.append(type_name)\n self.entries.append(copy)\n self.entries.append(out_string)\n self.entries.append(out_int)\n self.entries.append(in_string)\n self.entries.append(in_int)\n self.entries.append(length)\n self.entries.append(concat)\n self.entries.append(substr)\n\n def addEntry(self, FunctionTableEntry):\n if self.findEntryByName(FunctionTableEntry.name, FunctionTableEntry.belongsTo) is None:\n self.entries.append(FunctionTableEntry)\n return True\n else:\n return False\n\n def findEntryByName(self, name, belongsTo):\n for entry in self.entries:\n if entry.name == name and entry.belongsTo == belongsTo:\n return entry \n return None\n \n def findEntryByID(self, id):\n for entry in self.entries:\n if entry.id == id:\n return entry\n return None\n\n","repo_name":"michelebenvenuto/compiladores2022","sub_path":"tables/FunctionTable.py","file_name":"FunctionTable.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11633500592","text":"import math\nimport threading\nfrom typing import List, Optional\n\nfrom consts import *\nfrom messaging import send_message, Message, MessageType\nfrom utils import create_timeout, log\nimport copy\n\n\nclass Cluster:\n def __init__(self, ip_network: str, ip_offset: int, nodes_count: int, current_node: int):\n self.ip_network = ip_network\n self.ip_offset = ip_offset\n self.nodes_count = nodes_count\n self.current_node = current_node\n\n self.leader = None\n\n self.ips = [self.ip_network + \".\" + str(self.ip_offset + i) for i in range(self.nodes_count)]\n self.colors: List[Optional[str]] = [None for i in range(self.nodes_count)]\n self.alive = [False for i in range(self.nodes_count)]\n\n self.color_nodes_timer = None\n self.colors_check_timer: threading.Timer = create_timeout(COLORS_CHECK_INTERVAL, self.color_check)\n\n def get_higher_ids(self, higher_than) -> List[int]:\n return [i for i in range(higher_than + 1, self.nodes_count)]\n\n def get_lower_ids(self, lower_than) -> List[int]:\n return [i for i in range(0, lower_than)]\n\n def convert_to_ips(self, ids: List[int]):\n return [self.ips[i] for i in ids]\n\n def leader_changed(self, new_leader):\n self.leader = new_leader\n self.color_check()\n\n def color_check(self):\n self.colors_check_timer: threading.Timer = create_timeout(COLORS_CHECK_INTERVAL, self.color_check)\n\n if self.leader == self.current_node:\n self.check_alive()\n self.color_nodes_timer = create_timeout(ALIVE_TIMEOUT, self.color_nodes) # check for check alive to finish\n\n def check_alive(self):\n for i in range(self.nodes_count):\n self.alive[i] = False\n send_message(self.ips[i], Message(MessageType.PING, self.current_node, \"\"))\n\n def pong_received(self, node_id):\n self.alive[node_id] = True\n\n def color_nodes(self):\n if self.leader is None:\n return\n\n changed = self.__determine_coloring()\n\n if changed:\n for i in range(self.nodes_count):\n send_message(self.ips[i], Message(MessageType.COLOR, self.current_node, self.colors[i]))\n\n def __determine_coloring(self):\n total_live = self.alive.count(True)\n log(\"alive: \" + str(self.alive) + \" (count: \" + str(total_live) + \")\")\n\n green_nodes_left = math.ceil(total_live * GREEN_COLOR_REQUIRED)\n\n new_colors = [None for i in range(self.nodes_count)]\n\n new_colors[self.leader] = \"GREEN\" # leader always green\n green_nodes_left -= 1\n\n for i in range(self.nodes_count):\n if i == self.leader:\n continue\n\n if self.alive[i]:\n if i < green_nodes_left:\n new_colors[i] = \"GREEN\"\n green_nodes_left -= 1\n else:\n new_colors[i] = \"RED\"\n else:\n new_colors[i] = \"-\"\n\n if self.colors == new_colors:\n log(\"coloring ok\")\n log(str(self.colors))\n return False\n else:\n self.colors = new_colors\n log(\"new colors assigned\")\n log(str(self.colors))\n return True\n","repo_name":"lukasvlc3k/ds-01","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33124948930","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\nimport os\nimport math\nimport numpy as np\n\nclass Drawing:\n\n def __init__(self):\n return None\n\n def Baseline(self):\n for i in range(200):\n glLineWidth(2.0)\n glBegin(GL_LINES)\n if i==100:\n glColor3f(1,1,0) # x축\n glVertex3fv([1,0,0])\n glVertex3fv([0,0,0])\n glColor3f(0,1,0) # z축\n glVertex3fv([0,0,1])\n glVertex3fv([0,0,0])\n \n glColor3f(0.5,0.5,0.5)\n glVertex3fv([100,0,0])\n glVertex3fv([1,0,0])\n glVertex3fv([0,0,0])\n glVertex3fv([-100,0,0])\n glVertex3fv([0,0,100])\n glVertex3fv([0,0,1])\n glVertex3fv([0,0,0])\n glVertex3fv([0,0,-100])\n else :\n glColor3f(0.5,0.5,0.5)\n glVertex3fv([-100+i,0,-100])\n glVertex3fv([-100+i,0,100])\n glVertex3fv([100,0,-100+i])\n glVertex3fv([-100,0,-100+i])\n glEnd()\n\n glLineWidth(2.0)\n glBegin(GL_LINES) \n glColor3f(0,0,1)\n glVertex3fv([0,1,0])\n glVertex3fv([0,0,0])\n glEnd()\n\n\n def Box(self,degree):\n glutSolidCube(1.0)\n\n def Sphere(self,degree):\n glutSolidSphere(degree,30,30)\n\n def Draw_Skeleton(self,Pos,Euler,Scale,Name):\n glPushMatrix()\n glTranslatef(Pos[0],Pos[1],Pos[2])\n glMultMatrixf(Euler.T)\n glScalef(Scale[0],Scale[1],Scale[2])\n\n if Name == \"Ground\":\n glColor3f(0.1,0.1,0.0)\n self.Box(1.0)\n\n else :\n glColor3f(0.0,0.5,0.2)\n self.Sphere(1.0)\n glPopMatrix()\n glFlush()\n","repo_name":"Winteradio/JJOL_JAK","sub_path":"Drawing.py","file_name":"Drawing.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"43978725022","text":"#Step 1. Welcome the user\r\nprint('welcome to the multiplication table')\r\n#2)Step 2. Print out instructions\r\nprint('please enter the number you want to view its multiplication table')\r\n#3)Step 3. Ask the user to enter a value\r\nvalue= int(input('please enter a number:'))\r\n#4)Step 4. Print out the multiplication table\r\nfor i in range(1,13,1):\r\n print(value, '*', i, '=', value * i)\r\n#5)Step 5. Stop\r\nprint('stop')\r\n\r\nupper_limit= int(input('please input an upper limit:'))\r\nnumber_list = []\r\nprime_number_list = []\r\nupper_limit = upper_limit +1\r\nfor i in range(2, upper_limit, 1):\r\n number_list.append(i)\r\n print(len(number_list))\r\n\r\nprime_number= 2\r\n\r\nwhile(len(number_list)>0):\r\n for i in number_list:\r\n if(i % prime_number == 0):\r\n number_list.remove(i)\r\n print(len(number_list))\r\n prime_number_list.append(prime_number)\r\n prime_number = number_list.pop(0)\r\nprime_number_list.append(prime_number)\r\nprint(prime_number_list)\r\n\r\n \r\n","repo_name":"abdullateef28/c_l_project_shoyinka_lateef","sub_path":"codemultable.py","file_name":"codemultable.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34586487862","text":"#- Készítsen magyarországi mobilszám ellenőrző fv-t:\n#\t- Lehetséges mobil formátumok: \"06 20 123-45 67\", \"+36 301234567\", van más ??\n#\t- \"körzetszámok\": 20 30 70 50 31 ?\n\n\ndef mobilszamellenorzes(szam: str):\n szam = szam.replace(' ','')\n szam = szam.replace(' -','')\n if szam[0]=='+':\n szam = szam.replace('+ ','00')\n if szam[0:4] =='0036':\n szam=szam.replace('0036', '06')\n if szam[0:2] != '06':\n return False\n if szam[2:4] != '20'and szam[2:4] !='30' and szam[2:4] !='70' and szam[2:4] !='50':\n return False\n if len(szam) !=11:\n return False\n return True\n\n\nszam= input('Adja meg a telefonszámot:')\nif mobilszamellenorzes(szam) == True:\n print('A mobiltelefon szám helyes')\nelse:\n print('A mobiltelefon szám helyes ')","repo_name":"fabrykevin/agazati","sub_path":"agazatialapvizsgagyujt/python + web kész feladatok + leírás (saját)/Python/20230215/mobil.py","file_name":"mobil.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16804024904","text":"import os, sys\nimport numpy as np \n\ndef parse_args_from_config(config_path):\n\n import importlib.util\n \n spec = importlib.util.spec_from_file_location(\"get_hpyerparameters\", config_path )\n\n modulevar = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(modulevar)\n\n return modulevar\n\n\ndef tensor_rgb2bgr(image):\n permute = [2, 1, 0]\n return image[:, permute]\n\n\ndef print_dataset_dist(sample_dict, prefix):\n WARNING = '\\033[93m'\n ENDC = '\\033[0m'\n\n num_total_samples = 0\n\n for k, v in sample_dict.items():\n num_total_samples += v\n \n class_indices = list(sample_dict.keys())\n class_indices.sort()\n\n thres = 100 / len(sample_dict.keys())\n\n print('='*5, prefix, str(num_total_samples), '='*5)\n\n for class_idx in class_indices:\n k = class_idx\n v = sample_dict[class_idx]\n ratio = (v/num_total_samples) * 100\n message = '{} : {} ({:.4f}%)'.format(k, v, ratio)\n\n if ratio < thres:\n color = WARNING\n else:\n color = ENDC\n\n print ('{}{}{}'.format(color, message, ENDC))\n print ('') \n\ndef tensor2numpy(tensor_image):\n return tensor_image.cpu().numpy().astype(np.float32).transpose(1,2,0)","repo_name":"jeonggyu-kang/age_prediction","sub_path":"utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14268360968","text":"import discord, random\nfrom discord.ext import commands\nimport os\nimport getpass\nos.system('cls')\nprint(r'''\n .-._ _,-,\n `._`-._ _,-'_,'\n `._ `-._ _,-' _,'\n `._ `-._ __.-----.__ _,-' _,'\n `._ `#===\"\"\" \"\"\"===#' _,'\n `._/) ._ _. (\\_,'\n )*' **.__ __.** '*( \n # .==..__ \"\" \"\" __..==, # \nxo#1010 # `\"._(_). .(_)_.\"' # EV1L IN5IDE\nogu: nemo discord: xo#1010\nig: w3ax github: scxr''')\nTOKEN = getpass.getpass('Enter your token here : ')\nprefix = '!'\nbot = commands.Bot(command_prefix=prefix, self_bot=True)\n\n\n@bot.event\nasync def on_ready():\n print(\"Bot presence t u r n e d on ( ͡° ͜ʖ ͡°)\")\n\n\n@bot.command()\nasync def embed(ctx, *, message):\n message_arr = message.split('\\n')\n if len(message_arr) < 2:\n print('''[ERROR] Your message format should be the following:\\n\n !embed\n title here (required)\n description here (required)\n embed_thumbnail (optional)''')\n return\n title = message_arr[0]\n thumbnail_url = None\n description = message_arr[1]\n\n if len(message_arr) == 3:\n thumbnail_url = message_arr[2]\n\n embed = discord.Embed(title=title, description=description, colour=random.randint(0, 0xFFFFFF))\n if thumbnail_url != None:\n embed.set_thumbnail(url=thumbnail_url)\n await ctx.send(embed=embed)\n await ctx.message.delete()\n\n\n\nbot.run(TOKEN, bot=False)","repo_name":"scxr/selfbots","sub_path":"message_embedder.py","file_name":"message_embedder.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1857386914","text":"#doesn't work\nfile = open(\"taming.in\", \"r\")\noutput = open(\"taming.out\", \"w\")\nn=int(file.readline())\nbreakdays=file.readline()\nfile.close()\nbreakdays = list(breakdays.split(\" \"))\nif int(breakdays[0]) > 0:\n output.write('-1')\n\nbreakdays[0]='0'\nt=-1\nreq=0\npos=0\nj=n\nfor i in range(0,n):\n\n j-=1\n #if t != -1 and breakdays[j] != -1 and breakdays[j] #!= t:\n #output.write('-1')\n if(t == -1):\n\t t = int(breakdays[j])\n\t\n if(int(breakdays[j]) == -1):\n\t breakdays[i] = t\n\t\n if(int(breakdays[j]) == 0):\n\t req+=1\n\t\n if(int(breakdays[j]) == -1):\n\t pos+=1\n\t\n if(t > -1):\n\t t-=1\nans=str(req)+' '+str(req+pos)\n\noutput.write(ans)\noutput.close()\n","repo_name":"funnoodle11/USACO","sub_path":"2018FebBronze/taming.py","file_name":"taming.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42828037166","text":"#https://www.w3resource.com/python-exercises/python-basic-exercises.php\r\n#22. Write a Python program to count the number 4 in a given list.\r\n\r\nlist = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]\r\ncount = 0\r\n\r\nfor x in list:\r\n\tif x == 4:\r\n\t\tcount = count + 1\r\n\t\t\r\nprint(\"Found \" + str(count) + \" instances of 4.\")","repo_name":"benryan03/Python-Practice","sub_path":"basic-part1-exercise022-count.py","file_name":"basic-part1-exercise022-count.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9913942442","text":"from marshmallow import Schema, fields\nfrom marshmallow import ValidationError\n\nimport typing as t\nimport json\n\n\nclass InvalidInputErrot(Exception):\n \"\"\"Invalid model input.\"\"\"\n\n\n# List of column names to change before validation\nSYNTAX_ERROR_FIELD_MAP = {}\n\n\nclass TitanicDataRequestSchema(Schema):\n pclass = fields.Integer()\n sex = fields.Str()\n age = fields.Float(allow_none=True)\n sibsp = fields.Integer(allow_none=True)\n parch = fields.Integer(allow_none=True)\n fare = fields.Float(allow_none=True)\n cabin = fields.Str(allow_none=True)\n embarked = fields.Str(allow_none=True)\n title = fields.Str(allow_none=True)\n\n\ndef _filter_error_rows(errors: dict,\n validated_input: t.List[dict]) -> t.List[dict]:\n \"\"\"Remove input data rows with errors.\"\"\"\n\n indexes = errors.keys()\n # Delete them in reverse order to don't\n # throw off the subsequent indexes\n for index in sorted(indexes, reverse=True):\n del validated_input[index]\n\n return validated_input\n\n\ndef validate_inputs(input_data):\n \"\"\"Check prediction inputs against schema.\"\"\"\n\n # set many=True to allow passing in a list\n schema = TitanicDataRequestSchema(strict=True, many=True)\n\n # Convert syntax error field names (beginning with numbers)\n for dict in input_data:\n for key, value in SYNTAX_ERROR_FIELD_MAP.items():\n dict[value] = dict[key]\n del dict[key]\n\n errors = None\n try:\n schema.load(json.loads(input_data))\n except ValidationError as exc:\n errors = exc.messages\n print(f\"ERROR MSG: {exc.messages}\")\n print(f\"ERROR DATA: {exc.data}\")\n print(f\"ERROR FIELDS: {exc.fields}\")\n\n # convert syntax error field names back\n # NOTE: Never name your data fields with\n # numbers as the first letter\n for dict in input_data:\n for key, value in SYNTAX_ERROR_FIELD_MAP.items():\n dict[key] = dict[value]\n del dict[value]\n\n if errors:\n validated_input = _filter_error_rows(\n errors=errors, validated_input=input_data\n )\n else:\n validated_input = input_data\n\n return validated_input, errors\n","repo_name":"JCupe17/deploying-ml-test","sub_path":"packages/ml_api/api/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2848635925","text":"#!/usr/bin/python\n\nimport sys\nimport superclass\n\nclass PrettyPrint( superclass.MetaPrettyPrinter ):\n\n NAME = 'swapon-pp'\n DESCRIPTION=\"\"\"Show swap areas in canonical style.\"\"\"\n\n def __init__( self ):\n super( PrettyPrint, self ).__init__()\n return\n\n def pre_begin_file( self, name = None ):\n self.titles = []\n self.areas = dict()\n self.widths = dict()\n return\n\n def next_line( self, line ):\n tokens = line.split()\n L = len( tokens )\n if L > 0:\n if not self.titles:\n self.titles = tokens\n self.widths = map(\n len,\n self.titles\n )\n elif L == len(self.titles):\n mountpoint = tokens[0]\n self.areas[ mountpoint ] = tokens\n self.widths = map(\n lambda i : max(\n self.widths[i],\n len( tokens[i] )\n ),\n range( L )\n )\n return\n\n def report( self, final = False ):\n if final:\n pass\n elif len( self.areas ) > 0:\n N = len( self.widths )\n fmts = map(\n '{{0:<{0}}}'.format( self.widths[ i ] ),\n range( N )\n )\n titles = map(\n fmts[i].format( self.titles[i] ),\n range( N )\n )\n self.println()\n self.println( ' '.join( titles ) )\n for mountpoint in sorted(\n self.areas,\n # Sort by name within priority\n key = lambda f : '{0:d} {1}'.format( f[4], f[0] )\n ):\n tokens = self.areas[ mountpoint ]\n columns = map(\n fmts[i].format( tokens[i] ),\n range( len( tokens ) )\n )\n self.println( ' '.join( columns ) )\n pass\n return\n","repo_name":"megacoder/generic-prettyprinter","sub_path":"genpp/swapon-plugin.py","file_name":"swapon-plugin.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"72981572305","text":"from common import raw\n\nimport collections\n\n\nages = collections.deque([0] * 9)\n\nfor age in raw.split(','):\n ages[int(age)] += 1\n\nfor i in range(80):\n ages.rotate(-1) # that's a rotate!\n ages[6] += ages[8]\n\nprint('Part 1:', sum(ages))\n\nfor i in range(256 - 80):\n ages.rotate(-1)\n ages[6] += ages[8]\n\nprint('Part 2:', sum(ages))\n","repo_name":"avayert/aoc2021","sub_path":"src/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"18139392015","text":"import os \nimport time\n\nfrom numpy import record\n\nimport gui_related\nfrom num_to_record import Recorder\nfrom screen_capture import Screen_Capture\nimport utilities\nfrom visual_processing import Visual_Process\nfrom config_reader import Config_Reader\nfrom gui_related import GUI_functions\nimport pyautogui\n\nclass Execute:\n def __init__(self):\n \"\"\"\n process to create required folders.\n call utilities for that.\n \"\"\"\n utils = utilities.Utilities()\n self.config = Config_Reader().get_config()\n utils.create_folder(\"captured_pics\")\n utils.create_folder(\"pic_to_extract\")\n utils.create_folder(\"buy_sell_particles\")\n self.csv_path = self.config[\"path\"][\"csv_path\"]\n\n def main(self):\n recorder = Recorder()\n screen_capture = Screen_Capture()\n picture_save_path = self.config[\"path\"][\"captured_pics\"]\n path_data_picture = os.path.join(\"pic_to_extract\",\"data.png\")\n screen_capture.capture(picture_save_path)\n GUI_func = GUI_functions()\n sell_image_path = os.path.join(\"buy_sell_particles\",\"sell.png\")\n buy_image_path = os.path.join(\"buy_sell_particles\",\"buy.png\")\n coordinates_sell = GUI_func.get_coordinates(sell_image_path)\n coordinates_buy = GUI_func.get_coordinates(buy_image_path)\n cropped_buy = screen_capture.crop_captured(coordinates_buy)\n cropped_sell = screen_capture.crop_captured(coordinates_sell)\n visual_process = Visual_Process()\n screen_capture.save_image(cropped_sell,path_data_picture)\n result1 = visual_process.read_pic_from_path(path_data_picture) \n price = visual_process.easyocr_result_interpreter(result1)\n print(price)\n #visual_process.easyocr_result_interpreter(result2)\n recorder.record(price,self.csv_path)\n\n\n def test_main(self):\n test_image_path_sell = os.path.join(\"test_pics\",\"sell.png\")\n test_image_path_buy = os.path.join(\"test_pics\",\"buy.png\")\n utils = utilities.Utilities()\n utils.create_folder(\"captured_pics\")\n config = Config_Reader().get_config()\n sell_image_path = os.path.join(\"buy_sell_particles\",\"sell.png\")\n buy_image_path = os.path.join(\"buy_sell_particles\",\"buy.png\")\n picture_save_path = config[\"path\"][\"captured_pics\"]\n print(picture_save_path)\n print(type(picture_save_path))\n Screen_Capture().capture(picture_save_path)\n GUI_func = GUI_functions()\n coordinates_sell = GUI_func.get_coordinates(sell_image_path)\n coordinates_buy = GUI_func.get_coordinates(buy_image_path)\n print(coordinates_buy)\n print(coordinates_buy[0],coordinates_buy[1],\"coords xy\")\n print(coordinates_sell)\n print(type(coordinates_buy))\n visual_process = Visual_Process()\n result1 = visual_process.read_pic(test_image_path_buy)\n result2 = visual_process.read_pic(test_image_path_sell)\n visual_process.easyocr_result_interpreter(result1)\n visual_process.easyocr_result_interpreter(result2)\n cropped_buy = Screen_Capture().crop_captured(coordinates_buy)\n cropped_sell = Screen_Capture().crop_captured(coordinates_sell)\n\n \"\"\"\n GUI_func.click_pic(sell_image_path)\n time.sleep(3)\n GUI_func.click_pic(buy_image_path)\n \"\"\"\n\n\nif __name__ == \"__main__\":\n executer = Execute()\n while True:\n executer.main()\n time.sleep(60)","repo_name":"LulutasoAI/financial_data_gathering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37528737230","text":"import cv2\r\ndef split_frames(filename, output_str):\r\n capture = cv2.VideoCapture(filename)\r\n i = 0\r\n while(capture.isOpened()):\r\n ret, frame = capture.read() \r\n if ret == False:\r\n break\r\n if i == 20:\r\n break \r\n cv2.imwrite(output_str.format(i),frame)\r\n i += 1\r\n capture.release()\r\nsplit_frames('xilogravura.mp4', 'frame-{}.jpg')\r\nsrc = cv2.imread('frame-{}.jpg', cv2.IMREAD_UNCHANGED)\r\n#percent by which the image is resized\r\nscale_percent = 50\r\n#calculate the 50 percent of original dimensions\r\nwidth = int(src.shape[1] * scale_percent / 100)\r\nheight = int(src.shape[0] * scale_percent / 100)\r\n# dsize\r\ndsize = (width, height)\r\n# resize image\r\noutput = cv2.resize(src, dsize)\r\ncv2.imwrite('D:/cv2-resize-image-50.png',output) \r\ncv2.imwrite('c:/ArteMaisComp/novo_frame={}.jpg',output) \r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n","repo_name":"KinsleyDavis/ArtMaisComp","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"37086377493","text":"import re\nfrom datetime import datetime\n\n# Чтение исходного файла\nwith open('dates.txt', 'r') as file:\n text = file.read()\n\n# Поиск дат в формате ДД.ММ.ГГГГ �� ДД/ММ/ГГГГ\npattern1 = r'\\b\\d{2}\\.\\d{2}\\.\\d{4}\\b'\npattern2 = r'\\b\\d{2}/\\d{2}/\\d{4}\\b'\ndates1 = re.findall(pattern1, text)\ndates2 = re.findall(pattern2, text)\n\n# Подсчет количества дат в каждом формате\ncount1 = len(dates1)\ncount2 = len(dates2)\n\n# Найти даты февраля в формате ДД/ММ/ГГГГ\nfebruary_dates = []\nfor date in dates2:\n try:\n parsed_date = datetime.strptime(date, '%d/%m/%Y')\n if parsed_date.month == 2:\n february_dates.append(date)\n except ValueError:\n pass\n\n# Запись дат февраля в формате ДД/ММ/ГГГГ в новый файл\nwith open('february_dates.txt', 'w') as file:\n file.write('\\n'.join(february_dates))\n\n# Вывод результатов\nprint(f\"Количество дат в формате ДД.ММ.ГГГГ: {count1}\")\nprint(f\"Количество дат в формате ДД/ММ/ГГГГ: {count2}\")\nprint(\"Даты февраля в формате ДД/ММ/ГГГГ сохранены в файл 'february_dates.txt'.\")\n","repo_name":"Caucasus1/Proi_1sem_Sungurov","sub_path":"pz_14/pz_14.py","file_name":"pz_14.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42130560517","text":"#import python libraries\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport astropy.wcs as wcs\nimport os \nimport time\n#from astropy.nddata import Cutout2D\nfrom scipy import ndimage\nimport astropy.constants as K\nimport astropy.units as u\nfrom astropy.cosmology import Planck15 as p15\nimport scipy.ndimage\nfrom lmfit import minimize, Parameters, report_fit\nfrom heapq import nlargest\nimport concurrent.futures\n\n\npath = os.path.dirname(os.path.abspath('__file__'))\n\n\nfilefits_data = 'NGC6810_crop.fits'\nfilefits_antenna = 'NGC6810_antenna.fits'\ndatacube = fits.open(path+'/file/'+filefits_data)[0]\ndatacube_antenna = fits.open(path+'/file/'+filefits_antenna)[0]\ndatacube.data = np.squeeze(datacube.data)\ndatacube_antenna.data = np.squeeze(datacube_antenna.data)\nNz,Ny,Nx = datacube.shape\nprint (Nz, Ny, Nx)\n\n\n#define the z-axis which corresponds to frequency\nnaxis3 = datacube.header['NAXIS3']\ncrpix3 = datacube.header['CRPIX3']\ncrval3 = datacube.header['CRVAL3']\ncdelt3 = datacube.header['CDELT3']\n\nkk = 1+np.arange(naxis3)\n \nfrequency = crval3+cdelt3*(kk-crpix3) #Hz\nfrequency /= 1e9 #GHz\n\nprint(frequency[:10])\n\n\n#define the z-axis in velocity units \n#average frequency\nfrequency_mean = np.mean(frequency)*u.GHz\nprint(frequency_mean)\n\n\n\n\n#z = v/c = (nu_emit - nu_obs)/nu_obs \nvelocity_unit = ((frequency_mean- (frequency*u.GHz))/(frequency*u.GHz))*K.c.to('km/s')\nprint(velocity_unit[:10])\nvelocity = velocity_unit.value\nprint(velocity[:10])\ndv = velocity[0]-velocity[1]\n\n#location of the target\nx0,y0 = 250, 250\n#size of the square aperture \ndl = 100\n#extract the spectrum\n#total spectrum\nspectrum = np.nansum(datacube.data[:,y0-dl:y0+dl,x0-dl:x0+dl],axis = (1,2))\n#1plot: frequency - spectrum\n\n\n\n\n\n## RMS DETERMINATION WITH THE POWER RESPONSE \n\n#data/power response\nnoise_cube = datacube.data / datacube_antenna.data\n\n#Choosing an empty region\nx0, y0 = 294, 143\ndl = 20\nnoise = noise_cube[:,y0-dl:y0+dl,x0-dl:x0+dl]\nerror = np.std(noise[1:,:,:])\n\nprint(\"rms = {:2f} mJy\".format(error))\nprint(\"####################\")\n\n\n\n\n\n\n\n## Multi-gaussians model\ndef residual(pars, x, p, data=None, sigma=None):\n argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n\n\n if p == 1:\n model = pars['amp_g1'] * np.exp(-argu1) \n if p == 2:\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2'] * np.exp(-argu2))\n if p == 3:\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3'] * np.exp(-argu3))\n if p == 4:\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n argu4 = (x - pars['cen_g4'])**2 / (2*(pars['wid_g4'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3']*np.exp(-argu3) + pars['amp_g4']*np.exp(-argu4))\n if p == 5:\n argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n argu4 = (x - pars['cen_g4'])**2 / (2*(pars['wid_g4'])**2)\n argu5 = (x - pars['cen_g5'])**2 / (2*(pars['wid_g5'])**2)\n model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3']*np.exp(-argu3) + pars['amp_g4']*np.exp(-argu4) + pars['amp_g5']*np.exp(-argu5))\n \n if data 
is None:\n return model\n if sigma is None:\n return model - data\n return (model - data) / sigma\n\nx = velocity\ndata = spectrum\n\n \n\n##Making the Spiral grid for fitting\ndef invers_spiral(A):\n return A[::-1] #inverting the array, so it starts from the center \n\ndef spiral_mat_to_vect(A):\n v = []\n while(A.size != 0):\n v.append(A[0,:])\n A = A[1:,:].T[::-1]\n return np.concatenate(v)\n\ndef spiral_vect_to_mat(v):\n L = int(np.sqrt(v.size)) # lunghezza del pezzo da aggiungere\n l = L\n A = np.zeros((L,L))\n i = 3 # parto da 3 per fare in modo che la coordinata x aumenti al secondo step\n x = 0 # coordinata x del nuovo pezzo\n y = 0 # coordinata y del nuovo pezzo\n \n A[x,y:l] = v[0:l]\n A = A.T[::-1]\n v = v[l:len(v)]\n\n while(v.size != 0):\n i += 1 # Ad ogni step ruoto e riempio la prima riga della matrice\n if i % 2 == 0: # Ogni due rotazioni si accorcia la lunghezza l\n l -= 1\n if (i + 1) % 4 == 0: # Ogni 4 rotazioni x aumenta\n x += 1\n if i % 4 == 0: # Ogni 4 rotazioni y aumenta con un ritardo di 1 step rispetto a x\n y += 1\n A[x,y:y+l] = v[0:l]\n A = A.T[::-1]\n v = v[l:len(v)]\n \n for rotations in range(i % 4): # Faccio le rotazioni che mancano per rimettere la matrice nel verso giusto\n A = A.T[::-1]\n \n return A\n\n \n#Generating moments map\n\n# datacube.data = np.where(datacube.data<0, 0*datacube.data, datacube.data)\nmask_cube = np.where(datacube.data > 3*error, datacube.data, np.nan)\nM0 = np.nansum(datacube.data, axis = (0))*dv\n\nM1 = np.nansum(datacube.data[:,:,:]*velocity[:,np.newaxis,np.newaxis], axis=0)*dv / M0\nthr = 3*error\nM0[np.where(M0 3*error): #fitspiral1\n if flux_map_tmp[jj,ii]>5*error:\n spec_tmp = datacube.data[:,jj,ii]\n spec_tmp = np.nan_to_num(spec_tmp)\n spec_tmp[0]=0\n \n ##FIT CON 1 GAUSSIANA\n if jj > 1.14777 * ii + 20: #the pixel above the galaxy diagonal\n velmax = 50 #limit to select the blueshifted pixel\n velmin = -300\n else:\n velmax = 300\n velmin=-50\n \n fit_params1gx.add('cen_g1', value=M1[jj,ii], min = velmin, max= velmax)\n fit_params1gx.add('wid_g1', value=M2[jj,ii], min = 10, max = 300)\n fit_params2gx.add('cen_g1', value=M1[jj,ii], min = velmin, max= velmax)\n \n\n tmp_res1 = executor.submit(compute1, residual, fit_params1g, x, 1, spec_tmp, error)\n tmp_res2 = executor.submit(compute2, residual, fit_params2g, x, 2, spec_tmp, error)\n tmp_res1x = executor.submit(compute1x, residual, fit_params1gx, x, 1, spec_tmp, error)\n tmp_res2x = executor.submit(compute2x, residual, fit_params2gx, x, 2, spec_tmp, error)\n out_1g, fit1, bic_1g = tmp_res1.result()\n out_2g, fit2, bic_2g = tmp_res2.result()\n out_1gx, fit1x, bic_1gx = tmp_res1x.result()\n out_2gx, fit2x, bic_2gx = tmp_res2x.result()\n \"\"\"\n out1, fit1 = compute(residual, fit_params1g, x, 1, spec_tmp, error)\n out2, fit2 = compute(residual, fit_params2g, x, 2, spec_tmp, error)\n out1x, fit1x = compute(residual, fit_params1gx, x, 1, spec_tmp, error)\n out2x, fit2x = compute(residual, fit_params2gx, x, 2, spec_tmp, error)\n \"\"\"\n \n mod1[:,jj,ii] = fit1\n fit_params1g.add('amp_g1', value=out_1g[0], min = 0.0025, max= 0.1)\n fit_params1g.add('cen_g1', value=out_1g[1], min = velmin, max= velmax)\n fit_params1g.add('wid_g1', value=out_1g[2], min = 10, max = 300)\n\n mod2[:,jj,ii] = fit2\n \n fit_params2g.add('amp_g1', value=out_2g[0], min = 0.0025, max= 0.1)\n fit_params2g.add('cen_g1', value=out_2g[1], min = velmin, max= velmax)\n fit_params2g.add('wid_g1', value=out_2g[2], min = 10, max = 200)\n fit_params2g.add('amp_g2' , value=out_2g[3], min= 0.0025, max= 0.1)\n 
fit_params2g.add(name=('cen_g2'), expr='peak_split+cen_g1')\n fit_params2g.add('wid_g2', value=out_2g[5], min =10, max= 200)\n\n \n ##FIT CON 1 GAUSSIANA con velocità iniziale data dal momento 1\n \n mod1x[:,jj,ii] = fit1x\n fit_params1gx.add('amp_g1', value=out_1gx[0], min = 0.0025, max= 0.1)\n\n \n ##FIT CON 2 GAUSSIANE con velocità iniziale data dal momento1\n mod2x[:,jj,ii] = fit2x\n \n fit_params2gx.add('amp_g1', value=out_2gx[0], min = 0.0025, max= 0.1)\n fit_params2gx.add('wid_g1', value=out_2gx[2], min = 10, max = 200)\n fit_params2gx.add('amp_g2' , value=out_2gx[3], min= 0.0025, max= 0.1)\n fit_params2gx.add(name=('cen_g2'), expr='peak_split+cen_g1')\n fit_params2gx.add('wid_g2', value=out_2gx[5], min =10, max= 200)\n \n if jj in range1 and ii in range2:\n # if bic_1g < bic_2g and bic_1g < bic_3g and bic_2g - bic_1g > 2.3 and bic_3g - bic_1g > 2.3:\n bic_min = np.min([bic_1g, bic_2g, bic_1gx, bic_2gx])\n if bic_1g == bic_min: \n flux_map[jj,ii] = np.nansum(fit1) * dv\n vel_map[jj,ii] = np.nansum((fit1*velocity)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit1*(velocity-vel_map[jj,ii])**2)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod1[:,jj,ii]\n elif bic_2g == bic_min: \n flux_map[jj,ii] = np.nansum(fit2) * dv\n vel_map[jj,ii] = np.nansum((fit2*velocity)) * dv/flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit2*(velocity-vel_map[jj,ii])**2)) * dv /flux_map[jj,ii] \n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod2[:,jj,ii]\n elif bic_1gx == bic_min:\n flux_map[jj,ii] = np.nansum(fit1x) * dv \n vel_map[jj,ii] = np.nansum((fit1x*velocity)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit1x*(velocity-vel_map[jj,ii])**2)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod1x[:,jj,ii]\n elif bic_2gx == bic_min:\n flux_map[jj,ii] = np.nansum(fit2x) * dv \n vel_map[jj,ii] = np.nansum((fit2x*velocity)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit2x*(velocity-vel_map[jj,ii])**2)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod2x[:,jj,ii]\n# flux_map[flux_map_tmp<5*error] = np.nan\n# vel_map[flux_map_tmp < 5*error] = np.nan\n# vdisp_map[flux_map_tmp < 5*error] = np.nan \n \nplt.figure(figsize = (12,4))\n\nplt.subplot(131)\nplt.imshow(flux_map, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(132)\nplt.imshow(vel_map, origin = 'lower', vmin = -300, vmax = 300, cmap ='jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(133)\nplt.imshow(vdisp_map, origin = 'lower', vmin = 0, vmax =200, cmap = 'jet')\nplt.colorbar(shrink = 0.7)\n\nflux = flux_map\nfor jj in range (Ny):\n for ii in range(Nx):\n if jj < 1.22 * ii - 300:\n flux[jj,ii] = np.nan\n\nvel = vel_map\nfor jj in range (Ny):\n for ii in range(Nx):\n if jj < 1.22 * ii - 300:\n vel[jj,ii] = np.nan\n \ndisp = vdisp_map\nfor jj in range (Ny):\n for ii in range(Nx):\n if jj < 1.22 * ii - 300:\n disp[jj,ii] = np.nan\n\nplt.figure(figsize = (12,4))\n\nplt.subplot(131)\nplt.imshow(flux, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(132)\nplt.imshow(vel, origin = 'lower', vmin = -300, vmax = 300, cmap ='jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(133)\nplt.imshow(disp, origin = 'lower', vmin = 0, vmax =200, cmap = 'jet')\nplt.colorbar(shrink = 0.7)\n\nhdu = fits.PrimaryHDU(mod)\nhdul = fits.HDUList([hdu])\nhdul.writeto('model_3_2gx.fits')\n\n\nhdu = fits.PrimaryHDU(flux_map)\nhdul = 
fits.HDUList([hdu])\nhdul.writeto('flux_map_spiral_3_2gx.fits')\nhdu = fits.PrimaryHDU(vel_map)\nhdul = fits.HDUList([hdu])\nhdul.writeto('vel_map_spiral_3_2gx.fits')\nhdu = fits.PrimaryHDU(vdisp_map)\nhdul = fits.HDUList([hdu])\nhdul.writeto('vdisp_map_spiral_3_2gx.fits')\n\n","repo_name":"taurosss/Galaxy_outflow_final","sub_path":"spiral_fit_2gx_parallel.py","file_name":"spiral_fit_2gx_parallel.py","file_ext":"py","file_size_in_byte":17155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14231984166","text":"# Noesis Gravity Rush 2 .evb Extractor\n\nfrom inc_noesis import *\nimport noesis\nimport rapi\nimport os\n\ndebug = False\nglobal_scale = 100\n\ndef registerNoesisTypes():\n\n handle = noesis.register('Gravity Rush 2 evb database', '.evb')\n noesis.setHandlerTypeCheck(handle, noepyCheckType)\n noesis.setHandlerLoadModel(handle, noepyLoadModel)\n if debug:\n noesis.logPopup() # please comment out when done.\n return 1\n\n\ndef noepyCheckType(data):\n file = NoeBitStream(data)\n if len(data) < 4:\n return 0\n header = file.readBytes(4).decode('ASCII').rstrip(\"\\0\")\n if header == 'FBKK':\n return 1\n return 0\n\n# loading the bones!\n\n\ndef noepyLoadModel(data, mdlList):\n global bs\n bs = NoeBitStream(data)\n\n global bones\n bones = []\n\n bs.seek(0x38, NOESEEK_ABS)\n file_name = loadStringFromPointer(bs.readUInt())\n print(\"Filename: \" + file_name)\n bs.seek(0x24, NOESEEK_REL)\n num_of_data_chunk = bs.readUInt()\n bs.seek(bs.readUInt() - 4, NOESEEK_REL)\n for dataChunkIndex in range(num_of_data_chunk):\n readDataChunk(bs.readUInt())\n\n mdl = NoeModel()\n mdl.setBones(bones)\n mdlList.append(mdl)\n return 1\n\n\ndef readDataChunk(offset):\n origonal_offset = bs.tell()\n bs.seek(offset - 4, NOESEEK_REL)\n print(\"Loading Data Chunk at \" + hex(bs.tell()))\n # print(\"Upstream - \" + hex(origonal_offset))\n # print(\"offset - \" + hex(offset))\n bs.seek(0x08, NOESEEK_REL)\n name = loadStringFromPointer(bs.readUInt())\n print(\"Data Chunk name: \" + name)\n bs.seek(0x24, NOESEEK_REL)\n subdata_chunk_count = bs.readUInt()\n subindex_chunk_location = bs.tell() + bs.readUInt()\n bs.seek(0x18, NOESEEK_REL)\n # Loading root bone\n rotation = NoeQuat.fromBytes(bs.readBytes(16))\n translation = NoeVec3.fromBytes(bs.readBytes(12)) * NoeVec3((global_scale, global_scale, global_scale))\n bs.seek(4, NOESEEK_REL)\n scale = NoeVec3.fromBytes(bs.readBytes(12))\n boneMat = rotation.toMat43(transposed=1)\n boneMat[3] = translation\n boneIndex = len(bones)\n bones.append(NoeBone(boneIndex, name, boneMat))\n bs.seek(0x18, NOESEEK_REL)\n parent_name = loadStringFromPointer(bs.readUInt())\n print(\"Parent name: \" + parent_name)\n # Loading Sub Index Chunk\n bs.seek(subindex_chunk_location, NOESEEK_ABS)\n for subDataChunkIndex in range(subdata_chunk_count):\n readSubDataChunk(bs.readUInt(), boneIndex)\n bs.seek(origonal_offset, NOESEEK_ABS)\n return\n\n\ndef readSubDataChunk(offset, parentBoneIndex):\n origonal_offset = bs.tell()\n bs.seek(offset - 4, NOESEEK_REL)\n print(\"Loading Sub Data Chunk at \" + hex(bs.tell()))\n bs.seek(0x08, NOESEEK_REL)\n name = loadStringFromPointer(bs.readUInt())\n print(\"Sub Data Chunk name: \" + name)\n bs.seek(0x0C, NOESEEK_REL)\n bs.seek(bs.readUInt() - 4, NOESEEK_REL)\n # Loading bone\n rotation = NoeQuat.fromBytes(bs.readBytes(16))\n translation = NoeVec3.fromBytes(bs.readBytes(12)) * NoeVec3((global_scale, global_scale, global_scale))\n bs.seek(4, NOESEEK_REL)\n scale = NoeVec3.fromBytes(bs.readBytes(12))\n boneMat = rotation.toMat43(transposed=1)\n boneMat[3] = translation\n #boneMat *= bones[parentBoneIndex].getMatrix() \n boneIndex = len(bones)\n bones.append(NoeBone(boneIndex, name, boneMat, None, parentBoneIndex))\n\n bs.seek(origonal_offset, NOESEEK_ABS)\n return\n\n\ndef loadStringFromPointer(offset):\n origonal_offset = bs.tell()\n bs.seek(offset - 4, NOESEEK_REL)\n string = bs.readBytes(64).split(b'\\x00')[0].decode('UTF8')\n bs.seek(origonal_offset, NOESEEK_ABS)\n return 
string\n","repo_name":"Team-Alua/GR2-evb-extractor","sub_path":"GravityRush2_evb.py","file_name":"GravityRush2_evb.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73202436625","text":"from keras import models\nfrom keras import layers\nfrom keras.datasets import boston_housing\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_model():\n\tmodel = models.Sequential()\n\tmodel.add(layers.Dense(64, activation=\"relu\", input_shape=(train_data.shape[1],)))\n\tmodel.add(layers.Dense(64, activation=\"relu\"))\n\t# No activation on the last layer as it is purely linear value\n\tmodel.add(layers.Dense(1))\n\t# mse - mean squared error - good for regression problems\n\t# mae + mean absolute error - the absolute value of the difference between predictions and targets\n\tmodel.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"mae\"])\n\treturn model\n\n\ndef smooth_curve(points, factor=0.9):\n\tsmoothed_points = []\n\tfor point in points:\n\t\tif smoothed_points:\n\t\t\tprevious = smoothed_points[-1]\n\t\t\tsmoothed_points.append(previous * factor + point * (1 - factor))\n\t\telse:\n\t\t\tsmoothed_points.append(point)\n\treturn smoothed_points\n\n(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()\n\nmean = train_data.mean(axis=0)\ntrain_data -= mean\nstd = train_data.std(axis=0)\ntrain_data /= std\n\n\n# k-fold validation\nk = 4\nnum_val_samples = len(train_data) // k\nnum_epochs = 100\nall_mae_histories = []\n\n\nfor i in range(k):\n\tprint(\"Processing fold #\", i)\n\tval_data = train_data[i * num_val_samples: (i +1) * num_val_samples]\n\tval_targets = train_targets[i * num_val_samples: (i +1) * num_val_samples]\n\n\tpartial_train_data = np.concatenate(\n\t\t[train_data[:i * num_val_samples],\n\t\ttrain_data[(i + 1) * num_val_samples:]],\n\t\taxis=0)\n\n\tpartial_train_targets = np.concatenate(\n\t\t[train_targets[:i * num_val_samples],\n\t\ttrain_targets[(i + 1) * num_val_samples:]],\n\t\taxis=0)\n\n\tprint(\"Num val samples: \", num_val_samples)\n\tprint(\"Partial train: \", len(partial_train_data))\n\n\tmodel = get_model()\n\thistory = model.fit(\n\t\tpartial_train_data, \n\t\tpartial_train_targets, \n\t\tvalidation_data=(val_data, val_targets),\n\t\tepochs=num_epochs, \n\t\tbatch_size=1, \n\t\tverbose=0)\n\n\tprint(history.history)\n\tmae_history = history.history[\"mean_absolute_error\"]\n\tall_mae_histories.append(mae_history)\n\naverage_mae_history = [np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]\n\nplt.plot(range(1, len(average_mae_history) + 1), average_mae_history)\nplt.xlabel([\"Epochs\"])\nplt.ylabel([\"Validation MAE\"])\nplt.show()\n\nplt.clf()\n\nsmooth_mae_history = smooth_curve(average_mae_history[10:])\nplt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)\nplt.xlabel([\"Epochs\"])\nplt.ylabel([\"Validation MAE\"])\nplt.show()\n\n","repo_name":"KasparPeterson/deep-learning-with-python","sub_path":"house_prices.py","file_name":"house_prices.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2117636411","text":"from js9 import j\n\n\ndef get_stats_collector(service):\n stats_collectors_services = service.consumers.get('stats_collector')\n if stats_collectors_services:\n return stats_collectors_services[0]\n\n\ndef get_statsdb(service):\n statsdb_services = service.aysrepo.servicesFind(role='statsdb')\n if statsdb_services:\n return statsdb_services[0]\n\n\ndef get_version(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n service = job.service\n if service.model.data.status != 'running':\n version = ''\n else:\n node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))\n pong = node.client.ping()\n version = pong.split('Version: ')[1] if pong else ''\n\n service.model.data.version = version\n service.saveAll()\n return version\n\n\ndef input(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_configuration, get_jwt_token\n\n args = job.model.args\n ip = args.get('redisAddr')\n node = Node(ip, args.get('redisPort'), get_jwt_token(job.service.aysrepo))\n\n config = get_configuration(job.service.aysrepo)\n version = node.client.info.version()\n core0_version = config.get('0-core-version')\n core0_revision = config.get('0-core-revision')\n\n if (core0_version and core0_version != version['branch']) or \\\n (core0_revision and core0_revision != version['revision']):\n raise RuntimeError(\n 'Node with IP {} has a wrong version. Found version {}@{} and expected version {}@{} '.format(\n ip, version['branch'], version['revision'], core0_version, core0_revision))\n\n\ndef init(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n service = job.service\n node = Node.from_ays(service, get_jwt_token(service.aysrepo))\n job.logger.info('create storage pool for fuse cache')\n poolname = '{}_fscache'.format(service.name)\n\n storagepool = node.ensure_persistance(poolname)\n storagepool.ays.create(service.aysrepo)\n\n statsdb_service = get_statsdb(service)\n if statsdb_service:\n stats_collector_actor = service.aysrepo.actorGet('stats_collector')\n args = {\n 'node': service.name,\n 'port': statsdb_service.model.data.port,\n 'ip': statsdb_service.parent.model.data.redisAddr,\n\n }\n stats_collector_service = stats_collector_actor.serviceCreate(instance=service.name, args=args)\n stats_collector_service.consume(service)\n\n\ndef getAddresses(job):\n service = job.service\n networks = service.producers.get('network', [])\n networkmap = {}\n for network in networks:\n networkmap[network.name] = network.executeAction('getAddresses', args={'node_name': service.name})\n return networkmap\n\n\ndef install(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n\n # at each boot recreate the complete state in the system\n service = job.service\n node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))\n get_version(job)\n job.logger.info('mount storage pool for fuse cache')\n poolname = '{}_fscache'.format(service.name)\n node.ensure_persistance(poolname)\n\n # Set host name\n node.client.system('hostname %s' % service.model.data.hostname).get()\n node.client.bash('echo %s > /etc/hostname' % service.model.data.hostname).get()\n\n job.logger.info('configure networks')\n for network in service.producers.get('network', []):\n network.executeAction('configure', 
args={'node_name': service.name})\n\n stats_collector_service = get_stats_collector(service)\n statsdb_service = get_statsdb(service)\n if stats_collector_service and statsdb_service and statsdb_service.model.data.status == 'running':\n stats_collector_service.executeAction('install', context=job.context)\n node.client.bash('modprobe ipmi_si && modprobe ipmi_devintf').get()\n\n\ndef monitor(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.sal.healthcheck import HealthCheckObject\n from zeroos.orchestrator.configuration import get_jwt_token, get_configuration\n\n service = job.service\n config = get_configuration(service.aysrepo)\n token = get_jwt_token(job.service.aysrepo)\n job.context['token'] = token\n\n install_action = service.model.actionsState['install']\n if install_action != 'ok' and install_action != 'error':\n return\n\n healthcheck_service = job.service.aysrepo.serviceGet(role='healthcheck',\n instance='node_%s' % service.name,\n die=False)\n if healthcheck_service is None:\n healthcheck_actor = service.aysrepo.actorGet('healthcheck')\n healthcheck_service = healthcheck_actor.serviceCreate(instance='node_%s' % service.name)\n service.consume(healthcheck_service)\n\n nodestatus = HealthCheckObject('nodestatus', 'Node Status', 'Node Status', '/nodes/{}'.format(service.name))\n\n node = Node.from_ays(service, token, timeout=5)\n state = node.is_running()\n\n if state:\n service.model.data.status = 'running'\n configured = node.is_configured(service.name)\n if not configured:\n service.executeAction('install', context=job.context)\n for consumer in service.getConsumersRecursive():\n consumer.self_heal_action('monitor')\n stats_collector_service = get_stats_collector(service)\n statsdb_service = get_statsdb(service)\n\n # Check if statsdb is installed on this node and start it if needed\n if (statsdb_service and str(statsdb_service.parent) == str(job.service)\n and statsdb_service.model.data.status != 'running'):\n statsdb_service.executeAction('start', context=job.context)\n\n # Check if there is a running statsdb and if so make sure stats_collector for this node is started\n if (stats_collector_service and stats_collector_service.model.data.status != 'running'\n and statsdb_service.model.data.status == 'running'):\n stats_collector_service.executeAction('start', context=job.context)\n\n # healthchecks\n nodestatus.add_message('node', 'OK', 'Node is running')\n update_healthcheck(job, healthcheck_service, node.healthcheck.openfiledescriptors())\n update_healthcheck(job, healthcheck_service, node.healthcheck.cpu_mem())\n update_healthcheck(job, healthcheck_service, node.healthcheck.rotate_logs())\n update_healthcheck(job, healthcheck_service, node.healthcheck.network_bond())\n update_healthcheck(job, healthcheck_service, node.healthcheck.interrupts())\n update_healthcheck(job, healthcheck_service, node.healthcheck.context_switch())\n update_healthcheck(job, healthcheck_service, node.healthcheck.threads())\n update_healthcheck(job, healthcheck_service, node.healthcheck.qemu_vm_logs())\n update_healthcheck(job, healthcheck_service, node.healthcheck.network_load())\n update_healthcheck(job, healthcheck_service, node.healthcheck.disk_usage())\n update_healthcheck(job, healthcheck_service, node.healthcheck.ssh_cleanup(job=job))\n\n flist = config.get('healthcheck-flist', 'https://hub.gig.tech/gig-official-apps/healthcheck.flist')\n with node.healthcheck.with_container(flist) as cont:\n update_healthcheck(job, healthcheck_service, 
node.healthcheck.node_temperature(cont))\n update_healthcheck(job, healthcheck_service, node.healthcheck.powersupply(cont))\n update_healthcheck(job, healthcheck_service, node.healthcheck.fan(cont))\n\n # check network stability of node with the rest of the nodes ! TODO\n else:\n if service.model.data.status != 'rebooting':\n service.model.data.status = 'halted'\n nodestatus.add_message('node', 'ERROR', 'Node is halted')\n update_healthcheck(job, healthcheck_service, nodestatus.to_dict())\n get_version(job)\n service.saveAll()\n\n\ndef update_healthcheck(job, health_service, healthchecks):\n import time\n\n service = job.service\n\n interval = service.model.actionGet('monitor').period\n new_healthchecks = list()\n if not isinstance(healthchecks, list):\n healthchecks = [healthchecks]\n defaultresource = '/nodes/{}'.format(service.name)\n for health_check in healthchecks:\n for health in health_service.model.data.healthchecks:\n # If this healthcheck already exists, update its attributes\n if health.id == health_check['id']:\n health.name = health_check.get('name', '')\n health.resource = health_check.get('resource', defaultresource) or defaultresource\n health.messages = health_check.get('messages', [])\n health.category = health_check.get('category', '')\n health.lasttime = time.time()\n health.interval = interval\n health.stacktrace = health_check.get('stacktrace', '')\n break\n else:\n # healthcheck doesn't exist in the current list, add it to the list of new\n health_check['lasttime'] = time.time()\n health_check['interval'] = interval\n new_healthchecks.append(health_check)\n\n old_healthchecks = health_service.model.data.to_dict().get('healthchecks', [])\n old_healthchecks.extend(new_healthchecks)\n health_service.model.data.healthchecks = old_healthchecks\n\n\ndef reboot(job):\n import time\n import redis\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n token = get_jwt_token(job.service.aysrepo)\n job.context['token'] = token\n service = job.service\n service._recurring_tasks['monitor'].stop()\n try:\n start = time.time()\n # Make sure any running monitor action finishes before we reboot\n while time.time() < start + 60:\n if not j.core.jobcontroller.db.jobs.list(\n actor='node.zero-os', action='monitor', state='running', service=service.name):\n break\n time.sleep(1)\n else:\n raise j.exceptions.RuntimeError('Failed to reboot node. Waiting for monitoring action for too long')\n\n force_reboot = service.model.data.forceReboot\n vms = service.consumers.get('vm') or []\n for vm in vms:\n if vm.model.data.status != 'halted':\n if not force_reboot:\n raise j.exceptions.RuntimeError(\n 'Failed to reboot node. 
Force reboot is not enabled and some vms are not halted')\n else:\n vm.executeAction('shutdown', context=job.context)\n service.model.data.status = 'rebooting'\n job.logger.info('reboot node {}'.format(service))\n node = Node.from_ays(service, job.context['token'])\n node.client.raw('core.reboot', {})\n finally:\n start = time.time()\n while time.time() < start + 10:\n try:\n node = Node.from_ays(service, token, timeout=5)\n node.client.testConnectionAttempts = 0\n node.client.ping()\n except (RuntimeError, ConnectionError, redis.TimeoutError, TimeoutError):\n break\n time.sleep(1)\n else:\n job.logger.info(\"Could not wait within 10 seconds for node to reboot\")\n service._recurring_tasks['monitor'].start()\n\n\ndef uninstall(job):\n from zeroos.orchestrator.configuration import get_jwt_token\n\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n\n service = job.service\n stats_collector_service = get_stats_collector(service)\n if stats_collector_service:\n stats_collector_service.executeAction('uninstall', context=job.context)\n\n statsdb_service = get_statsdb(service)\n if statsdb_service and str(statsdb_service.parent) == str(service):\n statsdb_service.executeAction('uninstall', context=job.context)\n\n bootstraps = service.aysrepo.servicesFind(actor='bootstrap.zero-os')\n if bootstraps:\n bootstraps[0].executeAction('delete_node', args={'node_name': service.name})\n\n # Remove etcd_cluster if this was the last node service\n node_services = service.aysrepo.servicesFind(role='node')\n if len(node_services) > 1:\n return\n\n for etcd_cluster_service in service.aysrepo.servicesFind(role='etcd_cluster'):\n etcd_cluster_service.executeAction('delete', context=job.context)\n etcd_cluster_service.delete()\n\n\ndef watchdog(job):\n from zeroos.orchestrator.sal.Pubsub import Pubsub\n from zeroos.orchestrator.configuration import get_jwt_token\n from asyncio import sleep\n import asyncio\n import re\n import traceback\n\n service = job.service\n watched_roles = {\n 'nbdserver': {\n 'level': 20,\n 'message': (re.compile('.*'),),\n 'eof': True\n },\n 'tlogserver': {\n 'eof': True,\n },\n 'ork': {\n 'level': 20,\n 'instance': job.service.name,\n 'service': 'node',\n 'eof': False,\n 'message': (re.compile('.*'),),\n 'handler': 'ork_handler',\n },\n 'kvm': {\n 'level': 20,\n 'instance': job.service.name,\n 'service': 'node',\n 'eof': False,\n 'message': (re.compile('.*'),),\n 'handler': 'vm_handler',\n 'sub_id': 'events',\n },\n 'cloudinit': {\n 'eof': True,\n },\n 'http': {\n 'eof': True,\n },\n 'dhcp': {\n 'eof': True,\n },\n 'storage_engine': {\n 'eof': True,\n },\n \"etcd\": {\n \"eof\": True,\n },\n 'stats_collector': {\n 'eof': True,\n },\n 'zerostor': {\n 'eof': True,\n },\n 'container': {\n \"eof\": True,\n },\n }\n\n async def callback(jobid, level, message, flag):\n if '.' 
not in jobid:\n return\n\n role, sub_id = jobid.split('.', 1)\n if (role not in watched_roles or\n watched_roles[role].get('level', level) != level\n or watched_roles[role].get('sub_id', sub_id) != sub_id):\n return\n\n service_role = watched_roles[role].get('service', role)\n instance = watched_roles[role].get('instance', sub_id)\n\n eof = flag & 0x6 != 0\n\n valid_message = False\n matched_messages = watched_roles[role].get('message', ())\n for msg in matched_messages:\n if msg.match(message):\n valid_message = True\n\n if not valid_message and not (watched_roles[role]['eof'] and eof):\n return\n\n srv = service.aysrepo.serviceGet(role=service_role, instance=instance, die=False)\n if srv:\n args = {'message': message, 'eof': eof, 'level': level}\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n handler = watched_roles[role].get('handler', 'watchdog_handler')\n await srv.asyncExecuteAction(handler, context=job.context, args=args)\n\n async def check_node(job):\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n try:\n cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)\n await cl.ping()\n service.model.data.status = 'running'\n except (RuntimeError, OSError) as e:\n service.model.data.status = 'halted'\n\n async def streaming(job):\n # Check if the node is runing\n while service.model.actionsState['install'] != 'ok':\n await sleep(5)\n\n while str(service.model.data.status) != 'running':\n await sleep(5)\n\n # Add the looping here instead of the pubsub sal\n cl = None\n subscribed = None\n\n while True:\n if str(service.model.data.status) != 'running':\n await sleep(5)\n continue\n if cl is None:\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)\n\n try:\n if not subscribed:\n queue = await cl.subscribe('ays.monitor')\n subscribed = True\n await cl.global_stream(queue)\n except asyncio.TimeoutError as e:\n job.logger.error(e)\n await check_node(job)\n cl = None\n subscribed = None\n except OSError as e:\n job.logger.error(e)\n await check_node(job)\n cl = None\n subscribed = None\n except RuntimeError as e:\n job.logger.error(e)\n await check_node(job)\n cl = None\n subscribed = None\n except Exception as e:\n job.logger.error(traceback.format_exc())\n await check_node(job)\n cl = None\n subscribed = None\n\n return streaming(job)\n\n\ndef nic_shutdown(job, message):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n service = job.service\n node = Node.from_ays(service, get_jwt_token(service.aysrepo))\n interface = message['name']\n\n if interface.startswith('cont'):\n container_id = interface.split('-')[0].replace('cont', '')\n for container in node.containers.list():\n if str(container.id) == container_id:\n container_service = service.aysrepo.serviceGet(role='container', instance=container.name)\n container_service.model.data.status = 'networkKilled'\n container_service.saveAll()\n return\n else:\n vms = node.client.kvm.list()\n for vm in vms:\n if interface in vm['ifctargets']:\n vm_service = service.aysrepo.serviceGet(role='vm', instance=vm['name'])\n vm_service.model.data.status = 'networkKilled'\n vm_service.saveAll()\n return\n\n job.logger.info('Failed to find vm/container interface matching %s' % interface)\n\n\ndef ork_handler(job):\n import json\n from zeroos.orchestrator.utils import send_event\n\n message = 
job.model.args.get('message')\n if not message:\n return\n\n message = json.loads(message)\n send_event('ork', message, job.service.aysrepo)\n\n if message['event'] == 'NIC_SHUTDOWN':\n nic_shutdown(job, message)\n elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'WARNING':\n job.logger.info('VM %s exceeded cpu threshold and will be quarantined soon' % message['name'])\n elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'SUCCESS':\n job.logger.info('Vm %s has been quarantined' % message['name'])\n elif message['event'] == 'VM_UNQUARANTINE' and message['state'] == 'SUCCESS':\n job.logger.info('Vm %s has been released from quarantine' % message['name'])\n\n\ndef start_vm(job, vm):\n import asyncio\n from zeroos.orchestrator.configuration import get_jwt_token\n\n if vm.model.data.status == 'running':\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n\n asyncio.ensure_future(vm.asyncExecuteAction('start', context=job.context), loop=job.service._loop)\n\n\ndef shutdown_vm(job, vm):\n import asyncio\n from zeroos.orchestrator.configuration import get_jwt_token\n\n if vm.model.data.status == 'running':\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n asyncio.ensure_future(vm.asyncExecuteAction('shutdown', context=job.context), loop=job.service._loop)\n\n\ndef vm_handler(job):\n import json\n import asyncio\n\n message = job.model.args.get('message')\n if not message:\n return\n\n message = json.loads(message)\n vm = job.service.aysrepo.serviceGet(role='vm', instance=message['name'])\n if not vm:\n return\n\n if message['event'] == 'stopped' and message['detail'] == 'failed':\n asyncio.ensure_future(start_vm(job, vm))\n\n if message['event'] == 'stopped' and message['detail'] == 'shutdown':\n asyncio.ensure_future(shutdown_vm(job, vm))\n\n\ndef processChange(job):\n service = job.service\n args = job.model.args\n node_data = service.model.data.to_dict()\n if 'forceReboot' in args and node_data.get('forceReboot') != args['forceReboot']:\n service.model.data.forceReboot = args['forceReboot']\n service.saveAll()\n","repo_name":"0xIslamTaha/0-orchestrator","sub_path":"templates/node.zero-os/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":20936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"5471695131","text":"# from . import schemas, models\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends, HTTPException, status, APIRouter, Response\nfrom typing import Optional, List\n\nfrom models.index import get_db, Course\nfrom schemas.course import Course as SCourse, CoursePost, CoursePostResponse, CourseTutor, CourseQuestion\nfrom auth import auth\n\n\nrouter = APIRouter()\n\n\n@router.get(\"\", response_model=List[CourseTutor], status_code=200)\ndef get_courses(db: Session = Depends(get_db)):\n courses = db.query(Course).all()\n return courses\n\n\n@router.get(\"/{id}\", response_model=SCourse, status_code=status.HTTP_200_OK)\ndef get_course(id: int, db: Session = Depends(get_db)):\n course = db.query(Course).filter(Course.id == id).first()\n if course is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Course with ID doesn't exist\",\n )\n return course\n\n\n\n@router.get(\"/{id}/questions\", response_model=CourseQuestion, status_code=status.HTTP_200_OK)\ndef get_course_questions(id: int, db: Session = Depends(get_db)):\n course = db.query(Course).filter(Course.id == id).first()\n if course is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Course with ID doesn't exist\",\n )\n return course\n\n \n\n\n@router.post(\"\", response_model=CoursePostResponse, status_code=status.HTTP_201_CREATED)\n# @router.post(\"\", status_code=status.HTTP_201_CREATED)\ndef create_course(\n course: CoursePost, auth=Depends(auth), db: Session = Depends(get_db)\n):\n # return auth\n if auth.role != \"tutor\":\n raise HTTPException(\n status_code=403, detail=\"You have to be a tutor to create a course\"\n )\n\n course.tutor_id = auth.id\n db_course = db.query(Course).filter(Course.name == course.name).first()\n # return auth\n if db_course is not None:\n raise HTTPException(status_code=400, detail=\"Course with name already exists\")\n\n new_course = Course(**course.dict())\n\n db.add(new_course)\n db.commit()\n\n return new_course\n\n\n# @router.put('/{id}',response_model=SCourse,status_code=status.HTTP_200_OK)\n# def update_course(id:int, course:SCourse, db: Session = Depends(get_db)):\n# db_course=db.query(Course).filter(Course.id==id).first()\n# # db_course.email=course.email\n# # db_course.firstname=course.firstname\n# # db_course.lastname=course.lastname\n# db_course.phone = course.phone\n\n# db.commit()\n\n# return db_course\n","repo_name":"leyume/learnducate","sub_path":"backend/app/routes/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"24844461810","text":"from math import sqrt\n\n\ndef reverse_list(s):\n print(type(s), s)\n temp_list = list(s)\n print(type(temp_list), temp_list)\n test = temp_list.reverse()\n print(temp_list)\n print(type(test))\n return ''.join(temp_list)\n\n\nword = \"oki\"\n# print(reverse_list(word))\n\ntab = ['test', 'oki', 'doki']\ntab.reverse()\ntesttab = tab\n# print(testtab)\n\n# *---------------------------------------\n# Al-Khwarizmi\nbu_s = 14\nbu_n = 20\nbu_o = 1775\n\nroot = 34\nnum = 40*1775\n\nmid_root = root/2\nresult_by_self = mid_root*mid_root\nresult = result_by_self+num\nnew_result = sqrt(result)\nfinish = new_result - mid_root\n# print(finish)\n\n# *------------------------------------------------\n# *Calendar\n# TODO :\n# on se donne la liste des noms des mois, la liste des longueurs des mois, l'année, le premier jour de l'année sous la forme :\n# 0 pour lundi\n# 1 pour mardi\n# ...\n# 6 pour dimanche\npremier_jour = 0\nannee = 2023\nmois_j = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nmois_n = [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"jun\",\n \"jul\", \"aou\", \"sep\", \"oct\", \"nov\", \"dec\"]\nlist_j = [\"lu\", \"ma\", \"me\",\n \"je\", \"ve\", \"sa\", \"di\"]\n# *-----------------------------\n# modifier la liste des longueurs de mois pour le cas d'une année bisextile\nmois_bis = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n# *-------------------------------------\n# imprimer les 365 numéros de jours...\n# à la suite (1 2 3 4 ... 31 1 2 3 ... 28 ... 1 2 3 ... 31 1 2 ... 31)\n# sans chercher à grouper, juste les jours les uns après les autres, sur 365 lignes..\n# for j in range(len(mois_n)):\n# for i in range((mois_j[j])):\n# if i % 7 == 0:\n# print(end=\"\\n\")\n# print(i+1, \"\", end=\"\")\n# print(\"\", end=\"\\n\")\n# *-----------------------------------\n# même chose que ci-dessus, en revenant à la ligne tous les 7 jours :\n# print(4) : passe à la ligne\n# print(5, end=\"\") : ne passe pas à la ligne\n# le résultat ne ressemble toujouors pas à grand chose... mais on y arrive\n# *-----------------------------------\n# on peut maintenant ajouter un séparateur rudimentaire comme \"----------------------\"\n# ansi que le nom du mois, et une ligne d'entête : print(\"lun mar mer jeu ven sam dim\")\nfirst_day = 0\nfor j in range(len(mois_n)):\n print(\" \", mois_n[j])\n print(\" \".join(list_j), end=\"\\n\")\n if first_day > 0:\n for x in range(first_day):\n print(\" \", end=\"\")\n for i in range((mois_j[j])):\n # print(i+first_day)\n if (i+first_day) % 7 == 0:\n print(end=\"\\n\")\n if (i < 10):\n print(i+1, \" \", end=\"\")\n elif (i >= 10):\n print(i+1, \"\", end=\"\")\n print(\"\", end=\"\\n\")\n print('---------')\n # print(mois_j[j]-28)\n first_day = mois_j[j]-28\n# *-----------------------------------\n# comment démarrer un mois ?\n# on va \"offsetter\"\n# essayons juste avec un mois éprouvette de 31 jours\n# jours = [x+1 for x in range(31)]\n# print(jours)\n\n\noffset = 3 # le mois démarre un jeudi\n# lundi = 0 , mardi = 1 ... 
dimanche = 6\n# on peut utiliser format\n# *-----------------------------------\n# calcul de l'offset suivant du mois suivant\noffset = 3\njours = 31\n\n\n# print(new_offset)\n# *-----------------------------------\n# on reprend tout ça.\npremier_jour = 0\nmois_j = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nmois_n = [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"jun\",\n \"jul\", \"aou\", \"sep\", \"oct\", \"nov\", \"dec\"]\n# *-----------------------------------\n# on fait une fonction affiche_mois() qui prends en paramètres :\n# le numéro du mois (1 à 12)\n# l'offset\n# et qui renvoie le nouvel offset\n\n\ndef affiche_mois(indice, offset):\n return\n# *-----------------------------------\n# on utilise affiche mois pour afficher les mois de 1 à 12.\n","repo_name":"Pierre-OlivierB/ibm-python-day3","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2977567539","text":"import requests\nimport json\n\n# Get input currency from user.\ncurrent_code = input().lower()\n\n# Create a cache dictionary for used rates.\ncache = {'usd': 0, 'eur': 0}\n\n# Loop for continue checking.\nwhile True:\n target_code = input().lower()\n if not target_code:\n break\n money = int(input())\n\n # Get currency rates from server.\n response = requests.get(f\"http://www.floatrates.com/daily/{current_code}.json\")\n json_str = response.content.decode('utf-8')\n rates_json = json.loads(json_str)\n\n # Cache desired results\n rates = ['usd', 'eur']\n for r in rates:\n try:\n cache[r] = rates_json[r]['rate']\n except KeyError:\n continue\n\n # Convert money and print result.\n print(\"Checking the cache...\")\n if target_code in cache:\n print(\"Oh! It is in the cache!\")\n rate = cache[target_code]\n else:\n print(\"Sorry, but it is not in the cache!\")\n # Update cache with new code retrieved.\n rate = rates_json[target_code]['rate']\n cache[target_code] = rate\n \n conv_money = round(money * rate, 2)\n print(f\"You received {conv_money} {target_code.upper()}.\")\n\n\n\n","repo_name":"facufrau/beginner-projects-solutions","sub_path":"hyperskill_projects/currency_converter/currency_converter-6-6.py","file_name":"currency_converter-6-6.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"32831771211","text":"\"\"\"\nCreate Parser object that can Parse Question and Document\n\"\"\"\nimport spacy\nfrom spacy.tokens import Span\nfrom spacy.matcher import PhraseMatcher, Matcher\nimport inflect\nimport re\n\nclass Parser(object):\n def __init__(self, text, custom_pipe):\n self.text = text\n self.nlp = custom_pipe.nlp\n\n def docParse(self):\n \"\"\"\n To parse the whole documents but only for simple sentences at the moment.\n Return a list of triples of Subject-Verb-Object (SVO)\n \"\"\"\n text = self.text\n text = self.simplify(text)\n nlp = self.nlp\n full_doc = nlp(text)\n \n # Slit into sentences and find Simple sentences\n sent_doc_ls = list(sent for sent in full_doc.sents)\n spl_ls = self.simple_find(sent_doc_ls)\n doc_ls = list(nlp.pipe(spl_ls))\n\n print(\"Finding triples (Subject-Verb-Object) from your doc...\\n\")\n # Our triples will be (ent1, rel, ent2)\n triples = self.all_triples(doc_ls) \n return triples\n\n def questionParse(self):\n \"\"\"\n To parse question only. \n Return a set of entities ents_set and a set of relations rels_set\n \"\"\"\n text = self.text\n text = text.lower()\n nlp = self.nlp\n doc = nlp(text)\n print(\"Finding entities set and relations set...\\n\")\n ents_set = set(str(ent) for ent in doc.ents)\n rels_list = self.get_relation(doc)\n rels_set = set(str(rel[-1]) for rel in rels_list)\n return ents_set, rels_set\n\n def get_relation(self, doc):\n \"\"\"\n Parsing a doc object to find the Relations (\"key verbs\")\n Return a set of relations rels_set (, )\n \"\"\"\n nlp = self.nlp\n # Matcher class object \n matcher = Matcher(nlp.vocab)\n\n #define the pattern (both patterns will be looking for a VERB followed by a PREPOSITION)\n ROOT_pattern = [{'DEP':'ROOT'}, \n {'DEP':'prep','OP':\"?\"},\n {'DEP':'agent','OP':\"?\"},\n {'DEP':'acomp','OP':\"?\"},\n ] \n\n acl_pattern = [{'DEP':'acl'}, \n {'DEP':'prep','OP':\"?\"},\n {'DEP':'agent','OP':\"?\"},\n {'DEP':'acomp','OP':\"?\"},\n ]\n\n relations = []\n matcher.add(\"relations\", None, ROOT_pattern, acl_pattern)\n # After the matcher is added, let's run on our Doc to see what it can find\n matches = matcher(doc)\n\n # Store it in the relations list\n for match_id, start, end in matches:\n matched_span = doc[start:end]\n relation_tuple = (start, matched_span.lemma_)\n relations.append(relation_tuple)\n\n # Check if there is duplication, we will remove the duplication\n # Examples: \"determine\" and \"determine by\" will both be relations but we only need the longer one\n for start, relation1 in relations:\n if len(relation1.split()) != 1:\n continue\n else:\n # comparing our 1st relation to our 2nd relation \n for _, relation2 in relations:\n # if 2nd relation also 1 word, won't be a duplicate\n if len(relation2.split()) == 1:\n continue\n # if 1st relation is a substring of 2nd relation --> duplicate\n if relation2.find(relation1) != -1:\n relations.remove((start, relation1))\n break\n return relations\n\n def simplify(self, text):\n \"\"\"\n Remove all 'a', 'the' from the text since this is not important to build the KG.\n Also remove '\\n' and '=' which is used to format the sub-headings\n Also turn all to lowercase.\n Also remove all texts within brackets (those adding extra information)\n Also clean up 'he she we they this these those that' since we have yet found a way to parse earlier info.\n \"\"\"\n text = text.lower()\n to_replace_with_space = [' a ', ' the ', ' he ', ' she ', ' we ', ' they ', ' this ', ' that ', ' these ', ' those ']\n to_remove = ['\\n', '=']\n source = 
text\n source = re.sub(\" [\\(\\[].*?[\\)\\]]\", \"\", source)\n source = re.sub(\"[=].*? [=]\", \"\", source)\n for dummy in to_remove:\n source = source.replace(dummy, '')\n # clean up period as this punctuation get a bit messy after all the previous replacements\n n = 3\n for i in range(n):\n dummy = '.' + ' '*(n-i)\n source = source.replace(dummy, '.')\n source = source.replace('.', '. ')\n for dummy in to_replace_with_space:\n source = source.replace(dummy, ' ')\n return source\n \n def simple_find(self, doc_ls):\n \"\"\"\n Only to find Simple sentences to parse.\n Currently ignore Compound and Complex sentences\n Return a list of string object for Simple sentence spl_text_ls.\n \"\"\"\n spl_text_ls = []\n\n for doc in doc_ls:\n is_simple = False\n nsubj_tok = [tok for tok in doc if tok.dep_ == \"nsubj\" or tok.dep_ == \"nsubjpass\"]\n mark_tok = [tok for tok in doc if tok.dep_ == \"mark\"]; \n\n if len(nsubj_tok) == 1 and len(mark_tok) == 0:\n is_simple = True\n\n if is_simple == True:\n spl_text_ls.append(doc.string.strip())\n\n return spl_text_ls\n\n def all_triples(self, doc_ls):\n \"\"\"\n Find all triples from the document object\n Return a list of triples\n \"\"\"\n triples = []\n for doc in doc_ls:\n ent_rel_list = self.ordered_entity_relation(doc)\n triple = self.find_triple(ent_rel_list)\n triples += triple \n return triples\n\n def ordered_entity_relation(self, doc):\n '''\n Parse a `doc` object and return entities and the relations between them in order\n '''\n ent_list = []; relation_list = []\n for ent in doc.ents:\n ent_tuple = (ent.end - 1, ent.lemma_, \"ents\")\n ent_list.append(ent_tuple)\n\n relations = self.get_relation(doc)\n for start, relation in relations:\n relation_tuple = (start, relation, \"rels\")\n relation_list.append(relation_tuple)\n\n combined_list = ent_list + relation_list\n ordered_list = sorted(combined_list, key=lambda x: x[0])\n # check ordered list for tuple of ents follow by rels:\n # if ents.end >= rels.start, False, remove relations\n n = len(ordered_list)\n remove_list = []\n for i in range(n-1):\n tuple1 = ordered_list[i]\n tuple2 = ordered_list[i+1]\n if tuple1[-1] == 'ents' and tuple2[-1] == 'rels':\n if tuple1[0] >= tuple2[0]:\n remove_list.append(tuple2)\n for trash in remove_list:\n ordered_list.remove(trash)\n ordered_list = sorted(ordered_list, key=lambda x: x[0])\n return ordered_list\n\n\n def find_triple(self, ent_rel_list):\n '''\n Filter only entities that have relation between them. And return a list of tuples of (ent1, rel, ent2).\n This is hardecoded and only work well for simple sentence.\n '''\n l = len(ent_rel_list)\n span = 3\n triple = []\n\n for i in range(l-span+1):\n ind1 = ent_rel_list[i]\n ind2 = ent_rel_list[i + 1]\n ind3 = ent_rel_list[i + 2]\n\n if ind1[-1] == 'ents' and ind2[-1] == 'rels' and ind3[-1] == 'ents':\n triple_tuple = (ind1[1], ind2[1], ind3[1])\n triple.append(triple_tuple)\n\n return triple","repo_name":"TNBL265/NLPQueryBot","sub_path":"QueryParserApp/KeywordsParser.py","file_name":"KeywordsParser.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10571459142","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ScalarModel(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(ScalarModel, self).__init__()\n self.loss = nn.MSELoss(reduction=\"none\")\n\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, hidden_size)\n self.fc3 = nn.Linear(hidden_size, 1)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n","repo_name":"Kabumba/PhysicsLearner","sub_path":"ScalarModel.py","file_name":"ScalarModel.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16835500747","text":"from __future__ import print_function, division\n\nimport numpy as np\nimport unittest\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest, skip_check_grad_ci\nimport paddle\npaddle.enable_static()\n\n\nclass TestNPUReciprocal(OpTest):\n def setUp(self):\n self.op_type = \"reciprocal\"\n self.set_npu()\n self.init_dtype()\n\n np.random.seed(1024)\n x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)\n out = np.reciprocal(x)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad_with_place(\n self.place, ['X'], 'Out', max_relative_error=0.01)\n\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestNPUReciprocalFp64(TestNPUReciprocal):\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n\n def init_dtype(self):\n self.dtype = np.float64\n\n\n@skip_check_grad_ci(\n reason=\"The backward test is not supported for float16 type on NPU.\")\nclass TestNPUReciprocalFp16(TestNPUReciprocal):\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n self.__class__.no_need_check_grad = True\n\n def init_dtype(self):\n self.dtype = np.float16\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"EnnSou/ooss-paddle2.3","sub_path":"python/paddle/fluid/tests/unittests/npu/test_reciprocal_op_npu.py","file_name":"test_reciprocal_op_npu.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"33456093454","text":"from flask import Blueprint, jsonify\n\nfrom services.ftx import list_markets, get_price\n\nftx_bp = Blueprint('ftx_bp', __name__)\n\n\n@ftx_bp.route('')\ndef list_market():\n stocks = list_markets()\n return jsonify(stocks['result'])\n\n\n@ftx_bp.route('')\ndef get_market(market):\n market = market.replace('---', '/')\n stock = get_price(market)\n return jsonify(stock['result'])\n","repo_name":"StephaneConq/TradingToolBack","sub_path":"blueprints/ftx.py","file_name":"ftx.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15574447575","text":"import numpy as np\nimport copy\nimport math\nimport map_processor\n\n\nclass TrajectoryProcessor(map_processor.MapProcessor):\n def __init__(self, n_x_lattice):\n super().__init__(n_x_lattice)\n self.transition_mat = None\n \n def generate(self, len_traj):\n if self.transition_mat is None:\n print(\"you must initially load\")\n \n traj = []\n \n start = self._choice()\n \n for i in range(len_traj):\n \n traj.append(start)\n \n cur_posi = np.zeros((1, self.size))\n cur_posi[0, start] = 1\n dist = np.dot(cur_posi, self.transition_mat)\n \n if dist.sum() == 0:\n continue\n \n next_posi = np.random.choice(range(self.size), p=dist[0])\n \n start = next_posi\n return traj \n \n def compute_possible_set(self, prior, delta=0):\n \n if delta == 0:\n \n state_nos = np.where(prior>0)[0]\n n_possible_loc = len(state_nos)\n #print(\"n_possible_loc\", n_possible_loc)\n \n return state_nos\n \n else:\n \n state_nos, delta_X = self.compute_delta_set(prior, delta)\n return state_nos\n \n def update_graph_mat(self, possible_states):\n updated_graph_mat = copy.deepcopy(self.graph_mat)\n for state in range(len(updated_graph_mat)):\n if state not in possible_states:\n updated_graph_mat[state,:] = 0\n updated_graph_mat[:, state] = 0\n \n return updated_graph_mat\n \n def compute_delta_set(self, prior, delta):\n \n temp_prior = copy.deepcopy(prior)\n \n while (np.sum(temp_prior) > 1-delta):\n\n temp_prior[temp_prior == 0] = float(\"inf\")\n min_ind = np.argmin(temp_prior)\n min_prob = prior[min_ind]\n temp_prior[min_ind] = 0\n temp_prior[temp_prior == float(\"inf\")] = 0\n \n if np.sum(temp_prior != 0) == 0:\n break\n \n if prior[min_ind] > 0:\n temp_prior[min_ind] = min_prob\n \n n_possible_loc = np.sum(temp_prior>0)\n \n \n deltaX = np.zeros((n_possible_loc, self.size))\n \n state_nos = np.where(temp_prior>0)[0]\n for i, state_no in enumerate(state_nos):\n deltaX[i,state_no] = 1\n \n return state_nos, deltaX\n \n \n def compute_posterior_distribution(self, prior):\n if self.transition_mat is None:\n print(\"you must initially load\")\n \n posterior = np.dot(prior, self.transition_mat)\n \n if posterior.sum() == 0:\n print(\"end\")\n \n return posterior\n \n def traj_to_states(self, traj):\n state_traj = []\n for latlon in traj:\n if not self._is_in_from_latlon(latlon):\n continue\n state = self._find_nearest_state_from_latlon_in_all_states(latlon)\n state_traj.append(state)\n return state_traj\n \n def trajs_to_state_trajs(self, trajs):\n state_trajs = []\n for traj in trajs:\n state_traj = self.traj_to_states(traj)\n if len(state_traj) != 0:\n state_trajs.append(state_traj)\n return state_trajs\n \n def make_transmat_from_state_trajs(self, state_trajs):\n transition_mat = np.zeros((self.n_state, self.n_state))\n for state_traj in state_trajs:\n pre_state = state_traj[0]\n for state in state_traj[1:]:\n transition_mat[pre_state, state] += 1\n pre_state = state\n self.transition_mat = self._normalize(transition_mat)\n \n def make_transmat_from_trajs(self, trajs):\n transition_mat = np.zeros((self.n_state, self.n_state))\n for traj in trajs:\n pre_state = self._find_nearest_state_from_latlon_in_all_states(traj[0])\n for latlon in traj:\n if self._is_in_from_latlon(latlon):\n state = self._find_nearest_state_from_latlon_in_all_states(latlon)\n transition_mat[pre_state, state] += 1\n \n pre_state = state\n else:\n break \n self.transition_mat = self._normalize(transition_mat)\n \n def _normalize(self, transition_mat):\n transition_mat = copy.deepcopy(transition_mat)\n for i, 
transition_prob in enumerate(transition_mat):\n sum_ = np.sum(transition_prob)\n if sum_ != 0:\n transition_mat[i,:] = transition_mat[i,:]/sum_\n return transition_mat\n \n \n def load_trans_mat(self, path_transition_mat, traj, threashold=1e-4):\n \n transition_mat = np.loadtxt(path_transition_mat)\n self.transition_mat = self._threash(transition_mat, threashold)\n self._modify_for_test_traj(traj)\n self.size = len(self.transition_mat)\n \n \n def _threash(self, transition_mat, threashold):\n transition_mat = copy.deepcopy(transition_mat)\n transition_mat = transition_mat * (transition_mat >= threashold)\n transition_mat = self._normalize(transition_mat)\n return transition_mat\n \n \n def _modify_for_test_traj(self, test_traj):\n for i in range(len(test_traj) - 1):\n pre_loc = test_traj[i]\n pos_loc = test_traj[i+1]\n if pre_loc != pos_loc:\n self.transition_mat[pre_loc][pos_loc] += 0.1","repo_name":"tkgsn/PGLP","sub_path":"src/trajectory_processor.py","file_name":"trajectory_processor.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"5329192380","text":"from kapteyn import maputils\nimport numpy\nfrom service import *\n\nfignum = 35\nfig = plt.figure(figsize=figsize)\nframe = fig.add_axes(plotbox)\ntitle = r\"\"\"COBE quadrilateralized spherical cube projection (CSC) oblique with:\n$(\\alpha_p,\\delta_p) = (0^\\circ,30^\\circ)$, $\\phi_p = 75^\\circ$ also: \n$(\\phi_0,\\theta_0) = (0^\\circ,90^\\circ)$. (Cal. fig.34d)\"\"\"\nheader = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,\n 'CTYPE1' : 'RA---CSC',\n 'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,\n 'CTYPE2' : 'DEC--CSC',\n 'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,\n 'LONPOLE': 75.0,\n 'PV1_1' : 0.0, 'PV1_2' : 90.0,\n }\nX = numpy.arange(0,370.0,30.0)\nY = numpy.arange(-60,90,30.0)\nf = maputils.FITSimage(externalheader=header)\nannim = f.Annotatedimage(frame)\ngrat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),\n startx=X, starty=Y)\ngrat.setp_lineswcs0(0, lw=2)\ngrat.setp_lineswcs1(0, lw=2)\n# Take border from non-oblique version\nheader['CRVAL2'] = 0.0\ndel header['PV1_1']\ndel header['PV1_2']\ndel header['LONPOLE']\nborder = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),\n skipx=True, skipy=True)\nperimeter = getperimeter(border)\nlon_world = list(range(0,360,30))\nlat_world = [-60, -30, 30, 60]\nlabkwargs0 = {'color':'r', 'va':'center', 'ha':'left'}\nlabkwargs1 = {'color':'b', 'va':'top', 'ha':'center'}\ndoplot(frame, fignum, annim, grat, title,\n lon_world=lon_world, lat_world=lat_world,\n labkwargs0=labkwargs0, labkwargs1=labkwargs1,\n perimeter=perimeter, markerpos=markerpos)\n","repo_name":"kapteyn-astro/kapteyn","sub_path":"doc/source/EXAMPLES/allskyf35.py","file_name":"allskyf35.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"12489151931","text":"'''\nCreate attention heatmaps per paragraph sentence for all images,\nheatmaps are created with objects which were linked with noun phrase,\none can choose which linking method to take and visualise (check parameters in the main loop).\n'''\n\nimport argparse\nimport base64\nimport json\nimport tqdm\nimport numpy as np\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport cv2\n\nimport spacy\nspacy_nlp = spacy.load('en_core_web_sm')\n\n\n\ndef image_box_resize(image,\n bboxes):\n\n \"\"\"Resize bounding boxes from original coordinates to the scaled ones.\n Args:\n image: original image\n bboxes: (x, y, xmax, ymax) coordinates of bounding boxes\n Returns:\n img: original image, resized (scaled)\n boxes_scaled: newly scaled bounding boxes\n \"\"\"\n\n boxes_scaled = []\n image_to_show = cv2.imread(image, 3)\n y_dim = image_to_show.shape[0]\n x_dim = image_to_show.shape[1]\n target_size = 2000\n x_scale = target_size / x_dim\n y_scale = target_size / y_dim\n img = cv2.resize(image_to_show, (target_size, target_size))\n img = np.array(img)\n for box in bboxes:\n origleft, origtop, origright, origbottom = box[0], box[1], box[2], box[3]\n x_scaled = int(np.round(origleft * x_scale))\n y_scaled = int(np.round(origtop * y_scale))\n xmax = int(np.round(origright * x_scale))\n ymax = int(np.round(origbottom * y_scale))\n boxes_scaled.append([x_scaled, y_scaled, xmax, ymax])\n return (\n img,\n boxes_scaled\n )\n\n\n\n\ndef image_vis(this_image,\n boxes_to_visualise,\n all_boxes,\n sent_id,\n image_path,\n save_path):\n\n \"\"\"Visualisation of linked bounding boxes on top of the image.\n Args:\n this_image: image id\n boxes_to_visualise: ids of linked bounding boxes\n all_boxes: coordinates of original bounding boxes\n sent_id: current sentence id in the paragraph\n image_path: path to ADE20k images\n Returns:\n saves heatmaps per sentence per image\n \"\"\"\n\n # open correct image; val image ids are > 100000\n this_image = int(this_image)\n if this_image > 100000:\n this_image = this_image - 100000\n val_this_image = \"%08d\" % (this_image,)\n val_this_image = f'ADE_val_{str(val_this_image)}.jpg'\n image = image_path + str(val_this_image)\n else:\n train_this_image = \"%08d\" % (this_image,)\n train_this_image = f'ADE_train_{str(train_this_image)}.jpg'\n image = image_path + str(train_this_image)\n boxes_filtered = [all_boxes[k] for k in boxes_to_visualise]\n # transform last two values in each box into xmax and ymax\n transformed_boxes = []\n for (x_coord, y_coord, width, height) in boxes_filtered:\n xmax = x_coord + width\n ymax = y_coord + height\n transformed_boxes.append([x_coord, y_coord, xmax, ymax])\n # adjust bounding boxes based on the resized image\n img_rescaled, boxes_rescaled = image_box_resize(image, transformed_boxes)\n # controlling how stretched the bounding box should be\n figure(figsize=(12, 18), dpi=80)\n plt.axis('off')\n plt.tight_layout()\n img = Image.fromarray(img_rescaled)\n white_img = 255 * np.ones((img.size[1], img.size[0] , 3), np.uint8)\n plt.imshow(white_img)\n for bbox in boxes_rescaled:\n if bbox[0] == 0:\n bbox[0] = 1\n if bbox[1] == 0:\n bbox[1] = 1\n plt.gca().add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n (bbox[2] - bbox[0]) - bbox[0],\n (bbox[3] - bbox[1]) - bbox[1], fill=True,\n linewidth=2, alpha=1, color='#00008B')\n )\n\n sid = str(int(sent_id) + 1)\n plt.savefig(f'{save_path}/s{sid}-' + str(this_image) + '.jpg')\n plt.close()\n\n\nif __name__ == '__main__':\n\n parser 
= argparse.ArgumentParser()\n parser.add_argument('-f',\n '--feat_path',\n help='Path to the image features',\n default='/scratch/nikolai/tmm_dataset/frcnn_tellmemore/',\n required=False)\n parser.add_argument('-i',\n '--image_path',\n help='Path to the tell me more images',\n default='/scratch/nikolai/tmm_dataset/tell_me_more/',\n required=False)\n parser.add_argument('-l',\n '--linking_method_type',\n help='Choose linking method to visualise;\\\n full set can be found in res_formatted.json',\n default='L-(A)(N)-1-M',\n required=False)\n parser.add_argument('-m',\n '--filter_method',\n help='Pick a filtering method that was used with linking,\\\n they should be identical',\n default='(A)(N)',\n required=False)\n parser.add_argument('-r',\n '--results_file',\n help='Path to the file with formatted results of linking',\n default='./res_formatted_run-20220813-162422.json',\n required=False)\n parser.add_argument('-o',\n '--output_path',\n help='Path to save heatmaps for all images for the specific sentence id',\n default='./where',\n required=False)\n args = vars(parser.parse_args())\n\n with open(args['results_file'], 'r', encoding='UTF-8') as a:\n links = json.load(a)\n\n for num, (image_id, v) in tqdm.tqdm(enumerate(links.items())):\n feat_file = args['feat_path'] + str(image_id) + '.npz'\n feat_loaded = np.load(feat_file)\n boxes = np.frombuffer(base64.b64decode(feat_loaded['boxes']),\n dtype=np.float32).reshape(36, 4).copy()\n nps = v['NPS-OBJ']\n objs = v[f'{args[\"linking_method_type\"]}']\n # per sentence\n for sentid in range(5):\n nouns = [(iid, i[1]) for iid, i in enumerate(nps) if i[0] == sentid]\n noun_ids = [iid for iid, i in nouns]\n objids = []\n for p in noun_ids:\n if isinstance(objs[str(p)], list):\n for pp in objs[str(p)]:\n if pp != 'NONE':\n objids.append(pp)\n else:\n if objs[str(p)] != 'NONE':\n objids.append(objs[str(p)])\n boxes_to_show = [k for (k, kk) in v[f'{args[\"filter_method\"]}']]\n boxes_to_show_for_sent = [i for i in boxes_to_show if i in objids]\n image_vis(image_id,\n boxes_to_show_for_sent,\n boxes,\n sentid,\n args['image_path'],\n args['output_path'])\n","repo_name":"GU-CLASP/discourse-and-decodings","sub_path":"scripts/where_vis.py","file_name":"where_vis.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"39370779376","text":"import logging\nfrom typing import Callable, Type\n\nfrom tenacity import Retrying, RetryError\nfrom tenacity.stop import stop_after_attempt\nfrom tenacity.wait import wait_exponential\n\nfrom app.domain import events, commands\nfrom app.service_layer import handlers, unit_of_work\n\n\nMessage = commands.Command | events.Event\nlogger = logging.getLogger(__name__)\n\n\ndef handle(message: Message, uow: unit_of_work.AbstractUnitOfWork):\n results = []\n queue = [message]\n \n while queue:\n\n if isinstance(message, events.Event):\n handle_event(message, queue=queue, uow=uow)\n\n elif isinstance(message, commands.Command):\n cmd_result = handle_command(message, queue=queue, uow=uow)\n results.append(cmd_result)\n\n else:\n raise Exception(f\"{message} was not an Event or Command\")\n\n return results\n\n\ndef handle_event(event: events.Event, queue: list[Message], uow: unit_of_work.AbstractUnitOfWork):\n for handler in EVENT_HANDLERS[type(event)]:\n try:\n for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_exponential()):\n\n with attempt:\n logger.debug(f\"handling event {event} with handler {handler}\")\n handler(event, uow=uow)\n queue.extend(uow.collect_new_events())\n\n except RetryError as retry_failure:\n logger.exception(f\"Не получилось обработать событие {retry_failure.last_attempt.attempt_number} раз, отказ !\")\n continue\n\n\ndef handle_command(command: command.Command, queue: list[Message], uow: unit_of_work.AbstractUnitOfWork):\n logger.debug(f\"handling command {command}\")\n \n try:\n handler = COMMAND_HANDLERS[type(command)]\n result = handler(command, uow=uow)\n queue.extend(uow.collect_new_events())\n return result\n except Exception:\n logger.exception(f\"Exception handling command {command}\")\n raise\n\n\ndef send_out_of_stock_notification(event: events.OutOfStock):\n email.send_mail(\n \"stock@made.com\",\n f\"Артикула {event.sku} нет в наличии.\",\n )\n\n\nEVENT_HANDLERS: dict[Type[events.Event], list[Callable]] = {\n events.OutOfStock: [send_out_of_stock_notification],\n}\n\n\nCOMMAND_HANDLERS: dict[Type[commands.Command], Callable] = {\n commands.Allocate: handlers.allocate,\n commands.CreateBatch: handlers.add_batch,\n commands.ChangeBatchQuantity: handlers.change_batch_quantity,\n}\n\n","repo_name":"maximovd/architecture-patterns-python","sub_path":"app/service_layer/messagebus.py","file_name":"messagebus.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27611912436","text":"from email import message\nimport rsa\n\npublicKey, privateKey = rsa.newkeys(512)\n\nmessage = \"Hello Promesa\"\n\nencMessage = rsa.encrypt(message.encode(), publicKey)\n\nprint(\"Original String: \", message)\nprint(\"Encrypted String: \", encMessage)\n\ndecMessage = rsa.decrypt(encMessage, privateKey).decode\n\n","repo_name":"iampromesa/PYTHON-PROJECTS","sub_path":"RELEARNING/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"13926488868","text":"import main\nfrom interface.types import type_extension\nfrom interface.standard import Save_file, Validate_format\nfrom fastapi import UploadFile, File\nfrom pathlib import Path\nimport os\nimport shutil\nfrom helpers.text import convert_pdf_to_text, convert_docx_to_text, convert_pptx_to_text, convert_rtf_to_text, convert_text_to_object, clean_text\n\ndef save_file(file: UploadFile = File(...)) -> Save_file:\n is_valid_format = validate_format(str(file.filename))\n if is_valid_format.status:\n file_destination = Path(main.url_location, \"static\", \"documents\", file.filename)\n if not os.path.exists(file_destination):\n os.makedirs(os.path.dirname(file_destination), exist_ok=True)\n with open(file_destination, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n return Save_file(status= True, url_file= file_destination)\n return Save_file(status= False, url_file= '')\n\ndef validate_format(extension_name:str) -> Validate_format:\n status = False\n type_file = ''\n for ext in type_extension:\n if extension_name.endswith(type_extension[ext]):\n status = True\n type_file = type_extension[ext]\n break\n return Validate_format(status=status, type_file= type_file)\n\ndef file_converter(url_file:Path):\n file = validate_format(str(url_file))\n if file.status:\n if file.type_file == type_extension['PDF']:\n text = convert_pdf_to_text(url_file)\n elif file.type_file == type_extension['WORD']:\n text = convert_docx_to_text(url_file)\n elif file.type_file == type_extension['PWP']:\n text = convert_pptx_to_text(url_file)\n elif file.type_file == type_extension['RTF']:\n text = convert_rtf_to_text(url_file)\n return convert_text_to_object(clean_text(text))","repo_name":"quiku2021/wiku","sub_path":"helpers/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7647663089","text":"\"\"\"Custom CloudFormation Resource to describe details of an existing SMStudio domain\n\nYou might want to do this if you:\n- Know a SageMaker Studio domain is present but don't know its ID\n- Need to know some other attribute not listed in AWS::SageMaker::Domain outputs\n\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-domain.html\n\nThis resource will fail if no SMStudio domain is present in the region, or a DomainID is passed which does\nnot exist.\n\"\"\"\n\n# Python Built-Ins:\nimport logging\nimport traceback\n\n# External Dependencies:\nimport boto3\nimport cfnresponse\n\nsmclient = boto3.client(\"sagemaker\")\n\n# The set of DescribeDomain response props that will be passed through to cfn output:\nSUPPORTED_PROPS = {\n \"DomainArn\",\n \"DomainId\",\n \"DomainName\",\n \"HomeEfsFileSystemId\",\n \"SingleSignOnManagedApplicationInstanceId\",\n \"Status\",\n \"AuthMode\",\n \"DefaultUserSettings\",\n \"AppNetworkAccessType\",\n \"HomeEfsFileSystemKmsKeyId\",\n \"SubnetIds\",\n \"Url\",\n \"VpcId\",\n \"KmsKeyId\",\n}\n\ndef lambda_handler(event, context):\n try:\n request_type = event[\"RequestType\"]\n if request_type == \"Create\":\n handle_create(event, context)\n elif request_type == \"Update\":\n handle_update(event, context)\n elif request_type == \"Delete\":\n handle_delete(event, context)\n else:\n cfnresponse.send(\n event,\n context,\n cfnresponse.FAILED,\n {},\n error=f\"Unsupported CFN RequestType '{request_type}'\",\n )\n except Exception as e:\n logging.error(\"Uncaught exception in CFN custom resource handler - reporting failure\")\n traceback.print_exc()\n cfnresponse.send(\n event,\n context,\n cfnresponse.FAILED,\n {},\n error=str(e),\n )\n raise e\n\n\nclass NoStudioDomains(RuntimeError):\n pass\n\n\ndef infer_domain_id():\n domains_resp = smclient.list_domains()\n if \"NextToken\" in domains_resp:\n logging.warning(\n f\"Ignoring NextToken on sagemaker:ListDomains response - pagination not implemented\"\n )\n domain_ids = [d[\"DomainId\"] for d in domains_resp[\"Domains\"]]\n\n if not (len(domain_ids) > 0):\n # If the domain has been deleted, the user must necessarily have been deleted too!\n raise NoStudioDomains(f\"No SageMaker Studio domain exists in this region!\")\n elif len(domain_ids) > 1:\n logging.warning(\n f\"Found {len(domain_ids)} Studio domains in this region: assuming first is target. 
{domain_ids}\"\n )\n return domain_ids[0]\n\n\ndef handle_create(event, context):\n logging.info(\"**Received create request\")\n domain_id = event[\"ResourceProperties\"].get(\"DomainId\")\n if domain_id is None:\n logging.info(\"Inferring domain ID\")\n domain_id = infer_domain_id()\n\n logging.info(f\"Querying domain {domain_id}\")\n desc = smclient.describe_domain(DomainId=domain_id)\n result = { k: desc[k] for k in desc.keys() if k in SUPPORTED_PROPS }\n cfnresponse.send(\n event,\n context,\n cfnresponse.SUCCESS,\n result,\n physicalResourceId=domain_id,\n )\n\n\ndef handle_delete(event, context):\n logging.info(\"**Received delete event\")\n logging.info(\"Descriptive resource - nothing to delete\")\n cfnresponse.send(\n event,\n context,\n cfnresponse.SUCCESS,\n {},\n physicalResourceId=event[\"PhysicalResourceId\"],\n )\n\n\ndef handle_update(event, context):\n \"\"\"Literally same process as create, for now - so as to always re-describe in case of changes\n \"\"\"\n logging.info(\"**Received update event\")\n domain_id = event[\"ResourceProperties\"].get(\"DomainId\")\n if domain_id is None:\n logging.info(\"Inferring domain ID\")\n domain_id = infer_domain_id()\n\n logging.info(f\"Querying domain {domain_id}\")\n desc = smclient.describe_domain(DomainId=domain_id)\n result = { k: desc[k] for k in desc.keys() if k in SUPPORTED_PROPS }\n cfnresponse.send(\n event,\n context,\n cfnresponse.SUCCESS,\n result,\n physicalResourceId=domain_id,\n )\n","repo_name":"apac-ml-tfc/intro-to-mlops","sub_path":".infrastructure/fn-describedomain/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"5564871497","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom . import serializers\nfrom django.contrib.auth.models import User\nfrom game.models import Game\nfrom accounts.models import Profile\nfrom . import handler\nfrom django.contrib.auth import authenticate, login\nimport uuid\nfrom .models import Token\n\ndef random__hash():\n return uuid.uuid4().hex\n\n@api_view(['GET'])\ndef index(request):\n f = open('api_methods.txt')\n file = f.read().split('\\n')\n f.close()\n obj = {'info': file}\n return Response(obj)\n\n@api_view(['GET'])\ndef list_users(request):\n users = User.objects.all()\n serializer = serializers.UserSerializer(users, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef list_games(request):\n queryset = Game.objects.all()\n serializer = serializers.GameSerializer(queryset, many=True)\n return Response(serializer.data)\n\n####\n###########TOKENS\n####\n\n@api_view(['POST'])\ndef get_new_token(request):\n username = request.data.get('username')\n password = request.data.get('password')\n print('username:',username,'password:',password)\n user = authenticate(request,username=username, password=password)\n if not user:\n return Response({'error': 2, 'message': 'invalid credential'})\n #login(request, user)\n token = Token.objects.create(user=user,hash=random__hash(),scope=1)\n obj = {\n 'username': user.username,\n 'token': token.hash\n }\n return Response(obj)\n\n@api_view(['GET'])\ndef delete_token(request,pk):\n try:\n token = Token.objects.get(hash__exact = pk)\n token.delete()\n except:\n return Response({'error_code': 1,'message':'invalid token'})\n\n@api_view(['GET'])\ndef token_status(request,pk):\n try:\n token = Token.objects.get(hash__exact = pk)\n except:\n return Response({'error_code': 1, 'message': 'invalid token'})\n return Response({'success':'token is active','owner':token.user.username})\n\n@api_view(['POST'])\ndef token_list(request):\n username = request.data.get('username')\n passw = request.data.get('password')\n u = authenticate(request,username=username, password=passw)\n if not u:\n return Response('Not Authenticated')\n user_tokens = []\n for token in Token.objects.all():\n if token.user.username == u.username:\n user_tokens.append(token.hash)\n # todo: Если у него вообще нет токенов\n answer = {'success': 'Token got', 'tokens': user_tokens}\n return Response(answer)\n\n#####\n################## IN-GAME\n#####\n\n@api_view(['GET'])\ndef game_by_id(request,id):\n game = Game.objects.get(id=id)\n serializer = serializers.GameSerializer(game)\n game_obj = handler.read_game(id)\n new_players = []\n for player_json in game_obj['players']:\n new_player_obj = User.objects.get(username__exact = player_json['username'])\n serializer2 = serializers.UserSerializer(new_player_obj)\n new_players.append(serializer2.data)\n game_obj['players'] = new_players\n return Response(game_obj)\n\n@api_view(['POST'])\ndef create_game(request):\n try: \n user_token = request.data.get('token')\n except:\n return Response({\"error_code\":3,'message':'Token required'})\n try:\n token = Token.objects.get(hash__exact = user_token)\n except:\n return Response({'error_code': 1,'message': 'Invalid token'}) \n \n owner = User.objects.get(id = token.user.id)\n game = Game.objects.create(game_owner=owner.id,players=owner.username)\n in_game_players = []\n for player_name in list(game.players.split(\" \")):\n player = User.objects.get(username__exact = player_name)\n p = 
serializers.UserSerializer(player)\n in_game_players.append(p.data)\n json_obj = {\n 'PokerFold: Game': \"version 0.01\",\n 'id':game.id,\n 'status':game.status,\n 'owner':game.game_owner,\n 'players': in_game_players,\n }\n handler.write_game(game.id,json_obj)\n return Response({'success':'game created', 'game_id':game.id,'game_owner': game.game_owner})\n\n@api_view(['POST'])\ndef join_the_game(request):\n try:\n game = Game.objects.get(id = request.data.get('gameid'))\n except Game.DoesNotExist:\n return Response({'error_code':4,'message': 'That game never existed','requested_game':request.data.get('gameid')})\n \n # todo: check that the game is not already full\n user_token = request.data.get('token')\n if user_token is None:\n return Response({'error_code': 3, 'message': 'Token required'})\n \n try:\n token = Token.objects.get(hash = user_token)\n except Token.DoesNotExist:\n return Response({'error_code':1, 'message':'invalid token'})\n\n game_file = handler.read_game(request.data.get('gameid'))\n new_player_obj = User.objects.get(id = token.user.id)\n serializer2 = serializers.UserSerializer(new_player_obj)\n # todo: check whether the player has already joined the game\n game_file['players'].append(serializer2.data)\n handler.write_game(request.data.get('gameid'),game_file)\n return Response({'success':'you are in the game'})\n\n@api_view(['POST'])\ndef leave_the_game(request):\n try:\n game = Game.objects.get(id = request.data.get('gameid'))\n except Game.DoesNotExist:\n return Response({'error_code':4,'message': 'That game never existed','requested_game':request.data.get('gameid')})\n\n user_token = request.data.get('token')\n if user_token is None:\n return Response({'error_code': 3, 'message': 'Token required'})\n\n try:\n token = Token.objects.get(hash = user_token)\n except Token.DoesNotExist:\n return Response({'error_code':1, 'message':'invalid token'})\n\n game_file = handler.read_game(request.data.get('gameid'))\n username = User.objects.get(id = token.user.id).username\n # todo: check that the player was actually in the game\n for i in range(len(game_file['players'])):\n if game_file['players'][i]['username'] == username:\n game_file['players'].pop(i)\n break\n handler.write_game(request.data.get('gameid'),game_file)\n return Response({'success':'you left the game'})\n","repo_name":"Icawi/PokerFold","sub_path":"api/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18089567981","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nimport drone_tfrecords as tfr\nimport drone_input\n\nBATCH_SIZE = 100 # read how many tfrecords per batch\nMAX_STEPS = 3 # display how many batches\nDISPLAY_PER_BATCH = 2 # display images per batch\nNEW_IMAGE_SIZE = 101 # for viewing resized images\n\n\ndef check_tfrs(data_dir, max_steps, batch_size, type):\n with tf.Session() as sess:\n images, heights, widths, depths, label_ids, label_txts, filenames = drone_input.input_pipeline(\n data_dir, batch_size, type, transform=None)\n\n coord = tf.train.Coordinator()\n # Note: QueueRunner created in drone_input.py\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n try:\n step = 0\n while step < max_steps and not coord.should_stop():\n images_r, heights_r, widths_r, depths_r, label_ids_r, label_txts_r, filenames_r = sess.run(\n [\n images, heights, widths, depths, label_ids, label_txts,\n filenames\n ])\n\n anchor = BATCH_SIZE // DISPLAY_PER_BATCH\n for i in range(len(images_r)):\n if (i + 1) % anchor == 0:\n print('height: %d, width: %d, depth: %d' %\n (heights_r[i], widths_r[i], depths_r[i]))\n print('label_id: %s, label_txt: %s, filename: %s' %\n (label_ids_r[i], label_txts_r[i], filenames_r[i]))\n print('label: %d' % (np.argmax(label_ids_r[i])) ) \n #print(images_r[i].size)\n img = images_r[i].reshape(\n [heights_r[i], widths_r[i], depths_r[i]])\n plt.imshow(np.around(img).astype(np.uint8))\n plt.show()\n\n if (heights_r[i] != NEW_IMAGE_SIZE or widths_r[i] != NEW_IMAGE_SIZE):\n re_image = tf.image.resize_images(images_r[i].reshape(\n [heights_r[i], widths_r[i], depths_r[i]]),\n NEW_IMAGE_SIZE, NEW_IMAGE_SIZE)\n img2 = sess.run(re_image)\n plt.imshow(np.around(img2).astype(np.uint8))\n plt.show()\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done check for %d samples' % (step * BATCH_SIZE))\n\n finally:\n # When done, ask the threads to stop\n coord.request_stop()\n\n coord.join(threads)\n\n\ndef run():\n tfr_dir = os.path.join(os.getcwd(), 'drone_data', 'tfrecord')\n print('Check training set: ')\n check_tfrs(tfr_dir, MAX_STEPS, BATCH_SIZE, drone_input.DataTypes.train)\n print('Check validation set: ')\n check_tfrs(tfr_dir, MAX_STEPS, BATCH_SIZE,\n drone_input.DataTypes.validation)\n print('Check testing set: ')\n check_tfrs(tfr_dir, MAX_STEPS, BATCH_SIZE, drone_input.DataTypes.test)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"winston-li/tensorflow_playground","sub_path":"python/drone/display_tfrecords.py","file_name":"display_tfrecords.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"69811573585","text":"# -*- coding:utf-8 -*-\n'''\n求1-N之间所有整数中1出现的次数之和\n'''\nclass Solution:\n def NumberOf1Between1AndN_Solution(self, n):\n # write code here\n return countOne(str(n))\ndef countOne(n):\n if int(n) == 0:\n return 0\n if len(n) == 1:\n return 1\n res = count(n)\n return res + countOne(n[1:])\n\ndef count(n):\n h = int(n[0])\n tail = int(n[1:])\n l = len(n)\n if h ==1:\n oh = tail+1\n else:\n oh = 10**(l-1)\n ot = h*(l-1)*10**(l-2)\n return oh + ot\n","repo_name":"lanpartis/jianzhiOffer_practice","sub_path":"31_numberOf1Between1AndN.py","file_name":"31_numberOf1Between1AndN.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36022210742","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom urllib.request import Request, urlopen # Python 3\n\ndef read_kansai_csv():\n combined_data = pd.DataFrame()\n for year in range(2016, datetime.now().year + 1):\n # Bypass 403 Forbidden error\n req = Request('https://www.kansai-td.co.jp/denkiyoho/csv/area_jyukyu_jisseki_' +\n str(year) + '.csv')\n req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0')\n content = urlopen(req)\n\n csv = pd.read_csv(content, header = 1, encoding = 'shift_jis')\n\n # Combine multi-year CSVs into one dataframe\n combined_data = combined_data.append(csv, ignore_index=True)\n\n # Drop NaN columns\n combined_data.drop(combined_data.iloc[:, 13:], axis=1, inplace=True)\n\n # Translate Japanese column names to English\n combined_data.columns = ['Date_Time', 'Area_Demand', 'Nuclear', 'Thermal',\n 'Hydraulic', 'Geothermal', 'Biomass', 'Solar(Actual)',\n 'Solar(Output_Control)', 'Wind(Actual)', 'Wind(Output_Control)',\n 'Pumped_Hydro', 'Interconnector']\n\n # assign units and region\n combined_data['Region'] = 'Kansai'\n combined_data['Unit'] = 'MWh'\n\n # Format the datetime\n combined_data['Date_Time']=pd.to_datetime(combined_data['Date_Time'], format='%Y/%m/%d %H:%M')\n\n # get demand data into one df\n demand_df = combined_data[['Date_Time', 'Region', 'Unit', 'Area_Demand']].copy()\n demand_df.sort_values(by=['Date_Time'],ascending=False, inplace=True)\n # Drop NaN rows (not sure why there are NaN rows...)\n demand_df.dropna(inplace = True)\n\n # get supply data into another df\n supply_df = combined_data\n supply_df.drop('Area_Demand', axis=1, inplace=True)\n # Pivot \"wide\" to \"long\" format\n supply_df = pd.melt(combined_data, id_vars=['Date_Time','Region', 'Unit'], var_name='Fuel_Type', value_name='Supply')\n supply_df.sort_values(by=['Date_Time','Fuel_Type'], ascending=False, inplace=True)\n # Drop NaN rows (not sure why there are NaN rows...)\n supply_df.dropna(inplace = True)\n\n return demand_df, supply_df\n\nif __name__ == '__main__':\n demand_df, supply_df = read_kansai_csv()\n# demand_df\n# supply_df\n ","repo_name":"kellyzwang/Grid-Emissions-Data-Scraper-Japan-Vietnam","sub_path":"data_scrapers/japan_kansai/japan_kansai.py","file_name":"japan_kansai.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31156073434","text":"'''\r\n#1.check whether a number is prime number or not a prime number\r\nx=int(input(\"enter the value of x:\"))\r\nfor i in range(2,x):\r\n if (x%i==0):\r\n print(x,\"is a not prime number\")\r\n break\r\nelse:\r\n print(x,\"is a prime number\")\r\n\r\n#2.check whether a number is prime number or not in a given interval\r\nx=int(input(\"enter the value of x:\"))\r\ny=int(input(\"enter the value of y:\"))\r\nu=[]\r\nv=[]\r\nfor i in range(x,y+1):\r\n if i>1:\r\n for j in range(2,i):\r\n if(i%j)==0:\r\n #print(i,\"is not a prime number\")\r\n u.append(i)\r\n\r\n break\r\n else:\r\n v.append(i)\r\n #print(i,\"is a prime number\")\r\nprint(\"List of Non Prime Numbers:\", u)\r\nprint(\"Number of Non Prime Numbers:\", len(u))\r\nprint(\"List of Prime Numbers:\", v)\r\nprint(\"Number of Non Prime Numbers:\", len(v))\r\n\r\n#3.check whether a number is Armstrong or not\r\nx=int(input(\"enter the value of x:\"))\r\nsum=0\r\nnum=x\r\nwhile x>0:\r\n di=x%10\r\n sum=sum+(di*di*di)\r\n x=x//10\r\nif sum==num:\r\n print(\"x is Armstrong number\")\r\nelse:\r\n print(\"x is not Armstrong number\")\r\n\r\n#4.check whether a number is Armstrong in a given interval\r\n\r\na=int(input(\"enter the value of x:\"))\r\nb=int(input(\"enter the value of y:\"))\r\n\r\nc=[]\r\nfor num in range(a,b+1):\r\n a=num\r\n sum=0\r\n while num>0: \r\n di=num%10\r\n sum=sum+(di*di*di)\r\n num=num//10\r\n if (sum==a):\r\n c.append(a)\r\nprint(c)\r\n\r\n#5.find sum of digits of a given number\r\nx=int(input(\"enter the value:\"))\r\n\r\nsum=0\r\nwhile x>0:\r\n di=x%10\r\n sum=sum+di\r\n x=x//10\r\nprint(sum)\r\n'''\r\n#6.product of digits\r\nx=int(input(\"enter the value:\"))\r\n\r\npro=1\r\nwhile x>0:\r\n di=x%10\r\n pro=pro*di\r\n x=x//10\r\nprint(pro)\r\n'''\r\nx=[2,-2,3,-3,8,-8,2]\r\nv=[]\r\nu=[]\r\nfor i in x:\r\n if i>=0:\r\n v.append(i)\r\n\r\n else:\r\n u.append(i)\r\n#print(v)\r\n#print(u)\r\n#z=sum(v)\r\n#print(z)\r\nz1=sum(u)\r\nc=abs(z1)\r\n#print(c)\r\nif z==c:\r\n print(\"equal\")\r\nelse:\r\n print(\"not equal\")\r\n\r\n\r\nx=int(input(\"enter the value of x:\"))\r\ny=int(input(\"enter the value of y:\"))\r\nfor i in range(x,y):\r\n if i%3==0 and i%5==0:\r\n print(i, \"fizz buzz\")\r\n #continue\r\n elif i%3==0:\r\n print(i, \"fizz\")\r\n #continue\r\n elif i%5==0:\r\n print(i, \"buzz\")\r\n #continue\r\n else:\r\n print(i, \"na\")\r\n\r\n\r\n\r\nx=[2,3,4,5,6,7,10,4,10,3,5,6,7,7,8,9,1,2,3,4,5]\r\nu=[]\r\nv=[]\r\nfor i in x:\r\n if i%2==0:\r\n u.append(i)\r\n else:\r\n v.append(i)\r\nprint(u)\r\nprint(v)\r\ns=sum(u)\r\nprint(s)\r\nf=sum(v)\r\nprint(f)\r\nif u==v:\r\n print(\"equal\")\r\nelse:\r\n print(\"not equal\")\r\n \r\nx=int(input(\"enter the value:\"))\r\nfact=1\r\nfor i in range(1,x+1):\r\n fact=fact*i\r\nprint(fact)\r\n\r\n \r\nx=int(input(\"enter the value:\"))\r\nfact=1\r\ni=1\r\nwhile i>fact:\r\n fact=fact*i\r\n i=i+1\r\nprint(fact)\r\n'''\r\n","repo_name":"Sathya2020/Python_Coding","sub_path":"Apr_17.py","file_name":"Apr_17.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34884429574","text":"import mysql.connector\nfrom mysql.connector import errorcode\nimport sys, os\n\ncnx = mysql.connector.connect(user='root', password ='root', \n unix_socket=('/Applications/MAMP/tmp/mysql/mysql.sock'))\n\nDB_NAME = 'dealershipDB'\n\ncursor = cnx.cursor()\n\n#Creates the database, will print error if failed\ndef create_database(cursor, DB_NAME):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n except mysql.connector.Error as err:\n print (\"Failed creating database: {}\".format(err))\n exit(1)\n\n#Creates the table for brands.\ndef create_table_brands(cursor):\n create_brands = \"CREATE TABLE `brands` (\" \\\n \" `name` varchar(64) NOT NULL,\" \\\n \" `country` varchar(64),\" \\\n \" `parent_co` varchar(64),\" \\\n \" `ceo` varchar(64),\" \\\n \" PRIMARY KEY (`name`)\" \\\n \") ENGINE = InnoDB\"\n \n try:\n print(\"Creating table brands: \")\n cursor.execute(create_brands)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n#Creates the table for cars. \ndef create_table_cars(cursor):\n create_cars = \"CREATE TABLE `cars` (\" \\\n \" `car_id` varchar(64) NOT NULL,\" \\\n \" `brand` varchar(64),\" \\\n \" `model` varchar(64),\" \\\n \" `year` SMALLINT,\" \\\n \" PRIMARY KEY (`car_id`),\" \\\n \" FOREIGN KEY (`brand`) REFERENCES brands(name)\" \\\n \") ENGINE = InnoDB\"\n \n try:\n print(\"Creating table cars: \")\n cursor.execute(create_cars)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n#Creates the table for listings. \ndef create_table_listings(cursor):\n create_listings = \"CREATE TABLE `listings` (\" \\\n \" `vin_nr` varchar(64) NOT NULL,\" \\\n \" `car_id` varchar(64),\" \\\n \" `color` varchar(64),\" \\\n \" `miles` INT,\" \\\n \" `price` INT,\" \\\n \" PRIMARY KEY (`vin_nr`),\" \\\n \" FOREIGN KEY (`car_id`) REFERENCES cars(car_id)\" \\\n \") ENGINE = InnoDB\"\n \n try:\n print(\"Creating table listings: \")\n cursor.execute(create_listings)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n#Inserts the data from the file brands.csv into the table brands. You will need to to change\n#the LOAD DATA INFILE 'directory' to match the directory of your 'brands.csv'.\n#furthermore if you have trouble with secure_file_priv you will need to disable this in your mysql config.\ndef insert_into_brands(cursor):\n insert_sql = \"LOAD DATA INFILE '/Users/lk8562peterdahlberg/Desktop/python_courses/1DV503/data/brands.csv' \"\\\n \"INTO TABLE brands \"\\\n \"FIELDS TERMINATED BY ';' \"\\\n \"ENCLOSED BY '\\\"' \"\\\n \"LINES TERMINATED BY '\\n' \"\\\n \"IGNORE 1 ROWS \"\\\n\n try:\n print(\"Inserting data into brands:\")\n cursor.execute(insert_sql)\n except mysql.connector.Error as err:\n print(err.msg)\n else:\n cnx.commit()\n print(\"OK\")\n\n#Inserts the data from the file cars.csv into the table cars. 
You will need to change\n#the LOAD DATA INFILE 'directory' to match the directory of your 'cars.csv'.\n#Furthermore, if you have trouble with secure_file_priv you will need to disable this in your mysql config.\ndef insert_into_cars(cursor):\n insert_sql = \"LOAD DATA INFILE '/Users/lk8562peterdahlberg/Desktop/python_courses/1DV503/data/cars.csv' \"\\\n \"INTO TABLE cars \"\\\n \"FIELDS TERMINATED BY ';' \"\\\n \"ENCLOSED BY '\\\"' \"\\\n \"LINES TERMINATED BY '\\n' \"\\\n \"IGNORE 1 ROWS \"\\\n\n try:\n print(\"Inserting data into cars:\")\n cursor.execute(insert_sql)\n except mysql.connector.Error as err:\n print(err.msg)\n else:\n cnx.commit()\n print(\"OK\")\n\n#Inserts the data from listings.csv into the table listings. You will need to change\n#the LOAD DATA INFILE 'directory' to match the directory of your 'listings.csv'.\n#Furthermore, if you have trouble with secure_file_priv you will need to disable this in your mysql config.\ndef insert_into_listings(cursor):\n insert_sql = \"LOAD DATA INFILE '/Users/lk8562peterdahlberg/Desktop/python_courses/1DV503/data/listings.csv' \"\\\n \"INTO TABLE listings \"\\\n \"FIELDS TERMINATED BY ';' \"\\\n \"ENCLOSED BY '\\\"' \"\\\n \"LINES TERMINATED BY '\\n' \"\\\n \"IGNORE 1 ROWS \"\\\n\n try:\n print(\"Inserting data into listings:\")\n cursor.execute(insert_sql)\n except mysql.connector.Error as err:\n print(err.msg)\n else:\n cnx.commit()\n print(\"OK\")\n\ntry:\n cursor.execute(\"USE {}\".format(DB_NAME)) #USE dealershipDB\nexcept mysql.connector.Error as err:\n print(\"Database {} does not exist.\".format(DB_NAME))\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor, DB_NAME)\n print(\"Database {} successfully created.\".format(DB_NAME))\n cnx.database = DB_NAME\n create_table_brands(cursor)\n create_table_cars(cursor)\n create_table_listings(cursor)\n insert_into_brands(cursor)\n insert_into_cars(cursor)\n insert_into_listings(cursor)\n else:\n print(err)\n\n#Below is the code for all of the queries depending on the option chosen from the main menu.\ndef query_1(cursor):\n query1_1 = \"CREATE VIEW listings_info AS SELECT\"\\\n \" cars.brand, cars.model, cars.year, listings.color, listings.miles, listings.price\"\\\n \" FROM listings INNER JOIN cars ON listings.car_id=cars.car_id\"\\\n\n query1_2 = \"SELECT * FROM listings_info\"\n \n #Creates the view if not already created\n try:\n cursor.execute(query1_1)\n except Exception:\n pass\n\n cursor.execute(query1_2)\n\n results = cursor.fetchall()\n for row in results:\n print(row[0], row[1], row[2], \",\", row[3], \",\", row[4], \"miles,\", row[5], \"$\")\n\n\ndef query_2(cursor, answer_color):\n query2 = \"SELECT cars.brand, cars.model, cars.year FROM cars, listings\"\\\n \" WHERE listings.car_id=cars.car_id AND listings.color=%s\"\n cursor.execute(query2, (answer_color,))\n results = cursor.fetchall()\n if results:\n for row in results:\n print(row[0], row[1], row[2])\n else:\n print(\"No vehicle has that color.\")\n\ndef query_3(cursor):\n query3 = \"SELECT AVG(price) FROM listings\"\n cursor.execute(query3)\n results = cursor.fetchall()\n for row in results:\n print(\"The average price of a car is:\", row[0], \"$\")\n \ndef query_4(cursor, answer_country):\n query4 = \"SELECT cars.brand, cars.model, cars.year FROM brands, cars, listings\"\\\n \" WHERE listings.car_id=cars.car_id AND cars.brand=brands.name\"\\\n \" AND brands.country=%s\"\n cursor.execute(query4, (answer_country,))\n results = cursor.fetchall()\n if results:\n for row in results:\n print(row[0], 
row[1], row[2])\n else:\n print(\"No cars from that country.\")\n\ndef query_5(cursor):\n query5 = \"SELECT cars.brand, COUNT(listings.vin_nr) FROM listings, cars\"\\\n \" WHERE listings.car_id=cars.car_id GROUP BY cars.brand\"\n cursor.execute(query5)\n results = cursor.fetchall()\n for row in results:\n print(row[0], \":\", row[1])\n\n\n#Main menu\ndef main_menu():\n print(\"------------------------\")\n print(\"1. Show full info on all listings.\")\n print(\"2. Search for what cars are available in a certain color.\")\n print(\"3. Show average price of a car being sold.\")\n print(\"4. Search for what cars are available from a certain brand country.\")\n print(\"5. Search for how many cars are available from each brand.\")\n print(\"Q. Quit.\")\n print(\"------------------------\")\n print(\"Please choose an option:\")\n answer = str(input())\n return answer\n\n#Function that waits for a user to press a key, this is cross platform,\n#though I have only tested it with macOS as that's what I'm using.\n#This can also be achieved with curses (though I don't know if it's cross platform).\ndef wait():\n key_press = None\n if os.name == 'nt':\n import msvcrt\n key_press = msvcrt.getch()\n else:\n import termios\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n key_press = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n return key_press\n\n\n\n#All the different options, choosing quit will simply stop the while loop.\ndef answer_1():\n query_1(cursor)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_2():\n print(\"Enter the color:\")\n answer_color = input()\n query_2(cursor, answer_color)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_3():\n query_3(cursor)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_4():\n print(\"Enter the country:\")\n answer_country = input()\n query_4(cursor, answer_country)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_5():\n query_5(cursor)\n print(\"Press any key to return to main menu:\")\n wait()\n\nwhile True:\n answer = main_menu()\n\n if answer == \"1\":\n answer_1()\n\n elif answer == \"2\":\n answer_2()\n\n elif answer == \"3\":\n answer_3()\n\n elif answer == \"4\":\n answer_4()\n\n elif answer == \"5\":\n answer_5()\n\n elif answer == \"Q\" or answer == \"q\":\n print(\"Quitting.\")\n break","repo_name":"pd222ha/1DV503-Programming-assignment-2","sub_path":"PA2.py","file_name":"PA2.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"39626579395","text":"class SinglyNode:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def insert_at_head(self, d):\n x = SinglyNode(d)\n\n if self.head is None:\n self.head = x\n else:\n x.next = self.head\n self.head = x\n\n def insert_at_tail(self, d):\n tr = self.head\n while tr.next is not None:\n tr = tr.next\n tr.next = SinglyNode(d)\n\n def is_empty(self):\n return self.head is None\n\n def print_all(self):\n while self.head is not None:\n print(self.head.data)\n self.head = self.head.next\n\n def print_all(self, tr):\n while tr is not None:\n print(tr.data)\n tr = tr.next\n\n def reverse_using_recusrion(self, h):\n if h.next is None:\n return h\n r = None\n r = self.reverse_using_recusrion(h.next)\n h.next.next = h\n h.next = None\n return r\n\n def reverse_using_loop(self, h):\n prev = None\n nxt = None\n\n while h is not None:\n nxt = h.next\n h.next = prev\n prev = h\n h = nxt\n return prev\n\n def remove_without_head_brute_force(self, h):\n tr = h\n while (tr.next is not None):\n tr.data = tr.next.data\n tr = tr.next\n while (h.next != tr):\n h = h.next\n h.next = None\n\n def delete_without_head_simple(self, h):\n if h is None or h.next is None:\n return None\n x = h.next.next\n h.data = h.next.data\n h.next = x\n\n\ndef remove_loop(h):\n slow = h\n fast = h\n tr = h\n\n while fast.next is not None:\n\n fast = fast.next\n if fast is None:\n break\n fast = fast.next\n slow = slow.next\n if fast == slow:\n break\n\n while slow.next != tr.next:\n tr = tr.next\n slow = slow.next\n\n slow.next = None\n return h\n\n\n\n","repo_name":"biswarup2444/python_dsa","sub_path":"datastructures/linkedlist/singly/singly_node.py","file_name":"singly_node.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"34134949923","text":"class Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n dit = {} #将列表转为字典\n nums.sort() #将列表排序\n for i in nums: #计算列表元素数量\n dit[i] = dit.get(i,0)+1\n dit = sorted(dit.items(), key=lambda dit:dit[1], reverse=True) #排序列表元素\n res = []\n for x in range(k):\n res.append(dit[x][0])\n return res\n","repo_name":"dgtyuewq/leetcode","sub_path":"解答/前 K 个高频元素.py","file_name":"前 K 个高频元素.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32354156397","text":"import os\nimport pathlib\nimport platform\n\nROOT_PATH = str(pathlib.Path(os.path.dirname(os.path.realpath(__file__))).parent)\nCURRENT_EXPERIMENT = 'bigsarima'\nRUNNING_LOCALLY = 'macOS' in platform.platform()\n\nif RUNNING_LOCALLY:\n OUTPUT_DIR = os.path.join(ROOT_PATH,'localoutput',CURRENT_EXPERIMENT,'output')\nelse:\n OUTPUT_DIR = '/cnvrg/output'\n\n\nif __name__=='__main__':\n print(platform.platform())\n print({'RUNNING_LOCALLY':RUNNING_LOCALLY,'OUTPUT_DIR':OUTPUT_DIR})\n\n\n","repo_name":"microprediction/schooled","sub_path":"schooled/whereami.py","file_name":"whereami.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"20231310937","text":"from glob import glob\nimport uproot3\nimport numpy as np\nimport pandas as pd\nimport tqdm\nfrom RooPandasFunctions import PNanotoDataFrame,PSequential,PColumn,PFilter,PRow,PProcessor,PProcRunner,PInitDir\nfrom collections import OrderedDict\n\n#Define Datasets and corresponding file selections\nfnames={}\n#fnames[\"QCD_HT1000to1500\"] = sorted(glob('/cms/knash/EOS/QCD_HT1000to1500_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v1_NanoB2GNano2016mc_v1/*/*/*.root'))\n#fnames[\"QCD_HT1500to2000\"]= sorted(glob('/cms/knash/EOS/QCD_HT1500to2000_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v1_NanoB2GNano2016mc_v1/*/*/*.root'))\n#fnames[\"QCD_HT2000toInf\"]= sorted(glob('/cms/knash/EOS/QCD_HT2000toInf_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v1_NanoB2GNano2016mc_v1/*/*/*.root'))\n#fnames[\"TT\"] = sorted(glob('/cms/knash/EOS/ZprimeToTT_M2500_W25_TuneCP2_PSweights_13TeV-madgraph-pythiaMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v2_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"HgHg_15001400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoHiggs_M1500_M1400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"PgPg_15001400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoPhotons_M1500_M1400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"PgPg_1500400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoPhotons_M1500_M400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"WgWg_15001400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoWs_M1500_M1400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"WgWg_1500400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoWs_M1500_M400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\n\n\n\n#Do this if accessing over XROOTD\nfileset={}\nfor ffi in fnames:\n #fileset[ffi]=[ffj.replace(\"/eos/uscms/\",\"root://cmsxrootd.fnal.gov///\") for ffj in fnames[ffi]]\n fileset[ffi]=fnames[ffi]\n #fileset[ffi]=fileset[ffi][:10]\n\n#This is the Nano->Parquet file reduction factor\nbatchesperfile={\n \"TT\":3,\n \"HgHg_15001400\":2,\n \"PgPg_15001400\":2,\n \"PgPg_1500400\":2,\n \"WgWg_15001400\":2,\n \"WgWg_1500400\":2,\n \"QCD_HT1500to2000\":5,\n \"QCD_HT1000to1500\":5,\n \"QCD_HT2000toInf\":5}\n\n#Keep only the branches you want \"Jet\",[\"pt\"] would be the branch Jet_pt in the 
NanoAOD\nbranchestokeep=OrderedDict([(\"FatJet\",[\"pt\",\"eta\",\"phi\",\"mass\",\"msoftdrop\",\"iAEMSE\",\"iAEL0\",\"iAEL1\",\"iAEL2\",\"iAEL3\",\"iAEL4\",\"iAEL5\"]),(\"\",[\"run\",\"luminosityBlock\",\"event\"])])\n#branchestokeep=OrderedDict([(\"LHEPart\",[\"pt\",\"eta\",\"phi\",\"mass\",\"pdgId\",\"status\"]),(\"FatJet\",[\"pt\",\"eta\",\"phi\",\"mass\",\"hadronFlavour\",\"partonFlavour\",\"msoftdrop\",\"iAEMSE\",\"iAEL0\",\"iAEL1\",\"iAEL2\",\"iAEL3\",\"iAEL4\",\"iAEL5\"]),(\"\",[\"run\",\"luminosityBlock\",\"event\"])])\n#branchestokeep=OrderedDict([(\"Muon\",[\"pt\",\"eta\",\"phi\",\"mass\"]),(\"Jet\",[\"pt\",\"eta\",\"phi\",\"mass\"]),(\"FatJet\",[\"pt\",\"eta\",\"phi\",\"mass\",\"hadronFlavour\",\"partonFlavour\",\"msoftdrop\",\"iAEMSE\",\"iAEL0\",\"iAEL1\",\"iAEL2\",\"iAEL3\",\"iAEL4\",\"iAEL5\"]),(\"\",[\"run\",\"luminosityBlock\",\"event\"])])\n\n\n\n\n#Trim out element indices you don't want (i.e. only keep the top 5 jets etc)\nmind={\"FatJet\":5,\"\":None}\n\n\n#It is possible to pass a column selection here similar to the analyzer. \n#The syntax is more complicated than in the analyzer -- to be improved. \n#This is useful for skimming and calculating a value from collections that you don't want to save.\n#e.g. calculate ht from the ak4 jets, then drop the ak4s:\nclass ColumnSelection():\n def __call__(self,df,EventInfo):\n\n htdf=pd.DataFrame()\n htdf[\"ht\"]=df[\"Jet_pt\"].groupby(level=0).sum()\n htdf['subentry'] = 0\n htdf.set_index('subentry', append=True, inplace=True)\n df=pd.concat((df,htdf),axis=1)\n\n df=df.drop([\"Jet_pt\",\"Jet_eta\",\"Jet_phi\",\"Jet_mass\"],axis=1)\n\n return df\n \nskim= [\n PColumn(ColumnSelection()),\n ]\n\n#Run it. nproc is the number of processors. >1 goes into multiprocessing mode\n#PNanotoDataFrame(fileset,branchestokeep,filesperchunk=batchesperfile,nproc=1,atype=\"flat\",dirname=\"RooFlatFull\",maxind=mind,seq=skim).Run()\nPNanotoDataFrame(fileset,branchestokeep,filesperchunk=batchesperfile,nproc=2,atype=\"flat\",dirname=\"RooFlatFull\",maxind=mind).Run()\n\n\n\n\n\n\n","repo_name":"knash/AEAnalyzer","sub_path":"RooPandasAnomalyProcessor.py","file_name":"RooPandasAnomalyProcessor.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73479729424","text":"AR = ['/usr/bin/ar']\nARFLAGS = ['rcs']\nBINDIR = '/usr/local/bin'\nCC = ['/usr/bin/gcc']\nCCLNK_SRC_F = []\nCCLNK_TGT_F = ['-o']\nCC_NAME = 'gcc'\nCC_SRC_F = []\nCC_TGT_F = ['-c', '-o']\nCC_VERSION = ('4', '8', '2')\nCFLAGS_MACBUNDLE = ['-fPIC']\nCFLAGS_PYEMBED = ['-fstack-protector', '-fwrapv']\nCFLAGS_PYEXT = ['-fstack-protector', '-fwrapv']\nCFLAGS_SOY = ['-pthread', '-pthread']\nCFLAGS_cshlib = ['-fPIC']\nCOMPILER_CC = 'gcc'\nCPPPATH_ST = '-I%s'\nCXXFLAGS_PYEMBED = ['-fstack-protector', '-fwrapv']\nCXXFLAGS_PYEXT = ['-fstack-protector', '-fwrapv']\nCXXFLAGS_SOY = ['-pthread', '-pthread']\nDEFINES = ['PYTHONDIR=\"/usr/lib/python3/dist-packages\"', 'PYTHONARCHDIR=\"/usr/lib/python3/dist-packages\"', 'HAVE_PYEMBED=1', 'HAVE_PYEXT=1', 'HAVE_PYTHON_H=1']\nDEFINES_PYEMBED = ['HAVE_PYEMBED=1', 'NDEBUG']\nDEFINES_PYEXT = ['HAVE_PYEXT=1', 'NDEBUG']\nDEFINES_SOY = ['HAVE_SOY=1']\nDEFINES_ST = '-D%s'\nDEST_BINFMT = 'elf'\nDEST_CPU = 'x86_64'\nDEST_OS = 'linux'\nHAVE_PYEMBED = 1\nHAVE_PYEXT = 1\nHAVE_SOY = 1\nINCLUDES_PYEMBED = ['/usr/include/python3.4m']\nINCLUDES_PYEXT = ['/usr/include/python3.4m']\nINCLUDES_SOY = ['/usr/local/include', '/usr/include/glib-2.0', '/usr/lib/x86_64-linux-gnu/glib-2.0/include', '/usr/include/gee-0.8', '/usr/include/dbus-1.0', '/usr/include/librsvg-2.0', '/usr/include/nice', '/usr/lib/x86_64-linux-gnu/dbus-1.0/include', '/usr/include/gdk-pixbuf-2.0', '/usr/include/cairo', '/usr/include/libpng12', '/usr/include/pixman-1', '/usr/include/freetype2', '/usr/include/loudmouth-1.0']\nLIBDIR = '/usr/local/lib'\nLIBPATH_PYEMBED = ['/usr/lib/python3.4/config-3.4m-x86_64-linux-gnu', '/usr/lib']\nLIBPATH_PYEXT = ['/usr/lib/python3.4/config-3.4m-x86_64-linux-gnu', '/usr/lib']\nLIBPATH_SOY = ['/usr/local/lib']\nLIBPATH_ST = '-L%s'\nLIB_PYEMBED = ['pthread', 'dl', 'util', 'm', 'python3.4m']\nLIB_PYEXT = ['pthread', 'dl', 'util', 'm', 'python3.4m']\nLIB_SOY = ['soy', 'gee-0.8', 'dbus-glib-1', 'rsvg-2', 'm', 'loudmouth-1', 'idn', 'nice', 'gthread-2.0', 'GLESv2', 'dbus-1', 'gio-2.0', 'gdk_pixbuf-2.0', 'cairo', 'gobject-2.0', 'glib-2.0']\nLIB_ST = '-l%s'\nLINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']\nLINKFLAGS_PYEMBED = ['-Xlinker', '-export-dynamic', '-Wl,-O1', '-Wl,-Bsymbolic-functions']\nLINKFLAGS_PYEXT = ['-Xlinker', '-export-dynamic', '-Wl,-O1', '-Wl,-Bsymbolic-functions']\nLINKFLAGS_SOY = ['-pthread', '-Wl,-export-dynamic', '-pthread']\nLINKFLAGS_cshlib = ['-shared']\nLINKFLAGS_cstlib = ['-Wl,-Bstatic']\nLINK_CC = ['/usr/bin/gcc']\nLINUX = 1\nPACKAGE = 'pysoy'\nPKGCONFIG = ['/usr/bin/pkg-config']\nPREFIX = '/usr'\nPYC = 1\nPYFLAGS = ''\nPYFLAGS_OPT = '-O'\nPYO = 1\nPYTAG = 'cpython-34'\nPYTHON = ['/usr/bin/python3']\nPYTHONARCHDIR = '/usr/lib/python3/dist-packages'\nPYTHONDIR = '/usr/lib/python3/dist-packages'\nPYTHON_CONFIG = ['/usr/bin/python3-config']\nPYTHON_VERSION = '3.4'\nRPATH_ST = '-Wl,-rpath,%s'\nSHLIB_MARKER = '-Wl,-Bdynamic'\nSONAME_ST = '-Wl,-h,%s'\nSTLIBPATH_ST = '-L%s'\nSTLIB_MARKER = '-Wl,-Bstatic'\nSTLIB_ST = '-l%s'\ncprogram_PATTERN = '%s'\ncshlib_PATTERN = 'lib%s.so'\ncstlib_PATTERN = 'lib%s.a'\ndefine_key = ['PYTHONDIR', 'PYTHONARCHDIR', 'HAVE_PYEMBED', 'HAVE_PYEXT', 'HAVE_PYTHON_H']\nmacbundle_PATTERN = '%s.bundle'\npyext_PATTERN = '%s.cpython-34m.so'\n","repo_name":"couchjd/Playground","sub_path":"python/libraries/pysoy/build/c4che/_cache.py","file_name":"_cache.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71617852947","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport code\nimport opendr\nimport math\nfrom opendr.camera import ProjectPoints\nfrom opendr.renderer import ColoredRenderer, TexturedRenderer\nfrom opendr.lighting import LambertianPointLight\nimport random\n\n\n# Rotate the points by a specified angle.\ndef rotateY(points, angle):\n ry = np.array([\n [np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],\n [-np.sin(angle), 0., np.cos(angle)]\n ])\n return np.dot(points, ry)\n\ndef draw_skeleton(input_image, joints, draw_edges=True, vis=None, radius=None):\n \"\"\"\n joints is 3 x 19. but if not will transpose it.\n 0: Right ankle\n 1: Right knee\n 2: Right hip\n 3: Left hip\n 4: Left knee\n 5: Left ankle\n 6: Right wrist\n 7: Right elbow\n 8: Right shoulder\n 9: Left shoulder\n 10: Left elbow\n 11: Left wrist\n 12: Neck\n 13: Head top\n 14: nose\n 15: left_eye\n 16: right_eye\n 17: left_ear\n 18: right_ear\n \"\"\"\n\n if radius is None:\n radius = max(4, (np.mean(input_image.shape[:2]) * 0.01).astype(int))\n\n colors = {\n 'pink': (197, 27, 125), # L lower leg\n 'light_pink': (233, 163, 201), # L upper leg\n 'light_green': (161, 215, 106), # L lower arm\n 'green': (77, 146, 33), # L upper arm\n 'red': (215, 48, 39), # head\n 'light_red': (252, 146, 114), # head\n 'light_orange': (252, 141, 89), # chest\n 'purple': (118, 42, 131), # R lower leg\n 'light_purple': (175, 141, 195), # R upper\n 'light_blue': (145, 191, 219), # R lower arm\n 'blue': (69, 117, 180), # R upper arm\n 'gray': (130, 130, 130), #\n 'white': (255, 255, 255), #\n }\n\n image = input_image.copy()\n input_is_float = False\n\n if np.issubdtype(image.dtype, np.float):\n input_is_float = True\n max_val = image.max()\n if max_val <= 2.: # should be 1 but sometimes it's slightly above 1\n image = (image * 255).astype(np.uint8)\n else:\n image = (image).astype(np.uint8)\n\n if joints.shape[0] != 2:\n joints = joints.T\n joints = np.round(joints).astype(int)\n\n jcolors = [\n 'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',\n 'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white'\n ]\n\n if joints.shape[1] == 19:\n # parent indices -1 means no parents\n parents = np.array([\n 1, 2, 8, 9, 3, 4, 7, 8, 12, 12, 9, 10, 14, -1, 13, -1, -1, 15, 16\n ])\n # Left is light and right is dark\n ecolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 8: 'light_blue',\n 9: 'blue',\n 10: 'blue',\n 11: 'blue',\n 12: 'purple',\n 17: 'light_green',\n 18: 'light_green',\n 14: 'purple'\n }\n elif joints.shape[1] == 14:\n parents = np.array([\n 1,\n 2,\n 8,\n 9,\n 3,\n 4,\n 7,\n 8,\n -1,\n -1,\n 9,\n 10,\n 13,\n -1,\n ])\n ecolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 10: 'light_blue',\n 11: 'blue',\n 12: 'purple'\n }\n elif joints.shape[1] == 21: # hand\n parents = np.array([\n -1,\n 0,\n 1,\n 2,\n 3,\n 0,\n 5,\n 6,\n 7,\n 0,\n 9,\n 10,\n 11,\n 0,\n 13,\n 14,\n 15,\n 0,\n 17,\n 18,\n 19,\n ])\n ecolors = {\n 0: 'light_purple',\n 1: 'light_green',\n 2: 'light_green',\n 3: 'light_green',\n 4: 'light_green',\n 5: 'pink',\n 6: 'pink',\n 7: 'pink',\n 8: 'pink',\n 9: 'light_blue',\n 
10: 'light_blue',\n 11: 'light_blue',\n 12: 'light_blue',\n 13: 'light_red',\n 14: 'light_red',\n 15: 'light_red',\n 16: 'light_red',\n 17: 'purple',\n 18: 'purple',\n 19: 'purple',\n 20: 'purple',\n }\n else:\n print('Unknown skeleton!!')\n\n for child in range(len(parents)):\n point = joints[:, child]\n # If invisible skip\n if vis is not None and vis[child] == 0:\n continue\n if draw_edges:\n cv2.circle(image, (point[0], point[1]), radius, colors['white'],\n -1)\n cv2.circle(image, (point[0], point[1]), radius - 1,\n colors[jcolors[child]], -1)\n else:\n # cv2.circle(image, (point[0], point[1]), 5, colors['white'], 1)\n cv2.circle(image, (point[0], point[1]), radius - 1,\n colors[jcolors[child]], 1)\n # cv2.circle(image, (point[0], point[1]), 5, colors['gray'], -1)\n pa_id = parents[child]\n if draw_edges and pa_id >= 0:\n if vis is not None and vis[pa_id] == 0:\n continue\n point_pa = joints[:, pa_id]\n cv2.circle(image, (point_pa[0], point_pa[1]), radius - 1,\n colors[jcolors[pa_id]], -1)\n if child not in ecolors.keys():\n print('bad')\n cv2.line(image, (point[0], point[1]), (point_pa[0], point_pa[1]),\n colors[ecolors[child]], radius - 2)\n\n # Convert back in original dtype\n if input_is_float:\n if max_val <= 1.:\n image = image.astype(np.float32) / 255.\n else:\n image = image.astype(np.float32)\n\n return image\n\ndef draw_text(input_image, content):\n \"\"\"\n content is a dict. draws key: val on image\n Assumes key is str, val is float\n \"\"\"\n image = input_image.copy()\n input_is_float = False\n if np.issubdtype(image.dtype, np.float):\n input_is_float = True\n image = (image * 255).astype(np.uint8)\n\n black = (255, 255, 0)\n margin = 15\n start_x = 5\n start_y = margin\n for key in sorted(content.keys()):\n text = \"%s: %.2g\" % (key, content[key])\n cv2.putText(image, text, (start_x, start_y), 0, 0.45, black)\n start_y += margin\n\n if input_is_float:\n image = image.astype(np.float32) / 255.\n return image\n\ndef visualize_reconstruction_opendr(img, vertices, camera, renderer, color='light_blue', focal_length=1000):\n \"\"\"\n Renderer is an instance of OpenDR Renderer.\n \"\"\"\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n rend_img = renderer.render(vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color=color)\n\n combined = np.hstack([img, rend_img])\n\n return combined\n\ndef visualize_reconstruction_multi_view_opendr(img, vertices, camera, renderer, color='light_blue', focal_length=1000):\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n rend_img = renderer.render(vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n\n # rotate\n aroundy0 = cv2.Rodrigues(np.array([0, np.radians(0.), 0]))[0]\n aroundy1 = cv2.Rodrigues(np.array([0, np.radians(90.), 0]))[0]\n aroundy2 = cv2.Rodrigues(np.array([0, np.radians(180.), 0]))[0]\n aroundy3 = cv2.Rodrigues(np.array([0, np.radians(270.), 0]))[0]\n aroundy4 = cv2.Rodrigues(np.array([0, np.radians(45.), 0]))[0]\n center = vertices.mean(axis=0)\n rot_vertices0 = np.dot((vertices - center), aroundy0) + center\n rot_vertices1 = np.dot((vertices - center), aroundy1) + center\n rot_vertices2 = np.dot((vertices - center), aroundy2) + center\n rot_vertices3 = np.dot((vertices - center), 
aroundy3) + center\n rot_vertices4 = np.dot((vertices - center), aroundy4) + center\n \n # # Render non-parametric shape\n img_side0 = renderer.render(rot_vertices0, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side1 = renderer.render(rot_vertices1, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side2 = renderer.render(rot_vertices2, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side3 = renderer.render(rot_vertices3, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side4 = renderer.render(rot_vertices4, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n\n combined = np.hstack([img, rend_img, img_side0, img_side1, img_side2, img_side3, img_side4])\n\n return combined\n\ndef visualize_reconstruction_smpl_opendr(img, vertices, camera, renderer, smpl_vertices, color='light_blue', focal_length=1000):\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n rend_img = renderer.render(vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color=color)\n \n rend_img_smpl = renderer.render(smpl_vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color=color)\n\n combined = np.hstack([img, rend_img, rend_img_smpl])\n\n return combined\n\n\ndef cam2pixel(cam_coord, f, c):\n x = cam_coord[:, 0] / (cam_coord[:, 2]) * f[0] + c[0]\n y = cam_coord[:, 1] / (cam_coord[:, 2]) * f[1] + c[1]\n z = cam_coord[:, 2]\n img_coord = np.concatenate((x[:,None], y[:,None], z[:,None]),1)\n return img_coord\n\n\nclass OpenDR_Renderer(object):\n \"\"\"\n Render mesh using OpenDR for visualization.\n \"\"\"\n\n def __init__(self, width=800, height=600, near=0.5, far=1000, faces=None):\n self.colors = {'hand': [.9, .9, .9], 'pink': [.9, .7, .7], 'light_blue': [0.65098039, 0.74117647, 0.85882353] }\n self.width = width\n self.height = height\n self.faces = faces\n self.renderer = ColoredRenderer()\n\n def render(self, vertices, faces=None, img=None,\n camera_t=np.zeros([3], dtype=np.float32),\n camera_rot=np.zeros([3], dtype=np.float32),\n camera_center=None,\n use_bg=False,\n bg_color=(0.0, 0.0, 0.0),\n body_color=None,\n focal_length=5000,\n disp_text=False,\n gt_keyp=None,\n pred_keyp=None,\n **kwargs):\n if img is not None:\n height, width = img.shape[:2]\n else:\n height, width = self.height, self.width\n\n if faces is None:\n faces = self.faces\n\n if camera_center is None:\n camera_center = np.array([width * 0.5,\n height * 0.5])\n\n self.renderer.camera = ProjectPoints(rt=camera_rot,\n t=camera_t,\n f=focal_length * np.ones(2),\n c=camera_center,\n k=np.zeros(5))\n dist = np.abs(self.renderer.camera.t.r[2] -\n np.mean(vertices, axis=0)[2])\n far = dist + 20\n\n self.renderer.frustum = {'near': 1.0, 'far': far,\n 'width': width,\n 'height': height}\n\n if img is not None:\n if use_bg:\n self.renderer.background_image = img\n else:\n self.renderer.background_image = np.ones_like(\n img) * np.array(bg_color)\n\n if body_color is None:\n color = self.colors['light_blue']\n else:\n color = self.colors[body_color]\n\n if isinstance(self.renderer, TexturedRenderer):\n color = [1.,1.,1.]\n\n 
self.renderer.set(v=vertices, f=faces,\n vc=color, bgcolor=np.ones(3))\n albedo = self.renderer.vc\n # Construct Back Light (on back right corner)\n yrot = np.radians(120)\n\n self.renderer.vc = LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-200, -100, -100]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Left Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([800, 10, 300]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Right Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-500, 500, 1000]), yrot),\n vc=albedo,\n light_color=np.array([.7, .7, .7]))\n\n return self.renderer.r\n\n\n def render_vertex_color(self, vertices, faces=None, img=None,\n camera_t=np.zeros([3], dtype=np.float32),\n camera_rot=np.zeros([3], dtype=np.float32),\n camera_center=None,\n use_bg=False,\n bg_color=(0.0, 0.0, 0.0),\n vertex_color=None,\n focal_length=5000,\n disp_text=False,\n gt_keyp=None,\n pred_keyp=None,\n **kwargs):\n if img is not None:\n height, width = img.shape[:2]\n else:\n height, width = self.height, self.width\n\n if faces is None:\n faces = self.faces\n\n if camera_center is None:\n camera_center = np.array([width * 0.5,\n height * 0.5])\n\n self.renderer.camera = ProjectPoints(rt=camera_rot,\n t=camera_t,\n f=focal_length * np.ones(2),\n c=camera_center,\n k=np.zeros(5))\n dist = np.abs(self.renderer.camera.t.r[2] -\n np.mean(vertices, axis=0)[2])\n far = dist + 20\n\n self.renderer.frustum = {'near': 1.0, 'far': far,\n 'width': width,\n 'height': height}\n\n if img is not None:\n if use_bg:\n self.renderer.background_image = img\n else:\n self.renderer.background_image = np.ones_like(\n img) * np.array(bg_color)\n\n if vertex_color is None:\n vertex_color = self.colors['light_blue']\n\n\n self.renderer.set(v=vertices, f=faces,\n vc=vertex_color, bgcolor=np.ones(3))\n albedo = self.renderer.vc\n # Construct Back Light (on back right corner)\n yrot = np.radians(120)\n\n self.renderer.vc = LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-200, -100, -100]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Left Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([800, 10, 300]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Right Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-500, 500, 1000]), yrot),\n vc=albedo,\n light_color=np.array([.7, .7, .7]))\n\n return self.renderer.r","repo_name":"postech-ami/FastMETRO","sub_path":"src/utils/renderer_opendr.py","file_name":"renderer_opendr.py","file_ext":"py","file_size_in_byte":17822,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"48"}
+{"seq_id":"11450658600","text":"def module():\r\n numb = input(\"Введіть число \")\r\n str_numb = str(numb)\r\n sum1 = 0\r\n for i in range(len(str_numb)):\r\n sum1+= int(str_numb[i])\r\n print(\"Сума \", sum1)\r\n max1 = max(numb)\r\n min1 = min(numb)\r\n print(\"Максимальна цифра \", max1)\r\n print(\"Мінімальна цифра \", min1)\r\n return(module)\r\nmodule() ","repo_name":"MaksKulinich/MKG","sub_path":"modulelab.py","file_name":"modulelab.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"25585217868","text":"from snakeguice import providers\nfrom snakeguice.annotation import Annotation\nfrom snakeguice.binder import Key\nfrom snakeguice.decorators import inject\nfrom snakeguice.interfaces import Injector\nfrom snakeguice.errors import MultiBindingError\n\n\nclass _MultiBinder(object):\n\n def __init__(self, binder, interface):\n self._binder = binder\n self._interface = interface\n self._provider = self._get_or_create_provider()\n\n def _get_or_create_provider(self):\n key = Key(self.multibinding_type(self._interface))\n binding = self._binder.get_binding(key)\n if not binding:\n self._binder.bind(self.multibinding_type(self._interface),\n to_provider=self._create_provider())\n binding = self._binder.get_binding(key)\n return binding.provider\n\n def _dsl_to_provider(self, to, to_provider, to_instance):\n if to:\n #TODO: add some validation\n return providers.create_simple_provider(to)\n elif to_provider:\n #TODO: add some validation\n return to_provider\n elif to_instance:\n #TODO: add some validation\n return providers.create_instance_provider(to_instance)\n else:\n raise MultiBindingError('incorrect arguments to %s.add_binding'\n % self.__class__.__name__)\n\n\nclass List(Annotation):\n \"\"\"Used for binding lists.\"\"\"\n\n\nclass ListBinder(_MultiBinder):\n\n multibinding_type = List\n\n def add_binding(self, to=None, to_provider=None, to_instance=None):\n provider = self._dsl_to_provider(to, to_provider, to_instance)\n self._provider.add_provider(provider)\n\n def _create_provider(self):\n class DynamicMultiBindingProvider(object):\n providers = []\n\n @inject(injector=Injector)\n def __init__(self, injector):\n self._injector = injector\n\n @classmethod\n def add_provider(cls, provider):\n cls.providers.append(provider)\n\n def get(self):\n return [self._injector.get_instance(p).get()\n for p in self.providers]\n\n return DynamicMultiBindingProvider\n\n\nclass Dict(Annotation):\n \"\"\"Used for binding dictionaries.\"\"\"\n\n\nclass DictBinder(_MultiBinder):\n\n multibinding_type = Dict\n\n def add_binding(self, key, to=None, to_provider=None, to_instance=None):\n provider = self._dsl_to_provider(to, to_provider, to_instance)\n self._provider.add_provider(key, provider)\n\n def _create_provider(self):\n binder_self = self\n\n class DynamicMultiBindingProvider(object):\n providers = {}\n\n @inject(injector=Injector)\n def __init__(self, injector):\n self._injector = injector\n\n @classmethod\n def add_provider(cls, key, provider):\n if key in cls.providers:\n msg = ('duplicate binding for %r in Dict(%s) found'\n % (key, binder_self.interface.__class__.__name__))\n raise MultiBindingError(msg)\n cls.providers[key] = provider\n\n def get(self):\n return dict([(k, self._injector.get_instance(p).get())\n for k, p in self.providers.items()])\n\n return DynamicMultiBindingProvider\n","repo_name":"dstanek/snake-guice.orig","sub_path":"snakeguice/multibinder.py","file_name":"multibinder.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"20636476039","text":"import numpy as np\nimport sys\nfrom keras import backend as K\nfrom custom_image import (ImageDataGenerator, \n standardize, \n random_transform, \n random_crop, \n center_crop, \n pick_channels, \n get_max_class,\n get_soft_class)\nimport tensorflow as tf\nimport pickle\nimport os\nimport tables\n\ndef _pickle_fit_vars(path, fit_vars):\n data_file = open(path, 'wb')\n pickle.dump(fit_vars, data_file)\n data_file.close()\n\ndef _preload_data(data_dir, read_format='tbl'):\n \"\"\"Utility function which preloads the data from the directory\n \"\"\"\n X = []\n #if data_dir == 'masks/':\n # running_sum = np.zeros(10)\n #else:\n # running_sum = np.zeros(20)\n for im in os.listdir(data_dir):\n print('Preloading image {}'.format(im))\n if read_format == 'npy':\n x = np.load(os.path.join(data_dir, im))\n elif read_format == 'tbl':\n with tables.open_file(os.path.join(data_dir, im), 'r') as h5_file:\n x = h5_file.root.carray.read()\n #running_sum += np.sum(x, axis=(0,1))\n X.append(x)\n\n \n #print((3349*3389*25-np.sum(running_sum))/running_sum)\n\n return np.asarray(X)\n\ndef setup_generator(data_dir,\n batch_size=1,\n augment=False,\n shape_gen=(572,572),\n shape_gen_out=None,\n mask_channels=None,\n seed=0,\n verbose=1,\n norm_gen=True,\n classify=None,\n preload=False,\n read_format='tbl'):\n \"\"\"Utility function to help set up generators\n \"\"\"\n\n if augment:\n datagen = ImageDataGenerator(\n featurewise_center=norm_gen,\n featurewise_std_normalization=norm_gen,\n featurewise_standardize_axis=(0, 1, 2),\n rotation_range=90,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='reflect',\n seed=seed,\n verbose=verbose)\n else:\n datagen = ImageDataGenerator(\n featurewise_center=norm_gen,\n featurewise_std_normalization=norm_gen,\n featurewise_standardize_axis=(0, 1, 2),\n fill_mode='reflect',\n seed=seed,\n verbose=verbose)\n\n datagen.config['random_crop_size'] = shape_gen\n datagen.config['sync_seed'] = seed\n datagen.config['seed'] = seed\n if shape_gen_out:\n datagen.config['center_crop_size'] = shape_gen_out\n else:\n datagen.config['center_crop_size'] = shape_gen \n\n # sets which channels to use for fitting\n # note that is -1 is included, a 'no pixel' channel is appended\n datagen.config['channel_idxs'] = mask_channels\n\n if augment and norm_gen:\n datagen.set_pipeline([random_crop, random_transform, standardize, center_crop, pick_channels])\n elif augment and not norm_gen:\n datagen.set_pipeline([random_crop, random_transform, center_crop, pick_channels])\n elif not augment and norm_gen:\n datagen.set_pipeline([random_crop, standardize, center_crop, pick_channels])\n else:\n datagen.set_pipeline([random_crop, center_crop, pick_channels])\n\n if classify == 'hard':\n datagen.set_pipeline([get_max_class])\n elif classify == 'soft':\n datagen.set_pipeline([get_soft_class])\n \n # define how the data is flowing from the directory\n # If we are preloading, we load up the data and then\n # use flow to iterate the numpy array\n if preload:\n # load up X data, assuming in tbl format\n X = _preload_data(data_dir, read_format)\n # flow does not need y (the labeled data) to be \n # passed, we can zip it up later\n datagen_flow = datagen.flow(X,\n batch_size=batch_size,\n seed=seed)\n else:\n datagen_flow = datagen.flow_from_directory(data_dir,\n class_mode=None,\n read_formats={read_format},\n image_reader=read_format,\n batch_size=batch_size,\n seed=seed)\n X = None\n\n return (datagen, datagen_flow, X)\n\n\ndef 
get_classifier_generators(batch_size=4,\n augment=False,\n nb_iter=200,\n shape_in=(572, 572),\n seed=0,\n verbose=1,\n norm_path='gen_norm.p',\n preload=False,\n read_format='tbl'):\n \"\"\" Creates starting classifier net to pre-train\n auto-encoder \"\"\"\n (datagen_X, dgdx, X) = setup_generator(data_dir = 'images/train/',\n shape_gen = shape_in,\n augment = augment,\n batch_size = batch_size,\n preload=preload,\n read_format=read_format)\n (datagen_Y, dgdy, y) = setup_generator(data_dir = 'masks/',\n shape_gen = shape_in,\n shape_gen_out = shape_out,\n augment = augment,\n classify = 'soft',\n batch_size=batch_size,\n preload=preload,\n read_format=read_format)\n\n datagen_X.fit_generator(dgdx, nb_iter=nb_iter)\n\n dg_mean = datagen_X.config['mean']\n dg_std = datagen_X.config['std']\n print('Generator fitted, mean: {mean}, std: {std}'.format(mean=dg_mean, \n std=dg_std))\n\n _pickle_fit_vars(norm_path, (dg_mean, dg_std))\n\n classify_generator = dgdx + dgdy\n return classify_generator\n\n\ndef get_ae_generators(batch_size=4,\n augment=False,\n nb_iter=200,\n shape_in=(572,572),\n shape_out=(388,388),\n seed=0,\n verbose=1,\n norm_path='gen_norm.p',\n read_format='tbl',\n preload=False):\n \"\"\" Creates generators for autoencoder net\n \"\"\"\n\n # The autoencoder pulls images from both test and train\n # sets, avoiding over-fitting and giving better results\n (datagen_X, dgdx, X) = setup_generator(data_dir = 'images/train',\n shape_gen = shape_in,\n augment = augment,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_Y, dgdy, y) = setup_generator(data_dir = 'images/train',\n shape_gen = shape_out,\n augment = augment,\n mask_channels = (0, 1, 2),\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n\n # fit generator for normalization\n datagen_X.fit_generator(dgdx, nb_iter=nb_iter)\n\n # transfer over learned norm parameters\n x_mean = datagen_X.config['mean']\n x_std = datagen_X.config['std']\n datagen_Y.config['mean'] = x_mean \n datagen_Y.config['std'] = x_std\n\n # Save variables\n _pickle_fit_vars(norm_path, (x_mean, x_std))\n\n # pack up and return generator\n autoencode_generator = dgdx + dgdy\n\n return autoencode_generator\n\n\ndef get_generators(batch_size=8,\n augment=False,\n nb_iter=200,\n shape_in=(572,572, 20),\n shape_out=(388,388, 11),\n seed=0,\n verbose=1,\n channel_idxs=None,\n norm_path=None,\n read_format='tbl',\n preload=False):\n\n assert channel_idxs is None or len(channel_idxs) == shape_out[2]\n shape_in = (shape_in[0], shape_in[1])\n shape_out = (shape_out[0], shape_out[1])\n\n (datagen_X, dgdx, X) = setup_generator(data_dir = 'images/train/',\n shape_gen = shape_in,\n augment = augment,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_Y, dgdy, y) = setup_generator(data_dir = 'masks/train/',\n shape_gen = shape_in,\n shape_gen_out = shape_out,\n augment = augment,\n norm_gen = False,\n mask_channels=channel_idxs,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_validX, dvdx, vX) = setup_generator(data_dir = 'images/valid/',\n shape_gen = shape_in,\n augment = False,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_validY, dvdy, vy) = setup_generator(data_dir = 'masks/valid/',\n shape_gen = shape_in,\n shape_gen_out = shape_out,\n augment = False,\n norm_gen = False,\n mask_channels=channel_idxs,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n\n\n # enforce syncing\n 
datagen_X.config['sync_seed'] = seed\n datagen_Y.config['sync_seed'] = seed\n\n datagen_validX.config['sync_seed'] = seed\n datagen_validY.config['sync_seed'] = seed\n # use previously fitted values from autoencoder run, otherwise\n # refit generator\n if norm_path in os.listdir():\n data_file = open(norm_path, 'rb')\n (x_mean, x_std) = pickle.load(data_file)\n data_file.close()\n\n datagen_X.config['mean'] = x_mean\n datagen_X.config['std'] = x_std\n else:\n datagen_X.fit_generator(dgdx, nb_iter=nb_iter)\n\n\n # make sure the validation data is using the same mean/std\n datagen_validX.config['mean'] = datagen_X.config['mean']\n datagen_validX.config['std'] = datagen_X.config['std']\n # synchronize the two generators (+ operation creates tuple)\n train_generator = dgdx + dgdy\n valid_generator = dvdx + dvdy\n\n # return the zipped up generators\n return (train_generator, valid_generator)\n\n","repo_name":"platawiec/sat-segment","sub_path":"generator_utils.py","file_name":"generator_utils.py","file_ext":"py","file_size_in_byte":11572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"2002108982","text":"from datetime import datetime\nfrom typing import List, Tuple, Optional\nfrom discord import PartialEmoji\nfrom .._connection import _PostgresConnection\n\n\nclass EmojisUsedMixin(_PostgresConnection):\n async def add_used_emotes(self, to_cache: List[Tuple[int, PartialEmoji, datetime]]):\n params = {\n \"times\": [],\n \"guild_ids\": [],\n \"names\": [],\n \"ids\": [],\n \"animateds\": [],\n }\n\n for guild_id, emote, time in to_cache:\n params[\"guild_ids\"].append(guild_id)\n params[\"times\"].append(time)\n params[\"names\"].append(emote.name)\n params[\"ids\"].append(emote.id)\n params[\"animateds\"].append(emote.animated)\n\n await self.cur.execute(\n f\"INSERT INTO emotes_used (time, guild_id, name, emote_id, animated) VALUES (unnest(%(times)s), unnest(%(guild_ids)s), unnest(%(names)s), unnest(%(ids)s), unnest(%(animateds)s))\",\n parameters=params\n )\n\n async def get_recently_used_emote(self, guild_id: int, name: str) -> Optional[PartialEmoji]:\n await self.cur.execute(\n \"SELECT first(animated, time), first(name, time), emote_id FROM emotes_used WHERE guild_id=%(guild_id)s and lower(\\\"name\\\")=%(name)s group by emote_id\",\n parameters={\"guild_id\": guild_id, \"name\": name.lower()}\n )\n emotes = await _get_emotes(self.cur)\n if emotes:\n return next((emote for emote in emotes if emote.name == name), emotes[0])\n\n async def get_recently_used_emotes(self, guild_id: int, prefix: str, limit: int = 25) -> List[PartialEmoji]:\n if not prefix:\n await self.cur.execute(\n \"select first(animated, time), first(name, time), emote_id from emotes_used where guild_id=%(guild_id)s group by emote_id limit %(limit)s\",\n parameters={\"guild_id\": guild_id, \"limit\": limit}\n )\n else:\n # This one doesn't use an index for the whole thing. Should be OK though\n await self.cur.execute(\n \"select first(animated, time), first(name, time), emote_id from emotes_used where guild_id=%(guild_id)s and starts_with(lower(\\\"name\\\"), %(prefix)s) group by emote_id limit %(limit)s\",\n parameters={\"guild_id\": guild_id, \"prefix\": prefix, \"limit\": limit}\n )\n return await _get_emotes(self.cur)\n\n\nasync def _get_emotes(cur) -> List[PartialEmoji]:\n results = await cur.fetchall()\n return [\n PartialEmoji(animated=animated, name=name.rstrip(\" \"), id=id)\n for animated, name, id in results\n ]\n","repo_name":"NQN-Discord/sql_helper","sub_path":"sql_helper/mixins/emojis_used.py","file_name":"emojis_used.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"73350677906","text":"#!/usr/bin/env python\n\"\"\"Resort a BAM file karyotypically to match GATK's preferred file order.\n\nBroad's GATK and associated resources prefer BAM files sorted as:\n\n chr1, chr2... chr10, chr11... chrX\n\ninstead of the simple alphabetic sort:\n\n chr1, chr10, chr2 ...\n\nThis takes a sorted BAM files with an alternative ordering of chromosomes\nand re-sorts it the karyotypic way.\n\nUsage:\n resort_bam_karyotype.py []\n\n is a *.dict file produced by Picard that identifies the order\nof chromsomes to sort by:\n\njava -jar CreateSequenceDictionary.jar REFERENCE=your.fasta OUTPUT=your.dict\n\nRequires:\n pysam -- http://code.google.com/p/pysam/\n\"\"\"\nimport os\nimport sys\n\nimport pysam\n\ndef main(ref_file, *in_bams):\n ref = pysam.Samfile(ref_file, \"r\")\n sorter = SortByHeader(ref.header)\n for bam in in_bams:\n sort_bam(bam, sorter.header_cmp, sorter.to_include)\n\ndef sort_bam(in_bam, sort_fn, to_include=None):\n out_file = \"%s-ksort%s\" % os.path.splitext(in_bam)\n index_file = \"%s.bai\" % in_bam\n if not os.path.exists(index_file):\n pysam.index(in_bam)\n\n orig = pysam.Samfile(in_bam, \"rb\")\n chroms = [(c[\"SN\"], c) for c in orig.header[\"SQ\"]]\n new_chroms = chroms[:]\n if to_include:\n new_chroms = [(c, x) for (c, x) in new_chroms if c in to_include]\n new_chroms.sort(sort_fn)\n remapper = _id_remapper(chroms, new_chroms)\n new_header = orig.header\n new_header[\"SQ\"] = [h for (_, h) in new_chroms]\n\n new = pysam.Samfile(out_file, \"wb\", header=new_header)\n for (chrom, _) in new_chroms:\n for read in orig.fetch(chrom):\n write = True\n read.rname = remapper[read.rname]\n try:\n read.mrnm = remapper[read.mrnm]\n # read pair is on a chromosome we are not using\n except KeyError:\n assert to_include is not None\n write = False\n if write:\n new.write(read)\n\ndef _id_remapper(orig, new):\n \"\"\"Provide a dictionary remapping original read indexes to new indexes.\n\n When re-ordering the header, the individual read identifiers need to be\n updated as well.\n \"\"\"\n new_chrom_to_index = {}\n for i_n, (chr_n, _) in enumerate(new):\n new_chrom_to_index[chr_n] = i_n\n remap_indexes = {}\n for i_o, (chr_o, _) in enumerate(orig):\n if chr_o in new_chrom_to_index.keys():\n remap_indexes[i_o] = new_chrom_to_index[chr_o]\n remap_indexes[None] = None\n return remap_indexes\n\nclass SortByHeader:\n \"\"\"Provide chromosome sorting to match an existing header.\n \"\"\"\n def __init__(self, base_header):\n self._chrom_indexes = {}\n self.to_include = []\n for i, item in enumerate(base_header[\"SQ\"]):\n self._chrom_indexes[item[\"SN\"]] = i\n self.to_include.append(item[\"SN\"])\n\n def header_cmp(self, one, two):\n return cmp(self._chrom_indexes[one[0]],\n self._chrom_indexes[two[0]])\n\ndef sort_by_karyotype(one, two):\n \"\"\"Sort function to order reads by karyotype.\n \"\"\"\n return cmp(_split_to_karyotype(one[0]),\n _split_to_karyotype(two[0]))\n\ndef _split_to_karyotype(name):\n parts = name.replace(\"chr\", \"\").split(\"_\")\n try:\n parts[0] = int(parts[0])\n except ValueError:\n pass\n # anything with an extension (_random) goes at the end\n if len(parts) > 1:\n parts.insert(0, \"z\")\n return parts\n\nif __name__ == \"__main__\":\n main(*sys.argv[1:])\n\n","repo_name":"chapmanb/bcbb","sub_path":"nextgen/scripts/utils/resort_bam_karyotype.py","file_name":"resort_bam_karyotype.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":580,"dataset":"github-code","pt":"48"}
+{"seq_id":"31564248730","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom numpy import linalg as la\nimport struct\nfrom array import array\nimport matplotlib.pyplot as plt\nfrom os.path import join\n#from scipy.linalg import eigh\n#from sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\n#from sklearn import decomposition\nfrom math import sqrt\nnp.random.seed(2)\n\n#Constants for PCA\nD = 5\nM = 3\nK = 10\ndimRed = [1, 2, 8, 16, 32, 64, 128, 256, 612, 783]\n\ndef runTest():\n #testImageFile = 't10k-images.idx3-ubyte'\n #testLabelFile = 't10k-labels.idx1-ubyte'\n trainImageFile = 'train-images.idx3-ubyte'\n trainLabelFile = 'train-labels.idx1-ubyte'\n trainImages, trainLabels = readImagesAndLabels(trainLabelFile, trainImageFile)\n #testImages, testLabels = readImagesAndLabels(testLabelFile, testImageFile)\n trainImagesSorted = parseImagesIntoArrays(trainImages, trainLabels) \n del trainImageFile, trainLabelFile, trainImages, trainLabels\n #for digit in range(len(trainImagesSorted)):\n # mat = np.array(trainImagesSorted[digit])\n # mat = mat.reshape(len(trainImagesSorted[digit]) ,784)\n # pc, pcmean = pcaViaSVD(mat, K)\n # principal_C.append(pc)\n # pcMeans.append(pcmean)\n #digitNum = 0\n #for digit in pcMeans:\n # #workableArr = np.reshape(digit, (28,28))\n # plt.figure()\n # plt.title(\"Mean of each PC, {}\".format(digitNum))\n # plt.xlabel(\"PC Number\")\n # plt.ylabel(\"Value\")\n # plt.plot(digit)\n # plt.show()\n # digitNum += 1\n nrmsesTotal = []\n for k in range(len(dimRed)):\n #principal_C = []\n #pcMeans = []\n #r2s = []\n nrmses = []\n for digit in range(len(trainImagesSorted)):\n mat = np.array(trainImagesSorted[digit])\n mat = mat.reshape(len(trainImagesSorted[digit]),784)\n pc, pcmean, recon = pcaViaSVD(mat, dimRed[k])\n #principal_C.append(pc)\n #pcMeans.append(pcmean)\n #r2 = 0\n #r2 += r2_score(mat, recon)\n rmse = sqrt(mean_squared_error(mat, recon))\n nrmse = rmse/sqrt(np.mean(mat**2))\n #r2s.append(r2)\n nrmses.append(nrmse)\n nrmsesTotal.append(nrmses)\n for i in range(10):\n nrmseDigit = []\n for n in range(len(nrmsesTotal)):\n nrmseDigit.append(nrmsesTotal[n][i]) \n plt.figure()\n plt.title(\"Normalized Root Mean Square Error for Digit {}\".format(i))\n default_x_ticks = range(len(dimRed))\n plt.xticks(default_x_ticks, dimRed)\n plt.xlabel(\"PC number\")\n plt.ylabel(\"R2\")\n plt.plot(default_x_ticks, nrmseDigit)\n plt.show()\n \n \ndef readImagesAndLabels(labelFile, imageFile):\n path = '..\\\\'\n labelFile = join(path, labelFile)\n imageFile = join(path, imageFile)\n labels = []\n with open(labelFile,'rb') as l:\n magic, size = struct.unpack(\">II\", l.read(8))\n if magic != 2049:\n raise ValueError('Magic number error, expected 2049, got {}'.format(magic))\n labels = array(\"B\", l.read()) \n \n with open(imageFile,'rb') as f:\n magic, size = struct.unpack(\">II\", f.read(8))\n nrows, ncols = struct.unpack(\">II\", f.read(8))\n if magic != 2051:\n raise ValueError('Magic number error, expected 2051, got {}'.format(magic))\n image_data = array(\"B\", f.read())\n images = []\n for i in range(size):\n images.append([0]*nrows*ncols)\n for i in range(size):\n img = np.array(image_data[i*nrows*ncols:(i+1)*nrows*ncols])\n img = img.reshape(1, 784)\n images[i][:] = img\n return images, labels\n \ndef parseImagesIntoArrays(images, labels):\n arrOfImagesSortedByLabel = {}\n for i in range(len(images)):\n if labels[i] in arrOfImagesSortedByLabel:\n arrOfImagesSortedByLabel[labels[i]].append(np.matrix(images[i]))\n else:\n 
arrOfImagesSortedByLabel[labels[i]] = [np.matrix(images[i])]\n return arrOfImagesSortedByLabel\n \ndef pcaViaSVD(matrix, k):\n #C = np.matmul(matrix.T, matrix)\n #print(\"C = \\n\", C)\n #l, principalA = eigh(C, eigvals=((784-K, 783)))\n #idx = l.argsort()[::-1]\n #l, principalA = l[idx], principalA[:, idx]\n #principalA = principalA.T\n #newCoords = np.matmul(principalA, matrix.T)\n #print(\"l = \\n\", l)\n #print(\"V = \\n\", principalA)\n #principalC = matrix.dot(principalA)\n #print(\"Y = \\n\", principalC)\n U, s, Vt = la.svd(matrix, full_matrices=False)\n #V = Vt.T\n S = np.diag(s)\n #PC_k = principalC[:, 0:K]\n US_k = U[:, 0:k].dot(S[0:k, 0:k])\n recon = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]\n means = []\n for pc in US_k.T:\n minVal = np.min(pc)\n for e in range(len(pc)):\n val = pc[e]\n newVal = (val-minVal)\n pc[e] = newVal\n maxVal = np.max(pc)\n for e in range(len(pc)):\n val = pc[e]\n newVal = ((val)/maxVal)\n pc[e] = newVal\n #means.append(np.mean(pc))\n\n return US_k, means, recon\n \nif __name__ == \"__main__\":\n runTest()\n ","repo_name":"WolfWindOW/MLDL","sub_path":"MNIST/MNIST.py","file_name":"MNIST.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4296649396","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport os\n\nimport argparse\nimport tensorflow as tf\n\n\nclass BaseRun(object):\n def __init__(self):\n self.ARGS = None\n self.parser_init()\n self.tf_init()\n\n def parser_init(self):\n parser = argparse.ArgumentParser(description='Train global model')\n parser.add_argument('train_or_test', nargs='?', help='choose train or test model', choices=['train', 'test'],\n default='train')\n parser.add_argument('--gpu', help=\"gpu device\", default='4')\n parser.add_argument('--model_id', help=\"model id\", default='0')\n parser.add_argument('--model_ids', help=\"model ids\", default='0,1,2,3')\n parser.add_argument('--data_set', help=\"data_set\", default='en_es')\n self.ARGS = parser.parse_args()\n\n def tf_init(self):\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(self.ARGS.gpu)\n tf.logging.set_verbosity(tf.logging.ERROR)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n return sess\n\n def trainer_init(self):\n NotImplementedError\n\n def model_init(self):\n NotImplementedError\n\n def run(self):\n sess = self.tf_init()\n trainer = self.trainer_init()\n model = self.model_init()\n if self.ARGS.train_or_test == 'train':\n sess.run(tf.global_variables_initializer())\n trainer.train(model, sess)\n else:\n model_ids = [int(item) for item in self.ARGS.model_ids.split(',')]\n trainer.restore_and_test_model(model, sess, model_ids=model_ids)\n","repo_name":"nghuyong/MTL-SLAM","sub_path":"models/base/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
+{"seq_id":"42036682276","text":"import os\nimport statistics\nimport sys\nimport time\nimport random\nfrom datetime import datetime\n\nfrom django import forms\nfrom django.http import HttpResponse, request\nfrom django.shortcuts import render, HttpResponseRedirect\nfrom django.urls import get_resolver, reverse\nfrom django.db.models import Avg\nfrom django.utils.dateparse import parse_datetime\nfrom jinja2 import Environment, FileSystemLoader\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Bar, Line, Page, Tab, HeatMap, Grid\nfrom pyecharts.globals import CurrentConfig\nfrom pyecharts.faker import Faker\n\nfrom .common.util.file import (parse_file_size_int_to_str,\n parse_file_size_str_to_int)\nfrom .models import DataFile, DataFileRecord, StatFileRecord\n\nCurrentConfig.GLOBAL_ENV = Environment(\n loader=FileSystemLoader(\"stats/templates/stats/\"))\nCurrentConfig.ONLINE_HOST = '/static/stats/'\n\n\ndef index(request):\n context = {'url_list': set(v[1].replace(\n 'stats/', '') for k, v in get_resolver(None).reverse_dict.items() if '$' not in v[1])}\n return render(request, 'stats/index.html', context)\n\n\ndef get_changed_data_files(datatime):\n records = DataFileRecord.objects.filter(date_time__date=datatime.date())\n\n ret = []\n for r in records:\n pre_day_rec = DataFileRecord.objects.filter(\n data_file=r.data_file, date_time__lt=r.date_time.date()).order_by('date_time').first()\n if not pre_day_rec:\n ret.append((r.data_file.full_name, r.size))\n else:\n ret.append((r.data_file.full_name, r.size - pre_day_rec.size))\n\n return ret\n\n\ndef data_file_changes(request):\n date = request.GET.get('date', '')\n if date == '':\n date = datetime.today()\n else:\n try:\n date = datetime.strptime(date + ' 23:59:59', \"%Y-%m-%d %H:%M:%S\")\n except ValueError as e:\n pass\n if isinstance(date, datetime):\n datas = get_changed_data_files(date)\n datas = sorted(datas, key=lambda d: d[1], reverse=True)\n datas = [\n {'file_name': k, 'size': parse_file_size_int_to_str(v)} for k, v in datas]\n context = {'d': datas}\n return render(request, 'stats/table.html', context)\n else:\n return HttpResponse('Wrong date parameter')\n\n\ndef data_file_list(request):\n file_name_count = request.GET.get('count', '')\n if file_name_count is not None and file_name_count.isnumeric():\n file_name_count = int(file_name_count)\n else:\n file_name_count = 10\n\n data_files = DataFile.objects.all()\n datas = []\n for obj in data_files:\n datas.append({'file_name': obj.full_name, 'size': obj.current_size()})\n datas = sorted(datas, key=lambda d: d['size'], reverse=True)\n datas = [{'file_name': obj['file_name'], 'size': parse_file_size_int_to_str(\n obj['size'])} for obj in datas][:file_name_count]\n context = {'d': datas}\n return render(request, 'stats/table.html', context)\n\n\ndef data_file_info(request):\n file_names = request.GET.getlist('file_name')\n if not file_names:\n return HttpResponse(\"file_name parameter missing\")\n\n tab = Tab()\n\n for file_name in file_names:\n data_file = DataFile.objects.filter(file_name=file_name).first()\n if not data_file:\n return HttpResponse(f\"Can not find {file_name}\")\n\n data_file_records = DataFileRecord.objects.filter(\n data_file=data_file).order_by('date_time')\n if len(data_file_records) == 0:\n return HttpResponse(f\"No data file record for {file_name}\")\n\n date_list = []\n size_list = []\n for r in data_file_records:\n size_list.append(max(round(r.size / 1024 / 1024, 2), 0.01))\n date_list.append(r.date_time.date())\n\n bar = (\n Bar()\n 
.add_xaxis(date_list)\n .add_yaxis(file_name, size_list)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"Size(MB)\"))\n )\n tab.add(bar, file_name)\n return HttpResponse(tab.render_embed())\n\n\ndef handle_uploaded_data_file(f):\n lines = []\n for chunk in f.chunks():\n for line in chunk.splitlines():\n if line != '':\n lines.append(line)\n\n version = lines[0].decode('utf-8').split('_')[0]\n date_time_str = lines[1].decode('utf-8').replace('/', '-')\n\n for line in lines[2:]:\n full_name, size_str = line.decode('utf-8').split(',', 2)\n full_name = full_name.replace('\\\\', '/').replace('//', '/')\n full_name = full_name.split('/ns/data/')[1]\n\n data_file_created = False\n new_record_datetime = parse_datetime(date_time_str)\n new_record_size = parse_file_size_str_to_int(size_str)\n data_file = DataFile.objects.filter(full_name=full_name).first()\n if not data_file:\n data_file = DataFile(full_name=full_name,\n file_name=os.path.basename(full_name))\n data_file.save()\n data_file_created = True\n\n create_data_file_record = False\n if data_file_created:\n create_data_file_record = True\n else:\n # check and update same day record\n same_day_data_file_record = DataFileRecord.objects.filter(data_file=data_file,\n date_time__date=new_record_datetime.date()).first()\n if same_day_data_file_record:\n if new_record_size != same_day_data_file_record.size and new_record_datetime > same_day_data_file_record.date_time:\n same_day_data_file_record.size = new_record_size\n same_day_data_file_record.date_time = new_record_datetime\n same_day_data_file_record.save()\n else:\n # check the closet earlier/later day\n pre_day_rec = DataFileRecord.objects.filter(data_file=data_file,\n date_time__lt=new_record_datetime.date()).order_by('-date_time').first()\n next_day_rec = DataFileRecord.objects.filter(data_file=data_file,\n date_time__gt=new_record_datetime.date()).order_by(\n 'date_time').first()\n if not pre_day_rec and not next_day_rec:\n create_data_file_record = True\n elif pre_day_rec and pre_day_rec.size != new_record_size and (not next_day_rec or next_day_rec.size != new_record_size):\n create_data_file_record = True\n elif next_day_rec and next_day_rec.size != new_record_size and (not pre_day_rec or pre_day_rec.size != new_record_size):\n create_data_file_record = True\n # same size record in a row, keep the first one\n if next_day_rec and next_day_rec.size == new_record_size:\n next_day_rec.date_time = new_record_datetime\n next_day_rec.save()\n\n if create_data_file_record:\n data_file_record = DataFileRecord(size=new_record_size,\n date_time=new_record_datetime,\n data_file=data_file)\n data_file_record.save()\n\n\nclass UploadDataFileForm(forms.Form):\n file = forms.FileField()\n\n\ndef add_data_file_record(request):\n if request.method == 'POST':\n form = UploadDataFileForm(request.POST, request.FILES)\n if form.is_valid():\n start = time.time()\n handle_uploaded_data_file(request.FILES['file'])\n return HttpResponse(f'success, time elapsed:{time.time() - start}')\n else:\n form = UploadDataFileForm()\n return render(request, 'stats/upload.html', {'form': form, 'title': 'Add data file record'})\n\n\ndef phase_record(request):\n phase_name = request.GET.get('phase_name', '')\n sub_phase_name = request.GET.get('sub_phase_name', '')\n date_time_str = request.GET.get('date_time_str', '')\n\n if not phase_name or not sub_phase_name:\n return HttpResponse('Invalid paramaters')\n\n statFileRecord = StatFileRecord.objects.filter(\n phase_name=phase_name, sub_phase_name=sub_phase_name, 
date_time__date=parse_datetime(date_time_str).date()).first()\n    if not statFileRecord:\n        return HttpResponse(\"no record\")\n\n    lines = statFileRecord.file.open(mode=\"r\").read().splitlines()\n    statFileRecord.file.close()\n\n    frames = []\n    cpu_times = []\n    gpu_times = []\n    drawcall_cnts = []\n\n    # Frame_Time(ms),CPU_Time(ms),GPU_Time(ms),Draw_Call(100),ESP2D(ms),SORT3D(ms),EVENTS\n    for line in lines[2:]:\n        attrs = line.split(',')\n        frames.append(round(1000 / float(attrs[0]), 2))\n        cpu_times.append(round(float(attrs[1]), 2))\n        gpu_times.append(round(float(attrs[2]), 2))\n        drawcall_cnts.append(round(float(attrs[3]), 2))\n\n    page = Page()\n    opt_avg = opts.MarkPointOpts(data=[opts.MarkPointItem(type_=\"average\")])\n\n    perf_line = (\n        Line(init_opts=opts.InitOpts(width=\"1800px\", height=\"900px\"))\n        .add_xaxis(list(range(1, len(frames) + 1)))\n        .add_yaxis(\"frames\", frames, markpoint_opts=opt_avg)\n        .add_yaxis(\"cpu_times\", cpu_times, markpoint_opts=opt_avg)\n        .add_yaxis(\"gpu_times\", gpu_times, markpoint_opts=opt_avg)\n        .add_yaxis(\"drawcall_cnts\", drawcall_cnts, markpoint_opts=opt_avg)\n        .set_global_opts(\n            title_opts=opts.TitleOpts(title=f'{phase_name}:{sub_phase_name}'),\n            yaxis_opts=opts.AxisOpts(max_=100),\n            datazoom_opts=[opts.DataZoomOpts(\n                range_start=1, range_end=sys.maxsize)]\n        )\n    )\n    page.add(perf_line)\n\n    return HttpResponse(page.render_embed())\n\n\ndef phase_stat(request):\n    phase_name = request.GET.get('phase_name', '')\n    sub_phase_name = request.GET.get('sub_phase_name', '')\n\n    if not phase_name or not sub_phase_name:\n        return HttpResponse('Invalid parameters')\n\n    dates = []\n    frames = []\n    cpu_times = []\n    gpu_times = []\n    drawcall_cnts = []\n\n    statFileRecords = StatFileRecord.objects.filter(\n        phase_name=phase_name, sub_phase_name=sub_phase_name).order_by('date_time')\n    for statFileRecord in statFileRecords:\n        dates.append(statFileRecord.date_time.date())\n        frames.append(round(statFileRecord.avg_fps, 2))\n        cpu_times.append(round(statFileRecord.avg_cpu, 2))\n        gpu_times.append(round(statFileRecord.avg_gpu, 2))\n        drawcall_cnts.append(round(statFileRecord.avg_drawcall, 2))\n\n    if len(dates) > 0:\n        page = Page()\n        opt_avg = opts.MarkPointOpts(\n            data=[opts.MarkPointItem(type_=\"average\")])\n\n        perf_line = (\n            Line(init_opts=opts.InitOpts(width=\"1800px\", height=\"900px\"))\n            .add_xaxis(dates)\n            .add_yaxis(\"frames\", frames, markpoint_opts=opt_avg)\n            .add_yaxis(\"cpu_times\", cpu_times, markpoint_opts=opt_avg)\n            .add_yaxis(\"gpu_times\", gpu_times, markpoint_opts=opt_avg)\n            .add_yaxis(\"drawcall_cnts\", drawcall_cnts, markpoint_opts=opt_avg)\n            .set_global_opts(\n                title_opts=opts.TitleOpts(\n                    title=f'{phase_name}:{sub_phase_name}'),\n                yaxis_opts=opts.AxisOpts(max_=100),\n                datazoom_opts=[opts.DataZoomOpts(\n                    range_start=1, range_end=sys.maxsize)]\n            )\n        )\n        page.add(perf_line)\n\n        return HttpResponse(page.render_embed())\n    else:\n        return HttpResponse(\"no record\")\n\n\ndef create_uploaded_stats_file(file, version, phase_name, sub_phase_name, date_time_str):\n    statFileRecord = StatFileRecord.objects.create(\n        phase_name=phase_name, sub_phase_name=sub_phase_name, version=version, file=file, date_time=parse_datetime(date_time_str))\n    lines = statFileRecord.file.open(mode=\"r\").read().splitlines()\n    statFileRecord.file.close()\n\n    frames = []\n    cpu_times = []\n    gpu_times = []\n    drawcall_cnts = []\n\n    # Frame_Time(ms),CPU_Time(ms),GPU_Time(ms),Draw_Call(100),ESP2D(ms),SORT3D(ms),EVENTS\n    for line in lines[2:]:\n        attrs = line.split(',')\n        
frames.append(float(attrs[0]))\n cpu_times.append(float(attrs[1]))\n gpu_times.append(float(attrs[2]))\n drawcall_cnts.append(float(attrs[3]))\n\n statFileRecord.avg_fps = 1000 / statistics.mean(frames)\n statFileRecord.avg_cpu = statistics.mean(cpu_times)\n statFileRecord.avg_gpu = statistics.mean(gpu_times)\n statFileRecord.avg_drawcall = statistics.mean(drawcall_cnts)\n statFileRecord.save()\n\n print(f'Added stat: P{phase_name} {sub_phase_name} {date_time_str}')\n\n\nclass UploadStatsFileForm(forms.Form):\n files = forms.FileField(\n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n\ndef add_stats_file_record(request):\n if request.method == 'POST':\n form = UploadStatsFileForm(request.POST, request.FILES)\n if form.is_valid():\n start = time.time()\n\n files = request.FILES.getlist('files')\n\n for file in files:\n for chunk in file.chunks():\n lines = chunk.splitlines()[:2]\n version = lines[0].decode('utf-8').split('_')[0]\n date_time_str = lines[1].decode('utf-8').replace('/', '-')\n break\n phase_name, sub_phase_name = file.name.replace(\n '.csv', '').split('-')\n\n create_uploaded_stats_file(\n file, version, phase_name, sub_phase_name, date_time_str)\n return HttpResponse(f'Upload finished,time elapsed:{round(time.time() - start, 2)}')\n else:\n form = UploadStatsFileForm()\n return render(request, 'stats/upload.html', {'form': form, 'title': 'Add fps record'})\n\ndef get_phase_avg_fps(date, phase_name, sub_phase_name):\n avg_fps = StatFileRecord.objects.filter(\n phase_name=phase_name, sub_phase_name=sub_phase_name, date_time__date=date).values('avg_fps').aggregate(Avg('avg_fps'))\n if len(avg_fps.values()) > 0:\n return list(avg_fps.values())[0]\n else:\n return 0\n\n\ndef phase_fps_heatmap(request):\n phases = StatFileRecord.objects.values_list(\n 'phase_name', 'sub_phase_name').distinct()\n date_times = StatFileRecord.objects.values_list(\n 'date_time', flat=True).distinct()\n dates = sorted(list({item.date() for item in date_times}))\n if len(dates) == 0:\n return HttpResponse('No records for fps')\n # return HttpResponse(f'phase num:{len(phases)} date_time num:{len(dates)}\\n' + '\\n'.join( [f'{t[0]} {t[1]}' for t in list(phases)]), content_type=\"text/plain\")\n # print(f'phase num:{len(phases)} date_time num:{len(dates)}')\n # print('P100', '140_30_A_RobotM_CR_GuruBattle', get_phase_avg_fps(parse_datetime('2021-11-26 0:0:0'), 'P100', '140_30_A_RobotM_CR_GuruBattle'))\n\n latest_date = dates[-1]\n latest_fps_list = []\n for phase_index, phase_info in enumerate(phases):\n latest_fps_list.append((phase_info[0], phase_info[1], get_phase_avg_fps(latest_date, phase_info[0], phase_info[1])))\n latest_fps_list.sort(key=lambda x: x[2], reverse=True)\n\n fps_datas = []\n for date_index, date in enumerate(dates):\n for phase_index, phase_info in enumerate(latest_fps_list):\n fps_datas.append([date_index, phase_index, round(get_phase_avg_fps(date, phase_info[0], phase_info[1]), 2)])\n c = (\n HeatMap()\n .add_xaxis(dates)\n .add_yaxis(\n series_name=\"phases\", \n yaxis_data=[f'{t[0]} {t[1]}' for t in latest_fps_list], \n value=fps_datas,\n label_opts=opts.LabelOpts(\n is_show=True, color=\"#fff\", position=\"inside\"\n ), \n )\n .set_global_opts(\n legend_opts=opts.LegendOpts(is_show=False),\n title_opts=opts.TitleOpts(title=\"FPS HeatMap\"),\n yaxis_opts=opts.AxisOpts(axislabel_opts={\"interval\":\"0\"}),\n visualmap_opts=opts.VisualMapOpts(\n min_=0, max_=30, is_calculable=True, orient=\"horizontal\", pos_left=\"center\",\n range_color=[\"#d94e5d\"]* 28 + 
[\"#eac763\", \"#50a3ba\"]\n ),\n )\n )\n\n grid = Grid(init_opts=opts.InitOpts(width=\"1200px\", height=\"800px\"))\n grid.add(c, grid_opts=opts.GridOpts(pos_left=260))\n return HttpResponse(grid.render_embed())\n\n","repo_name":"superzscy/django_data_visualization_proj","sub_path":"stats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1387503094","text":"import heapq\nimport sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\ndata = list(map(int, input().split()))\nanswer = 0\nheapq.heapify(data)\nfor _ in range(m):\n first = heapq.heappop(data)\n seoncd = heapq.heappop(data)\n heapq.heappush(data, first + seoncd)\n heapq.heappush(data, first + seoncd)\n\nprint(sum(data))","repo_name":"Dltmd202/BOJ-ProblemSlove","sub_path":"python/15903/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14532513656","text":"# names=['Aung Aung','Kyaw Kyaw','Su Su','Ma Ma'];\n\n# for loop\n# for name in names :\n# if name=='Su Su' :\n# print(f'{name} is constructor.');\n# break\n# else :\n# print(f'{name} is student.');\n\n\n#while loop\nnum=0\nwhile num<10 :\n if num>5 :\n break;\n if num%2 == 0 :\n print(f'{num} is even number.');\n else :\n print(f'{num} is odd number.');\n num+=1;","repo_name":"NangSengHarn/python-learning","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1570234803","text":"\"\"\" Evaluating the effectiveness of a variety of clustering algorithms \n\"\"\"\n__author__ = \"Rohan Pandit\" \n\nimport sys\nfrom itertools import cycle\nfrom time import time\nimport os\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.constants import k as k_b\nfrom sklearn import cluster\nfrom sklearn.neighbors import BallTree\nfrom sklearn.utils import extmath\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.cluster._dbscan_inner import dbscan_inner\n\nfilename = sys.argv[1]\n\nK_MEANS \t\t= True\nAFFINITY_PROP \t= False\nMEAN_SHIFT \t\t= True\nAGGLOMERATIVE \t= True\nDBSCAN \t\t\t= True\n\n#Example Usage: python cluster.py 5000_SOD1\ndef main():\n\t####################### Loading Files ##########################\n\tpdb_name = filename.split(\"_\")[0]\n\n\talgorithms = ['k_means', 'affinity_prop', 'affinity_prop_eps', \n\t\t\t\t 'mean_shift', 'mean_shift_eps', 'agglomerative',\n\t\t\t\t 'DBSCAN', 'DBSCAN_eps', ]\n\tfor name in algorithms:\n\t\tif not os.path.exists(\"output/%s/%s\"%(filename, name)):\t\n\t\t\tos.makedirs(\"output/%s/%s\"%(filename, name))\n\n\tprojections = np.load(\"output/%s/projections.npy\" % filename)[:, :2]\n\tRMSDs = np.load(\"output/%s/RMSD.npy\" % filename)\n\tepsilons = np.load(\"output/%s/epsilons.npy\" % filename)\n\teigenvals = np.load(\"output/%s/eigenvals.npy\" % filename)\n\n\t########################## K-Means ############################\n\tif K_MEANS:\n\t\tt0 = time()\n\t\tprint(\"Starting K-Means for\", filename)\n\n\t\tk = cluster.KMeans(n_clusters=10, n_jobs=-1).fit(RMSDs)\n\n\t\tnp.save(\"output/%s/k_means/cluster_centers\"%filename, k.cluster_centers_)\n\t\tnp.save(\"output/%s/k_means/labels\"%filename, k.labels_)\n\n\t\tprint(\"num clusters: \", k.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\t\t#cluster_plot(projections, labels)\n\n\t################### Affinity Propagation #######################\n\tif AFFINITY_PROP:\n\t\tt0 = time()\n\t\tprint(\"Starting Affinity Propagation for\", filename)\n\n\t\taf = cluster.AffinityPropagation(verbose=True, affinity='precomputed').fit(RMSDs)\n\n\t\tnp.save(\"output/%s/affinity_prop/cluster_centers\"%filename, af.cluster_centers_)\n\t\tnp.save(\"output/%s/affinity_prop/labels\"%filename, af.labels_)\n\n\t\tprint(\"num clusters: \", af.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t################ Affinity Propagation with Epsilons ###############\n\tif AFFINITY_PROP:\n\t\tt0 = time()\n\t\tprint(\"Starting Affinity Propagation with epsilons for\", filename)\n\n\t\taf = cluster.AffinityPropagation(preference=epsilons, verbose=True, \n\t\t\t\t\t\t\t\t\t\t\taffinity='precomputed').fit(RMSDs)\n\n\t\tnp.save(\"output/%s/affinity_prop_eps/cluster_centers\"%filename, af.cluster_centers_)\n\t\tnp.save(\"output/%s/affinity_prop_eps/labels\"%filename, af.labels_)\n\n\t\tprint(\"num clusters: \", cluster_centers.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t############################ Mean Shift #############################\n\tif MEAN_SHIFT:\n\t\tt0 = time()\n\t\tprint(\"Starting Mean Shift for\", filename)\n\n\t\tms = cluster.MeanShift(bandwidth=np.mean(RMSDs), bin_seeding=False).fit(RMSDs)\n\n\t\tnp.save(\"output/%s/mean_shift/cluster_centers\"%filename, ms.cluster_centers_)\n\t\tnp.save(\"output/%s/mean_shift/labels\"%filename, ms.labels_)\n\n\t\tprint(\"num clusters: \", 
ms.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t####################### Mean Shift with Epsilons #########################\n\tif MEAN_SHIFT:\n\t\tt0 = time()\n\t\tprint(\"Starting Mean Shift with epsilons for\", filename)\n\n\t\tcluster_centers, labels = variable_bw_mean_shift(RMSDs, bandwidth_array=epsilons)\n\n\t\tnp.save(\"output/%s/mean_shift_eps/cluster_centers\"%filename, cluster_centers)\n\t\tnp.save(\"output/%s/mean_shift_eps/labels\"%filename, labels)\n\n\t\tprint(\"num clusters: \", cluster_centers.shape[0])\n\t\tprint(\"num clusters: \", len(set(labels)))\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t##### Density-Based Spatial Clustering of Applications with Noise (DBSCAN) ####\n\tif DBSCAN:\n\t\tt0 = time()\n\t\tprint(\"Starting DBSCAN for\", filename)\n\n\t\td = cluster.DBSCAN(eps=np.mean(RMSDs), metric='precomputed',\n\t\t\t\t\t\t\talgorithm='ball_tree', min_samples=3).fit(RMSDs)\n\n\t\tnp.save(\"output/%s/DBSCAN/cluster_centers\"%filename, d.components_)\n\t\tnp.save(\"output/%s/DBSCAN/labels\"%filename, d.labels_)\n\n\t\tprint(\"num clusters: \", len(set(d.labels_)))\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t############################ DBSCAN with Epsilons ###########################\n\tif DBSCAN:\n\t\tt0 = time()\n\t\tprint(\"Starting DBSCAN with epsilons for\", filename)\n\n\t\tcluster_centers, labels = variable_eps_DBSCAN(RMSDs, epsilons, min_samples=2)\n\n\t\tnp.save(\"output/%s/DBSCAN_eps/cluster_centers\"%filename, cluster_centers)\n\t\tnp.save(\"output/%s/DBSCAN_eps/labels\"%filename, labels)\n\n\t\tprint(d.labels_)\n\t\tprint(np.unique(d.labels_).shape[0])\n\t\tprint(\"num clusters: \", len(set(d.labels_)))\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n########################## Clustering Algorithms #######################\n\ndef variable_bw_mean_shift(X, bandwidth_array, seeds=None, max_iterations=300):\n\t\"\"\"Variable bandwidth mean shift with gaussian kernel\n\n\tParameters\n\t----------\n\tX : array-like, shape=[n_samples, n_features]\n\t\tInput data.\n\n\tbandwidth : array[float], shape=[n_samples]\n\t\tKernel bandwidth.\n\n\tseeds : array[float, float], shape=(n_seeds, n_features), optional\n\t\tPoint used as initial kernel locations. 
Default is\n\t\tsetting each point in input data as a seed.\n\n\tmax_iter : int, default 300\n\t\tMaximum number of iterations, per seed point before the clustering\n\t\toperation terminates (for that seed point), if has not converged yet.\n\n\tReturns\n\t-------\n\tcluster_centers : array, shape=[n_clusters, n_features]\n\t\tCoordinates of cluster centers.\n\n\tlabels : array, shape=[n_samples]\n\t\tCluster labels for each point.\n\n\tNotes\n\t-----\n\tCode adapted from scikit-learn library.\n\n\t\"\"\"\n\n\tif not seeds:\n\t\tseeds = X \n\n\tn_points, n_features = X.shape\n\tstop_thresh = 1e-3 * np.mean(bandwidth_array) # when mean has converged\n\tcenter_intensity_dict = {}\n\tcluster_centers = []\n\tball_tree = BallTree(X) # to efficiently look up nearby points\n\n\tdef gaussian_kernel(x, points, bandwidth):\n\t\tdistances = euclidean_distances(points, x)\n\t\tweights = np.exp(-1 * (distances ** 2 / bandwidth ** 2))\n\t\treturn np.sum(points * weights, axis=0) / np.sum(weights)\n\n\t# For each seed, climb gradient until convergence or max_iterations \n\tfor i, weighted_mean in enumerate(seeds):\n\t\tcompleted_iterations = 0\n\t\twhile True:\n\t\t\tpoints_within = X[ball_tree.query_radius([weighted_mean], bandwidth_array[i])[0]]\n\t\t\told_mean = weighted_mean # save the old mean \n\t\t\tweighted_mean = gaussian_kernel(old_mean, points_within, bandwidth_array[i])\n\t\t\tconverged = extmath.norm(weighted_mean - old_mean) < stop_thresh\n\n\t\t\tif converged or completed_iterations == max_iterations:\n\t\t\t\tif completed_iterations == max_iterations:\n\t\t\t\t\tprint(\"reached max iterations\")\n\t\t\t\tcluster_centers.append(weighted_mean)\n\t\t\t\tcenter_intensity_dict[tuple(weighted_mean)] = len(points_within)\n\t\t\t\tbreak\n\t\t\t\t \n\t\t\tcompleted_iterations += 1\n\n\t# POST PROCESSING: remove near duplicate points\n\t# If the distance between two kernels is less than the bandwidth,\n\t# then we have to remove one because it is a duplicate. 
Remove the\n\t# one with fewer points.\n\tsorted_by_intensity = sorted(center_intensity_dict.items(),\n\t\t\t\t\t\t\t\t key=lambda tup: tup[1], reverse=True)\n\tsorted_centers = np.array([tup[0] for tup in sorted_by_intensity])\n\tunique = np.ones(len(sorted_centers), dtype=np.bool)\n\tball_tree = BallTree(sorted_centers)\n\n\tfor i, center in enumerate(sorted_centers):\n\t\tif unique[i]:\n\t\t\tneighbor_idxs = ball_tree.query_radius([center], np.mean(bandwidth_array))[0]\n\t\t\tunique[neighbor_idxs] = 0\n\t\t\tunique[i] = 1 # leave the current point as unique\n\tcluster_centers = sorted_centers[unique]\n\n\t# ASSIGN LABELS: a point belongs to the cluster that it is closest to\n\tnbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(cluster_centers)\n\tlabels = np.zeros(n_points, dtype=np.int)\n\tdistances, idxs = nbrs.kneighbors(X)\n\tlabels = idxs.flatten()\n\n\treturn cluster_centers, labels\n\ndef variable_eps_DBSCAN(X, eps_array, min_samples=5):\n\t\"\"\" Density-Based Spatial Clustering of Applications with Noise\n\n\tParameters\n\t----------\n\tX : array[float, float], shape=(n_samples,n_features)\n\t\tSimilarity matrix\n\n\teps_array : array[float], shape=(n_samples)\n\t\tThe maximum distance between two points for them to be considered \n\t\tto be in the same neighborhood, applied locally.\n\n\tReturns\n\t--------\n\tcluster_centers : array, shape=[n_clusters, n_features]\n\t\tCoordinates of cluster centers.\n\n\tlabels : array, shape=[n_samples]\n\t\tCluster labels for each point.\n\n\tNotes\n\t-----\n\tCode adapted from scikit-learn library \n\t\"\"\"\n\t# Calculate neighborhood for all samples. This leaves the original point\n\t# in, which needs to be considered later (i.e. point i is in the\n\t# neighborhood of point i. While True, its useless information)\n\tneighborhoods = np.array([np.where(x <= eps_array[i])[0] for i, x in enumerate(X)])\n\n\tn_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])\n\n\t# Initially, all samples are noise.\n\tlabels = -np.ones(X.shape[0], dtype=np.intp)\n\n\t# A list of all core samples found.\n\tcore_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)\n\tdbscan_inner(core_samples, neighborhoods, labels)\n\n\treturn np.where(core_samples)[0], labels\n\n\n######################### Plotting ########################\n\ndef cluster_plot(data, labels):\n\tcolors = cycle('bgrcmyk')\n\tnum_clusters = len(np.unique(labels))\n\tprint(\"num clusters: \", num_clusters)\n\n\tfor i in range(num_clusters):\n\t\tto_plot = data[ np.where(labels == i) ]\n\t\tx_plot = to_plot[:, 0]\n\t\ty_plot = to_plot[:, 1]\n\t\tplt.scatter(x_plot, y_plot, c=next(colors))\n\n\tplot(data)\n\ndef plot(data):\n\tx = data[:, 0]\n\ty = data[:, 1]\n\tplt.axis([min(x), max(x),min(y),max(y)])\n\tplt.xlabel(\"DC1\")\n\tplt.ylabel(\"DC2\")\n\tplt.grid()\n\tplt.show()\n\tplt.savefig(\"%s_plot.png\"%filename, transparent=True, \n\t\t\t\tbbox_inches='tight', figsize=(3,3), dpi=300)\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n# def calcProb(energy, temp=300):\n# \treturn np.exp(-energy / (k_b * temp))\n\n# def calcEntropy(num_states, probs):\n# \t\"\"\"\n# \t\tNotes\n# \t\t-----\n# \t\t\tDerived using stirling approximation of formal \n# \t\t\tdefinition of entropy.\n# \t\t\t..math -K_b N \\sum_{k=1}^{s}p_k \\ln p_k\n# \t\"\"\"\n# \treturn -k_b * num_states * np.sum(probs * 
np.log(probs))\n\n","repo_name":"rohanp/LDFMap","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":10555,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"}
+{"seq_id":"5207609904","text":"import json\nfrom posixpath import dirname\nfrom urllib.error import HTTPError, URLError\nfrom urllib.request import Request, urlopen\nfrom os.path import abspath, dirname\nfrom inspect import getfile, currentframe\n\n\ndef get_key():\n \"\"\"Gets api key from secret file\n\n Returns: (str) api key\n \"\"\"\n key = []\n secret = \"\"\n # gets path of this script - OS independent\n path = dirname(abspath(getfile(currentframe()))) + \"/.secret.txt\"\n try:\n # get appropriate api key\n with open(path) as f:\n key = [line.strip() for line in f]\n for k in key:\n if k.startswith(\"OPENAI_KEY\"):\n secret = k.split('\"')[1::2]\n except FileNotFoundError:\n print(\"Could not find api key. Please make sure you've run the CLI \"\n \"tool and set up your model\")\n quit(\"Exiting...\")\n\n return secret[0]\n\n\ndef get_model():\n \"\"\"Gets model from model file\n\n Returns: (str) model\n \"\"\"\n model = []\n model_name = \"\"\n # gets path of this script - OS independent\n path = dirname(abspath(getfile(currentframe()))) + \"/.model.txt\"\n try:\n # get appropriate api key\n with open(path) as f:\n model = [line.strip() for line in f]\n for m in model:\n if m.startswith(\"MODEL\"):\n model_name = m.split('\"')[1::2]\n except FileNotFoundError:\n print(\"Could not find model. Please make sure you've run the CLI \"\n \"tool and set up your model\")\n quit(\"Exiting...\")\n\n return model_name[0]\n\n\ndef make_request(url, headers=None, data=None):\n \"\"\"Makes API request\n\n Params:\n url (str): url to make request to\n headers (dict, optional): headers to send with request. Defaults to None.\n data (bytes, optional): data to send with request. Defaults to None.\n \"\"\"\n request = Request(url, headers=headers or {}, data=data)\n try:\n with urlopen(request, timeout=10) as response:\n return response.read(), response\n except HTTPError as error:\n print(error.status, error.reason)\n quit(\"Exiting...\")\n except URLError as error:\n print(error.reason)\n quit(\"Exiting...\")\n except TimeoutError:\n print(\"Request timed out\")\n quit(\"Exiting...\")\n\n\ndef chat_help():\n \"\"\"Prints help message for all available commands\"\"\"\n print(\n \"ChatGDB is a python script that defines some extra helpful GDB and \"\n \"LLDB commands. Before use, be sure to set up your api key using the \"\n \"CLI tool. The commands are as follows:\\n\\n\"\n \"chat: This command is used to generate GDB/LLDB commands based on plain \"\n \"English input. For example, 'chat stop my code at line 7' will \"\n \"generate the GDB command 'break 7'. Remember that in LLDB, many \"\n \"commands require filename information as well.\\n\\n\"\n \"explain: This command is used to generate explanations for either \"\n \"the previous command or a user query. 
'explain' with \"\n \"no arguments will generate an explanation for the previous command \"\n \"but typing a query after will generate an answer for it.\\n\\n\")\n\n\nHEADERS = {\n \"Authorization\": \"Bearer \" + get_key(),\n \"Content-Type\": \"application/json\"\n}\nURL = \"https://api.openai.com/v1/chat/completions\"\n\n\ndef explain_helper(prev_command, command, prompt):\n \"\"\"Generates explanation for either the previous command or a user query\n\n Params:\n prev_command (str): previous command\n command (str): user query\n prompt (str): prompt to use for explanation\n \"\"\"\n question = prompt + prev_command if command == \"\" else command\n data = {\"model\": get_model(),\n \"messages\": [{\"role\": \"user\",\n \"content\": question}]}\n body, response = make_request(URL, HEADERS, data=bytes(json.dumps(data),\n encoding=\"utf-8\"))\n body = json.loads(body)\n explanation = body['choices'][0]['message']['content']\n print(explanation)\n\n\ndef chat_helper(command, prompt):\n \"\"\"Generates GDB/LLDB command based on user input\n\n Params:\n command (str): user input\n prompt (str): prompt to use for command generation\n \"\"\"\n data = {\"model\": get_model(),\n \"messages\": [{\"role\": \"user\",\n \"content\": prompt + command}]}\n\n body, response = make_request(URL, HEADERS, data=bytes(json.dumps(data),\n encoding=\"utf-8\"))\n body = json.loads(body)\n command = body['choices'][0]['message']['content']\n print(command)\n # the first is technically also the previous command\n return command, command\n","repo_name":"pgosar/ChatGDB","sub_path":"chatgdb/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":882,"dataset":"github-code","pt":"48"}
+{"seq_id":"25361729974","text":"# from flask_script import Manager\nfrom controller import create_app\n\n# 创建APP对象\napp = create_app('dev')\n# # 创建脚本管理\n# mgr = Manager(app)\n\n\nif __name__ == '__main__':\n # mgr.run()\n app.run(threaded=True, host=\"0.0.0.0\")\n\n","repo_name":"Kr1s77/flask-video-streaming-recorder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":219,"dataset":"github-code","pt":"48"}
+{"seq_id":"5864725651","text":"\"\"\"\nSimple scenario that wraps the node.form of an artificial within square brackets\nif it isn't already wrapped\n\"\"\"\n\nfrom udapi.core.block import Block\n\nclass WrapArtificials(Block):\n\n def process_node(self, node):\n if node.misc['NodeType'] == 'Artificial':\n if not node.form.startswith('['):\n node.form = f'[{node.form}]'\n","repo_name":"francescomambrini/Udapi_AGLDT","sub_path":"udapi_agldt/util/wrapartificials.py","file_name":"wrapartificials.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41160355671","text":"import motor.motor_asyncio\nfrom bson.objectid import ObjectId\nfrom decouple import config\n\nMONGO_DETAILS = config('MONGO_DETAILS')\n\nclient = motor.motor_asyncio.AsyncIOMotorClient(MONGO_DETAILS)\n\ndatabase = client.mongoDB\n\ntweet_coll = database.get_collection(\"tweet_collection\")\n\n\n# helpers\n\ndef tweet_helper(tweet) -> dict:\n return {\n \"_id\": str(tweet[\"_id\"]),\n \"tweet\": tweet[\"tweet\"],\n \"created_at\": tweet[\"created_at\"],\n }\n\n\n# Retrieve all tweets present in the database\nasync def retrieve_tweets():\n tweets = []\n async for tweet in tweet_coll.find():\n tweets.append(tweet_helper(tweet))\n return tweets\n\n\n# Add a new tweet into to the database\nasync def add_tweet(tweet_data: dict) -> dict:\n new_tweet = await tweet_coll.insert_one(tweet_data)\n return tweet_helper(tweet_data)\n\n\n# Retrieve a tweet with a matching ID\nasync def retrieve_tweet(id: str) -> dict:\n tweet = await tweet_coll.find_one({\"_id\": ObjectId(id)})\n if tweet:\n return tweet_helper(tweet)\n\n# Delete a tweet from the database\nasync def delete_tweet(id: str):\n tweet = await tweet_coll.find_one({\"_id\": ObjectId(id)})\n if tweet:\n await tweet_coll.delete_one({\"_id\": ObjectId(id)})\n return True\n return False\n\n# Update a tweet with a matching ID\nasync def update_tweet(id: str, data: dict):\n # Return false if an empty request body is sent.\n if len(data) < 1:\n return False\n tweet = await tweet_coll.find_one({\"_id\": ObjectId(id)})\n if tweet:\n updated_tweet = await tweet_coll.update_one(\n {\"_id\": ObjectId(id)}, {\"$set\": data}\n )\n if updated_tweet:\n return True\n return False\n","repo_name":"Jeromeschmidt/Tweet-gen-fastAPI","sub_path":"app/core/database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"17518575049","text":"import numpy as np\nimport cv2\nimport os.path\nimport sys\n\nDIR_PATH = os.path.join(os.path.dirname(__file__), '..')\nsys.path.append(DIR_PATH)\n\nfrom utils.prepare_data import read_image, write_image\nfrom Convolution.convolution import Convolution\n\ndef gauss2D(shape=(3,3),sigma= 3):\n \"\"\"\n 2D gaussian mask\n fspecial('gaussian',[shape],[sigma])\n \"\"\"\n try:\n m, n = [(x - 1.) / 2. for x in shape]\n y, x = np.ogrid[-m:m+1,-n:n+1]\n\n height = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n height[ height < np.finfo(height.dtype).eps*height.max() ] = 0\n\n sum_height = np.sum(height)\n if sum_height != 0:\n height /= sum_height\n\n return height\n except EOFError as e:\n raise(e)\n\nclass Blur:\n \"\"\"\n Blur is class that can blur image\n Mean blur has parameters: mode = 1\n Gaussian blur has parameters: mode = 2\n \"\"\"\n def makeKernel(self, mode: int):\n try:\n if mode == 1:\n kernel = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) /9\n elif mode == 2:\n kernel = np.array(gauss2D())\n else:\n kernel = None\n\n return kernel\n except EOFError as e:\n raise(e)\n\n def __init__(self, image, mode: int in [1, 2]):\n \"\"\"\n path: where is image in directory,\n mode: 1 (mean blur), 2 (gaussian blur)\n \"\"\"\n self.kernel = self.makeKernel(mode)\n self.image = image\n self.items = Convolution(self.kernel, self.image).items\n\nif __name__ == '__main__':\n print(\"Read image from path\")\n image = read_image(os.path.join(DIR_PATH+'/data/lena.png'), 1)\n blur_image = Blur(image, 1).items\n print(\"Write blur image\")\n write_image(os.path.join(DIR_PATH+'/data/blur-lena.png'), 1, blur_image)\n pass\n \n","repo_name":"truongcntn2017/ImageProcessing","sub_path":"Blur/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"28351202297","text":"from features import AbstractFeature\nfrom visualize import ImagePlot\nfrom skimage.measure import regionprops, label\nfrom skimage import exposure, transform\nimport numpy as np\n\n\nclass ColorFeature(AbstractFeature):\n def process(self, img):\n # Extract blue regions\n rgb2yuv = np.array([[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]])\n yuv = self.rescale_values(np.dot(img.image, rgb2yuv.T))\n white_img = self.extract_color(yuv[:, :, 0])\n red_img = self.extract_color(yuv[:, :, 2])\n blue_img = self.extract_color(yuv[:, :, 1])\n black_img = self.extract_color(1 - yuv[:, :, 0])\n size = min(img.image.shape[0], img.image.shape[1])\n ratio_img = transform.resize(img.image, [size, size])\n # RED WHITE OVERLAP\n overlap = red_img * white_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n red_img -= overlap\n else:\n white_img -= overlap\n # BLUE WHITE OVERLAP\n overlap = blue_img * white_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n blue_img -= overlap\n else:\n white_img -= overlap\n # RED BLACK OVERLAP\n overlap = red_img * black_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n red_img -= overlap\n else:\n black_img -= overlap\n # BLUE BLACK OVERLAP\n overlap = blue_img * black_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n blue_img -= overlap\n else:\n black_img -= overlap\n # RED BLUE OVERLAP\n overlap = blue_img * red_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if RGB_avg[0] - RGB_avg[2] > 0:\n blue_img -= overlap\n else:\n red_img -= overlap\n # WHITE BLACK OVERLAP\n overlap = black_img * white_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if (RGB_avg[0] + RGB_avg[1] + RGB_avg[2]) >= 0.5 * 3:\n white_img -= overlap\n else:\n black_img -= overlap\n return [np.mean(red_img), np.mean(blue_img), np.mean(black_img), np.mean(white_img)]\n\n def rescale_values(self, img):\n img[:, :, 1] = (img[:, :, 1] + 0.436)/(0.436 * 2)\n img[:, :, 2] = (img[:, :, 2] + 0.615)/(0.615 * 2)\n return img\n\n def relative_proportions(self, bw_img, img):\n sum = np.sum(bw_img)\n RGB_avg = [0, 0, 0]\n for i in range(len(RGB_avg)):\n mult_img = bw_img * img[:, :, i]\n RGB_avg[i] = np.sum(mult_img) / sum\n return RGB_avg / RGB_avg[0] # Normalize\n\n def extract_color(self, img, show=False):\n size = min(img.shape[0], img.shape[1])\n ratio_img = transform.resize(img, [size, size])\n width, height = ratio_img.shape\n # if exposure.is_low_contrast(ratio_img, fraction_threshold=0.45):\n ratio_img = exposure.equalize_hist(ratio_img)\n # gray_img *= (im.image[:, :, 0] > 150) * (im.image[:, :, 1] > 150) * (im.image[:, :, 2] > 150)\n # Find an appropriate threshold in order to extract white-like regions\n min_threshold = np.amin(ratio_img)\n max_threshold = np.amax(ratio_img)\n value_range = (max_threshold - min_threshold)\n threshold = value_range * 0.5 + min_threshold\n props = []\n max_iter = 1\n while (len(props) <= 2 or len(props) >= 10) and max_iter < 
1000:\n # Filtered white image\n filtered_img = ratio_img > threshold\n # Divide in regions\n labeled_img = label(filtered_img) + 1\n # Filter out largest white region\n mult_img = np.multiply(labeled_img, filtered_img) # only whitish regions are considered\n props = regionprops(mult_img)\n props = [region for region in props if region.area > 0.01 * width * height]\n # Adjust threshold boundaries and threshold\n too_less_regions = len(props) <= 2\n min_threshold = threshold if too_less_regions else min_threshold\n max_threshold = threshold if not too_less_regions else max_threshold\n threshold = threshold + value_range * 0.01 if too_less_regions else threshold - value_range * 0.01\n max_iter += 1\n # Extract largest white-like region\n # print(\"Threshold: %.2f\" % threshold)\n largest_region_img = np.zeros_like(ratio_img)\n if len(props) == 0:\n return largest_region_img\n max_region_coords = props[np.argmax([region.area for region in props])].coords\n largest_region_img[max_region_coords[:, 0], max_region_coords[:, 1]] = 1\n # Exclude white regions on the border\n excluded_border_img = np.ones_like(ratio_img)\n border_region_coords = np.array([coord.tolist() for coords in\n [region.coords for region in props if 0 in region.coords or\n width - 1 in region.coords[:, 0] or height - 1 in region.coords[:, 1]]\n for coord in coords])\n if len(border_region_coords) != 0:\n excluded_border_img[border_region_coords[:, 0], border_region_coords[:, 1]] = 0\n excluded_border_img *= filtered_img\n # Remove small regions\n exclude_small_img = np.zeros_like(ratio_img)\n excluded_small_coords = np.array([coord.tolist() for region in [region for region in props if region.area > 0.01*width*height]\n for coord in region.coords])\n exclude_small_img[excluded_small_coords[:, 0], excluded_small_coords[:, 1]] = 1\n # Only most central white region\n cx = width/2\n cy = height/2\n dx = width/5\n dy = height/5\n center_region_img = np.zeros_like(ratio_img)\n for region in props:\n keep = False\n for coord in region.coords:\n if cx - dx <= coord[0] <= cx + dx and cy - dy <= coord[1] <= cy + dy:\n keep = True\n break\n if keep:\n for coord in region.coords:\n center_region_img[coord[0], coord[1]] = 1\n # # Exclude regions with high saturation\n # hsv_img = HsvTransform().process(ratio_img)\n # filtered_hsv_img = hsv_img[:, :, 1] < 0.5\n # Combine images\n result = (exclude_small_img + excluded_border_img + largest_region_img + center_region_img) >= 3\n if show:\n print(\"Show of largest region\")\n ImagePlot().show(\"\", largest_region_img)\n print(\"Show of exclusion border\")\n ImagePlot().show(\"\", excluded_border_img)\n print(\"Show of removal small regions\")\n ImagePlot().show(\"\", exclude_small_img)\n print(\"Show of retaining center regions\")\n ImagePlot().show(\"\", center_region_img)\n # print(\"Filtered hsv image\")\n # ImagePlot().show(\"\", filtered_hsv_img)\n print(\"Show of result\")\n ImagePlot().show(\"TADA!\", result)\n\n return result\n","repo_name":"Zepheus/ml-traffic","sub_path":"features/color_feature.py","file_name":"color_feature.py","file_ext":"py","file_size_in_byte":7847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"37037640008","text":"from collections import deque\n\n\ndef parse_input(file):\n data = open(file, \"r\").read().strip()\n data = [x for x in data.split(\"\\n\")]\n return data\n\n\ndef get_bliz_locs(g):\n # creates a dict with key of time (t)\n # contains locations of blizzards at next time (t + 1)\n # those are unsafe spots to move\n # because it will repeat, we only need enough entries for\n # the cycle which is height * width of area of field\n height = len(g)\n width = len(g[0])\n bliz_locs = {}\n for t in range(((height - 2) * (width - 2) + 1)):\n unsafe_locs = set()\n for row in range(height):\n for col in range(width):\n curr_item = g[row][col]\n if curr_item == \">\":\n unsafe_locs.add((row, 1 + ((col - 1 + t) % (width - 2))))\n elif curr_item == \"<\":\n unsafe_locs.add((row, 1 + ((col - 1 - t) % (width - 2))))\n elif curr_item == \"v\":\n unsafe_locs.add((1 + ((row - 1 + t) % (height - 2)), col))\n elif curr_item == \"^\":\n unsafe_locs.add((1 + ((row - 1 - t) % (height - 2)), col))\n bliz_locs[t] = unsafe_locs\n return bliz_locs\n\n\ntest_inp = \"test.txt\"\npuzz_inp = \"2022/inputs/24.txt\"\ncurr_inp = puzz_inp\n\ngrid = parse_input(curr_inp)\nmax_rows = len(grid)\nmax_cols = len(grid[0])\n\n# populate bliz_locs dict\nbliz_locs = get_bliz_locs(grid)\n\n# find start location\nr = 0\nc = grid[r].index(\".\")\n\nseen_states = set()\nstart_state = (r, c, 0, False, False) # row, col, time, seen_end, seen_start\nstates = deque([start_state])\npart1_finished = False\nwhile states:\n (row, col, time, seen_end, seen_start) = states.popleft()\n # if it isn't a valid spot, ignore it\n # need this because we don't check when adding states\n if not (0 <= row < max_rows and 0 <= col < max_cols and grid[row][col] != \"#\"):\n continue\n # once we are at the end,\n # and have already been to the end and back to start,\n # we are done with part 2\n if row == max_rows - 1 and seen_start and seen_end:\n print(\"Part 2:\", time)\n break\n # first time we reach the end, print result for part 1\n if row == max_rows - 1 and (not part1_finished):\n print(\"Part 1:\", time)\n part1_finished = True\n # mark when we've seen the end\n if row == max_rows - 1:\n seen_end = True\n # if we have already seen the end, mark if we got back to start\n if row == 0 and seen_end:\n seen_start = True\n\n # keep track of states we have already processed to avoid duplicates\n if (row, col, time, seen_end, seen_start) in seen_states:\n continue\n seen_states.add((row, col, time, seen_end, seen_start))\n\n # get where blizzards will be at next time (time cycles by area of field)\n bliz_time = time % ((max_rows - 2) * (max_cols - 2))\n avoid_locs = bliz_locs[bliz_time + 1]\n\n # can we stay put?\n if (row, col) not in avoid_locs:\n states.append((row, col, time + 1, seen_end, seen_start))\n # can we move right?\n if (row, col + 1) not in avoid_locs:\n states.append((row, col + 1, time + 1, seen_end, seen_start))\n # can we move left?\n if (row, col - 1) not in avoid_locs:\n states.append((row, col - 1, time + 1, seen_end, seen_start))\n # can we move up?\n if (row - 1, col) not in avoid_locs:\n states.append((row - 1, col, time + 1, seen_end, seen_start))\n # can we move down?\n if (row + 1, col) not in avoid_locs:\n states.append((row + 1, col, time + 1, seen_end, seen_start))\n","repo_name":"MattMichaud/AoC","sub_path":"2022/code/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12449332000","text":"\ndef solve(s):\n global ans # 바꾸어야 할 변수는 글로벌 선언\n for p in possible:\n now = s + str(p)\n ans = min(ans, len(now)+abs(n-int(now)))\n if len(now) < 6:\n solve(now)\n\nn = int(input())\nm = int(input())\nans = abs(n-100) # 100에서 +-만 눌러서 가는 방법\nif m: # 고장난 버튼이 있는 경우\n broken = list(map(int, input().split()))\n # 고장나지 않은 버튼만 구하기 (set을 이용하자!)\n possible = set(i for i in range(10))-set(broken)\n solve('')\nelse: # 고장난 버튼이 없는 경우\n ans = min(ans, len(str(n)))\n\n\nprint(ans)\n","repo_name":"yujing-kim/algorithm_coding_test","sub_path":"ps_python/joon/1107.py","file_name":"1107.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"1866694465","text":"from bot.database.models import Farm as FarmModel\nfrom bot.database.models import PlantedCrop\nfrom bot.game.crop import Crop\nfrom bot.utils.constants import (\n FarmSizes,\n FARM_DIMENSIONS,\n PlotCoordinate,\n PlotActions,\n CROP_DATA,\n)\nfrom discord import Embed\nfrom typing import List, Optional\n\n\nclass Farm:\n def __init__(self, farm_id: int, size: FarmSizes, name: str):\n self.id = farm_id\n self.name = name\n self.size = size\n self.dimensions = FARM_DIMENSIONS.get(self.size)\n self.plot: List[List] = self.initialize_plot()\n\n @classmethod\n async def load(cls, player_id: int) -> \"Farm\":\n farm_model = await FarmModel.query.where(\n FarmModel.player_id == player_id\n ).gino.first()\n if farm_model is None:\n farm_model = await FarmModel.create(player_id=player_id)\n farm = Farm(\n farm_id=farm_model.id, size=FarmSizes(farm_model.size), name=farm_model.name\n )\n await farm.load_crops()\n return farm\n\n def initialize_plot(self) -> List[List[Optional[Crop]]]:\n return [\n [None for _ in range(self.dimensions.rows)]\n for _ in range(self.dimensions.columns)\n ]\n\n async def load_crops(self):\n crops = await PlantedCrop.query.where(PlantedCrop.farm_id == self.id).gino.all()\n for crop in crops:\n self.place_crop(\n Crop(\n id=crop.id,\n farm_id=crop.farm_id,\n crop_id=crop.crop_id,\n planted_at=crop.planted_at,\n ),\n row=crop.coord_row,\n column=crop.coord_column,\n )\n\n def validate_coordinate(self, row: int = None, column: int = None):\n if row is not None:\n if row >= self.dimensions.rows or row < 0:\n return False\n if column is not None:\n if column >= self.dimensions.columns or column < 0:\n return False\n return True\n\n def place_crop(self, crop: Crop, row: int, column: int):\n if not self.validate_coordinate(row=row, column=column):\n raise ValueError(\"Crop placement out of bounds\")\n self.plot[row][column] = crop\n\n async def work_plot(self, action: PlotActions, row: int, column: int, crop_id: int):\n if not self.validate_coordinate(row, column):\n return\n if self.plot[row][column] is not None:\n\n await self.plot[row][column].work(action)\n else:\n if action is PlotActions.PLANT:\n self.plot[row][column] = await Crop.new(\n farm_id=self.id, crop_id=crop_id, row=row, column=column\n )\n\n async def work_plots(\n self,\n action: PlotActions,\n coordinates: List[PlotCoordinate],\n crop_id: int = None,\n ):\n if not coordinates:\n for row in range(self.dimensions.rows):\n for column in range(self.dimensions.columns):\n await self.work_plot(\n action=action, row=row, column=column, crop_id=crop_id\n )\n\n for coordinate in coordinates:\n await self.work_plot(\n action=action,\n row=coordinate.row,\n column=coordinate.column,\n crop_id=crop_id,\n )\n\n def display(self):\n farm_land = \"\"\n for row in self.plot:\n for crop in row:\n if crop is None:\n farm_land += \"<:Crop_Land:753444938791911474>\" # Dirt Emoji\n continue\n farm_land += CROP_DATA[str(crop.crop_id)][\"stages\"][crop.state][\"emote\"]\n farm_land += \"\\n\"\n embed = Embed(title=self.name, description=farm_land)\n return embed\n\n def get_plots(self, planted: bool = True):\n crops = []\n for crop_row, row in enumerate(self.plot):\n for crop_column, crop in enumerate(row):\n if planted and crop is not None:\n crops.append(PlotCoordinate(crop_row, crop_column))\n else:\n if not planted and crop is None:\n crops.append(PlotCoordinate(crop_row, crop_column))\n return crops if crops else 
None\n","repo_name":"DiscordValley/TheValley","sub_path":"bot/game/farm.py","file_name":"farm.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
+{"seq_id":"9972328897","text":"import json\nimport hashlib\n\nfrom moban import constants, exceptions\nfrom moban.externals import file_system\n\n\nclass HashStore:\n IGNORE_CACHE_FILE = False\n\n def __init__(self):\n self.cache_file = constants.DEFAULT_MOBAN_CACHE_FILE\n if (\n file_system.exists(self.cache_file)\n and self.IGNORE_CACHE_FILE is False\n ):\n with file_system.open_file(self.cache_file) as f:\n self.hashes = json.load(f)\n else:\n self.hashes = {}\n\n def is_file_changed(self, file_name, file_content, source_template):\n changed, with_permission = self._is_source_updated(\n file_name, file_content, source_template\n )\n\n if changed is False:\n target_hash = get_file_hash(\n file_name, with_permission=with_permission\n )\n if target_hash != self.hashes[file_name]:\n changed = True\n return changed\n\n def _is_source_updated(self, file_name, file_content, source_template):\n changed = True\n content = file_content\n with_permission = True\n try:\n content = _mix(\n file_content,\n oct(file_system.file_permissions(source_template)),\n )\n except exceptions.NoPermissionsNeeded:\n # HttpFs does not have getsyspath\n # zip, tar have no permission\n # win32 does not work\n with_permission = False\n pass\n content_hash = get_hash(content)\n if file_system.exists(file_name):\n if file_name in self.hashes:\n if content_hash == self.hashes[file_name]:\n changed = False\n # else the dest file has not been created yet\n # so no need to get content hash at all\n if changed:\n self.hashes[file_name] = content_hash\n\n return changed, with_permission\n\n def save_hashes(self):\n with open(self.cache_file, \"w\") as f:\n json.dump(self.hashes, f)\n\n\nHASH_STORE = HashStore()\n\n\ndef get_file_hash(afile, with_permission=True):\n content = file_system.read_bytes(afile)\n try:\n if with_permission:\n content = _mix(content, oct(file_system.file_permissions(afile)))\n except exceptions.NoPermissionsNeeded:\n # HttpFs does not have getsyspath\n # zip, tar have no permission\n # win32 does not work\n pass\n return get_hash(content)\n\n\ndef get_hash(content):\n md5 = hashlib.md5()\n md5.update(content)\n return md5.digest().decode(\"latin1\")\n\n\ndef _mix(content, file_permissions_copy):\n file_permissions_copy = file_permissions_copy.encode(\"utf-8\")\n return content + file_permissions_copy\n","repo_name":"moremoban/moban","sub_path":"moban/core/hashstore.py","file_name":"hashstore.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"}
+{"seq_id":"1716027567","text":"from datetime import datetime\nimport argparse\n\nfrom cassandra.cluster import Cluster\nimport requests\n\nfrom BasketballReferencePageTextSchema import BasketballReferencePageTextSchema\nfrom BasketballReferenceScrapeStatusSchema import BasketballReferenceScrapeStatusSchema\nfrom CassandraQueryBuilder import CassandraQueryBuilder\nfrom CassandraTables import CassandraTables\n\n\n__author__ = 'Ryan'\n\n\"\"\"\nCREATE TABLE nba.basketball_reference_page_text_table (\ngameid text,\ntype text,\npage text,\nscrapedate text,\nPRIMARY KEY (type, gameid)\n);\n\n\"\"\"\n\n\nclass BasketballReferencePageScraper:\n def __init__(self):\n self.cluster = Cluster()\n self.session = self.cluster.connect(CassandraTables.KEYSPACE_NBA)\n self.scrapeDate = str(datetime.today())\n\n\n def process(self, pageType):\n column = self.getColumnFromPageType(pageType)\n gameids = self.selectSource(column)\n links = self.selectLinks(gameids)\n for r in links:\n self.extractPage(r, pageType, column)\n\n\n def extractPage(self, row, pageType, column):\n r = requests.get(row.boxscorelink)\n self.writePageToTable(row.gameid, pageType, r.text)\n self.updateSourceTable(row.gameid, column)\n\n\n def updateSourceTable(self, gameId, column):\n self.session.execute(\n CassandraQueryBuilder.updateQueryBuilder(CassandraTables.BASKETBALLREFERENCE_SCRAPE_STATUS_TABLE,\n \"{0}=$${1}$$\".format(column, self.scrapeDate),\n [\"gameid=\\'{0}\\'\".format(gameId)]))\n\n def writePageToTable(self, gameId, pageType, text):\n self.session.execute(\n CassandraQueryBuilder.insertInto(CassandraTables.BASKETBALLREFERENCE_PAGE_TEXT,\n BasketballReferencePageTextSchema.toHeader(),\n [gameId, pageType, text, self.scrapeDate]\n )\n )\n\n def selectLinks(self, gameids):\n return self.session.execute(\n CassandraQueryBuilder.selectFrom(CassandraTables.BASKETBALLREFERENCE_GAME_LINKS_TABLE,\n ['gameid', 'boxscorelink'],\n [CassandraQueryBuilder.inClause('gameid', gameids)]))\n\n def selectSource(self, column):\n source = self.session.execute(\n CassandraQueryBuilder.selectFrom(CassandraTables.BASKETBALLREFERENCE_SCRAPE_STATUS_TABLE,\n ['gameid'],[column + \"=''\"]))\n return [r.gameid for r in source.current_rows]\n\n def getColumnFromPageType(self, pageType):\n if pageType == \"BoxScore\":\n return BasketballReferenceScrapeStatusSchema.boxscore_scrapedate\n elif pageType == \"PlayByPlay\":\n return BasketballReferenceScrapeStatusSchema.play_by_play_scrapedate\n elif pageType == \"ShotChart\":\n return BasketballReferenceScrapeStatusSchema.shotchart_scrapedate\n else:\n raise ValueError(\"The pageType provided is not a valid pageType\")\n\n\ndef main(argv):\n BSS = BasketballReferencePageScraper()\n if argv['run_all']:\n argv['playbyplay'] = True\n argv['boxscore'] = True\n argv['shotchart'] = True\n\n if argv['playbyplay']:\n BSS.process(\"PlayByPlay\")\n if argv['boxscore']:\n BSS.process(\"BoxScore\")\n if argv['shotchart']:\n BSS.process(\"ShotChart\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-ra', '--run-all', help='Will run all three page extractors (BoxScore, ShotChart, PlayByPlay)',\n action=\"store_true\", default=False)\n parser.add_argument('-bs', '--boxscore', help='Scrape all new boxscore links', action=\"store_true\", default=False)\n parser.add_argument('-sc', '--shotchart', help='Scrape all new shotchart links', action=\"store_true\", default=False)\n parser.add_argument('-pbp', '--playbyplay', help='Scrape all new playbyplay links', 
action=\"store_true\",\n default=False)\n results = vars(parser.parse_args())\n main(results)\n\n","repo_name":"rd11490/NBA","sub_path":"NBAProject/BasketballReferencePageScraper.py","file_name":"BasketballReferencePageScraper.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"33231228282","text":"import os\n\nos.chdir('/home/eugenegalaxy/Documents/projects/simp/simp/tests/yolo_dataset/mask_incorrect_detections')\nCOUNT = 0\n\n\ndef increment():\n global COUNT\n COUNT = COUNT + 1\n\n\ndef renamer():\n for f in os.listdir():\n f_name, f_ext = os.path.splitext(f)\n f_name = 'nofever_{0:04}'.format(COUNT)\n increment()\n\n new_name = '{}{}'.format(f_name, f_ext)\n os.rename(f, new_name)\n\n\ndef renamer_parts():\n for f in os.listdir():\n f_name, f_ext = os.path.splitext(f)\n # f_name = 'something'\n parts = f_name.split('height')\n f_name = parts[0]\n parts2 = f_name.split('_')\n f_name = '{0:04}'.format(COUNT) + '_old_' + parts2[1] + '_' + parts2[2]\n new_name = '{}{}'.format(f_name, f_ext)\n os.rename(f, new_name)\n increment()\n\n\nrenamer_parts()\n","repo_name":"eugenegalaxy/simp","sub_path":"simp/file_renamer.py","file_name":"file_renamer.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10853763740","text":"import numpy as np\nfrom time import time\nfrom datasphere.populate import init_data_sphere\nimport getpass, os, sys\n\ndef test_populate_init_data_sphere():\n \n Nx=3\n Ny=5\n Nz=7\n ZZ=np.arange(Nz, dtype=np.float64)\n YY=np.arange(Ny, dtype=np.float64)\n XX=np.arange(Nx, dtype=np.float64)\n\n print(XX,YY,ZZ)\n voxel_size=np.array([1.], dtype=np.float64)\n\n # (1) compute the result using our C extension\n t0 = time()\n out = init_data_sphere(ZZ, YY, XX,voxel_size)\n dt0 = time() - t0\n print(\"XX={}\".format(XX))\n print(\"XX shape={}\".format(XX.shape))\n print(\"out={}\".format(out))\n print(\"out.shape={}\".format(out.shape))\n\n\n# MAIN\nif __name__ == \"__main__\":\n test_populate_init_data_sphere()\n\n\n\n\n\n","repo_name":"a356617605/Datasphere","sub_path":"test_populate.py","file_name":"test_populate.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9971521811","text":"from SEIL_Energy import *\nimport paho.mqtt.client as mqtt\nimport MySQLdb\nfrom config import CONFIG\n# Open database connection\ndb = MySQLdb.connect(CONFIG[\"database\"][\"host\"],CONFIG[\"database\"][\"user\"],CONFIG[\"database\"][\"password\"],CONFIG[\"database\"][\"name\"] )\n\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(CONFIG[\"mqtt\"][\"topic\"])\n\nqueue = []\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n # print(msg.topic+\" \"+str(msg.payload))\n actual_value = str(msg.payload).split(',')[2]\n ts = int(float(str(msg.payload).split(',')[1]))\n # print(\"ts\",ts)\n if len(queue)<6:\n queue.append(actual_value)\n return\n predicted_value = energy_pred_LSTM(queue)\n queue.pop(0)\n queue.append(actual_value)\n print(predicted_value, actual_value)\n sql = \"insert into predicted_power(ts,predicted_value) values(\"+str(ts)+\", \"+str(predicted_value)+\")\"\n try:\n # Execute the SQL command\n cursor.execute(sql)\n # Commit your changes in the database\n db.commit()\n except:\n # Rollback in case there is any error\n db.rollback()\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(CONFIG[\"mqtt\"][\"host\"], CONFIG[\"mqtt\"][\"port\"], 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()","repo_name":"seil-cse-iitb/energy-prediction","sub_path":"live_prediction.py","file_name":"live_prediction.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32306336341","text":"\"\"\"Unit tests verifying tag-related delivery stream APIs.\"\"\"\nimport boto3\nfrom botocore.exceptions import ClientError\nimport pytest\n\nfrom moto import mock_firehose\nfrom moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID\nfrom moto.firehose.models import MAX_TAGS_PER_DELIVERY_STREAM\nfrom moto.moto_api._internal import mock_random\nfrom tests.test_firehose.test_firehose import TEST_REGION\nfrom tests.test_firehose.test_firehose import sample_s3_dest_config\n\n\n@mock_firehose\ndef test_list_tags_for_delivery_stream():\n \"\"\"Test invocations of list_tags_for_delivery_stream().\"\"\"\n client = boto3.client(\"firehose\", region_name=TEST_REGION)\n stream_name = f\"test_list_tags_{mock_random.get_random_hex(6)}\"\n\n number_of_tags = 50\n tags = [{\"Key\": f\"{x}_k\", \"Value\": f\"{x}_v\"} for x in range(1, number_of_tags + 1)]\n\n # Create a delivery stream to work with.\n client.create_delivery_stream(\n DeliveryStreamName=stream_name,\n S3DestinationConfiguration=sample_s3_dest_config(),\n Tags=tags,\n )\n\n # Verify limit works.\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, Limit=1\n )\n assert len(result[\"Tags\"]) == 1\n assert result[\"Tags\"] == [{\"Key\": \"1_k\", \"Value\": \"1_v\"}]\n assert result[\"HasMoreTags\"] is True\n\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, Limit=number_of_tags\n )\n assert len(result[\"Tags\"]) == number_of_tags\n assert result[\"HasMoreTags\"] is False\n\n # Verify exclusive_start_tag_key returns truncated list.\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, ExclusiveStartTagKey=\"30_k\"\n )\n assert len(result[\"Tags\"]) == number_of_tags - 30\n expected_tags = [\n {\"Key\": f\"{x}_k\", \"Value\": f\"{x}_v\"} for x in range(31, number_of_tags + 1)\n ]\n assert result[\"Tags\"] == expected_tags\n assert result[\"HasMoreTags\"] is False\n\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, ExclusiveStartTagKey=f\"{number_of_tags}_k\"\n )\n assert len(result[\"Tags\"]) == 0\n assert result[\"HasMoreTags\"] is False\n\n # boto3 ignores bad stream names for ExclusiveStartTagKey.\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, ExclusiveStartTagKey=\"foo\"\n )\n assert len(result[\"Tags\"]) == number_of_tags\n assert result[\"Tags\"] == tags\n assert result[\"HasMoreTags\"] is False\n\n # Verify no parameters returns entire list.\n client.list_tags_for_delivery_stream(DeliveryStreamName=stream_name)\n assert len(result[\"Tags\"]) == number_of_tags\n assert result[\"Tags\"] == tags\n assert result[\"HasMoreTags\"] is False\n\n\n@mock_firehose\ndef test_tag_delivery_stream():\n \"\"\"Test successful, failed invocations of tag_delivery_stream().\"\"\"\n client = boto3.client(\"firehose\", region_name=TEST_REGION)\n\n # Create a delivery stream for testing purposes.\n stream_name = f\"test_tags_{mock_random.get_random_hex(6)}\"\n client.create_delivery_stream(\n DeliveryStreamName=stream_name,\n ExtendedS3DestinationConfiguration=sample_s3_dest_config(),\n )\n\n # Unknown stream name.\n unknown_name = \"foo\"\n with pytest.raises(ClientError) as exc:\n client.tag_delivery_stream(\n DeliveryStreamName=unknown_name, Tags=[{\"Key\": \"foo\", \"Value\": \"bar\"}]\n )\n err = exc.value.response[\"Error\"]\n assert err[\"Code\"] == \"ResourceNotFoundException\"\n assert (\n f\"Firehose {unknown_name} under account {ACCOUNT_ID} not found\"\n in 
err[\"Message\"]\n )\n\n # Too many tags.\n with pytest.raises(ClientError) as exc:\n client.tag_delivery_stream(\n DeliveryStreamName=stream_name,\n Tags=[{\"Key\": f\"{x}\", \"Value\": f\"{x}\"} for x in range(51)],\n )\n err = exc.value.response[\"Error\"]\n assert err[\"Code\"] == \"ValidationException\"\n assert (\n f\"failed to satisify contstraint: Member must have length \"\n f\"less than or equal to {MAX_TAGS_PER_DELIVERY_STREAM}\"\n ) in err[\"Message\"]\n\n # Bad tags.\n with pytest.raises(ClientError) as exc:\n client.tag_delivery_stream(\n DeliveryStreamName=stream_name, Tags=[{\"Key\": \"foo!\", \"Value\": \"bar\"}]\n )\n err = exc.value.response[\"Error\"]\n assert err[\"Code\"] == \"ValidationException\"\n assert (\n \"1 validation error detected: Value 'foo!' at 'tags.1.member.key' \"\n \"failed to satisfy constraint: Member must satisfy regular \"\n \"expression pattern\"\n ) in err[\"Message\"]\n\n # Successful addition of tags.\n added_tags = [\n {\"Key\": f\"{x}\", \"Value\": f\"{x}\"} for x in range(MAX_TAGS_PER_DELIVERY_STREAM)\n ]\n client.tag_delivery_stream(DeliveryStreamName=stream_name, Tags=added_tags)\n results = client.list_tags_for_delivery_stream(DeliveryStreamName=stream_name)\n assert len(results[\"Tags\"]) == MAX_TAGS_PER_DELIVERY_STREAM\n assert results[\"Tags\"] == added_tags\n\n\n@mock_firehose\ndef test_untag_delivery_stream():\n \"\"\"Test successful, failed invocations of untag_delivery_stream().\"\"\"\n client = boto3.client(\"firehose\", region_name=TEST_REGION)\n\n # Create a delivery stream for testing purposes.\n stream_name = f\"test_untag_{mock_random.get_random_hex(6)}\"\n tag_list = [\n {\"Key\": \"one\", \"Value\": \"1\"},\n {\"Key\": \"two\", \"Value\": \"2\"},\n {\"Key\": \"three\", \"Value\": \"3\"},\n ]\n client.create_delivery_stream(\n DeliveryStreamName=stream_name,\n ExtendedS3DestinationConfiguration=sample_s3_dest_config(),\n Tags=tag_list,\n )\n\n # Untag all of the tags. Verify there are no more tags.\n tag_keys = [x[\"Key\"] for x in tag_list]\n client.untag_delivery_stream(DeliveryStreamName=stream_name, TagKeys=tag_keys)\n results = client.list_tags_for_delivery_stream(DeliveryStreamName=stream_name)\n assert not results[\"Tags\"]\n assert not results[\"HasMoreTags\"]\n","repo_name":"getmoto/moto","sub_path":"tests/test_firehose/test_firehose_tags.py","file_name":"test_firehose_tags.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"}
+{"seq_id":"19882159694","text":"import math\r\nimport pygame\r\n\r\npygame.init()\r\n\r\nglobal w, h, vo, theta\r\n\r\nw,h = 1024,1024\r\nscreen = pygame.display.set_mode((w,h))\r\ndef findVelocity(a = 9.8):\r\n global vox, voy, time, distance, vo\r\n vo = input('enter initial velocity(m/s):>')\r\n theta = input('enter angle(degrees):>')\r\n vo = int(vo)\r\n theta = int(theta)\r\n #convert theta to radians\r\n theta = math.radians(theta)\r\n #perform trig \r\n costheta = math.cos(theta)\r\n sintheta = math.sin(theta)\r\n #find x and y velocity\r\n vo = math.radians(vo)\r\n vox = vo * costheta \r\n voy = vo * sintheta\r\n #convert to degrees\r\n vox = math.degrees(vox)\r\n voy = math.degrees(voy)\r\n main()\r\n\r\ndef finish(a = 9.8):\r\n print('a =', a, 'm/s^2')\r\n print('Initial velocity =', vo, 'm/s')\r\n print('Vox = ', vox , 'm/s')\r\n print('Voy = ', voy , 'm/s')\r\n tt= voy * -2\r\n t = tt / a\r\n time = t * -1\r\n distance = vox * time\r\n print('projectile traveled', distance, 'meters in', time, 'seconds')\r\n\r\n\r\ndef main(a = 9.8, hdamp = 0.1, damp = 0.1):\r\n ball_x = 1018\r\n ball_y = 1018\r\n ball_x_v = int(vox)\r\n ball_y_v = int(voy)\r\n ball_y = int(ball_y)\r\n ball_x_v = int(ball_x_v)\r\n ball_y_v = int(ball_y_v)\r\n screen = pygame.display.set_mode((w,h))\r\n\r\n count = 0\r\n \r\n while True:\r\n\r\n screen.fill((0,0,0))\r\n\r\n\r\n if ball_x <= 0:\r\n\r\n ball_x_v = -ball_x_v*(1-damp)\r\n ball_x = 1\r\n ball_x = 1024\r\n if ball_y <= 0:\r\n\r\n ball_y_v = -ball_y_v*(1-damp)\r\n ball_x_v = ball_x_v*(1-hdamp)\r\n ball_y = 1\r\n ball_y = 1024\r\n if ball_x >= w:\r\n\r\n ball_x_v = -ball_x_v*(1-damp)\r\n\r\n #ball_x = ball_x - ball_x\r\n\r\n #print(ball_x)\r\n \r\n\r\n if ball_y >=h:\r\n ball_y_v = -ball_y_v*(1-damp)\r\n \r\n ball_y = h - 1\r\n #print('landing coordinates: ', ball_x, ' ', ball_y)\r\n \r\n \r\n\r\n \r\n oldposx = ball_x\r\n oldposy = ball_y\r\n ball_x += ball_x_v\r\n ball_y += ball_y_v\r\n ball_y_v += (a / 125)\r\n #print(ball_x, ball_y)\r\n print()\r\n \r\n \r\n pygame.draw.circle(screen, (255,255,255), (int(ball_x), int(ball_y)), 5)\r\n\r\n pygame.display.update()\r\n\r\nif __name__ == '__main__':\r\n\r\n findVelocity()\r\n","repo_name":"waddyado/2dphysics","sub_path":"simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31322624783","text":"from os import system\nimport functions.banco as b\n\n\ndef main():\n clientes = []\n contas = []\n\n while True:\n opcao = b.menu()\n\n if opcao == 'd':\n b.depositar(clientes)\n\n elif opcao == 's':\n b.sacar(clientes)\n\n elif opcao == 'e':\n b.exibir_extrato(clientes)\n\n elif opcao == 'nu':\n b.criar_cliente(clientes)\n\n elif opcao == 'nc':\n numero_conta = len(contas) + 1\n b.criar_conta(numero_conta, clientes, contas)\n\n elif opcao == 'lc':\n b.listar_contas(contas)\n\n elif opcao == 'l':\n system('cls')\n\n elif opcao == 'q':\n break\n\n else:\n print(f'\\n{ \" Operação inválida, por favor selecione novamente a operação desejada. \".center(100, \"@\") }')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"GuilhermeDGDEV/DIO_sistema_bancario_python_poo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"41546346316","text":"if __name__ == '__main__':\n from random import randint\n import os\n from time import sleep\n\n TAMANHO_GRID = 10 # Tamanho do grid do jogo\n PACMAN = ['O', 'o'] # Lista com as formas do pacman a cada interação\n COMIDA = 'X' # Formato da comida\n TEMPO_DA_INTERACAO = 0.5 # Tempo entre as interações de movimento do pacman\n\n pos_pacman = [randint(0, TAMANHO_GRID - 1), randint(0, TAMANHO_GRID - 1)] # Sortear posição que o pacman vai ficar no jogo\n pos_comida = [randint(0, TAMANHO_GRID - 1), randint(0, TAMANHO_GRID - 1)] # Sortear posição que vai ficar a comida no jogo\n\n pacman_formato = PACMAN[0] # Formato do primeiro pacman no jogo\n\n # Imprimindo o jogo na tela\n def imprimir_grid():\n os.system('cls')\n for l in range(TAMANHO_GRID): # percorre as linhas do jogo - índice [0]\n for c in range(TAMANHO_GRID): # percorre as colunas do jogo - índice [1]\n if l == pos_pacman[0] and c == pos_pacman[1]:\n print(pacman_formato, end='') # Imprime a forma atual do Pacman no grid\n elif l == pos_comida[0] and c == pos_comida[1]:\n print(COMIDA, end='') # Imprime a comida do grid\n else:\n print('.', end='')\n print()\n\n # Função para mover o Pacman em direção à comida\n def mover_pacman():\n if pos_pacman[0] < pos_comida[0]:\n pos_pacman[0] += 1\n elif pos_pacman[0] > pos_comida[0]:\n pos_pacman[0] -= 1\n elif pos_pacman[1] < pos_comida[1]:\n pos_pacman[1] += 1\n elif pos_pacman[1] > pos_comida[1]:\n pos_pacman[1] -= 1\n\n # Loop principal do jogo\n while True:\n imprimir_grid()\n mover_pacman()\n\n # Altera a forma do Pacman para a próxima forma na lista\n pacman_formato = PACMAN[(PACMAN.index(pacman_formato) + 1) % len(PACMAN)]\n\n if pos_pacman == pos_comida:\n pos_comida = [randint(0, TAMANHO_GRID - 1), randint(0, TAMANHO_GRID - 1)]\n sleep(TEMPO_DA_INTERACAO)","repo_name":"fhvol/projetoADS-AV2","sub_path":"pacman.py","file_name":"pacman.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20723847246","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on : 2019/03/07 14:21:04 JST.\nLast Change: 2019/07/05 22:52:10 JST.\n\n\n時間計測を簡単に行うことが出来る\nReference\n- https://qiita.com/tag1216/items/e1e3c565a2bf8dbc7f86\nエラーの例外処理\n- https://blog.amedama.jp/entry/2015/10/02/234946\n\n@author: Koki Obinata\n\"\"\"\nimport time\nfrom contextlib import contextmanager\nfrom collections import defaultdict\n\n\n@contextmanager\ndef single_timer(label):\n \"\"\"\n 処理の時間を計測\n\n Usage\n -----\n with single_timer('some_process'):\n time.sleep(0.1)\n\n Parameters\n ----------\n label : str\n 計測した処理時間を表示する際の名前\n \"\"\"\n start = time.time()\n try:\n yield\n finally:\n end = time.time()\n print('{}: {:.4f}'.format(label, end-start))\n\n\n@contextmanager\ndef timer_for_each():\n \"\"\"\n forループの中などで,処理ごとに別々に時間を計測\n\n Usage\n -----\n with timer_for_each() as timer:\n for _ in range(10):\n with timer('process1'):\n time.sleep(0.1)\n\n with timer('process2'):\n time.sleep(0.2)\n\n Parameters\n ----------\n label : str\n 計測した処理時間を表示する際の名前\n \"\"\"\n times = defaultdict(float)\n\n @contextmanager\n def timer(label):\n start = time.time()\n try:\n yield\n finally:\n end = time.time()\n times[label] += end - start\n\n yield timer\n\n for label, t in times.items():\n print('{}: {:.4f}'.format(label, t))\n\n\n@contextmanager\ndef timer_for_total(total_label):\n \"\"\"\n forループの中などで,処理ごとに別々に時間を計測し,\n 最後に全体の処理時間を表示\n\n Usage\n -----\n with timer_for_total('Total time') as timer:\n for _ in range(10):\n\n with timer('process1'):\n time.sleep(0.1)\n\n with timer('process2'):\n time.sleep(0.2)\n\n Parameters\n ----------\n total_label : str\n 全体の処理時間を表示する際の名前\n\n label : str\n 計測した処理時間を表示する際の名前\n \"\"\"\n times = defaultdict(float)\n\n @contextmanager\n def timer(label):\n start = time.time()\n try:\n yield\n finally:\n end = time.time()\n times[label] += end - start\n\n with timer(total_label):\n yield timer\n\n for label, t in times.items():\n if label != total_label:\n print('{}: {:.3f}'.format(label, t))\n print('{}: {:.3f}'.format(total_label, times[total_label]))\n\n\nif __name__ == '__main__':\n # 1種類の処理時間\n with single_timer('for loop'):\n MOD = 10**9 + 7\n a = 0\n for i in range(10**6):\n a += i\n a %= MOD\n\n # 各処理の処理時間\n with timer_for_each() as timer:\n for _ in range(10):\n with timer('処理1'):\n time.sleep(0.1)\n\n with timer('処理2'):\n time.sleep(0.2)\n\n # 各処理 + 全体の処理時間\n with timer_for_total('全体') as timer:\n for _ in range(10):\n with timer('処理1'):\n time.sleep(0.1)\n\n with timer('処理2'):\n time.sleep(0.2)\n\n # 例外時の処理\n with single_timer('Error handling'):\n print(\"Let's raise error\")\n raise Exception('Error occured!')\n","repo_name":"IkokObi/Reference","sub_path":"Python/context_manager/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3371684827","text":"# /usr/bin/env python\n# -*- coding: utf-8 -*-\n# author = 'Han Kai'\nimport requests,re\nimport urllib.request\n\ndef getResponse(url, headers):\n try:\n response = requests.get(url=url, headers=headers)\n if response.status_code == 200:\n return response\n return None\n except Exception as e:\n return None\n\ndef getSongname(songid):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'\n }\n url = 'https://music.163.com/song?id={}'.format(songid)\n html = getResponse(url, headers=headers).text\n # print(html)\n title = re.findall('(.*?)', html, re.S)\n print('----------------')\n print(title)\n name = title[0].split('-')[0]\n return name.strip()\n except:\n print(\"获取歌名失败\")\n\nif __name__ == '__main__':\n songid = input(\"请输入要下载的歌曲id:\")\n url = 'http://music.163.com/song/media/outer/url?id={}'.format(int(songid))\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'\n }\n download_url = getResponse(url, headers).url\n Songname = getSongname(int(songid))\n print(\"下载的地址:\",download_url)\n print(\"下载的歌曲:\", Songname)\n #和with open类似,这个方法可以根据地址另存为\n urllib.request.urlretrieve(download_url, Songname + '.mp3')\n\n","repo_name":"hanshoukai/Python-","sub_path":"Python实用小脚本/网易云根据歌曲id下载.py","file_name":"网易云根据歌曲id下载.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6016511591","text":"from tkinter import *\r\nimport tkinter as tk\r\nimport cv2\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter import filedialog\r\nimport datetime\r\n\r\n\r\nclass MainGUI:\r\n def __init__(self, root) -> None:\r\n # BUTTON ON/OFF STATES\r\n self.isDetectEyes = False\r\n self.isDetectFace = False\r\n self.isDetectSmile = False\r\n self.isDetectCat = False\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = False\r\n self.isImageSelected = False\r\n\r\n # DEFAULT IMAGE WHEN NO IMAGE/VIDEO SOURCE IS USED\r\n self.defaultFrameImage = PhotoImage(file=r\"./icons/need_image.png\")\r\n\r\n # RESIZING ICONS TO A SMALLER SIZE\r\n iconWidth = 1 # Icons will resize 1/n of the original Icon Image\r\n iconHeight = 1\r\n eyeIcon = PhotoImage(\r\n file=r\"./icons/eye.png\").subsample(iconWidth, iconHeight)\r\n faceIcon = PhotoImage(\r\n file=r\"./icons/face-detection.png\").subsample(iconWidth, iconHeight)\r\n smileIcon = PhotoImage(\r\n file=r\"./icons/smile.png\").subsample(iconWidth, iconHeight)\r\n catIcon = PhotoImage(\r\n file=r\"./icons/cat.png\").subsample(iconWidth, iconHeight)\r\n webCamIcon = PhotoImage(\r\n file=r\"./icons/webcam.png\").subsample(iconWidth, iconHeight)\r\n videoIcon = PhotoImage(\r\n file=r\"./icons/video.png\").subsample(iconWidth, iconHeight)\r\n imageIcon = PhotoImage(\r\n file=r\"./icons/image.png\").subsample(iconWidth, iconHeight)\r\n stopIcon = PhotoImage(\r\n file=r\"./icons/stop-button.png\").subsample(iconWidth, iconHeight)\r\n captureIcon = PhotoImage(\r\n file=r\"./icons/photo-capture.png\").subsample(iconWidth, iconHeight)\r\n\r\n # MAIN WINDOW\r\n self.root = root\r\n self.root.title(\"Final Project\")\r\n self.root.configure(bg=\"#4DBF85\")\r\n self.root.option_add(\"*font\", \"Arial 12\")\r\n self.root.resizable(False, False)\r\n\r\n # IMAGE/VIDEO CANVAS\r\n imageFrame = Frame(self.root)\r\n imageFrame.grid(row=0, column=0, padx=10, pady=10)\r\n self.labelImage = Label(imageFrame)\r\n self.labelImage.grid(row=0, column=0, padx=5, pady=5)\r\n\r\n # INDIVIDUAL FRAMES FOR GROUPED BUTTONS FOR BETTER LAYOUT\r\n buttonsFrame = Frame(self.root, width=650, height=400)\r\n buttonsFrame.grid(row=0, column=1, padx=10, pady=5)\r\n\r\n sourcesButtonsFrame = Frame(buttonsFrame)\r\n detectButtonsFrame = Frame(buttonsFrame)\r\n snapshotButtonsFrame = Frame(buttonsFrame)\r\n\r\n sourcesButtonsFrame.grid(row=1, column=0)\r\n detectButtonsFrame.grid(row=3, column=0)\r\n snapshotButtonsFrame.grid(row=5, column=0)\r\n\r\n # BUTTONS\r\n # SOURCE BUTTONS\r\n self.buttonUseWebCam = Button(\r\n sourcesButtonsFrame, width=80, text=\"Webcam\", image=webCamIcon,\r\n compound=TOP, command=self.start_webcam)\r\n self.buttonUploadVideo = Button(\r\n sourcesButtonsFrame, width=80, text=\"Video\", image=videoIcon,\r\n compound=TOP, command=self.upload_video)\r\n self.buttonUploadImage = Button(\r\n sourcesButtonsFrame, width=80, text=\"Image\", image=imageIcon,\r\n compound=TOP, command=self.upload_image)\r\n\r\n # DETECTION BUTTONS\r\n self.buttonEyes = Button(\r\n detectButtonsFrame, text=\"Eyes\", image=eyeIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectEyes, 'isDetectEyes', self.buttonEyes))\r\n self.buttonFace = Button(\r\n detectButtonsFrame, text=\"Face\", image=faceIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectFace, 'isDetectFace', self.buttonFace))\r\n self.buttonSmile = Button(\r\n detectButtonsFrame, text=\"Smile\", image=smileIcon, width=120,\r\n 
compound=LEFT, command=lambda: self.detect_button_click(self.isDetectSmile, 'isDetectSmile', self.buttonSmile))\r\n self.buttonCat = Button(\r\n detectButtonsFrame, text=\"Cats\", image=catIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectCat, 'isDetectCat', self.buttonCat))\r\n\r\n # MISC BUTTONS\r\n self.buttonSnapShot = Button(\r\n snapshotButtonsFrame, text=\"SnapShot\", image=captureIcon, width=140, state=DISABLED,\r\n compound=LEFT, command=lambda: SaveSnapShotWindow(self.root, self.filteredFrame))\r\n self.buttonStop = Button(\r\n snapshotButtonsFrame, text=\"Stop\", image=stopIcon, width=140,\r\n compound=LEFT, command=self.stop_playing)\r\n\r\n # BUTTON LAYOUT\r\n Label(buttonsFrame, text=\"Pick a Source\").grid(\r\n row=0, column=0, pady=10)\r\n self.buttonUseWebCam.grid(row=1, column=0, padx=5, pady=5)\r\n self.buttonUploadVideo.grid(row=1, column=1, padx=5, pady=5)\r\n self.buttonUploadImage.grid(row=1, column=2, padx=5, pady=5)\r\n\r\n Label(buttonsFrame, text=\"Pick a Detection\").grid(\r\n row=2, column=0, pady=10)\r\n self.buttonEyes.grid(row=0, column=0, padx=5, pady=5)\r\n self.buttonFace.grid(row=1, column=0, padx=5, pady=5)\r\n self.buttonSmile.grid(row=0, column=1, padx=5, pady=5)\r\n self.buttonCat.grid(row=1, column=1, padx=5, pady=5)\r\n\r\n Label(buttonsFrame, text=\"Misc\").grid(\r\n row=4, column=0, pady=10)\r\n self.buttonSnapShot.grid(row=0, column=0, padx=5, pady=5)\r\n self.buttonStop.grid(row=0, column=1, padx=5, pady=5)\r\n\r\n # DISPLAY DEFAULT IMAGE UPON LAUNCH OR WHEN STOP BUTTON IS PRESSED\r\n self.labelImage.configure(image=self.defaultFrameImage)\r\n\r\n # CUSTOMIZE THE \"X\" BUTTON OF A WINDOW TO CLOSE IT WITHOUT ERROR\r\n # When the user pressed the X button on a window, this function will suspend all running functions before closing/destroying the window\r\n self.root.protocol(\"WM_DELETE_WINDOW\", lambda: (\r\n self.stop_playing(), self.root.destroy()))\r\n\r\n self.root.mainloop()\r\n\r\n def start_webcam(self):\r\n # Stop Everything if there is any\r\n self.stop_playing()\r\n\r\n # State that the program is using the Web Cam as source\r\n self.isUsingWebCam = True\r\n self.isPlayingVideo = False\r\n self.isImageSelected = False\r\n self.buttonUseWebCam.configure(state=DISABLED)\r\n self.buttonSnapShot.configure(state=NORMAL)\r\n\r\n # Opens the Web cam\r\n self.webCamCapture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\r\n\r\n self.cycle_frames()\r\n\r\n def upload_image(self):\r\n # Stop Everything if there is any\r\n self.stop_playing()\r\n\r\n # Spawns a window to choose a file, if a file was not selected the function will stop\r\n filePath = filedialog.askopenfilename(title=\"Select image file\", filetypes=(\r\n (\"jpg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\r\n if filePath == \"\":\r\n return\r\n # Convert / path to \\\\ for Windows to be able to read\r\n self.convertedFilePath = filePath.replace('/', \"\\\\\\\\\")\r\n\r\n # State that the program is using an Image as source\r\n self.isImageSelected = True\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = False\r\n self.buttonSnapShot.configure(state=NORMAL)\r\n\r\n self.cycle_frames()\r\n\r\n def upload_video(self):\r\n # Stop Everything if there is any\r\n self.stop_playing()\r\n\r\n # Spawns a window to choose a file, if a file was not selected the function will stop\r\n filePath = filedialog.askopenfilename(title=\"Select video file\", filetypes=(\r\n (\"mp4 files\", \"*.mp4\"), (\"all files\", \"*.*\")))\r\n if filePath == \"\":\r\n 
return\r\n\r\n # Convert / path to \\\\ for Windows to be able to read\r\n convertedFilePath = filePath.replace('/', \"\\\\\\\\\")\r\n\r\n self.video = cv2.VideoCapture(convertedFilePath)\r\n\r\n # State that the program is using a Video as source\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = True\r\n self.isImageSelected = False\r\n self.buttonSnapShot.configure(state=NORMAL)\r\n\r\n self.cycle_frames()\r\n\r\n def detect_button_click(self, buttonState, instanceName, button):\r\n if buttonState:\r\n # Sets the button to be Unpressed\r\n setattr(self, instanceName, False)\r\n button.config(relief=RAISED)\r\n\r\n else:\r\n # Sets the button to be Pressed\r\n setattr(self, instanceName, True)\r\n button.config(relief=SUNKEN)\r\n\r\n # Conditional statement for when an Image is the source, every click of the button updates the image\r\n if ((not self.isUsingWebCam) & (not self.isPlayingVideo)):\r\n if self.isImageSelected:\r\n self.cycle_frames()\r\n\r\n def cycle_frames(self):\r\n if self.isPlayingVideo | self.isUsingWebCam:\r\n while True:\r\n ret = 0\r\n if self.isUsingWebCam:\r\n ret, frame = self.webCamCapture.read()\r\n # Flips the webcam output to act like a mirror\r\n frame = cv2.flip(frame, 1)\r\n\r\n if self.isPlayingVideo:\r\n ret, frame = self.video.read()\r\n\r\n if not ret: # Stops the while loop when the video/webcam has no more frames or have been stopped\r\n break\r\n #\r\n self.show_image_on_label(frame)\r\n\r\n if self.isImageSelected:\r\n self.image = cv2.imread(self.convertedFilePath)\r\n self.show_image_on_label(self.image)\r\n\r\n def show_image_on_label(self, frame):\r\n # Apply the boxes and convert the image from BGR to RGB\r\n # This is the variable that will be used when clicking the snapshot button\r\n self.filteredFrame = self.detection_applier(frame)\r\n cv2image = cv2.cvtColor(self.filteredFrame, cv2.COLOR_BGR2RGB)\r\n\r\n # Converts CV2 image to PIL image that tkinter can read\r\n img_update = ImageTk.PhotoImage(Image.fromarray(cv2image))\r\n\r\n # Configure the label to show the PIL image in it\r\n self.labelImage.configure(image=img_update)\r\n self.labelImage.image = img_update\r\n self.labelImage.update()\r\n\r\n def detection_applier(self, frame):\r\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n\r\n # Configs of when drawing a text on the frames/images\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n fontScale = 0.7\r\n fontBold = 2\r\n\r\n faceCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_frontalface_default.xml\")\r\n eyeCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_eye.xml\")\r\n smileCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_smile.xml\")\r\n catFaceCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_frontalcatface.xml\")\r\n\r\n if (self.isDetectFace):\r\n # WHEN A FACE IS DETECTED, ONLY THE EYES AND SMILE IN THE FACE ROI WILL BE BOXED\r\n detectedFaces = faceCascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in detectedFaces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n cv2.putText(frame, 'Face', (x, y), font,\r\n fontScale, (0, 255, 0), fontBold)\r\n\r\n # Region of Interest of the Face\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = frame[y:y+h, x:x+w]\r\n\r\n if self.isDetectEyes:\r\n eyes = eyeCascade.detectMultiScale(roi_gray, 1.3, 25)\r\n for (ex, ey, ew, eh) in eyes:\r\n cv2.rectangle(roi_color, (ex, ey),\r\n (ex+ew, ey+eh), (0, 0, 255), 2)\r\n cv2.putText(roi_color, 'Eyes', (ex, ey),\r\n 
font, fontScale, (0, 0, 255), fontBold)\r\n\r\n if self.isDetectSmile:\r\n smiles = smileCascade.detectMultiScale(\r\n roi_gray, 1.8, 25)\r\n for (sx, sy, sw, sh) in smiles:\r\n cv2.rectangle(roi_color, (sx, sy),\r\n ((sx + sw), (sy + sh)), (255, 0, 0), 2)\r\n cv2.putText(roi_color, 'Smile', (sx, sy),\r\n font, fontScale, (255, 0, 0), fontBold)\r\n\r\n if (self.isDetectEyes & (not self.isDetectFace)):\r\n\r\n detectedEyes = eyeCascade.detectMultiScale(gray, 1.3, 25)\r\n for (x, y, w, h) in detectedEyes:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\r\n cv2.putText(frame, 'Eyes', (x, y),\r\n font, fontScale, (0, 0, 255), fontBold)\r\n\r\n if (self.isDetectSmile & (not self.isDetectFace)):\r\n\r\n detectedSmiles = smileCascade.detectMultiScale(gray, 1.8, 20)\r\n for (x, y, w, h) in detectedSmiles:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (225, 0, 0), 2)\r\n cv2.putText(frame, 'Smile', (x, y),\r\n font, fontScale, (255, 0, 0), fontBold)\r\n\r\n if self.isDetectCat:\r\n\r\n detectedCats = catFaceCascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in detectedCats:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 102, 255), 2)\r\n cv2.putText(frame, 'Cat', (x, y),\r\n font, fontScale, (0, 102, 255), fontBold)\r\n\r\n return frame\r\n\r\n def stop_playing(self):\r\n # Releases either the webcam of the video source\r\n if self.isUsingWebCam:\r\n self.webCamCapture.release()\r\n if self.isPlayingVideo:\r\n self.video.release()\r\n\r\n # Set the default image on the image canvas\r\n self.labelImage.configure(image=self.defaultFrameImage)\r\n\r\n # Change button states accordingly\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = False\r\n self.isImageSelected = False\r\n self.buttonSnapShot.configure(state=DISABLED)\r\n self.buttonUseWebCam.configure(state=NORMAL)\r\n\r\n\r\nclass SaveSnapShotWindow:\r\n def __init__(self, root, filteredFrame) -> None:\r\n self.saveWindow = Toplevel(root)\r\n self.saveWindow.title(\"Save SnapShot\")\r\n self.saveWindow.resizable(False, False)\r\n self.saveWindow.option_add(\"*font\", \"Arial 12\")\r\n\r\n # IMAGE/VIDEO CANVAS\r\n self.imageFrame = Frame(self.saveWindow)\r\n self.labelImage = Label(self.imageFrame)\r\n self.imageFrame.pack()\r\n self.labelImage.grid(row=0, column=0, padx=10, pady=10)\r\n\r\n # SEPARATE FRAMES FOR THE BUTTONS AND TEXT FIELD\r\n changeFileNameFrame = Frame(self.saveWindow)\r\n changeFileNameFrame.pack()\r\n buttonFrame = Frame(self.saveWindow)\r\n buttonFrame.pack()\r\n\r\n # TEXTFIELD FOR THE FILE NAME\r\n self.entryboxSaveAs = Entry(changeFileNameFrame, width=25)\r\n labelFileExtension = Label(changeFileNameFrame, text=\".jpg\")\r\n\r\n # SAVE AND CANCEL BUTTONS\r\n self.buttonSave = Button(\r\n buttonFrame, text=\"Save\", command=self.save, bg='#4764ff', fg='white', width=15, height=1)\r\n self.buttonCancel = Button(\r\n buttonFrame, text=\"Cancel\", command=self.close_window_or_cancel, bg='#3c3c3c', fg='white', width=15, height=1)\r\n\r\n # TEXTFIELD AND BUTTONS LAYOUT\r\n Label(changeFileNameFrame, text=\"Set Filename:\").grid(row=0, column=0)\r\n self.entryboxSaveAs.grid(row=1, column=0)\r\n labelFileExtension.grid(row=1, column=1, sticky=W)\r\n self.buttonSave.grid(row=0, column=1, padx=20, pady=10)\r\n self.buttonCancel.grid(row=0, column=0, padx=20, pady=10)\r\n #\r\n #\r\n # SHOW THE IMAGE ON THE IMAGE CANVAS\r\n self.snapShotImage = filteredFrame\r\n self.show_image_on_label(self.snapShotImage)\r\n\r\n # GETS THE CURRENT TIME AND PLACE IT IN THE TEXTFIELD AS A DEFAULT FILE NAME\r\n 
self.timeString = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\r\n self.entryboxSaveAs.insert(INSERT, self.timeString)\r\n\r\n # GRABS THE FOCUS ON THIS WINDOW SO THAT THE MAIN WINDOW CANNOT BE PRESSED UNTIL THIS WINDOW IS CLOSED\r\n self.saveWindow.grab_set()\r\n\r\n # MODIFIED THE X BUTTON FOR A SAFE CLOSE\r\n self.saveWindow.protocol(\r\n \"WM_DELETE_WINDOW\", self.close_window_or_cancel)\r\n\r\n def save(self):\r\n # Gets the file name from the textbox\r\n saveAsFileName = self.entryboxSaveAs.get()\r\n\r\n # If the user did not type any name, it will default to the current time\r\n if saveAsFileName == \"\":\r\n saveAsFileName = self.timeString\r\n\r\n # Writes/Saves the image that was passed through the class not the downscale thumbnail/image\r\n cv2.imwrite(f'./snapshots/{saveAsFileName}.jpg', self.snapShotImage)\r\n\r\n self.close_window_or_cancel()\r\n\r\n def close_window_or_cancel(self):\r\n self.saveWindow.grab_release()\r\n # Safely destroy the toplevel without affecting the root window\r\n self.saveWindow.destroy()\r\n\r\n def show_image_on_label(self, frame):\r\n # Apply the boxes and convert the image from BGR to RGB\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n cv2image = self.downscale_image(cv2image)\r\n\r\n # Converts CV2 image to PIL image that tkinter can read\r\n img_update = ImageTk.PhotoImage(Image.fromarray(cv2image))\r\n\r\n # Configure the label to show the PIL image in it\r\n self.labelImage.configure(image=img_update)\r\n self.labelImage.image = img_update\r\n self.labelImage.update()\r\n\r\n def downscale_image(self, image):\r\n scale_percent = 70 # Resize the image to n%\r\n width = int(image.shape[1] * scale_percent / 100)\r\n height = int(image.shape[0] * scale_percent / 100)\r\n dim = (width, height)\r\n\r\n # Return the resized image\r\n return cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\r\n\r\n\r\nroot = tk.Tk()\r\nMainGUI(root)\r\n","repo_name":"Mark-A14/haarscade-with-tkinter-gui","sub_path":"finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":18205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"17418816337","text":"from django.db.models.signals import pre_save, post_save\nfrom django.dispatch import receiver\n\nfrom product.utils import unique_slug_generator\nfrom product.models.datamodel import Incoming, Outgoing\n\n\n\n@receiver(pre_save, sender=Incoming)\ndef product_pre_save_slug(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = unique_slug_generator(instance)\n \n\n\n@receiver(post_save, sender=Outgoing)\ndef update_incoming(sender, instance, **kwargs):\n product_instance = Incoming.objects.get(pk=instance.product.id)\n print(product_instance.name)\n product_instance.stock = 'Sold'\n \n product_instance.save()\n ","repo_name":"milanalay/AkalaRecondition","sub_path":"product/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23948928527","text":"import smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.header import Header\r\n\r\nsmtp=smtplib.SMTP(\"smtp.gmail.com\",587)\r\nsmtp.ehlo()\r\nsmtp.starttls()\r\nsmtp.login(\"yujunlee7862@gmail.com\",\"dkgur3@@\")\r\n\r\nm=\"yujunlee7862@gmail.com\"\r\ny=\"yujunlee7862@gmail.com\"\r\nsubject=\"hellow\"\r\nmessage=\"dd\"\r\nmsg=MIMEText(message.encode('utf-8'),_subtype='plain',_charset='uft-8')\r\nmsg['subject']=Header(subject.encode('utf-8'),'utf-8')\r\nmsg['From']=m\r\nmsg['To']=y\r\nsmtp.sendmail(m, y, msg.as_string())\r\nsmtp.quit()\r\n","repo_name":"yujunlee12/gmail","sub_path":"ㅇ.py","file_name":"ㅇ.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18528546394","text":"# Desafio 50:\n# Desenvolva um programa que leia SEIS NÚMEROS INTEIROS\n# e mostre a soma apenas daqueles que foram PARES. Se o\n# valor digitado for ÍMPAR, desconsidere-o.\n\nsoma = 0\ncont = 0\nfor c in range(1, 7):\n num = int(input('Digite o {}⁰ número: '.format(c)))\n if num % 2 == 0:\n soma += num\n cont += 1\nprint('Você informou {} número(s) PARE(S) e a soma foi {}.'.format(cont, soma))\n","repo_name":"wmarenga/Python_Learning","sub_path":"Basic_Python_course_Course_in_video/Mundo2_python3/Exercicio50.py","file_name":"Exercicio50.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"10322470052","text":"from flask import (Blueprint, request)\nfrom . import models\n\nbp = Blueprint('reptile', __name__, url_prefix=\"/reptiles\")\n\n@bp.route('/', methods=['POST', 'GET'])\ndef index():\n # Return Reptiles Index\n if request.method == 'GET':\n reptiles_dict = {\n 'reptiles': []\n }\n # find all reptiles\n reptiles = models.Reptile.query.all()\n for row in reptiles:\n # JSON friendly dict format\n row_dict = {\n 'id': row.id,\n 'common_name': row.common_name,\n 'scientific_name': row.scientific_name,\n 'consevation_status': row.conservation_status,\n 'native_habitat': row.native_habitat,\n 'fun_fact': row.fun_fact\n }\n reptiles_dict['reptiles'].append(row_dict)\n return reptiles_dict\n\n # Add New Reptile\n elif request.method == 'POST':\n new_reptile = models.Reptile(\n common_name = request.form['common_name'],\n scientific_name = request.form['scientific_name'],\n conservation_status = request.form['conservation_status'],\n native_habitat = request.form['native_habitat'],\n fun_fact = request.form['fun_fact']\n )\n # JSON friendly dict format\n new_reptile_dict = {\n 'common_name': request.form['common_name'],\n 'scientific_name': request.form['scientific_name'],\n 'conservation_status': request.form['conservation_status'],\n 'native_habitat': request.form['native_habitat'],\n 'fun_fact': request.form['fun_fact']\n }\n # Add New Reptile to Database\n models.db.session.add(new_reptile)\n models.db.session.commit()\n\n # Print dictionary object on console and return on postman\n print(new_reptile_dict)\n return(new_reptile_dict)\n\n\n@bp.route('/')\ndef show(id):\n reptile = models.Reptile.query.filter_by(id=id).first()\n reptile_dict = {\n 'common_name': reptile.common_name,\n 'scientific_name': reptile.scientific_name,\n 'conservation_status': reptile.conservation_status,\n 'native_habitat': reptile.native_habitat,\n 'fun_fact': reptile.fun_fact\n }\n return reptile_dict","repo_name":"skim1127/Ball-Py-API","sub_path":"ballpy/reptile.py","file_name":"reptile.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6812576739","text":"import dash\nfrom dash import html, dcc\nimport dash_bootstrap_components as dbc\n\ndash.register_page(__name__, path='/')\n\n\nselect_existing = dbc.Col(\n html.Div(\n [\n html.H2(\"Select existing dataset\"),\n html.Hr(className=\"my-2\"),\n html.P(\n \"Use a previously onboarded dataset to run the topic modeling pipeline.\"\n ),\n dbc.Button(\"Go\", color=\"dark\", outline=True, href='/tm_data_selection'),\n ],\n className=\"h-100 p-4 m-1 border rounded-3\",\n ),\n md=6,\n)\n\nonboard_new = dbc.Col(\n html.Div(\n [\n html.H2(\"Onboard new data\"),\n html.Hr(className=\"my-2\"),\n html.P(\n \"Add a new corpus to the data registry.\"\n ),\n dbc.Button(\"Go\", color=\"dark\", outline=True, href='/')\n \n ],\n className=\"h-100 p-4 m-1 border rounded-3\",\n ),\n md=6,\n)\n\nhome_selection = dbc.Row(\n [select_existing, onboard_new],\n className=\"align-items-md-stretch\",\n style={\"margin-left\":\"3%\",\"margin-right\":\"3%\" }\n)\n\nlayout = html.Div(children=[\n home_selection\n])\n\n","repo_name":"DHARPA-Project/kiara_plugin.playground","sub_path":"examples/apps/dash/pages/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"43671365687","text":"import string\nimport zipfile\nimport random\n\n\ndef check_sum():\n with open('rows.txt') as file:\n lines = file.readlines()\n sum_num = 0\n for line in lines:\n row = line.split('\\t')\n row = [int(i) for i in row]\n diff = max(row) - min(row)\n sum_num = sum_num + diff\n print(sum_num)\n return sum_num\n\n\ncheck_sum()\n\n\ndef read_zip_file():\n files = zipfile.ZipFile('zadanie_1_words.zip')\n\n for txt in files.infolist():\n single_file = files.open(txt)\n line_list = str(single_file.readlines())\n\n line_list = line_list.lower()\n print(line_list)\n signs = string.ascii_lowercase\n for sign in signs:\n amount_of_sign = line_list.count(sign)\n print(sign, amount_of_sign)\n\n\nread_zip_file()\n\n\ndef rock_paper_scissors():\n choice = ('r', 'p', 's')\n user_choice = 0\n comp_won = 0\n user_won = 0\n\n while user_choice != 'no':\n user_choice = (input('Choose a play sign: (R)ock,(P)aper, (S)cissors or (No) if you want to close the game: '))\n user_choice = user_choice.lower()\n if user_choice == 'no':\n break\n\n comp_choice = random.choice(choice)\n print(user_choice)\n print(comp_choice)\n if user_choice == 'r' and comp_choice == 's' or user_choice == 's' and comp_choice == 'p' or user_choice == 'p' and comp_choice == 'r':\n print('+' + '-' * 10 + '+')\n print('| ' + 'You won!' + ' |')\n print('+' + '-' * 10 + '+')\n user_won += 1\n print('Current result: You {}:{} Computer'.format(user_won, comp_won))\n\n elif user_choice == 'r' and comp_choice == 'r' or user_choice == 's' and comp_choice == 's' or user_choice == 'p' and comp_choice == 'p':\n print('+' + '-' * 7 + '+')\n print('| ' + 'Draw!' + ' |')\n print('+' + '-' * 7 + '+')\n\n elif user_choice == 's' and comp_choice == 'r' or user_choice == 'p' and comp_choice == 's' or user_choice == 'r' and comp_choice == 'p':\n print('+' + '-' * 15 + '+')\n print('| ' + 'Computer won!' + ' |')\n print('+' + '-' * 15 + '+')\n comp_won += 1\n print('Current result: You {}:{} Computer'.format(user_won, comp_won))\n\n else:\n print('You have chose a wrong sign. Try again.')\n\n play_again = input('Do you want to play again? Write \"yes\" or \"no\".')\n play_again = play_again.lower()\n if play_again == 'yes' or play_again == 'y':\n print('=' * 80)\n elif play_again == 'no' or play_again == 'n':\n break\n\n\nrock_paper_scissors()\n\n\n\n\n\n","repo_name":"DariaBe/Prework_PfW","sub_path":"prework_isa.py","file_name":"prework_isa.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"22170375995","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\n# create an emtry graph\ng = nx.Graph()\n\n# ----------------- adding nodes to graph -----------------\n# adding just one node (เพิ่ม 1 โหนด)\ng.add_node('a')\n# a list of nodes (เพิ่มโหนดแบบเป็นลิสต์, หลายโหนดทีเดียว)\ng.add_nodes_from(['b', 'c'])\ndemoNodes = ['d', 'g']\ng.add_nodes_from(demoNodes)\n# ---------------------------------------------------\n\n# ----------------- adding Edges to graph -----------------\n# จับคู่ edges\ng.add_edge(1, 2)\nedgeNo1 = ('x', 'z') # type : tuple ()\nedgeNo2 = ('p', 'q') # type : tuple ()\nedgeNo3 = [('a', 'c'), ('c', 'd'), ('a', 1), (1, 'd'), ('a', 2)] # type : list []\n# print(type(edgeNo3))\ng.add_edge(*edgeNo1)\ng.add_edge(*edgeNo2)\ng.add_edges_from(edgeNo3)\n# ---------------------------------------------------\nprint(f'nodes of graph : {g.nodes()}')\nprint(f'edges of graph : {g.edges()}')\nnx.draw(g, with_labels = True)\n\n# Save File\nplt.savefig('path_graph.png')\nplt.show()","repo_name":"Nattawut-CS/AI-Lab01","sub_path":"introduce_networkx.py","file_name":"introduce_networkx.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15819560228","text":"# coding=utf-8\nr\"\"\"Evaluation on detecting key events using a RNN.\n\"\"\"\nimport math\nimport torch\nimport numpy as np\nimport sklearn\n\nfrom datasets.dataset_splits import DATASET_TO_NUM_CLASSES\nimport utils.logging as logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass VectorRegression(sklearn.base.BaseEstimator):\n \"\"\"Class to perform regression on multiple outputs.\"\"\"\n\n def __init__(self, estimator):\n self.estimator = estimator\n\n def fit(self, x, y):\n _, m = y.shape\n # Fit a separate regressor for each column of y\n self.estimators_ = [sklearn.base.clone(self.estimator).fit(x, y[:, i])\n for i in range(m)]\n return self\n\n def predict(self, x):\n # Join regressors' predictions\n res = [est.predict(x)[:, np.newaxis] for est in self.estimators_]\n return np.hstack(res)\n\n def score(self, x, y):\n # Join regressors' scores\n res = [est.score(x, y[:, i]) for i, est in enumerate(self.estimators_)]\n return np.mean(res)\n\n\ndef fit_model(train_embs, train_labels, val_embs, val_labels,\n global_step, num_classes, prefix, report_error=False):\n \"\"\"Linear Regression to regress to fraction completed.\"\"\"\n\n train_embs = np.concatenate(train_embs, axis=0)\n train_labels = np.concatenate(train_labels, axis=0)\n val_embs = np.concatenate(val_embs, axis=0)\n val_labels = np.concatenate(val_labels, axis=0)\n\n lin_model = VectorRegression(sklearn.linear_model.LinearRegression())\n lin_model.fit(train_embs, train_labels)\n\n train_score = lin_model.score(train_embs, train_labels)\n val_score = lin_model.score(val_embs, val_labels)\n\n return lin_model, train_score, val_score\n\ndef regression_labels_for_class(labels, class_idx):\n # Assumes labels are ordered. Find the last occurrence of particular class.\n transition_frame = np.argwhere(labels == class_idx)[-1, 0]\n return (np.arange(float(len(labels))) - transition_frame) / len(labels)\n\n\ndef get_regression_labels(class_labels, num_classes):\n regression_labels = []\n for i in range(num_classes - 1):\n regression_labels.append(regression_labels_for_class(class_labels, i))\n return np.stack(regression_labels, axis=1)\n\n\ndef get_targets_from_labels(all_class_labels, num_classes):\n all_regression_labels = []\n for class_labels in all_class_labels:\n all_regression_labels.append(get_regression_labels(class_labels,\n num_classes))\n return all_regression_labels\n\n\nclass EventCompletion(object):\n \"\"\"Predict event completion using linear regression.\"\"\"\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.downstream_task = True\n\n def evaluate(self, dataset, cur_epoch, summary_writer, visualize=True):\n \"\"\"Labeled evaluation.\"\"\"\n fractions = self.cfg.EVAL.CLASSIFICATION_FRACTIONS\n\n train_embs = dataset['train_dataset']['embs']\n val_embs = dataset['val_dataset']['embs']\n num_classes = DATASET_TO_NUM_CLASSES[dataset['name']]\n\n if len(train_embs) == 0 or len(val_embs) == 0:\n raise ValueError('All embeddings are NAN. 
Something is wrong with model.')\n\n val_labels = get_targets_from_labels(dataset['val_dataset']['labels'],\n num_classes)\n\n num_samples = len(dataset['train_dataset']['embs'])\n val_scores = []\n for fraction in fractions:\n num_samples_used = max(1, int(fraction * num_samples))\n train_embs = dataset['train_dataset']['embs'][:num_samples_used]\n train_labels = get_targets_from_labels(\n dataset['train_dataset']['labels'][:num_samples_used], num_classes)\n model, train_score, val_score = fit_model(train_embs, train_labels, val_embs, val_labels,\n cur_epoch, num_classes, '%s_%s' % (dataset['name'], str(fraction)))\n prefix = '%s_%s' % (dataset['name'], str(fraction))\n logger.info('[Global step: {}] Event Completion {} Fraction Train '\n 'Score: {:.3f},'.format(cur_epoch, prefix, train_score))\n logger.info('[Global step: {}] Event Completion {} Fraction Val '\n 'Score: {:.3f},'.format(cur_epoch, prefix, val_score))\n summary_writer.add_scalar('event_completion/train_%s_score' % prefix,\n train_score, cur_epoch)\n summary_writer.add_scalar('event_completion/val_%s_score' % prefix,\n val_score, cur_epoch)\n val_scores.append(val_score)\n \n return val_scores[-1]\n","repo_name":"minghchen/CARL_code","sub_path":"evaluation/event_completion.py","file_name":"event_completion.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"48"}
+{"seq_id":"41033099761","text":"# 给定一个包括 n 个整数的数组 nums 和 一个目标值 target。找出 nums 中的三个整数,使得它们的和与 target 最接近。返回这三个数的和\n# 。假定每组输入只存在唯一答案。 \n# \n# \n# \n# 示例: \n# \n# 输入:nums = [-1,2,1,-4], target = 1\n# 输出:2\n# 解释:与 target 最接近的和是 2 (-1 + 2 + 1 = 2) 。\n# \n# \n# \n# \n# 提示: \n# \n# \n# 3 <= nums.length <= 10^3 \n# -10^3 <= nums[i] <= 10^3 \n# -10^4 <= target <= 10^4 \n# \n# Related Topics 数组 双指针 \n# 👍 645 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def threeSumClosest(self, nums, target):\n res = sum(nums[:2]) + nums[-1]\n nums.sort()\n for i in range(len(nums)-2):\n l, r = i + 1, len(nums) - 1\n while l < r:\n temp = nums[i] + nums[l] + nums[r]\n if abs(temp - target) < abs(res - target):\n res = temp\n if temp == target:\n return target\n if temp > target:\n r -= 1\n else:\n l += 1\n return res\n# leetcode submit region end(Prohibit modification and deletion)\n\n\na = Solution()\nprint(a.threeSumClosest([-1,2,1,-4],1))\n","repo_name":"lishx-archive/Leetcode","sub_path":"leetcode/editor/cn/[16]最接近的三数之和.py","file_name":"[16]最接近的三数之和.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"13624603683","text":"import os as _os\n\n####################################################################################################################################\n\n# get version\n\nclass Version:\n \"\"\"\n\n .. py:class:: Version\n\n This is the Version class for generating objects\n that contain the methods for getting and dumping\n the python-interface or kernel versions of the\n ParaMonte library installation on the system.\n\n **Parameters**\n\n versionPath\n A string containing the path to either the\n ParaMonte kernel or interface version file.\n\n versionType\n A string containing the type of the version\n file. It can be one of the following values:\n\n \"interface\"\n implying the Python-interface version\n number of the ParaMonte library.\n\n \"kernel\"\n implying the kernel-routines version\n number of the ParaMonte library.\n\n \"\"\"\n\n def __init__(self,versionPath,versionType):\n self._versionList = [\"interface\",\"kernel\"]\n self._versionPath = versionPath\n self._versionType = versionType\n self._versionSave = None\n self._checkVersionType()\n\n def get(self):\n \"\"\"\n\n .. py:method:: get(self)\n\n Get the Python-interface or kernel version of the\n ParaMonte library, in verbose format.\n\n **Parameters**\n\n None\n\n **Returns**\n\n None\n\n \"\"\"\n return \"ParaMonte Python \" + self._versionType.capitalize() + \" Version \" + self.dump()\n\n def dump(self):\n \"\"\"\n\n .. py:method:: dump(self)\n\n Dump **only the version number** of either\n the Python-interface or kernel of the\n ParaMonte library.\n\n **Parameters**\n\n None\n\n **Returns**\n\n None\n\n \"\"\"\n for versionType in self._versionList:\n if versionType==self._versionType:\n if self._versionSave is None:\n versionFileName = \".VERSION_\" + versionType.upper()\n versionFilePath = _os.path.join(self._versionPath, versionFileName)\n try:\n with open(versionFilePath,\"r\") as versionFile:\n self._versionSave = versionFile.readline().strip(\"\\n\")\n except:\n self._versionSave = \"UNKNOWN\"\n return self._versionSave\n else:\n return self._versionSave\n\n def _checkVersionType(self):\n versionTypeNotFound = True\n for versionType in self._versionList:\n if versionType==self._versionType:\n versionTypeNotFound = False\n break\n if versionTypeNotFound:\n _sys.exit(\"The input versionType is not a valid recognized version type. Possible values: \" + \" \".join(versionList))\n","repo_name":"rahuldwivedi01/paramonte","sub_path":"src/interface/Python/paramonte/_Version.py","file_name":"_Version.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"4972285930","text":"\"\"\"png_image_test.py\"\"\"\n\nimport unittest\nimport sys\nimport os\nsys.path.insert(0,\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n os.path.join('..', '..', '..')\n )\n)\n\nimport fpdf\nimport test\nfrom PIL import Image\n\nfrom test.utilities import relative_path_to, \\\n set_doc_date_0, \\\n calculate_hash_of_file\n\ndef goodFiles():\n not_supported = [\n \"e59ec0cfb8ab64558099543dc19f8378.png\", # Interlacing not supported:\n \"6c853ed9dacd5716bc54eb59cec30889.png\", # 16-bit depth not supported:\n \"ac6343a98f8edabfcc6e536dd75aacb0.png\", # Interlacing not supported:\n \"93e6127b9c4e7a99459c558b81d31bc5.png\", # Interlacing not supported:\n \"18f9baf3834980f4b80a3e82ad45be48.png\", # Interlacing not supported:\n \"51a4d21670dc8dfa8ffc9e54afd62f5f.png\", # Interlacing not supported:\n ]\n\n images = [relative_path_to(f) for f\n in os.listdir(relative_path_to('.'))\n if f.endswith(\".png\")\n and os.path.basename(f) not in not_supported]\n images.sort()\n rtn = []\n for image in images:\n if os.path.basename(image) in not_supported:\n pass\n else:\n rtn.append(image)\n return rtn\n\n\nclass InsertPNGSuiteFiles(unittest.TestCase):\n\n def test_insert_png_files(self):\n pdf = fpdf.FPDF(unit = 'pt')\n pdf.compress = False\n\n for image in goodFiles():\n pdf.add_page()\n pdf.image(\n image, x = 0, y = 0, w = 0, h = 0,\n type = '', link = None)\n set_doc_date_0(pdf)\n outfile = relative_path_to('insert_images_png_test_files.pdf')\n pdf.output(outfile, 'F')\n # print(calculate_hash_of_file(outfile))\n\n test_hash = calculate_hash_of_file(outfile)\n # ordered the images for reproduceability\n self.assertEqual(test_hash, \"0085260bea512b9394ce1502b196240a\")\n\n # self.assertEqual(test_hash, \"4f65582566414202a12ed86134de10a7\")\n os.unlink(outfile)\n\n def test_insert_png_files_From_PIL(self):\n pdf = fpdf.FPDF(unit = 'pt')\n pdf.compress = False\n for image in goodFiles():\n pdf.add_page()\n im = Image.open(image)\n pdf.image(\n im, x = 0, y = 0, w = 0, h = 0,\n type = '', link = None)\n\n set_doc_date_0(pdf)\n outfile = relative_path_to('insert_images_png_test_files.pdf')\n pdf.output(outfile, 'F')\n # print(calculate_hash_of_file(outfile))\n\n test_hash = calculate_hash_of_file(outfile)\n # ordered the images for reproduceability\n self.assertEqual(test_hash, \"3cfa70ad39cd595562b726fc16b8510d\")\n\n # self.assertEqual(test_hash, \"4f65582566414202a12ed86134de10a7\")\n os.unlink(outfile)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"inprojectspl/pyfpdf","sub_path":"test/image/png_images/png_file_test.py","file_name":"png_file_test.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73897238227","text":"from collections import *\n\nfactorial = [1] * 600\n\na = 1\nfor i in range(1, 500 + 1):\n a *= i\n factorial[i] = a\n\nf = open('input.txt')\nfor line in f:\n n = int(line)\n if n != 0:\n print(line.strip() + '!' + ' --')\n print(Counter(str(factorial[n])))\n","repo_name":"yubinbai/pcuva-problems","sub_path":"UVa 324 Factorial frequencies/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"}
+{"seq_id":"15507353449","text":"from sklearn.preprocessing import LabelEncoder, StandardScaler, Normalizer\nfrom sklearn.pipeline import make_pipeline\nimport pandas as pd\nimport time\n\ndef label_encode_data(df, columns):\n result = df.copy()\n \n label_encoder = LabelEncoder()\n \n for col in columns:\n result[col] = label_encoder.fit_transform(df[col])\n\n return result\n\ndef setup_pipeline(pipeline):\n return make_pipeline(*pipeline)\n\ndef normalize_data_pipeline(df, pipeline):\n transformer = setup_pipeline(pipeline)\n result = transformer.fit_transform(df)\n return pd.DataFrame(result, columns = df.columns)\n\ndef export_to_kaggle_csv(df, predictions):\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n output_file_name = \"prediction_\" + timestr + \".csv\"\n\n my_submission = pd.DataFrame({'id': df[\"id\"], 'price': predictions})\n my_submission.to_csv('output/' + output_file_name, index=False)\n print(f\"Exportados los datos a: output/'{output_file_name}'\")\n ","repo_name":"rfminguez/w7-diamond_classification","sub_path":"src/transform_toolbox.py","file_name":"transform_toolbox.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28908624101","text":"#Data processing methodology procurred from: https://towardsdatascience.com/topic-modeling-and-latent-dirichlet-allocation-in-python-9bf156893c24\n\nimport json\nimport contractions\nimport gensim\nimport pickle\nimport re\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.stem.porter import *\nimport numpy as np\nimport nltk\n\ndef lemmatize_stemming(text):\n stemmer = SnowballStemmer(\"english\")\n return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))\n\n\ndef process_body(body, edited=False):\n ans = re.sub(r\"http\\S+\", \"\", body) # removes any links to other reddit posts\n\n# want to remove summary of posts, easiest to remove end ones\n tldr_index = ans.lower().find(\"tl;dr\")\n if tldr_index > (len(ans)//2):\n ans = ans[:tldr_index]\n# now check if the post has been edited\n# Most edit: remarks are thanking the commentors, which is not necessary for this project\n elif edited:\n edit_index = ans.lower().find(\"edit:\")\n if edit_index > (len(ans)//2):\n ans = ans[:edit_index]\n# TODO: Look into removing tl;dr's that come at the beginning of posts\n# TODO: Same algorithm for detecting when people put Edit: beginning\n\n# TODO: Expand contractions\n ans = contractions.fix(ans)\n# Tokenizing the body\n resulting_process = []\n# This applies lemmatization and stemming of the tokens\n for tkn in gensim.utils.simple_preprocess(ans):\n if tkn not in gensim.parsing.preprocessing.STOPWORDS and len(tkn) > 3:\n resulting_process.append(lemmatize_stemming(tkn))\n\n return resulting_process\n\n\n# Need to do this check so other files can use process_body\nif __name__ == '__main__':\n np.random.seed(2018)\n nltk.download('wordnet')\n temp_index = 0\n model_size = [5, 10, 15, 20]\n data = []\n\n with open('Raw Data/data.json') as fp:\n data = json.load(fp)\n\n# Preprocess all the bodies\n list_of_bodies = []\n pre_data = {'submissions': []}\n for x in range(len(data['submissions'])):\n data['submissions'][x]['body'] = process_body(data['submissions'][x]['body'], data['submissions'][x]['edited'])\n list_of_bodies.append(data['submissions'][x]['body'])\n\n# Creating the dictionary\n dictionary = gensim.corpora.Dictionary(list_of_bodies)\n\n# Getting some statistics on the generated dictionary\n total = 0\n count = 0\n for k, v in dictionary.iteritems():\n total += k\n count += 1\n print(\"Total number of words == \" + str(total))\n print(\"Number of unique tokens == \" + str(count))\n\n# making bag of words out of all the submission bodies\n bag_of_bow = [dictionary.doc2bow(sub) for sub in list_of_bodies]\n\n for topic_num in model_size:\n print('\\n\\n\\n*******TRAINING FOR TOPIC SIZE = ' + str(topic_num) + '****************\\n')\n print('Starting lda training ...')\n lda_model = gensim.models.LdaModel(bag_of_bow, num_topics=topic_num, id2word=dictionary, passes=2)\n print('Finished training!')\n\n # Printing the words from each topic\n for idx, topic in lda_model.print_topics(-1,10):\n print('Topic: {} \\nWords: {}'.format(idx, topic))\n\n\n # Saving all the stuff i might need to save like model, dictionary, and processed bodies\n lda_model.save('Raw Data/ldaModel'+str(topic_num))\n\n with open('Raw Data/dictionary', 'ab') as fp:\n pickle.dump(dictionary, fp)\n\n with open('Raw Data/preData.json', 'w') as fp:\n fp.seek(0)\n fp.truncate()\n json.dump(data, 
fp)\n\n","repo_name":"terrie9876/LoveAdviceBot","sub_path":"ModelMaker.py","file_name":"ModelMaker.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16857098673","text":"import configparser\nimport os\nimport smtplib \nimport logging\nfrom email.mime import multipart\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTPException\n\n#创建日志\nlog_format = '%(filename)s %(asctime)s %(levelname)s: %(message)s' \nlogging.basicConfig(format = log_format,level = logging.ERROR,filename = 'error.log',filemode = 'w')\nlogger = logging.getLogger(__name__)\n \n#创建配置文件解析对象\nconfig = configparser.ConfigParser()\n#加载配置文件\nconfig.read(\"MailConfiguration.ini\")\n#获取参数\nsender = config.get('Mail', 'sender')\nreceiver = config.get('Mail', 'receiver') \nfiles_path = config.get('File', 'path')\nmail_host = config.get('Mail', 'mail_host')\nmail_port = config.get('Mail', 'mail_port')\nmail_pass = config.get('Mail', 'mail_pass')\n\nmsg = multipart.MIMEMultipart() \nmsg['from'] = sender\nmsg['to'] = receiver\n#标题\nmsg['subject'] = config.get('Title','title') \n#正文\ncontent = MIMEText(config.get('Body', 'body'))\nmsg.attach(content)\nsmtp = None\n\ntry:\n #添加多个附件\n for file_path in files_path.split(','):\n basename = os.path.basename(file_path)\n f = open(file_path,'rb')\n \n att = MIMEText(f.read(),'base64','utf-8') \n att[\"Content-Type\"] = 'application/octet-stream'\n att.add_header('Content-Disposition', 'attachment',filename=('gbk', '', basename))\n msg.attach(att)\n \n smtp = smtplib.SMTP() #登录邮箱服务器\n smtp.connect(mail_host,mail_port) #连接邮箱服务器\n smtp.login(sender,mail_pass) #开始登录\n smtp.sendmail(sender,receiver,msg.as_string()) #发送邮件\nexcept IOError as e:\n logger.error(e)\nexcept SMTPException as e:\n logger.error(e)\nexcept Exception as e:\n logger.error(e)\nfinally:\n if smtp:\n smtp.close()\n\n","repo_name":"sunzhengbo/learning","sub_path":"Python/SendMail/SendMail.py","file_name":"SendMail.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4773142539","text":"import re\nimport nltk\nnltk.download('punkt')\n\n# Deals with cleaning text for now\nimport string\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\nfrom nltk.tokenize import word_tokenize\n\ndef clean_text(data):\n data = \"\".join([word for word in data if word not in string.punctuation])\n data = word_tokenize(data)\n\n data = [word for word in data if word not in stopwords.words('english')]\n return data\n\nt1 = 'i left with my bouquet of red and yellow tulips under my arm feeling slightly more optimistic than when i arrived'\nt2 = 'i was feeling a little vain when i did this one'\nt3 = 'i cant walk into a shop anywhere where i do not feel uncomfortable'\n\ntext = clean_text(t1)\ntext_train = clean_text(t2)\ntext_test = clean_text(t3)\n\nprint(text)\nprint(text_train)\nprint(text_test)\n# tokenizer = Tokenizer()\n# tokenizer.fit_on_texts(texts)\n# sequence_train = tokenizer.texts_to_sequences(texts_train)\n# sequence_test = tokenizer.texts_to_sequences(texts_test)\n","repo_name":"Maria-Gomes/CSE400-NLP","sub_path":"basic_text_cleaning.py","file_name":"basic_text_cleaning.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"40392704989","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport pandas as pandas\nimport csv\n\n\ndata = pandas.read_csv(\"/ST-Steiner-env/ST-Steiner/clusters/cluster_n3_05.txt\")\nG = nx.Graph()\n\nfor i in range(len(data)):\n G.add_edge(data.iloc[i][0].split(\"\\t\")[1], data.iloc[i][0].split(\"\\t\")[0])\n\nplt.figure(figsize=(30,30))\n\n\ngraph_pos = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, graph_pos, node_size=10, node_color='blue', alpha=0.5)\nnx.draw_networkx_edges(G, graph_pos, edge_size=6)\nnx.draw_networkx_labels(G, graph_pos, font_size=6, font_family='sans-serif')\nplt.savefig(\"plot.png\", dpi=1000)\n\nplt.savefig(\"plot.pdf\")\nplt.show()\n\n","repo_name":"keremayoz/STSteinerSolver","sub_path":"solver/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"30974724327","text":"import tkinter as tk\n\n\nclass APP:\n def __init__(self,master):\n frame = tk.Frame(master)\n frame.pack(side=tk.LEFT,padx=10,pady=10)\n #------在这里用LEFT设置他出现的位置\n\n #----Button 设置一个按钮 frame按钮显示什么字体 bg背景色什么色 fg字体什么颜色, command =他就是如果点击他会出现执行哪些方法\n self.hi_there = tk.Button(frame,text = \"打招呼\",bg=\"black\",fg=\"white\",command=self.say_hi)\n self.hi_there.pack()\n\n def say_hi(self):\n print(\"大家好啊啊啊啊啊啊\")\n\n\n\n\nroot = tk.Tk()\napp = APP(root)\n\n\nroot.mainloop()\n","repo_name":"jiangfeng123/pygame","sub_path":"每日任务/爬虫的自我修养/gui的最终选择 tkinter/tk1.py","file_name":"tk1.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73454688786","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import tree\nfrom matplotlib import pyplot as plt\nplt.style.use('ggplot')\nfrom sklearn.tree import DecisionTreeClassifier\nimport sklearn.metrics as metrics\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder\nimport random as random\nfrom sklearn.ensemble import RandomForestClassifier \n\n\ndf_train = pd.read_csv(\"glass.test\", skiprows=1, header=None)\n# df_test[9]=LabelEncoder().fit_transform(df_test[9].values)\n\ndf = pd.read_csv(\"glass.data\", skiprows=1, header=None)\n# df[9]=LabelEncoder().fit_transform(df[9].values)\n\n\ndef bagging(df_train,df_test, sampleTimes, trainTimes):\n result=pd.DataFrame(data=0,index=range(0,len(df_test)),columns=df_test[df_test.columns[-1]].unique())\n result_entropy=pd.DataFrame(data=0,index=range(0,len(df_test)),columns=df_test[df_test.columns[-1]].unique())\n vote_result=[]\n vote_result_entropy=[]\n for i in range(0, trainTimes):\n df_temp = df_train.iloc[0:1, :]\n # 随机采样 \n for j in range(0, sampleTimes):\n temp = random.randint(0, len(df_train)-1)\n df_temp = df_temp.append(df_train.loc[temp:temp], ignore_index=True)\n x_train=df_temp.iloc[:,:9]\n y_train=df_temp.iloc[:,9:]\n #创建弱训练器并训练\n clf = tree.DecisionTreeClassifier(random_state=42)\n clf_entropy = tree.DecisionTreeClassifier(criterion='entropy',random_state=42)\n clf.fit(x_train,y_train)\n clf_entropy.fit(x_train,y_train)\n x_test=df_test.iloc[:,:9]\n #获得单次训练器的结果\n result_temp=clf.predict(x_test)\n result_temp_entropy=clf_entropy.predict(x_test)\n #存储结果用于投票\n count=0\n for item in result_temp:\n result[item][count]+=1\n count+=1\n count=0\n for item in result_temp_entropy:\n result_entropy[item][count]+=1\n count+=1\n #开始投票\n lists=list(result.columns)\n for i in range(0,len(df_test)):\n max=0\n temp=0\n for j in range(0,len(result.columns)):\n if max self.number_sub:\n raise (f\"Not exist the subset {self.sub_id}\")\n\n # Training / validation set\n trainset = MnistFederatedDM.mnist_train\n rows_by_sub = floor(len(trainset) / self.number_sub)\n tr_subset = Subset(\n trainset, range(self.sub_id * rows_by_sub, (self.sub_id + 1) * rows_by_sub)\n )\n mnist_train, mnist_val = random_split(\n tr_subset,\n [\n round(len(tr_subset) * (1 - self.val_percent)),\n round(len(tr_subset) * self.val_percent),\n ],\n )\n\n # Test set\n testset = MnistFederatedDM.mnist_val\n rows_by_sub = floor(len(testset) / self.number_sub)\n te_subset = Subset(\n testset, range(self.sub_id * rows_by_sub, (self.sub_id + 1) * rows_by_sub)\n )\n\n if len(testset) < self.number_sub:\n raise (\"Too much partitions\")\n\n # DataLoaders\n self.train_loader = DataLoader(\n mnist_train,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n self.val_loader = DataLoader(\n mnist_val,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n self.test_loader = DataLoader(\n te_subset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n # print(f\"Train: {len(mnist_train)} Val:{len(mnist_val)} Test:{len(te_subset)}\")\n\n def train_dataloader(self):\n \"\"\" \"\"\"\n return self.train_loader\n\n def val_dataloader(self):\n \"\"\" \"\"\"\n return self.val_loader\n\n def test_dataloader(self):\n \"\"\" \"\"\"\n return 
self.test_loader\n","repo_name":"pguijas/p2pfl","sub_path":"p2pfl/learning/pytorch/mnist_examples/mnistfederated_dm.py","file_name":"mnistfederated_dm.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"}
+{"seq_id":"37070506571","text":"\nfrom typing import Optional\nimport logging\nfrom functools import partial\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\nfrom torchvision import transforms\n\nfrom galaxy_datasets.pytorch import galaxy_dataset\nfrom galaxy_datasets.transforms import default_transforms\n\n\n# https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html\nclass GalaxyDataModule(pl.LightningDataModule):\n # takes generic catalogs (which are already downloaded and happy),\n # splits if needed, and creates generic datasets->dataloaders etc\n # easy to make dataset-specific default transforms if desired\n def __init__(\n self,\n label_cols,\n # provide full catalog for automatic split, or...\n catalog=None,\n train_fraction=0.7,\n val_fraction=0.1,\n test_fraction=0.2,\n # provide train/val/test catalogs for your own previous split\n train_catalog=None,\n val_catalog=None,\n test_catalog=None,\n predict_catalog=None,\n # augmentation params (sensible supervised defaults)\n greyscale=True,\n # album=False, # now True always\n crop_scale_bounds=(0.7, 0.8),\n crop_ratio_bounds=(0.9, 1.1),\n resize_after_crop=224,\n custom_albumentation_transform=None, # will override the settings above\n # hardware params\n batch_size=256, # careful - will affect final performance\n use_memory=False, # deprecated\n num_workers=4,\n prefetch_factor=4,\n seed=42\n ):\n super().__init__()\n\n if catalog is not None: # catalog provided, should not also provide explicit split catalogs\n assert train_catalog is None\n assert val_catalog is None\n assert test_catalog is None\n else: # catalog not provided, must provide explicit split catalogs - at least one\n assert (train_catalog is not None) or (val_catalog is not None) or (test_catalog is not None) or (predict_catalog is not None)\n # see setup() for how having only some explicit catalogs is handled\n\n self.label_cols = label_cols\n\n self.catalog = catalog\n self.train_catalog = train_catalog\n self.val_catalog = val_catalog\n self.test_catalog = test_catalog\n self.predict_catalog = predict_catalog\n\n self.batch_size = batch_size\n\n self.use_memory = use_memory\n if self.use_memory:\n raise NotImplementedError\n\n self.num_workers = num_workers\n self.seed = seed\n\n assert np.isclose(train_fraction + val_fraction + test_fraction, 1.)\n self.train_fraction = train_fraction\n self.val_fraction = val_fraction\n self.test_fraction = test_fraction\n\n self.prefetch_factor = prefetch_factor\n self.dataloader_timeout = 600 # seconds aka 10 mins\n\n logging.info('Num workers: {}'.format(self.num_workers))\n logging.info('Prefetch factor: {}'.format(self.prefetch_factor))\n\n\n if custom_albumentation_transform is not None:\n self.custom_albumentation_transform = custom_albumentation_transform\n logging.info('Using custom albumentations transform for augmentations')\n else:\n self.resize_after_crop = resize_after_crop\n self.crop_scale_bounds = crop_scale_bounds\n self.crop_ratio_bounds = crop_ratio_bounds\n self.greyscale = greyscale\n self.custom_albumentation_transform = None\n\n logging.info('Using albumentations for augmentations')\n self.transform_with_album()\n\n def transform_with_torchvision(self):\n raise NotImplementedError('Deprecated in favor of albumentations')\n\n def transform_with_album(self):\n\n if self.custom_albumentation_transform is not None:\n # should be a Compose() object, TODO assert\n 
transforms_to_apply = self.custom_albumentation_transform\n else:\n # gives a transforms = Compose() object\n transforms_to_apply = default_transforms(\n crop_scale_bounds=self.crop_scale_bounds,\n crop_ratio_bounds=self.crop_ratio_bounds,\n resize_after_crop=self.resize_after_crop,\n pytorch_greyscale=self.greyscale\n )\n \n # applies that transforms object\n # albumentations expects np array, and returns dict keyed by \"image\"\n # transpose changes from BHWC (numpy/TF style) to BCHW (torch style) \n # cannot use a lambda or define here because must be pickleable for multi-gpu\n self.transform = partial(do_transform, transforms_to_apply=transforms_to_apply)\n\n # only called on main process\n def prepare_data(self):\n pass # could include some basic checks\n\n # called on every gpu\n\n def setup(self, stage: Optional[str] = None):\n\n self.specify_catalogs(stage)\n\n # Assign train/val datasets for use in dataloaders\n # assumes dataset_class has these standard args\n if stage == \"fit\" or stage is None:\n self.train_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.train_catalog, label_cols=self.label_cols, transform=self.transform\n )\n self.val_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.val_catalog, label_cols=self.label_cols, transform=self.transform\n )\n\n # Assign test dataset for use in dataloader(s)\n if stage == \"test\" or stage is None:\n self.test_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.test_catalog, label_cols=self.label_cols, transform=self.transform\n )\n\n if stage == 'predict': # not set up by default with stage=None, only if explicitly requested\n if self.predict_catalog is None:\n raise ValueError('Attempting to predict, but GalaxyDataModule was init without a predict_catalog arg. init with GalaxyDataModule(predict_catalog=some_catalog, ...)')\n self.predict_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.predict_catalog, label_cols=self.label_cols, transform=self.transform\n )\n\n def train_dataloader(self):\n return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def val_dataloader(self):\n return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def test_dataloader(self):\n return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def predict_dataloader(self):\n return DataLoader(self.predict_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def specify_catalogs(self, stage):\n if self.catalog is not None:\n # will split the catalog into train, val, test here\n self.train_catalog, hidden_catalog = train_test_split(\n self.catalog, train_size=self.train_fraction, random_state=self.seed\n )\n self.val_catalog, self.test_catalog = train_test_split(\n hidden_catalog, train_size=self.val_fraction/(self.val_fraction + self.test_fraction), random_state=self.seed\n )\n del hidden_catalog\n else:\n # assume you have passed 
pre-split catalogs\n # (maybe not all, e.g. only a test catalog, or only train/val catalogs)\n if stage == 'predict':\n assert self.predict_catalog is not None\n elif stage == 'test':\n # only need test\n assert self.test_catalog is not None\n elif stage == 'fit':\n # only need train and val\n assert self.train_catalog is not None\n assert self.val_catalog is not None\n else:\n # need all three (predict is still optional)\n assert self.train_catalog is not None\n assert self.val_catalog is not None\n assert self.test_catalog is not None\n # (could write this shorter but this is clearest)\n\ndef default_torchvision_transforms(greyscale, resize_size, crop_scale_bounds, crop_ratio_bounds):\n # refactored out for use elsewhere, if need exactly these transforms\n # assume input is 0-255 uint8 tensor\n\n # automatically normalises from 0-255 int to 0-1 float\n transforms_to_apply = [transforms.ToTensor()] # dataset gives PIL image currently\n\n if greyscale:\n # transforms.Grayscale() adds perceptual weighting to rgb channels\n transforms_to_apply += [GrayscaleUnweighted()]\n\n transforms_to_apply += [\n transforms.RandomResizedCrop(\n size=resize_size, # assumed square\n scale=crop_scale_bounds, # crop factor\n ratio=crop_ratio_bounds, # crop aspect ratio\n interpolation=transforms.InterpolationMode.BILINEAR), # new aspect ratio\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(\n degrees=180., interpolation=transforms.InterpolationMode.BILINEAR)\n ]\n \n return transforms_to_apply\n\ndef do_transform(img, transforms_to_apply):\n return np.transpose(transforms_to_apply(image=np.array(img))[\"image\"], axes=[2, 0, 1]).astype(np.float32)\n\n# torchvision\nclass GrayscaleUnweighted(torch.nn.Module):\n\n def __init__(self, num_output_channels=1):\n super().__init__()\n self.num_output_channels = num_output_channels\n\n def forward(self, img):\n \"\"\"\n PyTorch (and tensorflow) does greyscale conversion as a *weighted* mean by default (as colours have different perceptual brightnesses).\n Here, do a simple mean.\n Args:\n img (Tensor): Image to be converted to grayscale.\n\n Returns:\n Tensor: Grayscaled image.\n \"\"\"\n # https://pytorch.org/docs/stable/generated/torch.mean.html\n return img.mean(dim=-3, keepdim=True) # (..., C, H, W) convention\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\n","repo_name":"mwalmsley/galaxy-datasets","sub_path":"galaxy_datasets/pytorch/galaxy_datamodule.py","file_name":"galaxy_datamodule.py","file_ext":"py","file_size_in_byte":10782,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"48"}
+{"seq_id":"26071438124","text":"# last digit is the remainder when we %10 and to remove that digit from number /10\ndef rev_integer(n):\n\treversed=0\n\tremainder=0\n\twhile n>0:\n\t\tremainder=n%10\n\t\tn=n/10\n\t\treversed=reversed*10+remainder\n\treturn reversed\n\nif __name__==\"__main__\":\n\tprint(rev_integer(3461))","repo_name":"sayalighaisas/datastructures-algos","sub_path":"reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12464885439","text":"from django.contrib import admin\n\nfrom .models import Subscription, User\n\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {\n 'fields': [('email', 'first_name'), ('username', 'last_name')]\n }),\n ('Права доступа', {\n 'classes': ('collapse',),\n 'fields': [('is_staff', 'is_superuser')],\n }),\n )\n list_display = ('id', 'email', 'username', 'first_name', 'last_name')\n list_display_links = ('id', 'email', 'username')\n search_fields = ('email', 'username')\n list_filter = ('email', 'username')\n\n\nadmin.site.register(Subscription)\n","repo_name":"Andrey11995/foodgram-project-react","sub_path":"backend/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"32414614140","text":"# -*- coding: utf-8 -*-\r\n'''\r\n@author: Yalei Meng E-mail: yaleimeng@sina.com\r\n@license: (C) Copyright 2017, HUST Corporation Limited.\r\n@desc:爬取某城市小猪短租的前300多条房源的基本信息。主要是描述、地址、价位,房屋图片链接,房东网名、照片、性别;\r\n并写入csv表格。如果需要其他信息请根据实际需要修改。\r\n@DateTime: Created on 2017/9/4,at 19:36\r\n'''\r\nfrom bs4 import BeautifulSoup\r\nimport requests as rq\r\nimport time\r\nimport csv\r\n\r\n #从首页出发。目标是分析300个链接。根据数字规律构造网址的列表。\r\nsite =['http://gz.xiaozhu.com/search-duanzufang-p{}-0/'.format(str(i)) for i in range(1,15)]\r\nua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3192.0 Safari/537.36'\r\nhead = {'User-Agent':ua}\r\n\r\nnewUrls=set()\r\ncsvRows = []\r\n# def updatePage(soup):\r\n# pages = soup.find_all('a',target='_self') #从页脚获得新的页面链接。\r\n# for page in pages: #只把全新的页面加入到新页面集合。\r\n# if page['href'] not in newPages and page['href'] not in oldPages:\r\n# newPages.add(page['href'])\r\n# print(newPages)\r\n\r\ndef getAllurls(web):\r\n r = rq.get(web, headers=head)\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n out = soup.find('ul', class_='pic_list clearfix').find_all('li') # 这里是需要访问的单个元素网页。\r\n for var in out:\r\n url = var.find('a')['href']\r\n if url not in newUrls:\r\n newUrls.add(url)\r\n print(url)\r\n\r\ndef dealPage(myPage):\r\n r = rq.get(myPage)\r\n soup = BeautifulSoup(r.text, 'lxml')\r\n #print(soup)\r\n if soup.find('div', class_='member_pic').find('div')['class'] == ['member_ico1']:\r\n gender = 'female'\r\n else:\r\n gender = 'male'\r\n data ={\r\n 'title' :soup.find('div',{'class':'pho_info'}).find('em').text,\r\n 'address':soup.find('div',class_='con_l').find('p')['title'],\r\n 'roomPic':soup.find('div',class_='pho_show_big').find('img')['src'],\r\n 'price':soup.find('div',class_='day_l').find('span').text,\r\n 'owner':soup.find('div',class_='w_240').find('a')['title'],\r\n 'gender': gender,\r\n 'ownerPic': soup.find('div', class_='member_pic').find('a')['href'],\r\n }\r\n csvRows.append(data)\r\n print(data)\r\n\r\nfor st in site: #从site每个页面分别请求,并添加url到newUrls。300个为止。\r\n getAllurls(st)\r\n time.sleep(1.5)\r\n if len(newUrls)>=300:\r\n break\r\n\r\nfor eve in newUrls: #针对newUrls里面每个url,做详情页的爬取。\r\n dealPage(eve)\r\n time.sleep(1.5)\r\n\r\n#把词典数据写入到csv文件。\r\nprint('字典列表的个数为%d'%len(csvRows))\r\nrowHeader = ['title','address','roomPic','price','owner','gender','ownerPic']\r\nwith open('E:/romm.csv','w',encoding='utf-8')as f:\r\n f_csv = csv.DictWriter(f,rowHeader)\r\n f_csv.writeheader()\r\n f_csv.writerows(csvRows)\r\n","repo_name":"yaleimeng/spider_for_XiaoZhu","sub_path":"smallPig.py","file_name":"smallPig.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"31093977551","text":"# -*- coding: utf-8 -*-\n#\n# POC for Glances 3 (Core)\n#\n# Nicolargo (08/2017)\n#\n# Three main threads will be run by the core:\n# - Update the stats (thanks to the Stats class). Each plugin will be running\n# in another thread.\n# - (optionnaly) Display the stats (thanks to the Outputs class)\n# - (optionnaly) Export the stats (thanks to the Exports class)\n\nimport signal\nimport threading\nfrom plugin import Plugin\n\n\nclass TestPlugin(Plugin):\n pass\n\n\nclass Stats(object):\n\n def __init__(self):\n # Dict of plugins\n # key: Plugin name\n # value: Plugin instance\n self.plugins = {}\n\n # Init the plugins\n for i in range(1, 10):\n pname = 'plugin%s' % (i - 1)\n self.plugins[pname] = TestPlugin(name=pname)\n\n def loop(self):\n update_thread = threading.Thread(name=\"update\",\n target=self.update)\n export_thread = threading.Thread(name=\"export\",\n target=self.export)\n display_thread = threading.Thread(name=\"display\",\n target=self.display)\n\n update_thread.start()\n export_thread.start()\n display_thread.start()\n\n def update(self, timeout=3):\n # Init the threads list\n plugin_threads = []\n for pname, p in self.plugins.iteritems():\n t = threading.Thread(name=pname,\n target=p.update,\n args=('ITEM',))\n plugin_threads.append(t)\n\n # Start all the threads\n for p in plugin_threads:\n p.start()\n\n # Wait the end of the threads\n for p in plugin_threads:\n p.join(timeout=timeout)\n if p.isAlive():\n # Process is still running\n # Kill it\n self.kill(p.name)\n p.join()\n\n def export(self, timeout=3):\n for pname, p in self.plugins.iteritems():\n p.export()\n\n def display(self, refresh=3):\n for pname, p in self.plugins.iteritems():\n p.display()\n\n def kill(self, thread_name):\n self.plugins[thread_name].stop()\n\n def stop(self, signal, frame):\n for p in self.plugins:\n p.stop()\n\n\ndef main():\n s = Stats()\n signal.signal(signal.SIGINT, s.stop)\n s.loop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nicolargo/pythonarena","sub_path":"glancesarena/glances3/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"71343045585","text":"import re\n\nimport write_read_file\n\n\ndef change_base(base_name, last_name):\n change_list = []\n file = write_read_file.write_read_base(base_name, 'r')\n for line in file:\n if line.find(last_name) == -1:\n change_list.append(line.strip())\n return change_list\n\n\ndef replace_base(base_name, last_name):\n change_list = []\n file = write_read_file.write_read_base(base_name, 'r')\n try:\n for line in file:\n if line.find(last_name) != -1:\n print(line.strip())\n pattern = input(\"Введите данные которые нужно заменить\\n: \").title()\n line_change = input(\"Введите на что нужно поменять\\n: \").title()\n new_line = line.strip().replace(pattern, line_change)\n change_list.append(new_line)\n else:\n change_list.append(line.strip())\n return change_list\n except ValueError:\n pass\n\n\ndef employee_search(base_name, last_name):\n change_list = []\n file = write_read_file.write_read_base(base_name, 'r')\n for line in file:\n if line.find(last_name) != -1:\n change_list.append(line.strip())\n return change_list\n","repo_name":"Kvezac/homework21","sub_path":"lesson21_homework/python/sort_change_file.py","file_name":"sort_change_file.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6108905080","text":"######\n# segmentation task\n######\n\n@torch.no_grad()\ndef valid_one_epoch(model, dataloader, device, epoch):\n model.eval()\n \n dataset_size = 0\n running_loss = 0.0\n \n val_scores = []\n \n pbar = tqdm(enumerate(dataloader), total=len(dataloader), desc='Valid ')\n for step, (images, masks) in pbar: \n images = images.to(device, dtype=torch.float)\n masks = masks.to(device, dtype=torch.float)\n \n batch_size = images.size(0)\n \n y_pred = model(images)\n loss = criterion(y_pred, masks)\n \n running_loss += (loss.item() * batch_size)\n dataset_size += batch_size\n \n epoch_loss = running_loss / dataset_size\n \n y_pred = nn.Sigmoid()(y_pred)\n val_dice = dice_coef(masks, y_pred).cpu().detach().numpy()\n val_jaccard = iou_coef(masks, y_pred).cpu().detach().numpy()\n val_scores.append([val_dice, val_jaccard])\n \n mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0\n current_lr = optimizer.param_groups[0]['lr']\n pbar.set_postfix(valid_loss=f'{epoch_loss:0.4f}',\n lr=f'{current_lr:0.5f}',\n gpu_memory=f'{mem:0.2f} GB')\n val_scores = np.mean(val_scores, axis=0)\n torch.cuda.empty_cache()\n gc.collect()\n \n return epoch_loss, val_scores","repo_name":"naoki901373/ML-Template","sub_path":"valid/valid.py","file_name":"valid.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36759849088","text":"from Pieces.piece import Piece\nfrom Pieces.empty import Empty\nfrom Pieces.rook import Rook\n\n\nclass King(Piece):\n \"\"\"King piece\"\"\"\n\n def __init__(self):\n \"\"\"Initialize piece attributes\"\"\"\n super().__init__()\n self.display = 'K'\n self.has_moved = False\n \n def check_position(self, board, player, p1):\n \"\"\"Returns '1' if position is valid.\"\"\"\n \n possible_positions = []\n\n for count in range(0, 8):\n # Reset row and col\n row = self.get_position()[0]\n col = self.get_position()[1]\n\n # Check top\n if count == 0:\n row = row - 1\n\n # Check right\n elif count == 1:\n col = col + 1\n\n # Check left\n elif count == 2:\n col = col - 1\n\n # Check bottom\n elif count == 3:\n row = row + 1\n\n # Check top-left\n elif count == 4:\n row = row - 1\n col = col - 1\n\n # Check top-right\n elif count == 5:\n row = row - 1\n col = col + 1\n\n # Check bottom-left\n elif count == 6:\n row = row + 1\n col = col - 1\n\n # Check bottom-right\n elif count == 7:\n row = row + 1\n col = col + 1\n\n try:\n if row >= 0:\n board_pos = board.board[row][col]\n\n # Check if new position is empty or held by enemy\n if isinstance(board_pos['piece'], Empty) or board_pos['piece']['piece'].get_color() != self.get_color():\n possible_positions.append(board_pos)\n except:\n continue\n\n # King & Rook swap\n if not self.has_moved:\n row = self.get_position()[0]\n col = self.get_position()[1]\n board_pos = board.board[row]\n\n # Right\n if isinstance(board_pos[col + 1]['piece'], Empty) and isinstance(board_pos[col + 2]['piece'], Empty):\n if isinstance(board_pos[col + 3]['piece']['piece'], Rook) and board_pos[col + 3]['piece']['piece'].has_moved == False:\n col = col + 2\n board_pos = board.board[row][col]\n possible_positions.append(board_pos)\n\n # Left\n board_pos = board.board[row]\n if isinstance(board_pos[col - 1]['piece'], Empty) and isinstance(board_pos[col - 2]['piece'], Empty) and isinstance(board_pos[col - 3]['piece'], Empty):\n if isinstance(board_pos[col - 4]['piece']['piece'], Rook) and board_pos[col - 4]['piece']['piece'].has_moved == False:\n col = col - 2\n board_pos = board.board[row][col]\n possible_positions.append(board_pos)\n\n return possible_positions","repo_name":"omnz/Chess","sub_path":"Pieces/king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33707377111","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nq = 1.6e-19\n\nE = np.arange(10*1000*q,60*1000*q,q/10)\ndef bremsstrahlung_cross_section(E,E_e, Z):\n alpha = 1/137\n dsigma_dE = (4*np.pi*alpha**2*Z**2)/E * ((E+E_e)/E_e) * ((E/E_e) + (E_e/E) - 1 + np.log(E_e/E))\n return dsigma_dE\n\n# B = bremsstrahlung_cross_section(E,max(E),11)\n# plt.plot(E,B)\n# plt.show()\n\n\n\n\n\ndata = pd.read_csv(r\"X-Ray\\Data\\16-01-2023\\NaCl Full Data.csv\",skiprows=0)\nprint(data)\n\n\nangle = data['angle']\nwav = data['wav / pm']\nenergy = np.sort(data['E / keV'])\ncount_0 = data['R_0 / 1/s']\n\n\nE_B = bremsstrahlung_cross_section(energy,max(energy),11)\nplt.plot(energy,E_B*max(count_0))\nplt.plot(energy,count_0)\nplt.show()\n\n\n\n\n\n\n","repo_name":"Jacob-J-E/Y3Lab","sub_path":"X-Ray Crystal Diffraction/X-Ray/Session_4_19_01_2023/bremsstrahlung.py","file_name":"bremsstrahlung.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"29417824960","text":"import pygame\nvec = pygame.math.Vector2\nfrom .player import Player\nfrom .constants import GameConstants\nclass Camera:\n def __init__(self,player :Player):\n self.player = player\n self.x = 0\n self.y = 0\n self.width = GameConstants.BACKGROUNWIDTH\n def scroll(self):\n x_camera = self.player.rect.x - (GameConstants.GAMEWIDTH/2 - self.player.rect.w/2)\n if x_camera < 0:\n x_camera = 0\n if x_camera + GameConstants.GAMEWIDTH > self.width:\n x_camera = self.width - GameConstants.GAMEWIDTH\n self.x = -x_camera\n ","repo_name":"c0ngthanh/RPGGame","sub_path":"filegame/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21022152187","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 23 10:14:07 2018\n\n@author: Sc\n\"\"\"\n\nimport sys\nimport re\n\nif len(sys.argv) > 1:\n f = open(sys.argv[1], \"rU\",encoding='utf-8') \nelse:\n f = open(\"wc.py\",\"rU\",encoding='utf-8')\n #f = sys.stdin\n \ncodelinenum = 1 \nfor line in f.readlines():\n if (re.match(r'^ *\\n',line) or line.startswith('#')):\n print(line,file=sys.stdout, end='')\n #elif(re.match(r\"(^ *\"\"\")|(^ *''')\"):\n else:\n print(codelinenum,line,file=sys.stdout, end='')\n codelinenum = codelinenum + 1\n \nf.close()\n\n\n\n\n","repo_name":"0oSco0/systemprogram","sub_path":"kadai1/nl.py","file_name":"nl.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"43417852017","text":"\r\n# 0.5 씩 좌우로 간격을 둬야 한다.\r\n# 그럼 L 만큼 커버가 가능한거네\r\n\r\nfrom collections import deque\r\nN, L = map(int, input().split())\r\nlocation = list(map(int, input().split()))\r\n\r\nlocation.sort()\r\nlocation = deque(location)\r\n# print(location)\r\nstart = location.popleft()\r\ncnt = 1\r\nwhile len(location) != 0:\r\n node = location.popleft()\r\n if start + L - 1 < node:\r\n start = node\r\n cnt += 1\r\nprint(cnt)\r\n","repo_name":"Guitarboyjason/Algorithm","sub_path":"백준/Silver/1449. 수리공 항승/수리공 항승.py","file_name":"수리공 항승.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"11002235742","text":"from ioUtils import getFile\nfrom fsUtils import isFile\nfrom webUtils import getHTML, isBS4\nfrom strUtils import fixName\nfrom math import ceil, floor\nfrom hashlib import md5\n\nfrom dbBase import dbBase\n\nclass artistMBIDClass:\n def __init__(self, ID=None, err=None):\n self.ID=ID\n self.err=err\n \n def get(self):\n return self.__dict__\n \n \nclass artistMBURLClass:\n def __init__(self, url=None, err=None):\n self.url = url\n self.err = err\n \n def get(self):\n return self.__dict__\n \n \nclass artistMBNameClass:\n def __init__(self, name=None, err=None):\n self.name = name\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBMediaClass:\n def __init__(self, err=None):\n self.media = {}\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBMediaDataClass:\n def __init__(self, album=None, url=None, aclass=None, aformat=None, artist=None, code=None, year=None, err=None):\n self.album = album\n self.url = url\n self.aclass = aclass\n self.aformat = aformat\n self.artist = artist\n self.code = code\n self.year = year\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBMediaAlbumClass:\n def __init__(self, url=None, album=None, aformat=None, err=None):\n self.url = url\n self.album = album\n self.aformat = aformat\n self.err = err \n \n def get(self):\n return self.__dict__\n\n \nclass artistMBMediaCountsClass:\n def __init__(self, err=None):\n self.counts = {}\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBPageClass:\n def __init__(self, ppp = None, tot = None, more=None, redo=None, err=None):\n self.ppp = ppp\n self.tot = tot\n if isinstance(ppp, int) and isinstance(tot, int):\n self.pages = int(ceil(tot/ppp))\n else:\n self.pages = None\n\n self.err = err\n\n self.more = more\n self.redo = redo\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBProfileClass:\n def __init__(self, profile=None, aliases=None, members=None, sites=None, groups=None, variations=None, err=None):\n self.profile = profile\n self.aliases = aliases\n self.members = members\n self.sites = sites\n self.groups = groups\n self.variations = variations\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBURLInfo:\n def __init__(self, name=None, url=None, ID=None, err=None):\n self.name = name\n self.url = url\n self.ID = ID\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBDataClass:\n def __init__(self, artist=None, url=None, ID=None, pages=None, profile=None, media=None, mediaCounts=None, err=None):\n self.artist = artist\n self.url = url\n self.ID = ID\n self.pages = pages\n self.profile = profile\n self.media = media\n self.mediaCounts = mediaCounts\n self.err = err\n \n def get(self):\n return self.__dict__\n \n \n def show(self):\n print(\"MusicBrainz Artist Data Class\")\n print(\"-------------------------\")\n print(\"Artist: {0}\".format(self.artist.name))\n print(\"URL: {0}\".format(self.url.url))\n print(\"ID: {0}\".format(self.ID.ID))\n print(\"Pages: {0}\".format(self.pages.get()))\n print(\"Media: {0}\".format(self.mediaCounts.get()))\n for mediaType,mediaTypeAlbums in self.media.media.items():\n print(\" {0}\".format(mediaType))\n for album in mediaTypeAlbums:\n print(\" {0}\".format(album.album)) \n \n def get(self):\n return self.__dict__\n\n\n \nclass artistMB(dbBase):\n def __init__(self, debug=False):\n self.debug = debug\n \n def getData(self, inputdata):\n if isinstance(inputdata, 
str):\n if isFile(inputdata):\n try:\n bsdata = getHTML(getFile(inputdata))\n except:\n try:\n bsdata = getHTML(getFile(inputdata, version=2))\n except:\n raise ValueError(\"Cannot read artist file: {0}\".format(inputdata))\n else:\n try:\n bsdata = getHTML(inputdata)\n except:\n raise ValueError(\"Not sure about string input: {0} . It is not a file\".format(inputdata))\n elif isBS4(inputdata):\n bsdata = inputdata\n pass\n else:\n raise ValueError(\"Not sure about input type: {0}\".format(type(inputdata)))\n\n self.bsdata = bsdata\n \n return self.parse()\n \n \n \n \n def getNamesAndURLs(self, content):\n data = []\n if content is not None:\n for ref in content.findAll(\"a\"):\n url = ref.attrs['href']\n name = ref.text\n\n ID = None\n data.append(artistMBURLInfo(name=name, url=url, ID=ID))\n return data\n\n\n\n\n\n #######################################################################################################################################\n ## Artist URL\n #######################################################################################################################################\n def getartistMBURL(self):\n artistData = self.bsdata.find(\"div\", {\"class\": \"artistheader\"})\n if artistData is None:\n auc = artistMBURLClass(err=True)\n return auc\n \n h1 = artistData.find(\"h1\")\n if h1 is None:\n auc = artistMBURLClass(err=\"NoH1\")\n \n ref = self.getNamesAndURLs(h1)\n try:\n artistURL = ref[0].url\n auc = artistMBURLClass(url=artistURL, err=None)\n except:\n auc = artistMBURLClass(err=\"TxtErr\")\n\n return auc\n\n \n\n #######################################################################################################################################\n ## Artist ID\n ####################################################################################################################################### \n def getartistMBDiscID(self, suburl):\n ival = \"/artist\"\n if isinstance(suburl, artistMBURLClass):\n suburl = suburl.url\n if not isinstance(suburl, str):\n aic = artistMBIDClass(err=\"NotStr\") \n return aic\n\n pos = suburl.find(ival)\n if pos == -1:\n aic = artistMBIDClass(err=\"NotArtist\") \n return aic\n\n uuid = suburl[pos+len(ival)+1:]\n\n \n m = md5()\n for val in uuid.split(\"-\"):\n m.update(val.encode('utf-8'))\n hashval = m.hexdigest()\n discID = str(int(hashval, 16))\n \n try:\n int(discID)\n except:\n aic = artistMBIDClass(err=\"NotInt\") \n return aic\n\n aic = artistMBIDClass(ID=discID)\n return aic\n \n \n\n #######################################################################################################################################\n ## Artist Name\n #######################################################################################################################################\n def getartistMBName(self):\n artistData = self.bsdata.find(\"div\", {\"class\": \"artistheader\"})\n if artistData is None:\n anc = artistMBNameClass(err=True)\n return anc\n \n h1 = artistData.find(\"h1\")\n if h1 is None:\n anc = artistMBNameClass(err=\"NoH1\")\n \n ref = self.getNamesAndURLs(h1)\n try:\n artistName = ref[0].name\n anc = artistMBNameClass(name=artistName, err=None)\n except:\n anc = artistMBNameClass(err=\"TxtErr\")\n \n return anc\n \n \n\n #######################################################################################################################################\n ## Artist Media\n 
#######################################################################################################################################\n def getartistMBMediaAlbum(self, td):\n amac = artistMBMediaAlbumClass()\n for span in td.findAll(\"span\"):\n attrs = span.attrs\n if attrs.get(\"class\"):\n if 'format' in attrs[\"class\"]:\n albumformat = span.text\n albumformat = albumformat.replace(\"(\", \"\")\n albumformat = albumformat.replace(\")\", \"\")\n amac.format = albumformat\n continue\n span.replaceWith(\"\")\n\n ref = td.find(\"a\")\n if ref:\n amac.url = ref.attrs['href']\n amac.album = ref.text\n else:\n amac.err = \"NoText\"\n\n return amac\n \n \n def getartistMBMedia(self):\n amc = artistMBMediaClass()\n \n \n mediaTypes = [x.text for x in self.bsdata.findAll(\"h3\")]\n tables = dict(zip(mediaTypes, self.bsdata.findAll(\"table\")))\n\n for mediaType, table in tables.items():\n headers = [x.text for x in table.findAll(\"th\")]\n trs = table.findAll('tr')\n for tr in trs[1:]:\n tds = tr.findAll(\"td\")\n\n ## Year\n idx = headers.index(\"Year\")\n year = tds[idx].text\n\n ## Title\n idx = headers.index(\"Title\")\n refs = [x.attrs['href'] for x in tds[idx].findAll('a')]\n if len(refs) == 0:\n raise ValueError(\"No link for album\")\n url = refs[0]\n album = tds[idx].text\n\n \n m = md5()\n uuid = url.split(\"/\")[-1]\n for val in uuid.split(\"-\"):\n m.update(val.encode('utf-8'))\n hashval = m.hexdigest()\n code = int(hashval, 16)\n \n\n ## Artist\n idx = headers.index(\"Artist\")\n artists = []\n for artistVal in tds[idx].findAll('a'):\n url = artistVal.attrs['href']\n name = artistVal.text\n m = md5()\n uuid = url.split(\"/\")[-1]\n for val in uuid.split(\"-\"):\n m.update(val.encode('utf-8'))\n hashval = m.hexdigest()\n ID = int(hashval, 16)\n artists.append(artistMBURLInfo(name=name, url=url, ID=ID))\n \n\n amdc = artistMBMediaDataClass(album=album, url=url, aclass=None, aformat=None, artist=artists, code=code, year=year)\n if amc.media.get(mediaType) is None:\n amc.media[mediaType] = []\n amc.media[mediaType].append(amdc)\n\n \n \n\n return amc\n \n \n\n #######################################################################################################################################\n ## Artist Media Counts\n ####################################################################################################################################### \n def getartistMBMediaCounts(self, media):\n \n amcc = artistMBMediaCountsClass()\n \n credittype = \"Releases\"\n if amcc.counts.get(credittype) == None:\n amcc.counts[credittype] = {}\n for creditsubtype in media.media.keys():\n amcc.counts[credittype][creditsubtype] = int(len(media.media[creditsubtype]))\n \n return amcc\n \n \n amcc.err = \"No Counts\"\n return amcc\n \n results = self.bsdata.findAll(\"ul\", {\"class\": \"facets_nav\"})\n if results is None or len(results) == 0:\n amcc.err = \"No Counts\"\n return amcc\n \n for result in results:\n for li in result.findAll(\"li\"):\n ref = li.find(\"a\")\n if ref:\n attrs = ref.attrs\n span = ref.find(\"span\", {\"class\": \"facet_count\"})\n count = None\n if span:\n count = span.text\n credittype = attrs.get(\"data-credit-type\")\n creditsubtype = attrs.get(\"data-credit-subtype\")\n if credittype and creditsubtype:\n if amcc.counts.get(credittype) == None:\n amcc.counts[credittype] = {}\n if amcc.counts[credittype].get(creditsubtype) == None:\n try:\n amcc.counts[credittype][creditsubtype] = int(count)\n except:\n amcc.counts[credittype][creditsubtype] = count\n amcc.err = 
\"Non Int\"\n\n return amcc\n \n \n\n #######################################################################################################################################\n ## Artist Variations\n #######################################################################################################################################\n def getartistMBProfile(self):\n data = {} \n genres = self.bsdata.find(\"div\", {\"class\": \"genre-list\"})\n genre = self.getNamesAndURLs(genres)\n style = []\n data[\"Profile\"] = {'genre': genre, 'style': style}\n \n apc = artistMBProfileClass(profile=data.get(\"Profile\"), aliases=data.get(\"Aliases\"),\n members=data.get(\"Members\"), groups=data.get(\"In Groups\"),\n sites=data.get(\"Sites\"), variations=data.get(\"Variations\"))\n return apc\n\n\n \n #######################################################################################################################################\n ## Artist Pages\n #######################################################################################################################################\n def getartistMBPages(self):\n apc = artistMBPageClass()\n from numpy import ceil\n bsdata = self.bsdata\n\n try:\n pages = bsdata.find(\"ul\", {\"class\": \"pagination\"})\n lis = pages.findAll(\"li\")\n txts = [li.text for li in lis]\n npages = 0\n for item in txts:\n try:\n npages = max([npages, int(item)])\n except:\n continue\n \n apc = artistMBPageClass(ppp=100, tot=100*npages, redo=False, more=True)\n except:\n apc = artistMBPageClass(ppp=100, tot=1, redo=False, more=False)\n \n return apc\n\n\n\n def parse(self):\n bsdata = self.bsdata\n \n artist = self.getartistMBName()\n url = self.getartistMBURL()\n ID = self.getartistMBDiscID(url)\n pages = self.getartistMBPages()\n profile = self.getartistMBProfile()\n media = self.getartistMBMedia()\n mediaCounts = self.getartistMBMediaCounts(media)\n \n err = [artist.err, url.err, ID.err, pages.err, profile.err, mediaCounts.err, media.err]\n \n adc = artistMBDataClass(artist=artist, url=url, ID=ID, pages=pages, profile=profile, mediaCounts=mediaCounts, media=media, err=err)\n \n return adc","repo_name":"tgadf/discogs","sub_path":"artistMB.py","file_name":"artistMB.py","file_ext":"py","file_size_in_byte":16139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"24379705191","text":"# -*- coding: utf-8 -*-\nimport jwt\nimport datetime\nimport functools\nfrom apps.nosql_db import r, r_3\nfrom jwt import exceptions\nfrom apps.error import ApiError\nfrom functools import wraps\nfrom flask import g, request, current_app, jsonify\n\n\n# 构造一个密钥\n# SALT = \"zhananbudanchou1234678\"\nSALT = \"mengnaiaihuachachacha\"\n\n# 构造 headers\nheaders = {\n \"typ\": \"jwt\",\n \"alg\": \"HS256\"\n}\n\n\n# 创建 JWT\ndef create_jwt(username, password):\n payload = {\n \"username\": username,\n \"password\": password\n }\n\n result = jwt.encode(payload=payload, key=SALT, algorithm=\"HS256\",\n headers=headers)\n return result\n\n\n# 用于认证普通用户\ndef login_required(func):\n\n @wraps(func)\n def decorate(*args, **kwargs):\n if hasattr(g, \"username\"):\n return g.username\n auth_jwt = request.headers.get('token')\n g.username = None\n try:\n \"判断token的校验结果\"\n payload = jwt.decode(auth_jwt, SALT, algorithms=['HS256'])\n \"获取载荷中的信息赋值给g对象\"\n g.username = payload.get(\"username\")\n # print(g.username)\n assert r.get(g.username) == auth_jwt\n except Exception as e:\n print(e)\n return jsonify({\n \"code\": 201,\n \"message\": \"抱歉,用户未登录!\",\n \"data\": None,\n \"ok\": False\n })\n\n return func(*args, **kwargs)\n\n return decorate\n\n\n# 单独实现一个解析jwt_token的函数\ndef parse_jwt(auth_jwt, db):\n payload = jwt.decode(auth_jwt, SALT, algorithms=['HS256'])\n user = db.find_one({\"username\": payload.get(\"username\")})\n return user\n\n\n# 用于认证后台管理用户\ndef permission_required(func):\n @wraps(func)\n def decorate(*args, **kwargs):\n if hasattr(g, \"admin_username\"):\n return g.admin_username\n x_api_key = request.headers.get(\"XAPIKEY\")\n g.admin_username = None\n try:\n g.admin_username = r_3.get(x_api_key)\n assert x_api_key == r_3.get(g.admin_username)\n except Exception as e:\n print(e)\n return jsonify({\n \"code\": 201,\n \"message\": \"抱歉,用户权限认证失败!\",\n \"data\": None,\n \"ok\": False\n })\n return func(*args, **kwargs)\n\n return decorate\n\n\n\n","repo_name":"WanwanLinLin/FlaskBackend","sub_path":"apps/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71257077265","text":"\"\"\"\nconfig.py\n~~~~~~~~~\n\nCommon configuration settings for scripts used to make the AgroSuccess \nsuccession rules table.\n\"\"\"\nimport os \nimport sys\n\nDATA_DIR = os.path.abspath(\"/home/andrew/Documents/phd/models/\"\n \"AgroSuccess/data\")\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nDIRS = {\n \"scripts\": THIS_DIR,\n \"logs\": os.path.join(THIS_DIR, \"logs\"),\n \"data\": {\n \"raw\": os.path.join(DATA_DIR, \"raw\"),\n \"created\": os.path.join(DATA_DIR, \"created\"),\n \"tmp\": os.path.join(DATA_DIR, \"tmp\"),\n },\n}\n\ndef ensure_dirs_exist(dir_list):\n \"\"\"Given list of dir names, recursively create dirs if they don't exist.\"\"\"\n for d in dir_list:\n try:\n os.makedirs(d)\n except FileExistsError:\n pass\n\ndef exit_if_file_missing(fname):\n \"\"\"Exit program if given file name doesn't exit.\"\"\"\n if not os.path.isfile(fname):\n sys.exit(\"Source file {0} does not exist.\".format(fname))\n\n# Check if all data and logs directories exist, make them if not\nensure_dirs_exist(list(DIRS[\"data\"].values()) + [DIRS[\"logs\"]])","repo_name":"lanecodes/agrosuccess-graph","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31823031969","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom pyevtk.hl import gridToVTK\n\ndef mandelbrot_set(X, Y, maxiter, horizon = 2.0):\n C = X + Y[:, None] * 1j\n N = np.zeros(C.shape, dtype = int)\n Z = np.zeros(C.shape, np.complex64)\n for n in range(maxiter):\n if n % (maxiter / 10) == 0:\n print('progress: %d/%d' % (n, maxiter))\n I = np.less(abs(Z), horizon)\n N[I] = n\n Z[I] = Z[I] ** 2 + C[I]\n return Z.transpose(), N.transpose()\n\nnx = 800\nny = 600\nx = np.linspace(-2.25, 0.75, nx, dtype=np.float32)\ny = np.linspace(-1.25, 1.25, ny, dtype=np.float32)\nz = np.linspace(0.0, 1.0, 1, dtype=np.float32)\n\nZ, N = mandelbrot_set(x, y, 2000, 2.0)\n\nfilename = 'mandel_grid'\n\ngridToVTK(filename, x, y, z, pointData = {'N': N.reshape((nx, ny, 1), order = 'C')})\n\nprint('%s.vtr generated' % (filename))\n","repo_name":"dubrayn/dubrayn.github.io","sub_path":"examples/vtk/pyevtk_grid.py","file_name":"pyevtk_grid.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"69884265747","text":"with open('text.txt', 'r') as cats:\n \n dict_cat = []\n list_cats = cats.readlines()\n for i in list_cats:\n \n keys = ['id', 'name', 'age']\n i = tuple(i.strip().split(','))\n dictionary = dict(zip(keys, i))\n dict_cat.append(dictionary)\n \n print(dict_cat)\n\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # for i in cats:\n # list_cats += i.split('\\n')\n # new_list_cats = list_cats[::2]\n # print(new_list_cats)\n #result = [{'id': d[0], 'name': d[1], 'number': d[2]} for d in data]\n\n \n","repo_name":"Ihor-Usenko/go_it","sub_path":"06working_with_files/auto_05.py","file_name":"auto_05.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2691776561","text":"from __future__ import annotations\nfrom os.path import basename, splitext\nfrom typing import Optional\n\nfrom .nodes import TydDocument\nfrom .tyd_from_text import parse\nfrom .tyd_to_text import dump\n\n\ndef from_document(doc: TydDocument, file_path: Optional[str] = None) -> TydFile:\n \"\"\"Returns TydFile object created from a TydDocument object.\n\n Parameters\n ----------\n doc : TydDocument\n A TydDocument object to use to create TydFile.\n file_path : Optional[str]\n A string representing file path, by default None.\n\n Returns\n -------\n TydFile\n A TydFile created.\n \"\"\"\n tyd_file = TydFile(doc, file_path)\n return tyd_file\n\n\ndef from_file(file_path: str) -> TydFile:\n \"\"\"Returns TydFile object created from a file of path passed.\n\n Parameters\n ----------\n file_path : str\n A string representing filepath.\n\n Returns\n -------\n TydFile\n A TydFile created.\n \"\"\"\n try:\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n read_contents = f.read()\n\n tyd_node_list = list(parse(read_contents))\n tyd_doc = TydDocument(tyd_node_list)\n return from_document(tyd_doc, file_path)\n except Exception as e:\n raise Exception(f\"Exception loading {file_path}: {e}\")\n\n\nclass TydFile:\n \"\"\"This represents tyd file objects.\n\n **Don't instance!**\n The class is not intended to be created by users.\n You can get instances of the class only via class methods.\n \"\"\"\n\n def __init__(self, doc: TydDocument, file_path: Optional[str] = None):\n self._doc: TydDocument = doc\n self._file_path: Optional[str] = file_path\n\n @property\n def document(self) -> TydDocument:\n return self._doc\n\n @document.setter\n def document(self, value: TydDocument) -> None:\n self._doc = value\n\n @property\n def file_path(self) -> Optional[str]:\n return self._file_path\n\n @property\n def file_name(self) -> str:\n return splitext(basename(self._file_path))[0]\n\n def save(self, file_path: Optional[str]=None):\n if file_path is not None:\n self._file_path = file_path\n elif file_path is None:\n raise AttributeError(\n \"When didn't set filepath to TydFile, filepath parameter mustn't be None.\"\n )\n\n builder = []\n\n for node in self._doc:\n builder.append(dump(node) + \"\\n\")\n\n with open(file_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(\"\".join(builder))\n","repo_name":"Lazialize/tyd-python","sub_path":"tyd/tyd_file.py","file_name":"tyd_file.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"6240655903","text":"\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_probability as tfp\n\nclass CreateActorNetwork(keras.Model):\n def __init__(self, n_actions, fc1_dims=256, fc2_dims=256):\n super(CreateActorNetwork, self).__init__()\n self.n_actions = n_actions\n self.fc1_dims = fc1_dims\n self.fc2_dims = fc2_dims\n\n self.fc1 = tf.keras.layers.Dense(fc1_dims, activation=\"relu\")\n self.fc2 = tf.keras.layers.Dense(fc2_dims, activation=\"relu\")\n self.mean = tf.keras.layers.Dense(n_actions, activation=None)\n self.stddev = tf.keras.layers.Dense(n_actions, activation=\"sigmoid\")\n\n def call(self, state):\n probs = self.fc1(state)\n probs = self.fc2(probs)\n mean = self.mean(probs)\n stddev = self.mean(probs)\n return mean, stddev\n\n def sample_action(self, state):\n mean, stddev = self.call(state)\n dist = tfp.distributions.Normal(mean, stddev)\n action = dist.sample()\n return action, dist.log_prob(action)\n\n\n\nclass CreateCriticNetwork(keras.Model):\n def __init__(self, n_actions, fc1_dims=256, fc2_dims=256):\n super(CreateCriticNetwork, self).__init__()\n self.n_actions = n_actions\n self.fc1_dims = fc1_dims\n self.fc2_dims = fc2_dims\n\n self.fc1 = tf.keras.layers.Dense(fc1_dims, activation=\"relu\")\n self.fc2 = tf.keras.layers.Dense(fc2_dims, activation=\"relu\")\n self.v = tf.keras.layers.Dense(1, activation=None)\n\n def call(self, state, action):\n state_values = self.fc1(tf.concat([state, action], 1))\n state_values = self.fc2(state_values)\n value = self.v(state_values)\n return value\n\n\nclass CreateValueNetwork(keras.Model):\n def __init__(self, n_actions, fc1_dims=256, fc2_dims=256):\n super(CreateValueNetwork, self).__init__()\n self.n_actions = n_actions\n self.fc1_dims = fc1_dims\n self.fc2_dims = fc2_dims\n\n self.fc1 = tf.keras.layers.Dense(fc1_dims, activation=\"relu\")\n self.fc2 = tf.keras.layers.Dense(fc2_dims, activation=\"relu\")\n self.v = tf.keras.layers.Dense(1, activation=None)\n\n def call(self, state):\n state_values = self.fc1(state)\n state_values = self.fc2(state_values)\n value = self.v(state_values)\n return value\n\n\n\n\n\n\n\n","repo_name":"oeg1n18/RL_Library","sub_path":"Networks/SACNet.py","file_name":"SACNet.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33923061311","text":"import smtplib\nfrom email.mime.text import MIMEText\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport logging\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\n# list for recipients\nemailRecipients = []\nerrorRecipient = []\n\n# enviroment variables setup\nxPath = os.getenv(\"XLSXPATH\")\nlogPath = os.getenv(\"LOGFILEPATH\")\nenvRecipients = os.getenv(\"EMAILRECIPIENTS\")\nerrorRecipient.append(os.getenv(\"ERRORRECIPIENT\"))\nerrorRecipientSTR = os.getenv(\"ERRORRECIPIENT\")\nenvSender = os.getenv(\"SENDER\")\nenvSmtpPass = os.getenv(\"SMTP_PASS\")\n\n# env list\nfor email in envRecipients.split(\",\"):\n emailRecipients.append(email)\n\nlogging.basicConfig(filename=logPath, level=logging.INFO)\n\ndef main():\n try:\n with open(xPath, \"rb\") as f:\n df = pd.read_excel(f, skiprows=3, engine='openpyxl')\n except Exception as err:\n errorMail(err)\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" Hiba a file megnyitásakor: \" + f\"{err}\")\n exit(1)\n\n dateCompare = datetime.today() + timedelta(days=40)\n\n for index, row in df.iterrows():\n if row['Dátum'] < dateCompare:\n datum = row['Dátum'].strftime(\"%Y.%m.%d\")\n okmany = row['Okmány']\n subject = f'{okmany} Lejáró okmány {datum}'\n body = f'Emlékeztető email lejáró okmányról.\\n {datum} {okmany}'\n recipients = emailRecipients\n try:\n send_email(subject, body, recipients)\n except smtplib.SMTPException as e:\n errorMail(e)\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" Nem sikerült elküldeni a levelet hiba: \" + f\"{e}\")\n exit(1)\n\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" sikeres küldés\")\n\ndef errorMail(err):\n subject = 'Ertesito email hiba'\n body = f\"Hiba - ellenőrizd a logot: \\n {err}\"\n recipients = errorRecipientSTR\n\n try:\n send_email(subject, body, recipients)\n except smtplib.SMTPException as e:\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" Nem sikerült elküldeni a levelet hiba: \" + f\"{e}\")\n\n\ndef send_email(subject, body, recipients):\n msg = MIMEText(body.encode('utf-8'), \"plain\", \"utf-8\")\n msg['Subject'] = subject\n msg['From'] = envSender\n msg['To'] = ', '.join(recipients)\n\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:\n server.login('f.ferenc@lazarteam.hu', envSmtpPass)\n server.sendmail(envSender, recipients, msg.as_string())\n print(\"Üzenet elküldve!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sontii/lazar-reminder","sub_path":"sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10714459547","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport re\nfrom sys import argv\n\nif len(argv) < 4:\n\tprint(\"python billboard_fetch.py .sng \")\n\texit()\n\n# fancy log printing stuff\nclass bcolors:\n\tHEADER = '\\033[95m'\n\tOKBLUE = '\\033[94m'\n\tOKGREEN = '\\033[92m'\n\tWARNING = '\\033[93m'\n\tFAIL = '\\033[91m'\n\tENDC = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Loading options for browser...\")\noptions = Options()\noptions.add_argument(\"--headless\")\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Loaded options for browser.\")\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Loading browser driver...\")\ndriver = webdriver.Firefox(firefox_options=options)\ndriver.set_page_load_timeout(10)\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Loaded broswer driver.\")\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Loading website {}...\".format(\"https://www.billboard.com/charts/\" + argv[1]))\ntry:\n\tdriver.get(\"https://www.billboard.com/charts/\" + argv[1])\nexcept:\n\tdriver.execute_script(\"window.stop();\")\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Website loaded.\")\n\nsrc = driver.page_source\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Page source code copied.\")\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Closing driver...\")\ndriver.close()\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Driver closed.\")\n\nlines = src.split(\"\\n\")\n\nsng_text = [\"\\n\"]\n\nsng_text.append(\"--{}\".format(argv[3]))\n\nfor i in range(len(lines)):\n\tif lines[i] == '
':\n\t\tblock = [lines[x] for x in range(i, i + 12)]\n\t\tsong = \" ~ \".join([line for line in block if line[0] != \"<\" and line[1] != \"<\"])\n\t\tif song[0] == \" \":\n\t\t\tsong = song[1:]\n\t\tsong = song.replace(\"&\", \"&\")\n\t\tsng_text.append(song)\n\telif '
' in lines[i]:\n\t\tsong = lines[i][37:len(lines[i]) - 6] + \" ~ \"\n\t\tj = i + 1\n\t\twhile any([x == \"<\" for x in lines[j][:2]]):\n\t\t\tj += 1\n\t\tsong += lines[j]\n\t\tif song[0] == \" \":\n\t\t\tsong = song[1:]\n\t\tsong = song.replace(\"&\", \"&\")\n\t\tsng_text.append(song)\n\nsng_text.append(\"--end\")\n\nwith open(argv[2], \"a+\") as sng:\n\tsng.write(\"\\n\".join(sng_text))","repo_name":"roshanmaind/Friday","sub_path":"dev_tools/sam/billboard_fetch.py","file_name":"billboard_fetch.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"22662643219","text":"import pygame\r\nimport random\r\npygame.init()\r\nfrom imgs import Screen\r\nfrom Write import Display_message\r\nfrom Colors import white\r\nfrom Colors import red\r\nfrom Colors import green\r\nfrom Colors import blue\r\n\r\n\r\n\r\n###############################BOARD##################################\r\n\r\nclass Board:\r\n def __init__(self):\r\n self.grid =[[1]*8 for _ in range(8)] \r\n def setship(self,x,y,o):\r\n if(o==1):\r\n self.grid[x][y]=0\r\n self.grid[x+1][y]=0\r\n elif(o==0):\r\n self.grid[x][y]=0\r\n self.grid[x][y+1]=0\r\n def setsubmarine(self,x,y,o):\r\n if(y>4 and x>4):\r\n o=-1\r\n if(y>4 and x<4):\r\n o=1\r\n if(o==-1):\r\n self.grid[x][y]=4\r\n self.grid[x-1][y]=4\r\n self.grid[x-2][y]=4\r\n self.grid[x-3][y]=4\r\n if(o==1):\r\n self.grid[x][y]=4\r\n self.grid[x+1][y]=4\r\n self.grid[x+2][y]=4\r\n self.grid[x+3][y]=4\r\n elif(o==0):\r\n self.grid[x][y]=4\r\n self.grid[x][y+1]=4\r\n self.grid[x][y+2]=4\r\n self.grid[x][y+3]=4\r\n def printboard(self,com=False):\r\n print()\r\n num=-1\r\n nm=-1\r\n for i in range(9):\r\n for j in range(9):\r\n if(i==0 or j==0):\r\n z=\"0\"\r\n if(i==0):\r\n num=str(num)\r\n z=num\r\n num=int(num)\r\n num=num+1\r\n if(j==0):\r\n nm=str(nm)\r\n z=nm\r\n nm=int(nm)\r\n nm=nm+1 \r\n if(z!='-1'):\r\n pygame.draw.rect(Screen, white,((30*i)+20,(30*j)+20,28,28),1)\r\n Display_message(z, (30*i)+32, (30*j)+37, 20, 'Calibri')\r\n pygame.draw.rect(Screen, white,((30*i)+570,(30*j)+370,28,28),1)\r\n Display_message(z, (30*i)+582, (30*j)+387, 20, 'Calibri')\r\n pygame.display.update()\r\n for i in range(len(self.grid)):\r\n for j in range(len(self.grid[i])):\r\n if(com==False):\r\n if(self.grid[i][j]==1):\r\n '''print(\"*\",end=\" \")'''\r\n pygame.draw.rect(Screen, white,((30*i)+50,(30*j)+50,28,28,),1)\r\n pygame.display.update()\r\n elif(self.grid[i][j]==2):\r\n pygame.draw.rect(Screen, red,((30*i)+50,(30*j)+50,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==0 or self.grid[i][j]==4):\r\n '''print(\"S\",end=\" \")'''\r\n pygame.draw.rect(Screen,green,((30*i)+50,(30*j)+50,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==3):\r\n pygame.draw.rect(Screen,blue,((30*i)+50,(30*j)+50,28,28))\r\n pygame.display.update()\r\n elif(com==True):\r\n if(self.grid[i][j]==1):\r\n '''print(\"*\",end=\" \")'''\r\n pygame.draw.rect(Screen, white,((30*i)+600,(30*j)+400,28,28),1)\r\n pygame.display.update()\r\n elif(self.grid[i][j]==2):\r\n pygame.draw.rect(Screen, red,((30*i)+600,(30*j)+400,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==0 or self.grid[i][j]==4):\r\n '''print(\"S\",end=\" \")'''\r\n pygame.draw.rect(Screen, green,((30*i)+600,(30*j)+400,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==3):\r\n pygame.draw.rect(Screen, blue,((30*i)+600,(30*j)+400,28,28))\r\n pygame.display.update()\r\n print()\r\n \r\n def attack(self,x,y,com=False):\r\n count_ships=0\r\n if(self.grid[x][y]==3 and com ==True):\r\n x=random.choice([1,2,3,4,5,6,7,0])\r\n y=random.choice([1,2,3,4,5,6,7,0])\r\n if(self.grid[x][y]==0 or self.grid[x][y]==4):\r\n self.grid[x][y]=2\r\n elif(self.grid[x][y]==1):\r\n self.grid[x][y]=3\r\n for i in range(len(self.grid)):\r\n for j in range(len(self.grid[i])):\r\n if(self.grid[i][j]==0 or self.grid[i][j]==4):\r\n count_ships=count_ships+1\r\n if(count_ships==0 and com==True):\r\n Display_message(\"Player WON !\", 526, 100, 50, 'Serif')\r\n if(count_ships==0 and com==False):\r\n Display_message(\"Player LOST !\", 526, 100, 50, 'Serif')\r\n \r\n \r\n \r\n \r\n \r\n 
\r\n \r\n \r\n \r\n ","repo_name":"SyedAR-17/Battle-Ship","sub_path":"Game_Board.py","file_name":"Game_Board.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"72240118866","text":"#Exercise 7\r\n\r\nimport string\r\n\r\nall_char = string.printable\r\nall_num = string.digits\r\nall_special_char = string.punctuation\r\n\r\nall_things = [all_char, all_num, all_special_char]\r\n\r\nfile = open(\"jumble.txt\",\"r\")\r\n\r\nspace, tab, returns = 0,0,0\r\n\r\nfor line in file:\r\n for char in line:\r\n if char == \"\\n\":\r\n returns += 1\r\n elif char not in all_things and not char.isspace():\r\n tab += 1\r\n elif char == \" \":\r\n space += 1\r\n \r\n \r\nprint(\"No. spaces:\",space)\r\nprint(\"No. tabs:\", tab)\r\nprint(\"No. returns:\",returns)","repo_name":"baselhusam/The-Practice-of-Computing-Using-Python-Solved","sub_path":"Chapter 6/Problem 7/Problem 7.py","file_name":"Problem 7.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"26866655856","text":"import sys\nimport ROOT\n\nprint (\"Load cxx analyzers ... \",)\nROOT.gSystem.Load(\"libedm4hep\")\nROOT.gSystem.Load(\"libpodio\")\nROOT.gSystem.Load(\"libFCCAnalyses\")\nROOT.gSystem.Load(\"libFCCAnalysesFlavour\")\n\nROOT.gErrorIgnoreLevel = ROOT.kFatal\n_edm = ROOT.edm4hep.ReconstructedParticleData()\n_pod = ROOT.podio.ObjectID()\n_fcc = ROOT.dummyLoader\n_bs = ROOT.dummyLoaderFlavour\n\n\n\n\nprint ('edm4hep ',_edm)\nprint ('podio ',_pod)\nprint ('fccana ',_fcc)\n\n#\n#\tThis is used to process a file in which the Bs and the Bsbar are forced\n#\tto decay into Jpsi ( -> mu mu) + Phi ( -> K K )\n#\tWe reconstruct the secondary vertex from the 2 muon and 2 kaon tracks.\n# The example also shows how to retrieve the MC and reco'ed Bs legs,\n# as well as the MC Bs, JP]psi and Phis, with their kinematics.\n#\n# Example file: \n# /eos/experiment/fcc/ee/examples/lowerTriangle/p8_ecm91GeV_Zbb_EvtGen_Bs2JpsiPhi_IDEAtrkCov.root\n# \tNote: these events were generated at (0,0,0), i.e.no smearing of the\n#\tprimary vertex.\n#\n\nclass analysis():\n\n #__________________________________________________________\n def __init__(self, inputlist, outname, ncpu):\n self.outname = outname\n if \".root\" not in outname:\n self.outname+=\".root\"\n\n #ROOT.ROOT.EnableImplicitMT(ncpu)\n\n self.df = ROOT.RDataFrame(\"events\", inputlist)\n print (\" done\")\n #__________________________________________________________\n def run(self):\n #df2 = (self.df.Range(1000)\t# to test over 1000 events only\n df2 = (self.df\n\n .Alias(\"Particle1\", \"Particle#1.index\")\n .Alias(\"MCRecoAssociations0\", \"MCRecoAssociations#0.index\")\n .Alias(\"MCRecoAssociations1\", \"MCRecoAssociations#1.index\")\n\n\n # MC event primary vertex\n .Define(\"MC_PrimaryVertex\", \"MCParticle::get_EventPrimaryVertex(21)( Particle )\" )\n\n # number of tracks in the event\n .Define(\"ntracks\",\"ReconstructedParticle2Track::getTK_n(EFlowTrack_1)\")\n\n # Retrieve the decay vertex of all MC particles\n #.Define(\"MC_DecayVertices\", \"MCParticle::get_endPoint( Particle, Particle1)\" )\n\n\n # MC indices of the decay Bs (PDG = 531) -> mu+ (PDG = -13) mu- (PDG = 13) K+ (PDG = 321) K- (PDG = -321)\n # Retrieves a vector of int's which correspond to indices in the Particle block\n # vector[0] = the mother, and then the daughters in the order specified, i.e. here\n # [1] = the mu+, [2] = the mu-, [3] = the K+, [4] = the K-\n # The first boolean below: when set to true, the dsughters specified in the list are looked\n # for among the final, stable particles that come out from the mother, i.e. 
the decay tree is\n\t # explored recursively if needed.\n # The second boolean: when set to true, the charge conjugate decays are included too.\n # If the event contains more than one such decays,only the first one is kept.\n\t # get_indices_ExclusiveDecay looks for an exclusive decay: if a mother is found, that decays \n # into the particles specified in the list plus other particle(s), this decay is not selected.\n .Define(\"Bs2MuMuKK_indices\", \"MCParticle::get_indices_ExclusiveDecay( 531, {-13,13,321,-321}, true, true) ( Particle, Particle1)\" )\n\n # the MC Bs : the Bs is the first particle in the Bs2MuMuKK_indices vector\n .Define(\"Bs\", \"selMC_leg(0) ( Bs2MuMuKK_indices, Particle )\")\n\n # and the MC legs of the Bs : the mu+ is the second particle in the vector, etc.\n .Define(\"Muplus\", \" selMC_leg(1)( Bs2MuMuKK_indices, Particle )\")\n .Define(\"Muminus\", \" selMC_leg(2)( Bs2MuMuKK_indices, Particle )\")\n .Define(\"Kplus\", \" selMC_leg(3)( Bs2MuMuKK_indices, Particle )\")\n .Define(\"Kminus\", \" selMC_leg(4)( Bs2MuMuKK_indices, Particle )\")\n\n # Kinematics of the Bs legs (MC) :\n .Define(\"Muplus_theta\", \"MCParticle::get_theta( Muplus )\")\n .Define(\"Muplus_phi\", \"MCParticle::get_phi( Muplus )\")\n .Define(\"Muplus_e\", \"MCParticle::get_e( Muplus )\")\n .Define(\"Muminus_theta\", \"MCParticle::get_theta( Muminus )\")\n .Define(\"Muminus_phi\", \"MCParticle::get_phi( Muminus )\")\n .Define(\"Muminus_e\", \"MCParticle::get_e( Muminus )\")\n .Define(\"Kplus_theta\", \"MCParticle::get_theta( Kplus )\")\n .Define(\"Kplus_phi\", \"MCParticle::get_phi( Kplus )\")\n .Define(\"Kplus_e\", \"MCParticle::get_e( Kplus )\")\n .Define(\"Kminus_theta\", \"MCParticle::get_theta( Kminus )\")\n .Define(\"Kminus_phi\", \"MCParticle::get_phi( Kminus )\")\n .Define(\"Kminus_e\", \"MCParticle::get_e( Kminus )\")\n\n\t # Kinematics of the mother Bs (MC)\n .Define(\"Bs_theta\", \"MCParticle::get_theta( Bs )\")\n .Define(\"Bs_phi\", \"MCParticle::get_phi( Bs )\")\n .Define(\"Bs_e\", \"MCParticle::get_e( Bs )\")\n\n \n # Decay vertex of the Bs (MC)\n # Careful with getMC_decayVertex: if Bs -> Bsbar, this returns the prod vertex of the Bsbar !\n #.Define(\"BsDecayVertex\", \"getMC_decayVertex(531, false)( Particle, Particle1)\")\n # Hence, use instead a custom method in Bs2JPsiPhi :\n .Define(\"BsMCDecayVertex\", \"BsMCDecayVertex( Bs2MuMuKK_indices, Particle )\")\n\n # Returns the RecoParticles associated with the four Bs decay products.\n # The size of this collection is always 4 provided that Bs2MuMuKK_indices is not empty,\n # possibly including \"dummy\" particles in case one of the legs did not make a RecoParticle.\n # This is done on purpose, in order to maintain the mapping with the indices - i.e. the 1st particle in \n # the list BsRecoParticles is the mu+, then the mu-, etc.\n # (selRP_matched_to_list ignores the unstable MC particles that are in the input list of indices\n \t # hence the mother particle, which is the [0] element of the Bs2MuMuKK_indices vector).\n .Define(\"BsRecoParticles\", \"ReconstructedParticle2MC::selRP_matched_to_list( Bs2MuMuKK_indices, MCRecoAssociations0,MCRecoAssociations1,ReconstructedParticles,Particle)\")\n\n # the corresponding tracks - here, dummy particles, if any, are removed, i.e. one may have < 4 tracks,\n # e.g. 
if one muon or kaon was emitted outside of the acceptance\n .Define(\"BsTracks\", \"ReconstructedParticle2Track::getRP2TRK( BsRecoParticles, EFlowTrack_1)\" )\n\n # number of tracks in this BsTracks collection ( = the #tracks used to reconstruct the Bs vertex)\n .Define(\"n_BsTracks\", \"ReconstructedParticle2Track::getTK_n( BsTracks )\")\n\n # Now we reconstruct the Bs decay vertex using the reco'ed tracks.\n # First the full object, of type Vertexing::FCCAnalysesVertex\n .Define(\"BsVertexObject\", \"VertexFitterSimple::VertexFitter_Tk( 2, BsTracks)\" )\n # from which we extract the edm4hep::VertexData object, which contains the vertex positiob in mm\n .Define(\"BsVertex\", \"VertexingUtils::get_VertexData( BsVertexObject )\")\n\n\n\t # We may want to look at the reco'ed Bs legs: in the BsRecoParticles vector, \n # the first particle (vector[0]) is the mu+, etc :\n .Define(\"RecoMuplus\", \"selRP_leg(0)( BsRecoParticles )\")\n .Define(\"RecoMuminus\", \"selRP_leg(1)( BsRecoParticles )\")\n .Define(\"RecoKplus\", \"selRP_leg(2)( BsRecoParticles )\")\n .Define(\"RecoKminus\", \"selRP_leg(3)( BsRecoParticles )\")\n # and their kinematics :\n .Define(\"RecoMuplus_theta\", \"ReconstructedParticle::get_theta( RecoMuplus )\")\n .Define(\"RecoMuplus_phi\", \"ReconstructedParticle::get_phi( RecoMuplus )\")\n .Define(\"RecoMuplus_e\", \"ReconstructedParticle::get_e( RecoMuplus )\")\n .Define(\"RecoMuminus_theta\", \"ReconstructedParticle::get_theta( RecoMuminus )\")\n .Define(\"RecoMuminus_phi\", \"ReconstructedParticle::get_phi( RecoMuminus )\")\n .Define(\"RecoMuminus_e\", \"ReconstructedParticle::get_e( RecoMuminus )\")\n .Define(\"RecoKplus_theta\", \"ReconstructedParticle::get_theta( RecoKplus )\")\n .Define(\"RecoKplus_phi\", \"ReconstructedParticle::get_phi( RecoKplus )\")\n .Define(\"RecoKplus_e\", \"ReconstructedParticle::get_e( RecoKplus )\")\n .Define(\"RecoKminus_theta\", \"ReconstructedParticle::get_theta( RecoKminus )\")\n .Define(\"RecoKminus_phi\", \"ReconstructedParticle::get_phi( RecoKminus )\")\n .Define(\"RecoKminus_e\", \"ReconstructedParticle::get_e( RecoKminus )\")\n\n\t # Looks at the angular separation (3D angles) between the Bs daughters: among\n # all the pairs of particles in BsRecoParticles, retrieve the minimal angular distance,\n # the maximal distance, and the average distance\n .Define(\"deltaAlpha_max\",\"ReconstructedParticle::angular_separationBuilder(0)( BsRecoParticles )\")\n .Define(\"deltaAlpha_min\",\"ReconstructedParticle::angular_separationBuilder(1)( BsRecoParticles )\")\n .Define(\"deltaAlpha_ave\",\"ReconstructedParticle::angular_separationBuilder(2)( BsRecoParticles )\")\n\n\t # To look at the angular separation between the MC Jpsi and the Phi :\n\n\t # First retrieve the indices of the JPsi and the phi :\n # MC indices of the decay Bs (PDG = 531) -> JPsi (PDG = 443) Phi (PDG = 333)\n # Retrieves a vector of int's which correspond to indices in the Particle block\n # vector[0] = the mother, and then the daughters in the order specified, i.e. here\n # [1] = the Jpsi, [2] = the phi\n # The first boolean below (here set to false) means that we look for a JPsi and a Phi\n # among the direct daughters of the mother, i.e. 
the decay tree is not explored down\n # to the final, stable particles.\n # The second boolean (true) means that the charge conjugate decay isincluded too.\n # If the event contains more than one such decays,only the first one is kept.\n # get_indices_ExclusiveDecay looks for an exclusive decay: if a mother is found, that decays \n # into the particles specified in the list plus other particle(s), this decay is not selected.\n .Define(\"Bs2JPsiPhi_indices\", \"MCParticle::get_indices_ExclusiveDecay( 531, {443,333}, false, true) ( Particle, Particle1)\" )\n\n # This extracts the MC Jpsi. In list of indices determined above, Bs2JPsiPhi_indices,\n # 1 is the position of the Jpsi in the Bs2JPsiPhi_indices vector.\n .Define(\"JPsi\", \"selMC_leg( 1) ( Bs2JPsiPhi_indices , Particle )\")\n # Idem: extract the MC Phi. 2 is the position of the Phi in the Bs2JPsiPhi_indices vector.\n .Define(\"Phi\", \"selMC_leg( 2) ( Bs2JPsiPhi_indices , Particle )\")\n\n # From these two MC particles, determine their angular separation\n .Define(\"Angle_JpsiPhi\", \"MCParticle::AngleBetweenTwoMCParticles( JPsi, Phi)\" )\n\n\n\n # the reco'ed legs, with the momenta at the Bs decay vertex - instead of at their\n\t # point of dca\n .Define(\"RecoMuplus_atVertex\", \"selRP_leg_atVertex(0) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoMuplus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoMuplus_atVertex )\")\n .Define(\"RecoMuplus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoMuplus_atVertex )\")\n .Define(\"RecoMuminus_atVertex\", \"selRP_leg_atVertex(1) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoMuminus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoMuminus_atVertex )\")\n .Define(\"RecoMuminus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoMuminus_atVertex )\")\n .Define(\"RecoKplus_atVertex\", \"selRP_leg_atVertex(2) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoKplus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoKplus_atVertex )\")\n .Define(\"RecoKplus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoKplus_atVertex )\")\n .Define(\"RecoKminus_atVertex\", \"selRP_leg_atVertex(3) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoKminus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoKminus_atVertex )\")\n .Define(\"RecoKminus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoKminus_atVertex )\")\n\n # not so useful here, but for completeness : Bs to JPsi decay ?\n # Returns booleans. e.g. 
the first one means that the event contains a Bs that decayed to a JPsi (443) + X, \n # not counting the cases where Bs -> Bsbar -> JPsi + X\n .Define(\"Bsdecay\", \"MCParticle::get_decay(531, 443, false)(Particle, Particle1)\")\n .Define(\"Bsbardecay\", \"MCParticle::get_decay(-531, 443, false)(Particle, Particle1)\")\n\n\t # to get the distribution of the d0 of the mu+ track\n\t .Define(\"RecoMuplus_d0\", \"ReconstructedParticle2Track::getRP2TRK_D0( RecoMuplus, EFlowTrack_1) \")\n\t .Define(\"RecoMuplus_z0\", \"ReconstructedParticle2Track::getRP2TRK_Z0( RecoMuplus, EFlowTrack_1) \")\n\n\n )\n\n\n # select branches for output file\n branchList = ROOT.vector('string')()\n for branchName in [\n \"MC_PrimaryVertex\",\n \"ntracks\",\n #\"Bs2JPsiPhi_indices\",\n #\"Bs2MuMuKK_indices\",\n #\"Muplus\",\n #\"Muminus\",\n #\"Kplus\",\n #\"Kminus\",\n\n\t # Kinematics of the MC particles:\n \"Muplus_theta\",\n \"Muplus_phi\",\n \"Muplus_e\",\n \"Muminus_theta\",\n \"Muminus_phi\",\n \"Muminus_e\",\n \"Kplus_theta\",\n \"Kplus_phi\",\n \"Kplus_e\",\n \"Kminus_theta\",\n \"Kminus_phi\",\n \"Kminus_e\",\n \"Bs_theta\",\n \"Bs_phi\",\n \"Bs_e\",\n\n \"Bsdecay\",\n \"Bsbardecay\",\n\n # MC Bs decay vertex :\n \"BsMCDecayVertex\",\n\t\t# Reco'ed Bs vertex :\n \"BsVertex\",\n #\"BsTracks\",\n \"n_BsTracks\",\n\n \"deltaAlpha_max\",\n \"deltaAlpha_min\",\n \"deltaAlpha_ave\",\n #\"BsRecoParticles\",\n\n\t # Kinematics of the Reco'ed particles:\n \"RecoMuplus_theta\",\n \"RecoMuplus_phi\",\n \"RecoMuplus_e\",\n \"RecoMuminus_theta\",\n \"RecoMuminus_phi\",\n \"RecoMuminus_e\",\n \"RecoKplus_theta\",\n \"RecoKplus_phi\",\n \"RecoKplus_e\",\n \"RecoKminus_theta\",\n \"RecoKminus_phi\",\n \"RecoKminus_e\",\n\n \"RecoMuplus_atVertex_theta\",\n \"RecoMuplus_atVertex_phi\",\n \"RecoMuminus_atVertex_theta\",\n \"RecoMuminus_atVertex_phi\",\n \"RecoKplus_atVertex_theta\",\n \"RecoKplus_atVertex_phi\",\n \"RecoKminus_atVertex_theta\",\n \"RecoKminus_atVertex_phi\",\n\n \"Angle_JpsiPhi\",\n\n\t\t\"RecoMuplus_d0\",\n\t\t\"RecoMuplus_z0\"\n\n\n ]:\n branchList.push_back(branchName)\n df2.Snapshot(\"events\", self.outname, branchList)\n\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv)==1:\n print (\"usage:\")\n print (\"python \",sys.argv[0],\" file.root\")\n sys.exit(3)\n infile = sys.argv[1]\n #outDir = 'FCCee/'+sys.argv[0].split('/')[1]+'/'\n outDir = './'\n import os\n os.system(\"mkdir -p {}\".format(outDir))\n outfile = outDir+infile.split('/')[-1]\n ncpus = 0\n analysis = analysis(infile, outfile, ncpus)\n analysis.run()\n\n tf = ROOT.TFile(infile)\n entries = tf.events.GetEntries()\n p = ROOT.TParameter(int)( \"eventsProcessed\", entries)\n outf=ROOT.TFile(outfile,\"UPDATE\")\n p.Write()\n","repo_name":"HEP-FCC/FCCeePhysicsPerformance","sub_path":"case-studies/flavour/VertexExamples/analysis_Bs2JPsiPhi.py","file_name":"analysis_Bs2JPsiPhi.py","file_ext":"py","file_size_in_byte":16612,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"}
+{"seq_id":"741921927","text":"import cv2\nfrom cvzone.HandTrackingModule import HandDetector\n\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 1280)\ncap.set(4, 720)\ndetector = HandDetector(detectionCon=0.8, maxHands=4)\n\ncolorR = (255, 255, 255) # 方框原始颜色\ncx, cy = 100, 100 # 方框原始的中心位置qq\nw, h = 200, 200 # 方框的宽(weight)、高(height)\n\nwhile True:\n colorR = (255, 255, 255)\n\n success, img = cap.read()\n img = cv2.flip(img, 1) # 0-垂直翻转,1-水平翻转\n hands, img = detector.findHands(img, flipType=False)\n\n if hands:\n\n lmList = hands[0]['lmList']\n\n distance, _, _ = detector.findDistance(lmList[8], lmList[12], img) # 食指和中指间的距离\n\n cursor = lmList[8] # 食指的位置信息\n if distance < 90:\n if cx - w // 2 < cursor[0] < cx + w // 2 and cy - h // 2 < cursor[1] < cy + h // 2:\n colorR = (0, 255, 0)\n cx, cy = cursor\n\n # 图像框\n cv2.rectangle(img, (cx - w // 2, cy - h // 2), (cx + w // 2, cy + h // 2),\n color=colorR, thickness=cv2.FILLED)\n\n cv2.imshow('Image', img)\n cv2.waitKey(1)\n\n if cv2.waitKey(5) & 0xFF == ord('q'):\n break\n","repo_name":"dengfa02/CV_rookie","sub_path":"virtual_move.py","file_name":"virtual_move.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12931597817","text":"with open ('Day3/day_3') as f:\r\n data = f.read().splitlines()\r\n\r\n# Part 1\r\n\r\ncommon_values = [0,0,0,0,0,0,0,0,0,0,0,0]\r\nfor number in data:\r\n for i, bit in enumerate(number):\r\n common_values[i] += int(bit)\r\n\r\ngamma_rate = [\"1\" if (c > len(data)//2) else \"0\" for c in common_values ]\r\nepsilon_rate = []\r\nfor b in gamma_rate:\r\n epsilon_rate.append( \"1\" if b == \"0\" else \"0\" )\r\n\r\ngamma_rate = int(\"\".join(gamma_rate), 2)\r\nepsilon_rate = int(\"\".join(epsilon_rate), 2)\r\nprint(gamma_rate ,epsilon_rate, gamma_rate * epsilon_rate)\r\n\r\n# Part 2\r\n\r\ndef find_rating(data, pos = 0, bit_criteria = \"1\"):\r\n if len(data) == 1:\r\n return data[0]\r\n\r\n common_values = [0,0,0,0,0,0,0,0,0,0,0,0]\r\n for number in data:\r\n for i, bit in enumerate(number):\r\n common_values[i] += int(bit)\r\n\r\n if bit_criteria == \"1\":\r\n most_common = [1 if (c >= len(data)/2) else 0 for c in common_values]\r\n if bit_criteria == \"0\":\r\n most_common = [0 if (c >= len(data)/2) else 1 for c in common_values]\r\n\r\n new_list = []\r\n for v in data:\r\n if int(v[pos]) == most_common[pos]:\r\n new_list.append(v)\r\n\r\n return find_rating(new_list, pos + 1, bit_criteria)\r\n\r\noxygen_generator_rating = int(find_rating(data, 0, \"1\"), 2)\r\nCO2_scrubber_rating = int(find_rating(data, 0, \"0\"), 2)\r\n\r\nprint(oxygen_generator_rating, CO2_scrubber_rating, oxygen_generator_rating * CO2_scrubber_rating)","repo_name":"Wolfy7/AdventOfCode2021","sub_path":"Day3/day_3.py","file_name":"day_3.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"28183663720","text":"from game.gui_components.objects.game_object import Game_Object\nimport random\nfrom constants import(\n SCREEN_WIDTH,\n SCREEN_HEIGHT\n)\nclass Fog(Game_Object):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.image = \"FogT4\"\n self.rotation = 90 * random.randint(0,3)\n\n def draw(self, centered_object):\n\n x = self.x - centered_object.x + SCREEN_WIDTH/ 2\n y = self.y - centered_object.y + SCREEN_HEIGHT/ 2\n self.sprite.draw(x, y)\n ","repo_name":"dsjensen19/Final-Project","sub_path":"game/gui_components/objects/fog.py","file_name":"fog.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27962341276","text":"from math import log\n\n\ndef get_bloom_filters_parameters(rdd, false_positive_ratio):\n \"\"\"\n Given a rdd of films and ratings return a dictionary of parameters n, m ,k for every rating.\n Input: rdd of filmId, rating.\n Output: Dictionary of {rating: [n, m, k]} sorted for rating.\n \"\"\"\n rdd = rdd.map(lambda x: [round(float(x[1])), x[0]]) # map the rdd in the form (rating, film)\n rdd = rdd.map(lambda x: (x[0], 1))\n rdd = rdd.reduceByKey(lambda x, y: x+y)\n rdd = rdd.map(lambda x: get_parameters(x, false_positive_ratio)) # map parameters to every rating\n rdd = rdd.sortByKey() # sort ratings\n rdd.saveAsTextFile(f\"./Data/Output/Parameters\")\n bloom_parameters = rdd.collect()\n bloom_parameters = {parameter[0]: parameter[1] for parameter in bloom_parameters} # transform the list of lists\n # in a dictionary\n return bloom_parameters\n\n\ndef get_parameters(x, false_positive_ratio):\n \"\"\"\n Return parameters n,m and k for the Bloom filter construction.\n Input: (rating, list[filmId])\n Output: (rating, [n, m, k])\n \"\"\"\n n = x[1]\n m = round(-((n * log(false_positive_ratio)) / (log(2)) ** 2))\n k = round((m / n) * log(2))\n return x[0], [n, m, k]\n\n","repo_name":"EhiSuper/BloomFilterInSpark","sub_path":"src/BloomFiltersParameters.py","file_name":"BloomFiltersParameters.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"6266584811","text":"from re import compile\nfrom chars import *\n\nKEYWORDS = set('''\n if elif else\n from import as\n for in while\n True False None\n try except finally\n async def = := lambda del\n await return yield raise\n + - * ** / % @ & | ^\n += -= *= **= /= %= @= &= |= ^=\n == > < is\n != <= >=\n and or not\n ( ) [ ] { } : . ,\n'''.strip().split())\n\ndef _fullmatch(regex):\n return compile(regex).fullmatch\n\ndecimal = _fullmatch('[+-]?(0|[1-9](_?[0-9])*)(\\.([0-9](_?[0-9])*)?)?(e[0-9](_?[0-9])*)?')\noctal = _fullmatch('[+-]?0o[_0-7]+')\noctal = _fullmatch('[+-]?0x[_0-9a-fA-F]+')\nbinary = _fullmatch('[+-]?0b[_01]+')\nident = _fullmatch('[_a-zA-Z][_a-zA-Z0-9]*')\n\ndef number(w):\n return decimal(w) or octal(w) or binary(w)\n\ndef quoted(w) -> 'end_quote' or None:\n if w[0] in QUOTE_CHARS:\n return w[0]\n if len(w) > 1 and w[0] in ('f', 'r') and w[1] in QUOTE_CHARS:\n return w[1]\n\ndef balanced_braces(w):\n imbalance = 0\n found = False\n for c in w:\n if c == '{':\n imbalance += 1\n elif c == '}':\n imbalance -= 1\n found = True\n return found and not imbalance\n","repo_name":"qguv/pysh","sub_path":"patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"10377507308","text":"#: 1일 될때까지 예제의 시간복잡도, 공간 복잡도를 분석한다.\nimport time\nimport os \nimport psutil \nprocess=psutil.Process(os.getpid())\nstart_time=time.time()\n\n\n#n,k=map(int,input('두 수를 공백으로 분리하여 입력하시오>').split()) # N, K을 공백을 기준으로 구분하여 입력 받기\nn=25\nk=4\nresult = 0\nwhile True: # 반복 루프 시작\n target =(n//k)*k \n# K로 나누어 지는 수를 구함, 예) 25 나누기 4 곱하기 4 = 처음 24\n result += (n - target) # N이 K로 나누어 떨어지는 수가 될 때까지 빼기, 1\n n = target # 25를 24로 수정\n if n < k: # N이 K보다 작을 때 (더 이상 나눌 수 없을 때) 반복문 탈출\n break\n result += 1 # 횟수 증가\n n //= k # K로 나누기, n = 6\nresult += (n - 1) # 마지막으로 남은 수에 대하여 1씩 빼기, 6, 5 --> 4일때까지 2번 추가 빼기, 마지막 1번 나누고\nprint(\"1이 도달하기 까지 연산 횟수 :\", result) # 총 5번 횟수 연산\n\n\nend_time=time.time() \nprint(\"time:\",format(end_time-start_time,'.10f'))\nprint('MB bytes:',process.memory_info().rss/(1024.0*1024.0))\n","repo_name":"heosujinnn/py_algorithm","sub_path":"DAY3/1_PLEASE_EX1.py","file_name":"1_PLEASE_EX1.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35335682385","text":"#!/usr/bin/env python3\n'''\nTest Module which Checks if all files that are pushed to gitlab\nare encrypted the right way\n'''\nimport os\nimport unittest\nimport re\nimport gnupg\nimport yaml\nfrom pprint import PrettyPrinter\nfrom multivault.utilities import util_crypt\nfrom multivault.base import config\nfrom multivault.utilities import util_ldap\n\n\nTESTING_FILE = 'multivault-gitlabtest.yml'\nANSIBLE = None\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nROOT_PATH = os.path.join(DIR_PATH, \"..\", \"..\")\nCONF_PATH = os.path.join(DIR_PATH, TESTING_FILE)\nconfig.init(conf_path=CONF_PATH)\nANSIBLE_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"all.yml\")\nINVENTORY_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"inventory.ini\")\nKEY_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"temp\", \"keys\")\nGNUPG_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"temp\", \"keyring\")\nGNUPG = gnupg.GPG(gnupghome=GNUPG_PATH)\nwith open(ANSIBLE_PATH, \"r\") as ANSIBLE_PT:\n ANSIBLE = yaml.load(ANSIBLE_PT)\nPATTERN = re.compile(\n r'^(?Proles/(?P.*?)/gpg/(?P.*?\\.gpg))$', re.MULTILINE)\nPATTERN2 = re.compile(r'^\\s*(.*?)\\.server\\.selfnet\\.de$', re.MULTILINE)\nPATTERN3 = re.compile(r'^:pubkey.*?keyid (.*?)$', re.MULTILINE)\n\n#config.LDAP_SSH_HOP = 'login'\n\n\nclass TestChangedFiles(unittest.TestCase):\n '''\n Test Class for the gpg check\n '''\n\n def test_changed_files(self):\n '''\n Gets the Information from the changed file and from ldap and the gpg repo\n Validates if a file is encrypted for the right users\n '''\n config.init(conf_path=CONF_PATH)\n util_crypt.update_git_repo(config.GPG_REPO, path=KEY_PATH)\n files = construct_gpg_information(\"master\")\n printer = PrettyPrinter(indent=2)\n for file_info in files:\n print(file_info['filename'])\n print(\n \"+--- Encrypted for {}:\".format(file_info['encrypted_for']))\n users = extract_subkey_for_every_user(file_info['users'])\n if users:\n for user, data in users.items():\n for key in data.keys():\n if key in file_info['encrypted_for']:\n users[user][key]['encrypted_for'] = True\n else:\n pass\n printer.pprint(users)\n for user, data in users.items():\n if user == 'tobiass' or user == 'sebastiann' or user == 'jo':\n self.assertFalse(check_encrypted_for_user(data))\n else:\n self.assertTrue(check_encrypted_for_user(data))\n else:\n pass\n\n\ndef check_encrypted_for_user(key_data):\n '''\n checks for an encryption with one key of an user:\n '''\n for key in key_data.keys():\n if key_data[key]['encrypted_for']:\n return True\n else:\n pass\n return False\n\n\ndef extract_subkey_for_every_user(key_information):\n '''\n Extracts the information out of the\n gnupg.GPG.scan_keys() inside of the file_info object\n '''\n subkeys = {}\n if not key_information:\n return None\n for user, data in key_information.items():\n subkeys[user] = {}\n if data:\n data = data[0]\n for subkey, expire_date in data['subkeys']:\n subkeys[user][subkey] = {}\n subkeys[user][subkey]['expire_date'] = expire_date\n subkeys[user][subkey]['encrypted_for'] = False\n else:\n data = None\n return subkeys\n\n\ndef get_hosts(group_name):\n '''\n Get the hosts out of the\n ansible inventori.ini file\n '''\n return extract_hosts(\n util_crypt.run_cmd(\n [\"ansible\",\n group_name,\n \"-i\",\n INVENTORY_PATH,\n \"--list-hosts\"]))\n\n\ndef get_file_info(file_path):\n '''\n extracts the information out of the gpg file\n given py\n @param file_path\n '''\n return extract_keys(util_crypt.run_cmd([\"gpg\", \"--list-packets\", \"--list-only\", 
file_path]))\n\n\ndef changed_files(base, ahead):\n '''\n returns all files that differ from base branch\n @param base the base branch\n @param ahead the actual branch\n @return list_of_files by function @method extract()\n '''\n return extract(util_crypt.run_cmd([\"git\", \"diff\", \"--name-only\", base, ahead, \"--\"]))\n\n\ndef all_files(branch):\n '''\n returns all files tracked inside the given branch\n @param branch the branch to list the files of\n @return list_of_all_files @method extract()\n '''\n return extract(util_crypt.run_cmd([\"git\", \"ls-tree\", \"-r\", \"--name-only\", branch]))\n\n\ndef extract(output):\n '''\n Uses regex to extract files from cli\n should be used only in a method _files\n @param output output of @method all_files or @method changed_files\n @return list_of_extracted_files\n '''\n return [m.groupdict() for m in PATTERN.finditer(output)]\n\n\ndef extract_hosts(output):\n '''\n extracts the hosts from cli output of @method get_hosts(...)\n @param output of a subprocess call\n @return list_of_matching_hostnames\n '''\n if '[WARNING]:' in output:\n return None\n return PATTERN2.findall(output)\n\n\ndef extract_keys(output):\n '''\n extract the keys from the cli output of @method get_file_info()\n @param output output of cli\n @return list_of_keys_encrypted_for\n '''\n return PATTERN3.findall(output)\n\n\ndef construct_file_host_role_mapping(files):\n '''\n Reads the all.yml file from ansible and\n substitutes the groups to hostname_lists\n @param files gpg files with extracted role\n @return dict dict with files and their hostnames\n '''\n for playbook in ANSIBLE[1:]:\n hosts = playbook['hosts']\n roles = playbook['roles']\n for fil in files:\n if fil['role'] in roles:\n fil['hosts'] = get_hosts(hosts)\n return files\n\n\ndef construct_gpg_information(base, ahead=\"HEAD\", whole=False):\n '''\n Merges all methods from above to an big\n dictionary\n @param base base branch\n @param ahead actual branch defaults to current HEAD of branch\n @param whole if set to true the whole indexed files are checked\n @return files_and_hosts big dictionary with much information about the files\n '''\n if whole:\n changed = all_files(base)\n else:\n if not ahead:\n return None\n changed = changed_files(base, ahead)\n files_and_hosts = construct_file_host_role_mapping(changed)\n for file_meta in files_and_hosts:\n if 'hosts' in file_meta:\n users = util_ldap.get_authorized(file_meta['hosts'])\n else:\n users = None\n file_meta['path'] = file_meta['path'].split('/')\n path = ROOT_PATH\n for part in file_meta['path']:\n path = os.path.join(path, part)\n file_meta['encrypted_for'] = get_file_info(path)\n gpg_mapping = {}\n if users:\n for user,_ in users:\n gpg_key_file = os.path.join(KEY_PATH, user + \".gpg\")\n if os.path.exists(gpg_key_file):\n gpg_mapping[user] = GNUPG.scan_keys(gpg_key_file)\n else:\n gpg_mapping[user] = None\n file_meta['users'] = gpg_mapping\n else:\n file_meta['users'] = None\n return files_and_hosts\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Selfnet/multivault","sub_path":"tests/test_role_changes.py","file_name":"test_role_changes.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"35635639052","text":"#!/usr/bin/python\n\nimport time\nimport RPi.GPIO as GPIO \nfrom RFM69 import Radio, FREQ_433MHZ\n\nfrequency = 434000000 \nmyEncryptionKey = \"cansxINDA\"\nnumber = 0\n\nprint (\"Starting reception program\")\n\ntry:\n with Radio(FREQ_433MHZ, nodeID=2, networkID=100, isHighPower=True, verbose=False, interruptPin=18, resetPin=22, spiDevice=0, autoAcknowledge=False, encryptionKey=myEncryptionKey) as radio, open('database.csv', 'a+') as output:\n print (\"Starting receiving data...\")\n \n radio.calibrate_radio()\n radio.set_power_level(100)\n radio.set_frequency_in_Hz(frequency)\n \n while True:\n number += 1\n \n packet = radio.get_packet(timeout=1)\n \n if packet is not None:\n datas = packet.to_dict()\n print(\"Message %s, RSSI %s= %s\" % (str(number), datas['rssi'], packet.data_string))\n print(\"%s,%s,%s\" % (str(number), datas['rssi'], packet.data_string), file=output)\nfinally:\n GPIO.cleanup()\n","repo_name":"leopard3l/cansx","sub_path":"receive_data.py","file_name":"receive_data.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26525305679","text":"# http://hamukazu.com/2014/09/26/scipy-sparse-basics/\nfrom scipy.sparse import lil_matrix, csr_matrix\n\n# 疎行列aを用意する(3x3のすべて0の行列)\n# return \n# a = lil_matrix([3, 3])\na = lil_matrix((3, 3)) # 引数がlistだと値をセットしたときエラーが出る、tupleを渡す\n\n# 非ゼロ要素を設定する\na[0, 0]=1;a[0, 2]=2\n\n# lil_matrixをcsr_matrixに変換する\n# return \na = a.tocsr()\n\n# 疎行列bを用意する\nb = lil_matrix((3, 3))\n\n# 非ゼロようそを追加する\nb[1, 1] = 3; b[2, 0] = 4; b[2, 2] = 5\n\n# lil_matrixをcsr_matrixに変換する\nb = b.tocsr()\n\n# aとbの積を計算する\n# なぜdotか?https://qiita.com/masafumi_miya/items/640800cef813acf70caf\nc = a.dot(b)\n\n# # aとbの和を計算する\nd = a + b\n\nprint(d.toarray())\n# toarrayでvisualize できる, cast できる\n# return \n# print(c.toarray())\n","repo_name":"urasin/count_vectorizer","sub_path":"study_sparse_matrix.py","file_name":"study_sparse_matrix.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37861597912","text":"from unittest import TestCase\nfrom functools import reduce\nfrom math import exp, ceil\nfrom bloomfilter import union, intersection, element, optimal_hash_runs, optimal_filter_bits, len as bf_len\nfrom bloomfilter import _DEFAULT_HASH_RUNS, _DEFAULT_FILTER_BITS\n\n\nclass TestUtils(TestCase):\n def test_optimal_hash_runs(self):\n self.assertEqual(optimal_hash_runs(2000, 16384), 6)\n self.assertEqual(optimal_hash_runs(200, 4096), 14)\n self.assertEqual(optimal_hash_runs(32, 8096), 175)\n\n def test_optimal_filter_bits(self):\n self.assertEqual(optimal_filter_bits(1000, 0.01), 9586)\n self.assertEqual(optimal_filter_bits(100, 0.01), 959)\n\n\nclass TestBloomFilter(TestCase):\n def test_element_is_deterministic(self):\n e1 = element(b\"element\")\n e2 = element(b\"element\")\n\n self.assertEqual(e1, e2)\n\n def test_element_is_int(self):\n e = element(b\"element\")\n\n self.assertTrue(type(e) is int)\n\n def test_intersection_duplicates(self):\n e1 = element(b\"element 1\")\n\n self.assertEqual(intersection(e1, e1),\n e1)\n\n def test_intersection_unions(self):\n e1 = element(b\"element 1\")\n e2 = element(b\"element 2\")\n e3 = element(b\"element 3\")\n\n self.assertEqual(intersection(union(e1, e2, e3),\n union(e1, e2)),\n union(e1, e2))\n\n def test_union_nests(self):\n e1 = element(b\"element 1\")\n e2 = element(b\"element 2\")\n e3 = element(b\"element 3\")\n\n self.assertEqual(union(e1, e2, e3),\n union(e1,\n union(e2, e3)))\n\n def test_false_positive_rate(self, item_count=1000, bloom_size=_DEFAULT_FILTER_BITS, bloom_hashes=_DEFAULT_HASH_RUNS):\n bloom = reduce(union,\n map(lambda c: element(c.to_bytes(c.bit_length(), byteorder='big'),\n bloom_size,\n bloom_hashes),\n range(item_count)))\n false_positive_probability = pow(1 - exp(-bloom_hashes / (bloom_size / item_count)), bloom_hashes)\n\n false_positives = 0\n for c in range(item_count * 2):\n new_c = c + item_count\n e = element(new_c.to_bytes(new_c.bit_length(), byteorder='big'),\n bloom_size,\n bloom_hashes)\n if intersection(bloom, e) == e:\n false_positives += 1\n false_positive_ratio = false_positives / (item_count * 2)\n max_deviation = false_positive_probability * 0.075\n self.assertAlmostEqual(false_positive_ratio, false_positive_probability, delta=max_deviation)\n\n def test_false_positive_rate_at_different_settings(self):\n for test_args in (dict(item_count=3000, bloom_size=_DEFAULT_FILTER_BITS * 2, bloom_hashes=_DEFAULT_HASH_RUNS // 2),\n dict(item_count=300, bloom_size=_DEFAULT_FILTER_BITS // 2, bloom_hashes=_DEFAULT_HASH_RUNS * 2),\n dict(item_count=1100, bloom_size=_DEFAULT_FILTER_BITS * 2, bloom_hashes=_DEFAULT_HASH_RUNS * 2),\n dict(item_count=1000, bloom_size=_DEFAULT_FILTER_BITS, bloom_hashes=optimal_hash_runs(2000, _DEFAULT_FILTER_BITS))\n ):\n with self.subTest(**test_args):\n self.test_false_positive_rate(**test_args)\n\n def test_len(self):\n bloom = 0b0\n for c in range(1, 1001):\n bloom = union(bloom, element(b\"element \" + str(c).encode()))\n with self.subTest(c=c):\n self.assertAlmostEqual(bf_len(bloom), c, delta=ceil(c * 0.025))\n","repo_name":"tommyvn/bloomfilter","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"17283918284","text":"import csv\n\n# Define the input and output file names\ninput_file = 'C:\\\\Users\\\\quchenfu\\\\Downloads\\\\ml-stuttering-events-dataset\\\\SEP-28k_labels_old.csv'\noutput_file = 'C:\\\\Users\\\\quchenfu\\\\Downloads\\\\ml-stuttering-events-dataset\\\\SEP-28k_labels.csv'\n\n# List of strings to remove from the lines\nstrings_to_remove = ['StutteringIsCool', 'StrongVoices']\n\n# Open the input and output CSV files\nwith open(input_file, mode='r', newline='') as infile, open(output_file, mode='w', newline='') as outfile:\n reader = csv.reader(infile)\n writer = csv.writer(outfile)\n\n # Iterate through each row in the input CSV\n for row in reader:\n # Check if any of the strings to remove are in the row\n if not any(s in ' '.join(row) for s in strings_to_remove):\n # If none of the strings are found, write the row to the output CSV\n writer.writerow(row)\n\nprint(f\"Filtered data written to {output_file}\")\n","repo_name":"QuchenFu/stuttering_detection","sub_path":"models/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"4836112689","text":"from appium import webdriver\nimport time\n\n\ndesired_caps = {\n \"platformName\": \"Android\",\n \"platformVersion\": \"10\",\n \"automationName\": \"UiAutomator2\",\n \"appActivity\": \".MainActivity\",\n \"appPackage\": \"com.example.chapp_poc\",\n \"deviceName\": \"2NSDU20411004107\",\n \"newCommandTimeout\": 7200,\n \"noReset\": True\n}\n\ndriver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\n\ntime.sleep(2)\n\nstart_time = time.time()\nbtn = driver.find_element_by_xpath(\"//*[contains(@text, '纽崔莱')]\")\nend_time = time.time()\n\nprint(\"cost time is: {}\".format(end_time - start_time))\n\nstart_time = time.time()\nbtn.click()\nend_time = time.time()\n\nprint(\"cost time is: {}\".format(end_time - start_time))\n\n","repo_name":"sayidkongtao/scripts","sub_path":"demo/python_appium_demo_chapp_poc.py","file_name":"python_appium_demo_chapp_poc.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31897523509","text":"import os\nimport math\nimport qrcode\nimport random\nimport asyncio\nimport discord\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# import Discord UI and Constants\nfrom view import *\nfrom constants import *\n\nfrom replit import db\nfrom datetime import datetime\nfrom meme import meme_stealer\nfrom keep_alive import keep_alive\nfrom web_scrapping import find_jobs\nfrom mcv_notify import get_notifications\nfrom apscheduler.triggers.cron import CronTrigger\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\n\n\n# initial data to replit's database\nif \"interact\" not in db.keys():\n db[\"interact\"] = interact\n\nif \"responding\" not in db.keys():\n db[\"responding\"] = True\n\n\n# function to update word in database\ndef add_words(new_word):\n if \"interact\" in db.keys():\n interact = db[\"interact\"]\n interact.append(new_word)\n db[\"interact\"] = interact\n else:\n db[\"interact\"] = [new_word]\n\n\n# function to update word in database\ndef delete_word(index):\n interact = db[\"interact\"]\n\n if index.isdigit():\n if len(interact) > index:\n del interact[index]\n db[\"interact\"] = interact\n\n elif index in interact:\n interact.remove(index)\n db[\"interact\"] = interact\n\n\n# inform daily schedule\nasync def inform():\n await client.wait_until_ready()\n day_of_week = datetime.today().strftime('%A').lower()\n if (day_of_week != \"saturday\") and (day_of_week != \"sunday\"):\n embedVar = discord.Embed(\n title=\"Schedule\",\n url=CP_DOCS,\n description=schedule[day_of_week],\n color=discord.Color.blue()\n )\n embedVar.add_field(\n name=\"Days of week\",\n value=\"For \" + datetime.today().strftime('%A') +\n \" | [MCV](https://www.mycourseville.com/?q=courseville)\" +\n \" [Grader](https://nattee.net/grader)\",\n inline=False\n )\n\n # embedVar.set_author(name=\"\", icon_url=\"\")\n study_room_channel = client.get_channel(808174559529926666)\n Aqioz_id = os.environ['AQIOZ_ID']\n await study_room_channel.send(f\"มาเรียนว้อย {Aqioz_id}\")\n await study_room_channel.send(embed=embedVar)\n\n\n# Class Client\nclass DiscordClient(discord.Client):\n\n # introduce yourself\n async def on_ready(self):\n await client.wait_until_ready()\n await client.change_presence(activity=discord.Game(name=\"$help\"))\n print('We have logged in as {0.user}'.format(client))\n\n # initializing scheduler -> London : timezone=\"Asia/Bangkok\"\n scheduler = AsyncIOScheduler()\n\n # sends inform at 7 AM (Local Time = London)\n scheduler.add_job(inform, CronTrigger(hour=\"0\", minute=\"0\", second=\"1\"))\n scheduler.start()\n\n # react to word and command\n async def on_message(self, message):\n global interact\n msg = message.content\n\n # not reply to itself\n if message.author == client.user:\n return\n\n # responding\n if db[\"responding\"]:\n option = interact\n if \"interact\" in db.keys():\n option = option + list(db[\"interact\"])\n\n if any(word in msg for word in words):\n await message.channel.send(random.choice(option))\n\n # Detected Word\n if msg.startswith('ไป'):\n await message.reply('ไกปู', mention_author=True)\n\n if msg.startswith('สีเหลือง'):\n await message.reply('เยลโล่ว!', mention_author=True)\n\n if msg.lower().startswith('ma'):\n await message.reply('ลุยยยยยยย', mention_author=True)\n\n if msg.lower().startswith('ฝันดี'):\n await message.reply('ฝันดีคับบบ', mention_author=True)\n\n if msg.lower().startswith('เนอะ'):\n await message.reply('อื้อ', mention_author=True)\n\n if 'จิง' in msg:\n await message.channel.send('ฮ้อยย้าา')\n\n # 
food menu\n        if msg.startswith('กินไร'):\n            menu = [\"กระเพราหมูสับ\", \"โจ๊กหมูขอฮาๆ\", 'ข้าวไข่เจียว', 'ข้าวไข่ดาว', 'ข้าวไข่ข้น', 'ข้าวไข่ต้ม']\n            idx_answer = random.randint(0, len(menu) - 1)  # randint is inclusive on both ends\n            result = menu[idx_answer]\n            await message.reply(result, mention_author=True)\n\n        # guessing game\n        if msg.startswith('$guess'):\n            await message.channel.send('ทายเลขใน 1 ถึง 10 ซิ')\n\n            def is_correct(m):\n                return m.author == message.author and m.content.isdigit()\n\n            answer = random.randint(1, 10)\n\n            try:\n                guess = await self.wait_for('message', check=is_correct, timeout=5.0)\n            except asyncio.TimeoutError:\n                return await message.channel.send(\n                    'ช้าปายย {}.'.format(answer))\n            if int(guess.content) == answer:\n                await message.channel.send('แม่นน!')\n            else:\n                await message.channel.send(\n                    'ผิด! ตอบ {} โว้ยย'.format(answer))\n\n        # anonymous texting command\n        if msg.startswith('$send'):\n            channel = msg.split(\" \", 2)[1]\n            text = msg.split(\" \", 2)[2]\n            if (channel == \"general\"):\n                general_channel = client.get_channel(694382265081266280)\n                await general_channel.send(text)\n            elif (channel == \"music\"):\n                music_channel = client.get_channel(791315320648368142)\n                await music_channel.send(text)\n            elif (channel == \"study-room\"):\n                study_room_channel = client.get_channel(808174559529926666)\n                await study_room_channel.send(text)\n            elif (channel == \"gaming\"):\n                gaming_channel = client.get_channel(809839995287633950)\n                await gaming_channel.send(text)\n            else:\n                test_bot_channel = client.get_channel(928269670635671653)  # test channel\n                await test_bot_channel.send(text)\n\n        # add new words\n        if msg.startswith('$add'):\n            new_word = msg.split(\"$add\", 1)[1]\n            add_words(new_word)\n            await message.channel.send(\"เพิ่มละจ้า\")\n\n        # delete word in interact\n        if msg.startswith(\"$del\"):\n            if msg.split(\"$del\", 1)[1] == '':\n                interact = list(db[\"interact\"])\n                list_of_word = \", \".join(interact)\n                await message.channel.send(\"`\" + list_of_word + \"`\")\n                return await message.channel.send(interact)\n\n            interact = []\n            if \"interact\" in db.keys():\n                index = int(msg.split(\"$del\", 1)[1])\n                delete_word(index)\n                interact = list(db[\"interact\"])\n                list_of_word = \", \".join(interact)\n                await message.channel.send(\"`\" + list_of_word + \"`\")\n            else:\n                await message.channel.send(\"ว่างแย้วครับพี่\")\n\n        # list of word\n        if msg.startswith('$list'):\n            interact = []\n            if \"interact\" in db.keys():\n                interact = list(db[\"interact\"])\n                list_of_word = \", \".join(interact)\n                await message.channel.send(\"`\" + list_of_word + \"`\")\n            else:\n                await message.channel.send(\"ว่างครับพี่\")\n\n        # responding command\n        if msg.startswith(\"$responding\"):\n            value = msg.split(\"$responding \", 1)[1]\n\n            if value.lower() == \"true\":\n                db[\"responding\"] = True\n                await message.channel.send(\"online now!\")\n            else:\n                db[\"responding\"] = False\n                await message.channel.send(\"offline bye!\")\n\n        # Display all commands usage\n        if msg.startswith('$help'):\n            embed = discord.Embed(\n                title=\"How to use commands\",\n                url=\"https://discordpy.readthedocs.io/en/stable/\",\n                description=\"use prefix $ | Automatically send schedule at 7 am\",\n                color=discord.Color.blue()\n            )\n\n            command_lists = [\n                {'name': '$guess', 'usage': 'Guess number from 1-10\\nYou only have 1 guess!'},\n                {'name': '$send', 'usage': 'Send anonymous message\\n`send [channel] [text]`'},\n                {'name': '$responding', 'usage': 'Toggle interact message\\n`responding [bool]`'},\n                {'name': '$list', 'usage': 'Display all interacting words in database'},\n                {'name': '$add', 'usage': 'Add 
word \\n`add [word]`'},\n {'name': '$del', 'usage': 'Delete word\\n`del [index / word]`'},\n {'name': '$random', 'usage': 'Random from list\\n`random [list]`'},\n {'name': '$qrcode', 'usage': 'Create QR-Code\\n`qrcode [data / link]`'},\n {'name': '$poll', 'usage': 'Create poll with reaction\\n`poll [title] [list]`'},\n {'name': '$code', 'usage': 'Create codeblock\\n`code [language]`'},\n {'name': '$cal', 'usage': 'Act as a calculator\\nType `บาย` to quit'},\n {'name': '$base', 'usage': 'Convert number base\\n`base [number] [old_base] [new_base]`'},\n {'name': '$master1', 'usage': 'Master Theorem for dividing function\\n`master1 [a] [b] [d]`'},\n {'name': '$master2', 'usage': 'Master Theorem for decrease function\\n`master2 [a] [b] [k]`'},\n {'name': '$plot', 'usage': 'Plot random points with various size\\n`plot [number]`'},\n {'name': '$inform', 'usage': 'Inform current `author`\\'s schedule'},\n {'name': '$job', 'usage': 'Filter jobs from website\\n`job [keyword] [filter]`'},\n {'name': '$noti', 'usage': 'Notification from MCV\\n`noti [days] [type]`'},\n {'name': '$invite', 'usage': 'Send invitation link'},\n {'name': '$meme', 'usage': 'Random meme go brrrr'},\n {'name': '$join', 'usage': 'join voice channel'},\n ]\n\n # embed.set_thumbnail(url=\"https://i.pinimg.com/originals/13/8d/52/138d52a8f429510e2c16bd67990dae3c.jpg\")\n for command in command_lists:\n embed.add_field(\n name=command['name'],\n value=command['usage'],\n inline=True\n )\n\n # embed.set_author(name=\"Aqioz\")\n Aqioz_id = os.environ['AQIOZ_ID']\n embed.add_field(name=\"__**Author**__\", value=f\"> Bhuribhat@gmail.com\\n> {Aqioz_id}\", inline=False)\n await message.channel.send(embed=embed, view=GithubButton())\n\n # calculator command\n if msg.startswith(\"$cal\"):\n await message.channel.send('ก็มาดิ')\n try:\n equation = await self.wait_for('message', timeout=10.0)\n if equation.content == \"บาย\":\n await message.channel.send(\"บายน้า\")\n except asyncio.TimeoutError:\n return await message.channel.send('ไปละปวดหมอง')\n while equation.content != \"บาย\":\n answer = \"ตอบ \" + str(eval(equation.content))\n await message.channel.send(answer)\n try:\n equation = await self.wait_for('message', timeout=10.0)\n if equation.content == \"บาย\":\n await message.channel.send(\"บายน้า\")\n except asyncio.TimeoutError:\n return await message.channel.send('ไปละปวดหมอง')\n\n # coding template\n if msg.startswith(\"$code\"):\n language = msg.split(\"$code\", 1)[1].strip()\n study_room_channel = client.get_channel(808174559529926666)\n\n if language != '':\n embedVar = discord.Embed(\n title=\"Coding Template\",\n description=f\"` ```{language}\\n`\\t `\\n``` `\",\n color=0x00ff00\n )\n embedVar.add_field(\n name=\"Description\",\n value=f\"Template for {language} language\",\n inline=False\n )\n await study_room_channel.send(embed=embedVar)\n else:\n await study_room_channel.send(view=CodeMenu())\n\n # random list of thing split by comma (\",\")\n if msg.startswith(\"$random\"):\n list_thing = msg.split(\"$random\", 1)[1].split(\",\")\n if (list_thing == ['']):\n result = \"overflow\"\n value = \"None\"\n else:\n result = random.choice(list_thing)\n value = \", \".join(list_thing)\n embedVar = discord.Embed(title=\"Result\", description=f\"`{result.strip()}`\", color=0xe91e63)\n embedVar.add_field(name=\"Randoming from\", value=value, inline=False)\n await message.channel.send(embed=embedVar)\n\n # calculate time complex using master theorem divide function\n if msg.startswith(\"$master1\"):\n coef = msg.split(\"$master1\", 
1)[1].split()\n if (len(coef) == 0):\n await message.channel.send(\"__**usage**__: `$master1 a b d`\")\n result = \"Big Thetha\"\n value = \"`T(n) = aT(n/b) + Θ(n^d)`\\n\\n__**Conditions**__:\\na >= 1, b > 1, c = log_b(a), d >= 0, T(0) = 1\"\n else:\n if (len(coef) > 3):\n await message.channel.send(\"*Invalid Input..*\")\n return\n a, b, d = [int(num) for num in coef]\n if a < 1 or b <= 1 or d < 0:\n await message.channel.send(\"*Invalid Input..*\")\n return\n c = math.log(a, b)\n\n # convert to integer if not decimal\n if is_integer_num(a):\n a = math.ceil(a)\n else:\n a = round(a, 2)\n if is_integer_num(b):\n b = math.ceil(b)\n else:\n b = round(b, 2)\n if is_integer_num(c):\n c = math.ceil(c)\n else:\n c = round(c, 2)\n if is_integer_num(d):\n d = math.ceil(d)\n else:\n d = round(d, 2)\n str_c = f\"log_{b}({a})\"\n\n if a == 1:\n a = ''\n if d == 0:\n value = f\" T(n) = {a}T(n/{b}) + 1\"\n elif d == 1:\n value = f\"T(n) = {a}T(n/{b}) + Θ(n)\"\n else:\n value = f\"T(n) = {a}T(n/{b}) + Θ(n^{d})\"\n if d < c:\n await message.channel.send(\"Master theorem case 1 : d < c\")\n if not is_integer_num(c):\n result = f\"Θ(n^{str_c}) = Θ(n^{c})\"\n else:\n if c == 1:\n result = \"Θ(n)\"\n else:\n result = f\"Θ(n^{c})\"\n elif d == c:\n await message.channel.send(\"Master theorem case 2 : d = c\")\n if not is_integer_num(c):\n result = f\"Θ(n^{str_c}log(n)) = Θ(n^{str_c}log(n))\"\n else:\n if c == 0:\n result = \"Θ(log(n)\"\n elif c == 1:\n result = \"Θ(nlog(n)\"\n else:\n result = f\"Θ(n^{c}log(n)\"\n elif d > c:\n await message.channel.send(\"Master theorem case 3 : d > c\")\n if d == 1:\n result = \"Θ(n)\"\n else:\n result = f\"Θ(n^{d})\"\n\n embedVar = discord.Embed(title=\"Divide Function\", description=value, color=0xd69f09)\n embedVar.add_field(name=\"Time Complexity\", value=f\"||`{result.strip()}`||\", inline=False)\n await message.channel.send(embed=embedVar)\n\n # calculate time complex using master theorem decreasing function\n if msg.startswith(\"$master2\"):\n coef = msg.split(\"$master2\", 1)[1].split()\n if (len(coef) == 0):\n await message.channel.send(\"__**usage**__: `$master2 a b k`\")\n result = \"Big Oh Notation\"\n value = \"`T(n) = aT(n - b) + O(n^k)`\\n\\n__**Conditions**__:\\na >= 1, b > 0, k >= 0, T(0) = 1\"\n else:\n if (len(coef) > 3):\n await message.channel.send(\"*Invalid Input..*\")\n return\n a, b, k = [int(num) for num in coef]\n if a < 1 or b <= 0 or k < 0:\n await message.channel.send(\"*Invalid Input..*\")\n return\n\n # convert to integer if it has no decimal, 2 precision otherwise\n if is_integer_num(a):\n a = math.ceil(a)\n else:\n a = round(a, 2)\n if is_integer_num(b):\n b = math.ceil(b)\n else:\n b = round(b, 2)\n if is_integer_num(k):\n k = math.ceil(k)\n else:\n k = round(k, 2)\n\n if a == 1:\n a = ''\n if k == 0:\n value = f\" T(n) = {a}T(n - {b}) + 1\"\n elif k == 1:\n value = f\" T(n) = {a}T(n - {b}) + O(n)\"\n else:\n value = f\" T(n) = {a}T(n - {b}) + O(n^{k})\"\n a = 1 if a == '' else a\n\n if a == 1:\n await message.channel.send(\"Master theorem case 1 : a = 1\")\n if k + 1 > 1:\n result = f\"O(n^{k + 1})\"\n else:\n result = f\"O(n)\"\n\n elif a > 1:\n await message.channel.send(\"Master theorem case 2 : a > 1\")\n if k > 0:\n if k == 1:\n k = \"O(n\"\n else:\n k = f\"O(n^{k}\"\n if b > 1:\n result = f\"{k} * {a}^(n/{b}))\"\n else:\n result = f\"{k} * {a}^n)\"\n else:\n if b > 1:\n result = f\"O({a}^(n/{b}))\"\n else:\n result = f\"O({a}^n)\"\n\n embedVar = discord.Embed(title=\"Decrease Function\", description=value, color=0xd69f09)\n 
embedVar.add_field(name=\"Time Complexity\", value=f\"||`{result.strip()}`||\", inline=False)\n await message.channel.send(embed=embedVar)\n\n # inform\n if msg.startswith(\"$inform\"):\n day_of_week = datetime.today().strftime('%A').lower()\n if (day_of_week != \"saturday\") and (day_of_week != \"sunday\"):\n embedVar = discord.Embed(\n title=\"Schedule\",\n url=\"https://docs.google.com/document/d/1C1sF4aS6kFjqWBtU91vSYUvTSxdh9xxXhA9LeUUTbXg/edit#heading=h.8sb6c0hcl62a\",\n description=schedule[day_of_week],\n color=discord.Color.blue())\n embedVar.add_field(\n name=\"Days of week\",\n value=\"For \" + datetime.today().strftime('%A') +\n \" | [MCV](https://www.mycourseville.com/?q=courseville)\" +\n \" [Grader](https://nattee.net/grader)\",\n inline=False)\n await message.channel.send(embed=embedVar)\n\n # plot graph within 10,000 number\n if msg.startswith(\"$plot\"):\n number = int(msg.split(\"$plot \", 1)[1])\n if (number >= 1000):\n await message.channel.send(\"too much bro\")\n else:\n data = {\n 'a': np.arange(number),\n 'c': np.random.randint(0, number, number),\n 'd': np.random.randn(number)\n }\n data['b'] = data['a'] + 10 * np.random.randn(number)\n data['d'] = np.abs(data['d']) * 100\n\n # Set background color and axis\n plt.figure(figsize=(10, 6), facecolor=\"#303340\")\n ax = plt.axes()\n ax.set_facecolor(\"#303340\")\n ax.tick_params(axis=\"x\", color=\"white\")\n ax.tick_params(axis=\"y\", color=\"white\")\n plt.xticks(color=\"white\")\n plt.yticks(color=\"white\")\n\n # plot graph\n plt.scatter('a', 'b', c='c', s='d', data=data)\n plt.xlabel('Data', color=\"cyan\")\n plt.ylabel('Value', color=\"cyan\")\n plt.title(r'$\\Sigma=$' + str(number), color=\"orange\")\n\n await message.channel.send(f\"min data = {getMinPoint(data)}\")\n await message.channel.send(f\"max data = {getMaxPoint(data)}\")\n\n # send graph to channel\n plt.savefig('.\\\\assets\\\\graph.png', bbox_inches='tight')\n await message.channel.send(file=discord.File('.\\\\assets\\\\graph.png'))\n\n # QR-Code PNG\n if msg.startswith(\"$qrcode\"):\n QR = qrcode.QRCode(version=1, box_size=10, border=2)\n data = msg.split(\"$qrcode \", 1)[1]\n QR.add_data(data)\n QR.make(fit=True)\n\n # fill_color='black', back_color='white'\n img = QR.make_image()\n img.save('.\\\\assets\\\\QRCode.png')\n\n # send picture to channel\n await message.channel.send(file=discord.File('.\\\\assets\\\\QRCode.png'))\n\n if msg.startswith(\"$base\"):\n attr = msg.split(\"$base\", 1)[1].split()\n if (len(attr) == 0):\n await message.channel.send(\"__**usage**__: `$base number base convert_base`\")\n result = \"Number in base X\"\n value = \"base >= 2\"\n base = 'n'\n convert_base = 'x'\n else:\n if (len(attr) > 3):\n await message.channel.send(\"*Invalid Input..*\")\n return\n number, base, convert_base = [int(num) for num in attr]\n result = numberAnyBase(number, base, convert_base)\n value = number\n\n embedVar = discord.Embed(title=\"From base \" + str(base), description=value, color=0xa84300)\n embedVar.add_field(name=\"Convert to base \" + str(convert_base), value=f\"||`{result.strip()}`||\", inline=False)\n await message.channel.send(embed=embedVar)\n\n # jobs seeker with csv file\n if msg.startswith(\"$job\"):\n attr = msg.split(\"$job\", 1)[1].split()\n if (len(attr) == 0):\n await message.channel.send(\"__**usage**__: `$job keyword unwanted_skill`\")\n return\n else:\n if (len(attr) > 2):\n await message.channel.send(\"*Invalid Input..*\")\n return\n\n keyword, unwant_skill = attr\n df = find_jobs(keyword, unwant_skill)\n df = 
df.drop(['Job Description', 'More Information', 'Skills Required'], axis=1)\n            \n            length = df.shape[0]\n            df = df.head(5).to_string()\n\n            embedVar = discord.Embed(title=f\"All Jobs with {keyword} skill\", description=f\"filter out {unwant_skill}\", color=0xa84300)\n            embedVar.add_field(name=f\"Found {length} jobs\", value=f\"```{df}```\", inline=False)\n            await message.channel.send(embed=embedVar)\n            await message.channel.send(\"for more detail please check `csv file`\")\n            await message.channel.send(file=discord.File(\".\\\\assets\\\\jobs.csv\"))\n\n        # poll with reactions\n        if msg.startswith(\"$poll\"):\n            emoji = ['1️⃣','2️⃣','3️⃣','4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣']\n            choices = msg.split(\"$poll\")[1].split()\n            if (len(choices) == 0):\n                await message.channel.send(\"__**usage**__: `$poll [title] [list]`\")\n                return\n            if (len(choices) > len(emoji) + 1):\n                await message.channel.send(\"too much bro\")\n                return\n\n            title = choices[0]\n            display_choices = ''\n            for i in range(len(choices[1:])):\n                display_choices += f\"{emoji[i]} {choices[1:][i]}\\n\"\n            embedVar = discord.Embed(title=\"Please vote!\", color=0x64395d)\n            embedVar.add_field(name=title, value=f\"```{display_choices}```\", inline=False)\n            pollmsg = await message.channel.send(embed=embedVar)\n\n            for i in range(len(choices[1:])):\n                await pollmsg.add_reaction(emoji[i])\n\n        # get mcv notifications\n        if msg.startswith(\"$noti\"):\n            attr = msg.split(\"$noti\")[1].split()\n            if len(attr) > 2:\n                await message.channel.send(\"__**usage**__: `$noti days type`\")\n                await message.channel.send(\"type must be one of `Assignment, Material, Announcement`\")\n                return\n\n            if len(attr) == 0:\n                await message.channel.send(\"You can select up to 2 filters!\")\n                await message.channel.send(view=NotiMenu())\n                return\n            elif len(attr) == 1:\n                if attr[0].isnumeric():\n                    notifications = get_notifications(days=attr[0])\n                elif attr[0].title() in ['Assignment', 'Material', 'Announcement']:\n                    notifications = get_notifications(select=attr[0])\n                else:\n                    await message.channel.send(\"type must be one of `Assignment, Material, Announcement`\")\n                    return\n            else:\n                notifications = get_notifications(attr[0], attr[1])\n            \n            embedVar = discord.Embed(title=\"MCV Notification\", color=discord.Color.blue())\n            for notification in notifications:\n                value = f\"```{notification[1]}```{notification[2]}\"\n                embedVar.add_field(name=notification[0], value=value, inline=False)\n            await message.channel.send(embed=embedVar)\n\n        # send meme\n        if msg.startswith(\"$meme\"):\n            meme_stealer()\n            await message.channel.send(file=discord.File('.\\\\assets\\\\meme.png'))\n\n        # send invitation\n        if msg.startswith(\"$invite\"):\n            inv = await message.channel.create_invite()\n            await message.channel.send(\"Click the button below to invite someone!\", view=InviteButton(str(inv)))\n\n        # join voice channel\n        if msg.startswith(\"$join\"):\n            channel = message.author.voice.channel\n            await channel.connect()\n\n\nif __name__ == '__main__':\n    intents = discord.Intents.default()\n    intents.message_content = True\n\n    # driver code\n    client = DiscordClient(intents=intents)\n    keep_alive()\n    client.run(TOKEN)\n","repo_name":"Bhuribhat/Discord-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":27535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
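The `$master1` handler in this bot embeds the divide-and-conquer master theorem in its message-formatting logic; stripped of the Discord plumbing, the decision is just a comparison of d against the critical exponent log_b(a). A compact standalone sketch of that core:

```python
import math

def master_theorem(a, b, d):
    """Asymptotics of T(n) = a*T(n/b) + Theta(n^d), with a >= 1, b > 1, d >= 0."""
    c = math.log(a, b)  # critical exponent log_b(a)
    if d < c:
        return "Theta(n^{:.2f})".format(c)    # case 1: the leaves dominate
    if d == c:
        return "Theta(n^{} log n)".format(d)  # case 2: every level costs the same
    return "Theta(n^{})".format(d)            # case 3: the root dominates

print(master_theorem(2, 2, 1))  # merge sort: Theta(n^1 log n)
print(master_theorem(4, 2, 1))  # Theta(n^2.00)
print(master_theorem(1, 2, 1))  # Theta(n^1)
```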
+{"seq_id":"30687833282","text":"#\n# based on meshOptMWE.cpp from Ketan Mittal\n#\n# python test_tmop.py -m ../data/square01.mesh -o 2 -rs 2 -mid 80 -tid 5 -ni 50 -qo 4 -vl 2 -ae 0\nimport sys\nimport os\nfrom os.path import expanduser, join\nimport numpy as np\n\nif len(sys.argv) > 1 and sys.argv[1] == '-p':\n import mfem.par as mfem\n use_parallel = True\n from mfem.common.mpi_debug import nicePrint as print\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n sys.argv = [sys.argv[0]] + sys.argv[2:]\n\nelse:\n import mfem.ser as mfem\n use_parallel = False\n myid = 0\n\nclass discrete_size_2d(mfem.PyCoefficient):\n def EvalValue(self, x):\n opt = 2;\n small = 0.001\n big = 0.01\n val = 0.;\n \n xc = x[0] - 0.0\n yc = x[1] - 0.5\n r = np.sqrt(xc*xc + yc*yc)\n r1 = 0.45\n r2 = 0.55\n sf=30.0\n val = 0.5*(1+np.tanh(sf*(r-r1))) - 0.5*(1+np.tanh(sf*(r-r2)))\n\n val = max(0.,val)\n val = min(1.,val)\n\n return val * small + (1.0 - val) * big\n \ndef run(args):\n mesh_file = expanduser(\n join(os.path.dirname(__file__), '..', 'data', args.mesh))\n\n mesh_poly_deg = args.order\n rs_levels = args.refine_serial\n metric_id = args.metric_id\n target_id = args.target_id\n quad_type = args.quad_type\n quad_order = args.quad_order\n solver_type = args.solver_type\n lin_solver = args.lin_solver\n normalization = args.normalization\n\n verbosity_level = args.verbosity_level\n \n n_hr_iter=args.n_hr_iter\n n_h_iter=args.n_h_iter\n \n solver_iter = args.newton_iters\n solver_rtol= args.newton_rel_tolerance\n solver_art_type = args.adaptive_rel_tol\n max_lin_iter = args.lin_iter\n hradaptivity = args.hr_adaptivity\n visualization = not args.no_visualization\n adapt_eval= args.adaptivity_evaluator\n\n devopt = \"cpu\";\n\n # 2. Initialize and refine the starting mesh.\n mesh = mfem.Mesh(mesh_file, 1, 1, False)\n for i in range(rs_levels):\n mesh.UniformRefinement()\n dim = mesh.Dimension();\n\n fec = mfem.H1_FECollection(mesh_poly_deg, dim)\n fespace = mfem.FiniteElementSpace(mesh, fec, dim)\n\n mesh.SetNodalFESpace(fespace)\n\n b = mfem.Vector(0)\n\n x = mfem.GridFunction(fespace)\n mesh.SetNodalGridFunction(x)\n x.SetTrueVector()\n x.SetFromTrueVector()\n \n # 9. Save the starting (prior to the optimization) mesh to a file. This\n # output can be viewed later using GLVis: \"glvis -m perturbed.mesh\".\n mesh.Print(\"perturbed.mesh\")\n\n # 10. 
Store the starting (prior to the optimization) positions.\n x0 = mfem.GridFunction(fespace)\n x0.Assign(x)\n\n metric = mfem.tmop.TMOP_Metric_080(0.5)\n\n ind_fec = mfem.H1_FECollection(mesh_poly_deg, dim)\n ind_fes = mfem.FiniteElementSpace(mesh, ind_fec)\n size = mfem.GridFunction(ind_fes)\n\n \n if target_id == 5: # Discrete size 2D or 3D\n target_t = mfem.tmop.TargetConstructor.IDEAL_SHAPE_GIVEN_SIZE\n \n tc = mfem.tmop.DiscreteAdaptTC(target_t)\n \n if adapt_eval == 0:\n tc.SetAdaptivityEvaluator(mfem.tmop.AdvectorCG())\n else:\n if \"InterpolatorFP\" in dir(mfem.tmop):\n evaluator = mfem.tmop.InterpolatorFP()\n tc.SetAdaptivityEvaluator(evaluator)\n else:\n assert False, \"MFEM is not built with GSLIB.\"\n if dim == 2:\n #size_coeff = mfem.FunctionCoefficient(discrete_size_2d)\n size_coeff = discrete_size_2d()\n size.ProjectCoefficient(size_coeff)\n else:\n assert False, \"only dim == 2 supported for this MWE.\"\n \n tc.SetSerialDiscreteTargetSize(size)\n target_c = tc;\n\n else:\n print(\"Unknown target_id: \" + str(target_id))\n return\n\n if target_c is None:\n target_c = mfem.tmop.TargetConstructor(target_t);\n\n target_c.SetNodes(x0)\n \n tmop_integ = mfem.tmop.TMOP_Integrator(metric, target_c)\n \n # Setup the quadrature rules for the TMOP integrator.\n if quad_type == 1:\n irules = mfem.IntegrationRules(0, mfem.Quadrature1D.GaussLobatto)\n elif quad_type == 2: \n irules = mfem.IntRules\n elif quad_type == 3: \n irules = mfem.IntegrationRules(0, mfem.Quadrature1D.ClosedUniform)\n else:\n print( \"Unknown quad_type: \" + str(quad_type))\n return 3\n\n tmop_integ.SetIntegrationRules(irules, quad_order)\n\n if normalization:\n tmop_integ.EnableNormalization(x0)\n\n a = mfem.NonlinearForm(fespace)\n a.AddDomainIntegrator(tmop_integ)\n\n # For HR tests, the energy is normalized by the number of elements.\n init_energy = a.GetGridFunctionEnergy(x);\n \n # Visualize the starting mesh and metric values.\n # Note that for combinations of metrics, this only shows the first metric.\n if visualization:\n mfem.tmop.vis_tmop_metric_s(mesh_poly_deg, metric, target_c, mesh, \"Initial metric values\", 0);\n\n # 13. Fix all boundary nodes, or fix only a given component depending on the\n # boundary attributes of the given mesh. Attributes 1/2/3 correspond to\n # fixed x/y/z components of the node. Attribute 4 corresponds to an\n # entirely fixed node. Other boundary attributes do not affect the node\n # movement boundary conditions.\n ess_bdr = mfem.intArray([1]*mesh.bdr_attributes.Max())\n a.SetEssentialBC(ess_bdr);\n\n\n # 14. 
As we use the Newton method to solve the resulting nonlinear system,\n # here we setup the linear solver for the system's Jacobian.\n linsol_rtol = 1e-12;\n if lin_solver == 0:\n S = mfem.DSmoother(1, 1.0, max_lin_iter)\n elif lin_solver == 1:\n cg = mfem.CGSolver()\n cg.SetMaxIter(max_lin_iter)\n cg.SetRelTol(linsol_rtol)\n cg.SetAbsTol(0.0)\n cg.SetPrintLevel(3 if verbosity_level >= 2 else -1)\n S = cg\n else:\n minres = mfem.MINRESSolver()\n minres.SetMaxIter(max_lin_iter)\n minres.SetRelTol(linsol_rtol)\n minres.SetAbsTol(0.0)\n if verbosity_level > 2:\n minres.SetPrintLevel(1)\n minres.SetPrintLevel(3 if verbosity_level == 2 else -1)\n if lin_solver == 3 or lin_solver == 4:\n ds = mfem.DSmoother((0 if lin_solver == 3 else 1), 1.0, 1)\n ds.SetPositiveDiagonal(True)\n minres.SetPreconditioner(ds)\n S = minres;\n \n #/ Perform the nonlinear optimization.\n ir = irules.Get(fespace.GetFE(0).GetGeomType(), quad_order)\n solver = mfem.tmop.TMOPNewtonSolver(ir, solver_type)\n solver.SetIntegrationRules(irules, quad_order)\n if solver_type == 0:\n # Specify linear solver when we use a Newton-based solver.\n solver.SetPreconditioner(S)\n\n print(dir(solver))\n solver.SetMaxIter(solver_iter)\n solver.SetRelTol(solver_rtol)\n solver.SetAbsTol(0.0)\n if solver_art_type > 0:\n solver.SetAdaptiveLinRtol(solver_art_type, 0.5, 0.9)\n\n solver.SetPrintLevel(1 if verbosity_level >= 1 else -1)\n\n hr_solver = mfem.tmop.TMOPHRSolver(mesh, a, solver, x, False, hradaptivity,\n mesh_poly_deg, metric_id, n_hr_iter, n_h_iter)\n hr_solver.AddGridFunctionForUpdate(x0)\n hr_solver.Mult()\n\n # 15. Save the optimized mesh to a file. This output can be viewed later\n # using GLVis: \"glvis -m optimized.mesh\".\n mesh.Print(\"optimized.mesh\", 14)\n\n fin_energy = a.GetGridFunctionEnergy(x)\n print(\"Initial strain energy: \" + \"{:g}\".format(init_energy))\n print(\" Final strain energy: \" + \"{:g}\".format(fin_energy))\n print(\"The strain energy decreased by: \" + \n \"{:g}\".format((init_energy - fin_energy) * 100.0 / init_energy))\n\n # 16. Visualize the final mesh and metric values.\n if visualization:\n mfem.tmop.vis_tmop_metric_s(mesh_poly_deg, metric, target_c, mesh, \"Final metric values\", 600);\n\n # 17. 
Visualize the mesh displacement.\n if visualization:\n x0 -= x\n sock = mfem.socketstream(\"localhost\", 19916)\n sock << \"solution\\n\" << mesh << x0\n sock.flush()\n sock << \"window_title 'Displacements'\\n\" << \"window_geometry \"\n sock << 1200 << \" \" << 0 << \" \" << 600 << \" \" << 600 << \"\\n\"\n sock << \"keys jRmclA\"\n sock.flush() \n\n\nif __name__ == \"__main__\":\n from mfem.common.arg_parser import ArgParser\n\n parser = ArgParser(description='meshOptMWE')\n parser.add_argument('-m', '--mesh',\n default='square01.mesh', # icf.mesh\n action='store', type=str,\n help='Mesh file to use.')\n parser.add_argument('-o', '--order',\n action='store', default=1, type=int,\n help=\"Finite element order (polynomial degree) or -1 for isoparametric space.\")\n parser.add_argument('-rs', '--refine-serial',\n action='store', default=2, type=int,\n help=\"Number of times to refine the mesh uniformly in serial\")\n parser.add_argument(\"-mid\", \"--metric-id\",\n action=\"store\", default=80, type=int,\n help=\"\\n\".join([\"Mesh optimization metric:\",\n \"\\tT-metrics\",\n \"2 : 0.5|T|^2/tau-1 -- 2D shape (condition number)\\n\\t\"]))\n parser.add_argument(\"-tid\", \"--target-id\",\n action=\"store\", default=5, type=int, \n help=\"\\n\".join([\"Target (ideal element) type:\",\n \"\\t5: Ideal shape, given size (in physical space)\"]))\n parser.add_argument(\"-qt\", \"--quad-type\",\n action=\"store\", default=1, type=int, \n help=\"\\n\".join([\"Quadrature rule type:\",\n \"\\t1: Gauss-Lobatto\",\n \"\\t2: Gauss-Legendre\"\n \"\\t3: Closed uniform points\"]))\n parser.add_argument(\"-qo\", \"--quad_order\",\n action=\"store\", default=4, type=int, \n help=\"Order of the quadrature rule.\")\n parser.add_argument(\"-st\", \"--solver-type\",\n action=\"store\", default=0, type=int,\n help = \" Type of solver: (default) 0: Newton, 1: LBFGS\")\n parser.add_argument(\"-ni\", \"--newton-iters\",\n action=\"store\", default=80, type=int, \n help=\"Maximum number of Newton iterations.\")\n parser.add_argument(\"-rtol\", \"--newton-rel-tolerance\",\n action=\"store\", default=1e-10, type=float,\n help=\"Relative tolerance for the Newton solver.\")\n parser.add_argument(\"-art\", \"--adaptive-rel-tol\",\n action=\"store\", default=0, type=int, \n help=\"\\n\".join([\"Type of adaptive relative linear solver tolerance:\",\n \"\\t0: None (default)\",\n \"\\t1: Eisenstat-Walker type 1\",\n \"\\t2: Eisenstat-Walker type 2\"]))\n parser.add_argument(\"-ls\", \"--lin-solver\",\n action=\"store\", default=2, type=int, \n help=\"\\n\".join([\"Linear solver:\",\n \"\\t0: l1-Jacobi\",\n \"\\t1: CG\",\n \"\\t2: MINRES\",\n \"\\t3: MINRES + Jacobi preconditioner\",\n \"\\t4: MINRES + l1-Jacobi preconditioner\"]))\n parser.add_argument(\"-li\", \"--lin-iter\",\n action=\"store\", default=100, type=int, \n help=\"Maximum number of iterations in the linear solve.\")\n parser.add_argument(\"-hr\", \"--hr-adaptivity\", \n action='store_true',\n help=\"Enable hr-adaptivity.\")\n parser.add_argument(\"-nor\", \"--normalization\", \n action='store_true',\n help=\"Make all terms in the optimization functional unitless.\")\n parser.add_argument('-no-vis', '--no-visualization',\n action='store_true',\n help='Enable GLVis visualization')\n parser.add_argument(\"-vl\", \"--verbosity-level\",\n action=\"store\", default=2, type=int, \n help=\"Set the verbosity level - 0, 1, or 2.\")\n\n\n parser.add_argument(\"-ae\", \"--adaptivity-evaluator\",\n action=\"store\", default=1, type=int, \n help=\"0 - Advection based 
(DEFAULT), 1 - GSLIB.\");\n\n parser.add_argument(\"-nhr\", \"--n_hr_iter\",\n action=\"store\", default=5, type=int,\n help=\"Number of hr-adaptivity iterations.\")\n\n parser.add_argument(\"-nh\", \"--n_h_iter\",\n action=\"store\", default=1, type=int,\n help=\"Number of h-adaptivity iterations per r-adaptivity\")\n \n args = parser.parse_args()\n parser.print_options(args)\n \n run(args)\n","repo_name":"mfem/PyMFEM","sub_path":"test/test_tmop.py","file_name":"test_tmop.py","file_ext":"py","file_size_in_byte":13613,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"48"}
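The `discrete_size_2d` coefficient in this script is easier to inspect outside of MFEM. This pure-numpy sketch reproduces its tanh-based ring indicator (small target cell size on a ring of radius about 0.5 around (0, 0.5), large cell size elsewhere); only numpy is needed:

```python
import numpy as np

def size_field(x, y, small=0.001, big=0.01, r1=0.45, r2=0.55, sf=30.0):
    """Smooth ring indicator blended between 'small' and 'big', as in the script."""
    r = np.sqrt((x - 0.0) ** 2 + (y - 0.5) ** 2)
    # two tanh steps: ~1 for r1 < r < r2, ~0 elsewhere, with sharpness sf
    val = 0.5 * (1 + np.tanh(sf * (r - r1))) - 0.5 * (1 + np.tanh(sf * (r - r2)))
    val = np.clip(val, 0.0, 1.0)
    return val * small + (1.0 - val) * big

xs, ys = np.meshgrid(np.linspace(0, 1, 5), np.linspace(0, 1, 5))
print(size_field(xs, ys).round(4))  # points on the ring get ~0.001, the rest ~0.01
```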
+{"seq_id":"39648823076","text":"\"\"\"\nugvkp Dataloader\njieli_cn@163.com\n2019/1/11\n\"\"\"\nimport os\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nfrom torchvision.transforms import ToTensor\nfrom training.datasets.ugv_data.UGV_data_pipeline import UGVKeyPoints\nfrom training.datasets.dataloader import sDataLoader\n\n\ndef get_loader(json_path, data_dir, mask_dir, inp_size, feat_stride, preprocess,\n batch_size, params_transform, training=True, shuffle=True, num_workers=1, aug=False, classification = False):\n \"\"\" Build a COCO dataloader\n :param json_path: string, path to jso file\n :param datadir: string, path to coco data\n :returns : the data_loader\n \"\"\"\n\n json_data = list()\n if isinstance(json_path, list): # 将多个root\n\n root = os.path.dirname(json_path[0])\n for json_i in json_path:\n print(json_i)\n with open(json_i) as data_file:\n json_data_i = json.load(data_file)\n json_data.extend(json_data_i)\n # print(len(json_data_i), type(json_data_i))\n # print(len(json_data))\n\n elif isinstance(json_path, str):\n root = os.path.dirname(json_path)\n with open(json_path) as data_file:\n json_data = json.load(data_file)\n # data_this = json.load(data_file)\n # data = data_this['root']\n\n num_samples = len(json_data)\n train_indexes = []\n val_indexes = []\n for count in range(num_samples):\n if json_data[count]['isValidation'] != 0.:\n val_indexes.append(count)\n else:\n train_indexes.append(count)\n\n # print('train dataset len:', len(train_indexes), ' val dataset len:', len(val_indexes))\n\n # root = data_dir\n # root = os.path.dirname(json_path)\n\n kik_data = UGVKeyPoints(root=root,\n index_list=train_indexes if training else val_indexes,\n data=json_data, feat_stride=feat_stride,\n preprocess=preprocess, transform=ToTensor(), params_transform=params_transform,\n numkeypoints=4, numlims=4, aug=aug, classification=classification) # Mod by Jie.\n\n data_loader = sDataLoader(kik_data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n\n return data_loader\n\n","repo_name":"waterljwant/UGV-KPNet","sub_path":"training/datasets/ugvkp.py","file_name":"ugvkp.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"33406185734","text":"from ulakbus import settings\nfrom ulakbus.models import BAPFirma, User\nfrom zengine.lib.test_utils import BaseTestCase\n\n\nclass TestCase(BaseTestCase):\n \"\"\"\n Firmaların, teklife açık bütçe kalemlerine \n teklif vermesini sağlayan iş akışı testi.\n\n \"\"\"\n\n def test_bap_firma_basvuru_degerlendirme(self):\n firma = BAPFirma.objects.get(\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n self.prepare_client('/bap_firma_basvuru_degerlendirme',\n username='bap_koordinasyon_birimi_1')\n resp = self.client.post()\n\n # listeleme ekranı\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n assert \"Firma Adı\" in resp.json['objects'][0]\n assert \"Vergi Kimlik Numarası\" in resp.json['objects'][0]\n action_names = [\"Karar Ver\", \"İncele\"]\n for action in resp.json['objects'][1]['actions']:\n assert action['name'] in action_names\n\n # incele\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"incele\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n assert \"Firması Kayıt Başvurusu Değerlendirme\" in resp.json['forms']['schema']['title']\n assert resp.json['object']['Firma Adı'] == 'Veli Usta Dondurma'\n assert resp.json['object']['Vergi No'] == '8402384024802'\n\n # geri don\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"geri_don\")\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n\n # karar, geri don\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"karar_ver\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n assert \"Firması Başvuru Değerlendirme Kararı\" in resp.json['forms']['schema']['title']\n assert \"değerlendirme kararınızı veriniz\" in resp.json['forms']['form'][0]['helpvalue']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"red\", form={'red': 1})\n assert \"Firması Başvuru Reddi Gerekçesi\" in resp.json['forms']['schema']['title']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"gonder\",\n form={'gerekce': \"Belgeler eksik\"})\n assert \"Firması Başvuru Reddi\" in resp.json['forms']['schema']['title']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"geri_don\",\n form={'geri': 1})\n assert \"Firması Başvuru Değerlendirme Kararı\" in resp.json['forms']['schema']['title']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"geri_don\",\n form={'geri': 1})\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n\n # karar, red\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"karar_ver\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"red\", form={'red': 1})\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"gonder\",\n form={'gerekce': \"Belgeler eksik\"})\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"onayla\",\n form={'onayla': 1})\n assert resp.json['msgbox']['title'] == \"Firma Başvuru Kaydı Değerlendirme\"\n assert 'firma yetkilisine başarıyla iletilmiştir' in resp.json['msgbox']['msg']\n\n del resp.json['objects'][0]\n firma_adlari_list = [obj['fields'][0] for obj in resp.json['objects']]\n assert \"Veli Usta Dondurma\" not in firma_adlari_list\n kullanici = firma.Yetkililer[0].yetkili\n assert User.objects.filter(key=kullanici.key).count() == 0\n assert BAPFirma.objects.filter(key=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\").count() == 0\n firma = BAPFirma.objects.filter(key=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\", 
deleted=True)[0]\n kullanici = User.objects.filter(key=kullanici.key, deleted=True)[0]\n firma.deleted = False\n firma.save()\n kullanici.deleted = False\n kullanici.save()\n\n # karar, onayla\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"karar_ver\",\n object_id=\"OzRUS2vPOp12ju4Oj47CwaeRvV6\")\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"onayla\",\n form={'onayla': 1})\n assert \"Firması Başvuru Kabulü\" in resp.json['forms']['schema']['title']\n assert \"onaylıyor musunuz\" in resp.json['forms']['form'][0]['helpvalue']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"onayla\",\n form={'onayla': 1})\n\n firma = BAPFirma.objects.get(\"OzRUS2vPOp12ju4Oj47CwaeRvV6\")\n assert firma.durum == 2\n\n assert resp.json['msgbox']['title'] == \"Firma Başvuru Kaydı Değerlendirme\"\n assert firma.ad in resp.json['msgbox']['msg']\n\n del resp.json['objects'][0]\n firma_adlari_list = [obj['fields'][0] for obj in resp.json['objects']]\n assert firma.ad not in firma_adlari_list\n\n kullanici = firma.Yetkililer[0].yetkili\n assert kullanici.is_active == True\n role = kullanici.role_set[0].role\n assert 'bap_firma_teklif' in role.get_permissions()\n\n firma.durum = 1\n firma.blocking_save()\n kullanici.is_active = False\n kullanici.blocking_save()\n\n # belge indir\n firma = BAPFirma.objects.get(\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n self.prepare_client('/bap_firma_basvuru_degerlendirme',\n username='bap_koordinasyon_birimi_1')\n self.client.post()\n\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"incele\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"indir\",\n form={\"belge_indir\": 1})\n assert resp.json['download_url'] == \"%s%s\" % (\n settings.S3_PUBLIC_URL, firma.faaliyet_belgesi)\n","repo_name":"zetaops/ulakbus","sub_path":"tests/test_bap_firma_basvuru_degerlendirme.py","file_name":"test_bap_firma_basvuru_degerlendirme.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"tr","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"}
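The block that re-fetches the soft-deleted firma and kullanici with `deleted=True` and flips the flag back exists so later test steps still see the original fixtures. A framework-free sketch of that soft-delete/restore pattern (the `Record` class and `store` dict are stand-ins, not the zengine ORM):

```python
class Record:
    """Stand-in for a soft-deletable model instance."""
    def __init__(self, key):
        self.key = key
        self.deleted = False

store = {r.key: r for r in (Record("firma-1"), Record("user-1"))}

def soft_delete(key):
    store[key].deleted = True  # hidden from normal queries, not erased

def restore(key):
    # mirrors: obj = Model.objects.filter(key=key, deleted=True)[0]
    #          obj.deleted = False; obj.save()
    record = store[key]
    assert record.deleted, "only restore records that were soft-deleted"
    record.deleted = False

soft_delete("firma-1")
restore("firma-1")
assert not store["firma-1"].deleted
```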
+{"seq_id":"18402958281","text":"import threading\nimport time\n\nclass Queue:\n def __init__(self):\n self.sth=-1\n self.cond=threading.Condition()\n\n def put(self,sth):\n with self.cond:\n while self.sth != -1:\n self.cond.wait()\n self.sth=sth\n self.cond.notify()\n\n def take(self):\n with self.cond:\n while self.sth == -1:\n self.cond.wait()\n res=self.sth\n self.sth=-1\n self.cond.notify()\n return res\n\ndef producer(queue):\n for sth in range(5):\n queue.put(sth)\n print(\"队列中放入{}\".format(sth))\n time.sleep(1)\n\ndef consumer(queue):\n for i in range(5):\n res=queue.take()\n print(\"队列中取出{}\".format(res))\n time.sleep(1)\n\nqueue=Queue()\np = threading.Thread(target=producer,args=(queue,))\nc = threading.Thread(target=consumer,args=(queue,))\n\np.start()\nc.start()\n\n\n\n\n","repo_name":"hellozepp/gitbyhellozepp","sub_path":"python/iotest-pyy/iotestpy/parallel/thread/conditiontest1.py","file_name":"conditiontest1.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"73823467985","text":"from flask import redirect, render_template, request, session, abort\nfrom functools import wraps\nimport ast\n\nimport datetime\n\nAPP_DATE_FORMAT = '%d/%m/%Y'\n\ndef loginRequired(route):\n \"\"\" Verify if user is logged-in for pages where it is required. \"\"\"\n @wraps(route)\n def decorated_route(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/login')\n return route(*args, **kwargs)\n return decorated_route\n\ndef loggedInNotAllowed(route):\n \"\"\" Verify if a logged user is trying to access a page for not logged in users. \"\"\"\n @wraps(route)\n def decorated_route(*args, **kwargs):\n if not(session.get('user_id') is None):\n return redirect('/')\n return route(*args, **kwargs)\n return decorated_route\n\ndef checkAllowance(allowance):\n \"\"\"Verify if user has the allowance level for the selected page\"\"\"\n def innerDecor(route):\n @wraps(route)\n def decorated_route(*args, **kwargs):\n if session.get('user_info'):\n if session['user_info']['allowance'] >= allowance:\n return route(*args, **kwargs)\n else:\n return abort(403)\n else:\n return abort(403)\n return decorated_route\n return innerDecor\n\ndef renderEditorData(editor_data):\n WARNING_SYMBOL = ''\n editor_data = ast.literal_eval(editor_data)\n rendered_html = ''\n for item in editor_data:\n new_html = ''\n if not rendered_html == '':\n rendered_html = rendered_html + '\\n'\n if item['type'] == 'paragraph':\n new_html = '