diff --git "a/3493.jsonl" "b/3493.jsonl" new file mode 100644--- /dev/null +++ "b/3493.jsonl" @@ -0,0 +1,2105 @@ +{"seq_id":"37290288020","text":"import sys\nimport zeep.exceptions\n\nfrom vat_rpc import VatRpcClient\n\nSUCCESS = 0\nEXCEPTION = 1\nINCORRECT_ARGUMENTS = 2\n\n\ndef main(client=VatRpcClient()):\n if len(sys.argv) != 2:\n return 'You must inform exactly one argument', INCORRECT_ARGUMENTS\n\n vat_number = sys.argv[1]\n try:\n valid = client.is_valid(vat_number)\n if valid:\n return 'Valid', SUCCESS\n else:\n return 'Invalid', SUCCESS\n except zeep.exceptions.Error:\n return 'Exception', EXCEPTION\n\nif __name__ == '__main__':\n message, status = main()\n print(message)\n sys.exit(status)\n","repo_name":"diegocedrim/vat-validator","sub_path":"vat_validator.py","file_name":"vat_validator.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25830414575","text":"\n\nt = open(\"notes.txt\",\"r\")\nr = open(\"note_edit.txt\",\"w\")\n\nj = []\n\nfor line in t:\n j.append(line)\n if len(j) == 3:\n note, freq, wave = j\n note = note.split(\"/\")[0].strip()\n freq = freq.split(\".\")[0].strip()\n j = []\n print(note, freq)\n r.write(note+\" \"+freq+\"\\n\")\n","repo_name":"TheRealCubeAD/BANDO","sub_path":"Bennos Projekte/GetYourMedicine/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29577958489","text":"import csv \nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# first page (TASK: need to iterate)\nurl = \"https://www.sephora.com/search?keyword=skincare&pageSize=5¤tPage=709\"\n\n# opens up url in Google Chrome\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--no-sandbox\")\n# --incognito\ndriver = webdriver.Chrome(chrome_options=chrome_options)\ndriver.get(url)\n\n# closes Sephora sign-in pop up window\n\npopUp_close = driver.find_element_by_class_name(\"css-ll28en\").click()\ntime.sleep(3)\n\n\"\"\"\ndef next_button_exist():\n WebDriverWait(driver, 5)\n \n next_button_xpath = \"//*[name()='path' and @d='M57 142.5L9.5 95 0 104.5l38 38-38 38 9.5 9.5L57 142.5z']//../..\"\n if driver.find_element_by_xpath(next_button_xpath).get_attribute(\"disabled\"):\n print(\"Button disabled\")\n #print(\"This is the last page!\")\n #driver.quit()\n \n else: \n print(\"Button enabled\")\n #print(\"Next button found\")\n #driver.driver.find_elements_by_xpath(\"//*[name()='path' and @d='M57 142.5L9.5 95 0 104.5l38 38-38 38 9.5 9.5L57 142.5z']//../..\").click()\n ## driver.find_element_by_class_name(\"css-xswd36\").click()\n #get_info()\n\"\"\"\n\ndef next_button_exist():\n WebDriverWait(driver, 5)\n \n next_button_xpath = \"//*[name()='path' and @d='M57 142.5L9.5 95 0 104.5l38 38-38 38 9.5 9.5L57 142.5z']//..//../following-sibling::button[last()]\"\n if driver.find_element_by_xpath(next_button_xpath).get_attribute(\"disabled\"):\n print(\"Button disabled\")\n #print(\"This is the last page!\")\n time.sleep(3)\n driver.quit()\n \n else: \n print(\"Button enabled\")\n print(\"Next button found\")\n driver.find_element_by_xpath(next_button_xpath).click()\n ## 
driver.find_element_by_class_name(\"css-xswd36\").click()\n #get_info()\n time.sleep(3)\n get_info()\n \n\ndef get_info():\n\n soup = BeautifulSoup(driver.page_source, 'lxml') \n soup.prettify()\n\n # scroll\n body_elem = driver.find_element_by_tag_name(\"body\")\n\n no_of_pagedowns = 7\n\n while no_of_pagedowns:\n body_elem.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.2)\n no_of_pagedowns-=1\n\n list_links = [link.get_attribute('href') for link in driver.find_elements_by_xpath(\"//a[contains(@href,'/product/')]\")]\n\n checked_links = []\n for link in list_links:\n print(link)\n \n driver.get(link)\n driver.back()\n \"\"\"\n if link != set(checked_links):\n driver.get(link)\n driver.back()\n else: \n break\n \"\"\"\n\n # driver.find_element_by_class_name(\"css-1be47h1\").click()\n time.sleep(1)\n next_button_exist()\n \nget_info()\n\n\n","repo_name":"timmy224/Webscrapers","sub_path":"WebScraper_Sephora/Sephora_2.py","file_name":"Sephora_2.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"13117819043","text":"'''\n El director de una escuela está organizando un viaje de estudios, y requiere determinar cuánto debe cobrar a cada alumno y cuánto debe pagar\n a la compañía de viajes por el servicio. La forma de cobrar es la siguiente: si son 100 alumnos o más, el costo por cada alumno es de 65 euros;\n de 50 a 99 alumnos, el costo es de 70 euros, de 30 a 49, de 95 euros, y si son menos de 30, el costo de la renta del autobús es de 4000 euros,\n sin importar el número de alumnos. Realiza un programa que permita determinar el pago a la compañía de autobuses y lo que debe pagar cada alumno\n por el viaje.\n\n Autor: Andrés Castillero Moriana\n\n Fecha: 19/10/2021\n\n Algoritmo:\n Pedimos el número de alumnos.\n Si hay 100 o más alumnos, el precio será el número de estos por 65.\n Si no, se comprueba si es mayor o igual a 50 para multiplicar por 70 el número de alumnos.\n Si no, se comprueba si es mayor o igual a 30 para multiplicar por 95 el número de alumnos.\n Si no, el precio será de 4000€.\n\n Variables:\n prize: (float)\n studnt: (int)\n'''\n\nprint(\"Programa que calcula el precio del viaje y el precio individual de cada alumno.\")\n\nstudnt = int(input(\"Introduzca el número de alumnos que harán el viaje: \"))\n\nif studnt >= 100:\n prize = studnt*65\nelif studnt >= 50:\n prize = studnt*70\nelif studnt >= 30:\n prize = studnt*95\nelse:\n prize = 4000\n\nprint(\"El precio total es de\", prize, \"€ y el precio por alumno\", prize/studnt, \"€.\")","repo_name":"a19camoan/Ejercicios_Programacion_Python","sub_path":"EjerciciosAlternativasPython/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35630173491","text":"#!/bin/python3\n\nimport os\n\n# Complete the repeatedString function below.\ndef repeatedString(s, n):\n if \"a\" not in s:\n return 0\n str_length = len(s)\n if str_length == 1:\n return n\n return s.count(\"a\") * (n // str_length) + s[:(n % str_length)].count(\"a\")\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n n = int(input())\n\n result = repeatedString(s, n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"ayrusme/HackerRank","sub_path":"Interview Preparation 
Kit/warm_up/repeated_string.py","file_name":"repeated_string.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74480182294","text":"from Constants.Cards import cartas_aventura\n\nimport logging as log\n\nimport copy\nimport random\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, Update, ForceReply, Update\n\nfrom Wavelength.Boardgamebox.State import State\nfrom Boardgamebox.Board import Board as BaseBoard\n\nfrom Wavelength.Constants.Cards import WAVECARDS\n\nlog.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=log.INFO)\n\n\nlogger = log.getLogger(__name__)\n\nclass Board(BaseBoard):\n\tdef __init__(self, playercount, game):\n\t\tBaseBoard.__init__(self, playercount, game)\n\t\tself.wave_cards = random.sample(copy.deepcopy(WAVECARDS), len(WAVECARDS))\t\n\t\tself.discarded_wave_cards = []\n\t\t# Se seteara en difficultad el doom inicial\n\t\tself.state = State()\n\t\n\tdef print_board(self, bot, game):\n\t\t#import Arcana.Controller as ArcanaController\n\t\tboard = \"\"\n\t\tboard += \"--- *Estado de Partida Turno {}* ---\\n\".format(game.turncount)\t\t\n\t\tboard += \"--- *Orden de jugadores* ---\\n\"\n\t\tfor team in game.board.state.teams:\n\t\t\tif game.board.state.active_team == team:\n\t\t\t\tboard += \"*{} ({})*: \".format(team.name, team.score)\n\t\t\telse:\n\t\t\t\tboard += \"{} ({}): \".format(team.name, team.score)\n\t\t\tfor player in team.player_sequence:\n\t\t\t\tnombre = player.name.replace(\"_\", \" \")\n\t\t\t\tif team.active_player == player:\n\t\t\t\t\tboard += \"*\" + nombre + \"*\" + \" \" + u\"\\u27A1\\uFE0F\" + \" \"\n\t\t\t\telse:\n\t\t\t\t\tboard += nombre + \" \" + u\"\\u27A1\\uFE0F\" + \" \"\n\t\t\tboard = board[:-3]\n\t\t\tboard += u\"\\U0001F501\"\n\t\t\tboard += \"\\n\\n\"\n\t\tbot.send_message(game.cid, board, parse_mode=ParseMode.MARKDOWN)\n\t\n\tdef new_wave_card(self):\n\t\t# I discard the previous wave_card\n\t\tif self.state.active_wave_card != None:\n\t\t\tself.discard_wave_card()\n\t\t# Set next wave_card, add discard and shuffle the deck\n\t\tif len(self.wave_cards) == 0:\n\t\t\tself.wave_cards.extend(self.discarded_wave_cards)\n\t\t\tself.discarded_wave_cards = []\n\t\t\trandom.shuffle(self.wave_cards)\n\t\n\t\tself.state.active_wave_card = self.wave_cards.pop()\n\t\t# Set current point in wavelength Random (0 - 180)\n\t\tself.state.wavelength = random.randint(0, 180)\n\t\tself.state.reference = None\n\t\tself.state.team_choosen_grade = -1\n\t\tself.state.opponent_team_choosen_left_right = -1\n\t\t\n\tdef discard_wave_card(self):\n\t\tself.discarded_wave_cards.append(self.state.active_wave_card)\n\t\tself.state.active_wave_card = None\n","repo_name":"leviatas/MultigamesV2","sub_path":"Wavelength/Boardgamebox/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"26307623343","text":"from __future__ import division, print_function, unicode_literals\n\n# This code is so you can run the samples without installing the package\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n#\n\ntestinfo = \"t 0.1, s, t 2, s, t 4, s, q\"\ntags = \"schedule, position\"\n\nimport cocos\nfrom cocos.director import director\nfrom cocos.sprite import Sprite\nimport pyglet\nimport random\nfrom math import sin, cos\n\nclass 
TestLayer(cocos.layer.Layer):\n def __init__(self):\n super( TestLayer, self ).__init__()\n\n self.sprite = Sprite('grossini.png')\n self.add( self.sprite )\n\n w,h = director.get_window_size()\n self.radius = h/3.0\n self._elapsed = 0.0\n self.schedule( self.change_sprite_pos )\n self.change_sprite_pos(0.0)\n\n def change_sprite_pos(self, dt):\n self._elapsed += dt\n w,h = director.get_window_size()\n self.sprite.position = ( w//2 + self.radius * cos(self._elapsed * 1.5),\n h//2 + self.radius * sin(self._elapsed * 1.5))\n\ndescription = \"\"\"\nGrossini sprite will circle around the center of the screen\n\"\"\"\n\ndef main():\n print(description)\n director.init()\n test_layer = TestLayer ()\n main_scene = cocos.scene.Scene (test_layer)\n director.run (main_scene)\n\nif __name__ == '__main__':\n main()\n","repo_name":"los-cocos/cocos","sub_path":"test/test_schedule.py","file_name":"test_schedule.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":631,"dataset":"github-code","pt":"67"} +{"seq_id":"37239425570","text":"from django.conf.urls import url\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n url(r'^upload_img/', views.upload_img, name='upload_img'),\r\n url(r'^upload_check/', views.upload_check, name='upload_check'),\r\n url(r'^get_imgs/?', views.get_imgs, name='get_imgs'),\r\n url(r'^get_img_stylized/?', views.get_img_stylized, name='get_img_stylized'),\r\n url(r'^get_tags/?', views.get_tags, name='get_tags'),\r\n url(r'^get_types/?', views.get_types, name='get_types'),\r\n]\r\n\r\n\r\n","repo_name":"tjsoul32/sample_gallery","sub_path":"gallery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18620106302","text":"#########\n#IMPORTS#\n#########\n\nimport torch\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\n#Dataset (with Transforms)\nfrom utils.Create_Datasets import create_datasets\n#Model\nfrom utils.Baseline_Model import Baseline_Model\nimport pandas as pd\nfrom tqdm import tqdm\nfrom utils.Gaussian_Map2 import gaussian_map2\n\n\n#####################\n#MODEL AND TEST DATA#\n#####################\n\ngpu = True\n#module for center bias\nclass Center_Bias(nn.Module):\n def __init__(self, gpu=False):\n super(Center_Bias, self).__init__()\n self.sigma = nn.Parameter(torch.Tensor([100]), requires_grad=True)\n self.w = nn.Parameter(torch.Tensor([1]), requires_grad=True)\n self.gpu = gpu\n\n def forward(self, x):\n x = gaussian_map(x, self.sigma, self.w, gpu) * x\n return x\n\nclass Center_Bias2(nn.Module):\n def __init__(self, gpu=False):\n super(Center_Bias2, self).__init__()\n self.a = nn.Parameter(torch.Tensor([1]), requires_grad=True)\n self.gpu = gpu\n\n def forward(self, x):\n #concatenate along feature channel dimension\n x = torch.cat((x,gaussian_map2(x, self.a, self.gpu)),1)\n return x\n\n\nclass TestNet(nn.Module):\n\n def __init__(self, gpu=False):\n super(TestNet, self).__init__()\n #3 input image channels (color-images), 64 output channels, 3x3 square convolution kernel\n #padding to keep dimensions of output at 100x100\n self.conv1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(64, 128, 3, stride=1, padding=1)\n self.conv2_bn = nn.BatchNorm2d(128)\n self.conv3 = nn.Conv2d(128, 1, 3, stride=1, padding=1)\n 
self.conv3_bn = nn.BatchNorm2d(1)\n self.conv4 = nn.Conv2d(1, 1, 1, stride=1, padding=0)\n self.center_bias2 = Center_Bias2(gpu)\n self.conv5 = nn.Conv2d(2, 1, 1)\n self.gpu = gpu\n if gpu:\n if torch.cuda.is_available():\n device = torch.device(\"cuda\")\n self.cuda()\n \n def forward(self, x):\n #x = torch.rand(128,3,100,100)\n #x = torch.ones(128,3,100,100)\n #x = x.to('cuda')\n #print(\"input sum at beginning of forward pass: {}\".format(torch.sum(x)))\n x = functional.relu(self.conv1(x))\n #print(\"input sum after first conv and relu: {}\".format(torch.sum(x)))\n x = self.conv1_bn(x)\n #print(\"input sum after first batch normalization: {}\".format(torch.sum(x)))\n x = functional.relu(self.conv2(x))\n #print(\"input sum after second conv and relu: {}\".format(torch.sum(x)))\n x = self.conv2_bn(x)\n #print(\"output shape: {}\".format(x.size()))\n #print(\"input sum after second batch normalization: {}\".format(torch.sum(x)))\n x = functional.relu(self.conv3(x))\n #print(\"input sum after third conv and relu: {}\".format(torch.sum(x)))\n x = self.conv3_bn(x)\n #print(\"input sum after third batch normalization: {}\".format(torch.sum(x)))\n x = self.conv4(x)\n #print(\"input sum after fourth conv: {}\".format(torch.sum(x)))\n x = self.center_bias2(x)\n #print(\"input sum after center bias: {}\".format(torch.sum(x)))\n x = self.conv5(x)\n #print(\"input sum after last conv: {}\".format(torch.sum(x)))\n return x\n\n#initilaize the NN\nmodel = TestNet(gpu=True)\n\n\n#load the pretrained parameters\nname = \"checkpoint_batch_size_128_lr_0.1.pt\"\nmodel.load_state_dict(torch.load(name))\n\n#create test data\nbatch_size = 1\n_, _, test_loader = create_datasets(batch_size)\n\n#create loss-functions for likelihood-computation\nnll_model = nn.PoissonNLLLoss(log_input=True, full=True, reduction='none')\nnll_null = nn.PoissonNLLLoss(log_input=False, full=True, reduction='none')\nnll_saturated = nn.PoissonNLLLoss(log_input=False, full=True, reduction='none')\n\n#lists to store results\nlhs_model = []\nlhs_null = []\nlhs_saturated = []\n\n#go through test data: calculate likelihood for each of the three models per image and append to respective lists\nt = tqdm(iter(test_loader), desc=\"[Computing Likelihoods\")\nfor i, example in enumerate(t): #start at index 0\n data = example[\"image\"]\n target = example[\"fixations\"]\n data = data.to('cuda')\n target = target.to('cuda')\n \n output = model(data)\n\n #############\n #LIKELIHOODS#\n #############\n\n #calculate likelihood for each model per image\n\n\n ###########\n #Our Model#\n ###########\n\n likelihood_model = 0\n target_fl = target.view(-1)\n output_fl = output.view(-1)\n for i in range(output_fl.size()[0]):\n likelihood_model += torch.exp(-1 * nll_model(output_fl[i], target_fl[i])).item()\n lhs_model.append(likelihood_model)\n\n\n ############\n #Null Model#\n ############\n\n likelihood_null = 0\n target_fl = target.view(-1)\n null_prediction = torch.sum(target) / 10000\n for elem in target_fl:\n likelihood_null += torch.exp(-1 * nll_null(null_prediction, elem)).item()\n lhs_null.append(likelihood_null)\n\n #################\n #Saturated Model#\n #################\n\n likelihood_saturated = 0\n target_fl = target.view(-1)\n #per item, as we calcualte nll with the loss-function and need to go to positive likelihood before summing\n for elem in target_fl:\n likelihood_saturated += torch.exp(-1 * nll_saturated(elem, elem)).item()\n lhs_saturated.append(likelihood_saturated)\n \nresults = pd.DataFrame({\"lhs_null\": lhs_null, \"lhs_model\": 
lhs_model, \"lhs_saturated\": lhs_saturated})\nresults.to_csv(\"likelihoods_center_bias.csv\", header=True, index=False)","repo_name":"Mathis1993/FixationSequences","sub_path":"baseline_model/b_pnll_loss/Evaluation_Metric_Likelihood.py","file_name":"Evaluation_Metric_Likelihood.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16560623471","text":"import numpy as np\nimport sys\nfrom sklearn.decomposition import PCA\n\nimport pandas as pd\nimport torch\nimport sys\n\nimport torch.nn as nn\n\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\n\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nimport shutil\nimport os\n\ndef extract_covariates(df,static_only=False):\n df_clean=df.sort_values(by=\"UNIQUE_ID\")\n X=df_clean.drop_duplicates(subset=[\"UNIQUE_ID\"],keep=\"first\")[[\"UNIQUE_ID\",\"BIRTH_DATE\",\"first_edss\",\"first_visit_date\",\"Time_first2last\",\"gender_class\",\"max_previous_edss\",\"previous_edss\",\"CIS\",\"PP\",\"PR\",\"RR\",\"SP\",\"onset_date\"]]\n X[\"duration\"]=X[\"Time_first2last\"].dt.days/365\n X[\"Age_at_onset\"]=(X[\"onset_date\"]-X[\"BIRTH_DATE\"]).dt.days/365\n X[\"duration_at_T0\"]=(X[\"first_visit_date\"]-X[\"onset_date\"]).dt.days/365\n X[\"edss_diff\"]=X[\"previous_edss\"]-X[\"first_edss\"]\n #X=X[[\"edss_diff\",\"duration\",\"gender_class\",'max_previous_edss',\"Age_at_onset\",\"duration_at_T0\",\"CIS\",\"PP\",\"PR\",\"RR\",\"SP\"]].values\n if static_only:\n X=X[[\"previous_edss\",\"gender_class\",\"CIS\",\"PP\",\"PR\",\"RR\",\"SP\"]].values\n else:\n X=X[[\"edss_diff\",\"gender_class\",'max_previous_edss',\"CIS\",\"PP\",\"PR\",\"RR\",\"SP\"]].values\n return(X)\n\ndef train_val_test_split(X,sizes=(0.2,0.1)):\n train_val_idx, test_idx=train_test_split(X,test_size=sizes[1])\n t_idx, v_idx = train_test_split(np.arange(train_val_idx.shape[0]),test_size=sizes[0])\n train_idx=train_val_idx[t_idx]\n val_idx=train_val_idx[v_idx]\n return(train_idx, val_idx, test_idx)\n\ndef train_test_model(L2_param,latent_path,tags,train_idx,test_idx):\n device=torch.device(\"cuda:0\")\n latents_train,latents_test=PCA_macau_samples(dir_path=latent_path,idx_train=train_idx,idx_val=test_idx)\n data_test=latent_dataset(latents_test,tags[test_idx])\n data_train=latent_dataset(latents_train,tags[train_idx])\n mod=MLP_class_mod(data_train.get_dim())\n dataloader=DataLoader(data_train,batch_size=5000,shuffle=True,num_workers=2)\n\n criterion=nn.BCEWithLogitsLoss()\n for epoch in range(100):\n if epoch<40:\n l_r=0.01\n elif epoch<60:\n l_r=0.005\n else:\n l_r=0.0005\n optimizer=torch.optim.Adam(mod.parameters(),lr=l_r,weight_decay=L2_param)\n loss=0\n for idx,sampled_batch in enumerate(dataloader):\n optimizer.zero_grad()\n target=sampled_batch[1]\n preds=mod.fwd(sampled_batch[0])\n loss=criterion(preds,target)\n loss.backward()\n optimizer.step()\n\n with torch.no_grad():\n target=data_test.tags\n preds=F.sigmoid(mod.fwd(data_test.latents))\n loss_test=roc_auc_score(target,preds)\n\n print(f\"Loss on test is : {loss_test}\")\n\n torch.save(mod.state_dict(),\"complete_model.pt\")\n\ndef get_best_params(path):\n dir_path=\"/home/edward/ray_results/\"+path\n dirs= os.listdir(dir_path)\n for i,dir in enumerate(dirs):\n file_name=os.path.join(os.path.join(dir_path,dir),\"progress.csv\")\n df=pd.read_csv(file_name)\n if i==0:\n 
results=df.tail(1)\n else:\n results=results.append(df.tail(1))\n\n best_run=results.loc[results[\"mean_accuracy\"]==results[\"mean_accuracy\"].max()]\n\n best_config=eval(best_run[\"config\"].iloc[0])\n\n best_L2=best_config[\"L2\"]\n return(best_L2)\n\n\ndef PCA_macau_samples(dir_path,idx_train=None,idx_val=None,num_samples=50,n_dim=None,fold=None):\n sum_sim=np.load(dir_path+\"sum_sim.npy\").item()\n N_latents=sum_sim[\"N_latents\"]\n N_samples=sum_sim[\"N_samples\"]\n\n if fold is not None:\n prefix=f\"_macau_fold_{fold}\"\n else:\n prefix=\"_macau\"\n\n concat_lat=np.loadtxt(dir_path+str(N_latents)+prefix+\"-sample1-U1-latents.csv\",delimiter=\",\")\n\n print(\"Concat sample\")\n for n in np.linspace(10,N_samples,num_samples,dtype='int'):\n concat_lat=np.concatenate((concat_lat,np.loadtxt(dir_path+str(N_latents)+prefix+\"-sample%d-U1-latents.csv\"%n,delimiter=\",\")))\n print(\"Done\")\n if idx_train is not None:\n concat_subset=concat_lat[:,idx_train]\n else:\n concat_subset=concat_lat\n\n pca=PCA()\n pca.fit(concat_subset.T)\n #print(np.cumsum(pca.explained_variance_ratio_))\n\n if n_dim is None:\n n_kept=np.min(np.where(np.cumsum(pca.explained_variance_ratio_)>0.9))\n else:\n n_kept=n_dim\n pca=PCA(n_components=n_kept)\n pca.fit(concat_subset.T)\n\n pca_latents=pca.transform(concat_lat.T)\n\n np.save(dir_path+\"pca_latents\",pca_latents)\n print(n_kept)\n\n return(pca_latents[idx_train,:],pca_latents[idx_val,:])\n\ndef box_plots_comparisons(AUC_vecs,names):\n data=np.vstack(AUC_vecs)\n fig, ax = plt.subplots()\n ax.boxplot(data.T)\n plt.title(\"AUC Performance Comparison for EDSS worsening\")\n ax.set_xticklabels(names)\n plt.savefig(\"./comparisons_results/box_plot.pdf\")\n\nclass MLP_class_mod(nn.Module):\n def __init__(self,input_dim):\n super(MLP_class_mod,self).__init__()\n self.layer_1=nn.Linear(input_dim,100)\n #self.layer_1bis=nn.Linear(200,70)\n self.layer_2=nn.Linear(100,20)\n self.layer_3=nn.Linear(20,1)\n def fwd(self,x):\n out=F.relu(self.layer_1(x))\n #out=F.relu(self.layer_1bis(out))\n out=F.relu(self.layer_2(out))\n out=self.layer_3(out).squeeze(1)\n return(out)\n\nclass latent_and_cov_dataset(Dataset):\n def __init__(self,latents,tags,covs):\n self.lats=torch.Tensor(latents)\n self.covs=torch.Tensor(covs)\n print(self.covs[:2,:])\n self.latents=torch.cat((self.lats,self.covs),1)\n self.tags=torch.from_numpy(tags).float()\n def __len__(self):\n return(self.latents.size(0))\n def __getitem__(self,idx):\n return([self.latents[idx,:],self.tags[idx]])\n def get_dim(self):\n return self.latents.size(1)\n\nclass latent_dataset(Dataset):\n def __init__(self,latents,tags):\n self.latents=torch.Tensor(latents)\n self.tags=torch.from_numpy(tags).float()\n def __len__(self):\n return(self.latents.size(0))\n def __getitem__(self,idx):\n return([self.latents[idx,:],self.tags[idx]])\n def get_dim(self):\n return self.latents.size(1)\n","repo_name":"edebrouwer/msbase2020","sub_path":"ms/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11719433237","text":"n = int(input().strip())\n\nitems = set()\nocc = {}\nfor i in range(n):\n number = input().strip()\n items.add(number)\n \n occ[number] = [0 for x in range(10)] \n for x in range(10):\n occ[number][x] = number.count(str(x))\n\nn = len(items)\nnumbers = list(items)\n\ng = [] \nk = 0\nmaxs = 0 \nfor i in range(n):\n check = True\n for j in range(len(g)):\n if i in g[j]:\n check = False\n \n 
if check:\n        g.append([])\n        k = len(g) - 1\n        g[k].append(i)\n        for j in range(i+1, n):\n            if occ[numbers[i]] == occ[numbers[j]]:\n                g[k].append(j) \n        if len(g[k]) > maxs:\n            maxs = len(g[k])\n\nprint(len(g))\n\nmaxg = [] \nfor i in range(len(g)):\n    if len(g[i]) == maxs:\n        order = []\n        for x in g[i]:\n            order.append(numbers[x])\n        maxg.append(sorted(order, reverse=True))\n    \nmaxg.sort()\nfor i in range(len(maxg)):\n    print(\" \".join(maxg[i]))\n","repo_name":"lorosanu/online-coding","sub_path":"Hackerrank/contests/womens_code_sprint/secret_message_group.py","file_name":"secret_message_group.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31658547200","text":"import json\n\nfrom pymongo import MongoClient\n\nclient = MongoClient(\"localhost\",27017)\ndb = client.hire_scikey\n\nall_data = []\nfile = open(\"new.json\",\"w+\")\nfor category_detail in db.category_details.find():\n    all_data.append(category_detail)\n\nfile.write(json.dumps(all_data, default=str))\n\nfile.close()","repo_name":"Kavyeshs41/pythonic_ways","sub_path":"JsonExporter/mongoDataDumpIntoFile.py","file_name":"mongoDataDumpIntoFile.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27880899498","text":"'''\nWe're going to output this data as a csv with id, value so we can import it into Unreal as a float curve\n'''\nimport os\n\ninputFile = 'D:\\Advent_of_Code_2020\\ozzmeister00\\PyAdventOfCode2020\\inputs\\day01.txt'\noutputFile = 'D:\\Advent_of_Code_2020\\ozzmeister00\\PyAdventOfCode2020\\inputs\\day01.csv'\n\nwith open(inputFile, 'r') as fh:\n    data = fh.read()\n    lines = data.split('\\n')\n\n    outData = ''\n\n    for i, line in enumerate(lines):\n        outData += str(i) + ',' + line + '\\n'\n\n    with open(outputFile, 'w') as fh:\n        fh.write(outData)\n","repo_name":"techartorg/Advent_of_Code_2020","sub_path":"ozzmeister00/PyAdventOfCode2020/inputProcessors/processDay01Input.py","file_name":"processDay01Input.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"24806027604","text":"#!/usr/bin/python3\n\"\"\"0-prime_game module\n\"\"\"\n\n\ndef is_prime(n):\n    \"\"\"is_prime function verifies if n is a prime number\n    \"\"\"\n    if n < 2:\n        return False\n    i = 2\n    while i * i <= n:\n        if n % i == 0:\n            return False\n        i += 1\n    return True\n\n\ndef get_index_of_multiples(n, nums):\n    \"\"\"get_multiples of n in the nums list\n    \"\"\"\n    return [i for i, x in enumerate(nums) if x is not None and x % n == 0]\n\n\ndef find_prime(list_nums):\n    \"\"\"find_prime is a function to find a prime number in a list\n    Returns:\n        - index of the number or None\n    \"\"\"\n    for i, num in enumerate(list_nums):\n        if is_prime(num):\n            return i\n\n\ndef isWinner(x, nums):\n    \"\"\"isWinner function\n    Args:\n        - x: number of rounds\n        - nums: is an array of n elements\n    Return:\n        - the name of the player who won or None if it's even\n    \"\"\"\n    if x < 1:\n        return None\n    maria_score = 0\n    ben_score = 0\n    who_plays = 1  # maria is odd numbers, ben is even numbers\n    # go through number of rounds\n    for i in range(x):\n        round_numbers = list(range(1, nums[i] + 1))\n        while len(round_numbers) != 1:\n            index = find_prime(round_numbers)\n            if index is not None:\n                # remove prime and it's multiples\n                prime = round_numbers.pop(index)\n                multiple_indexes = get_index_of_multiples(prime, 
round_numbers)\n # transform multiples of the prime number into None\n for i in multiple_indexes:\n round_numbers[i] = None\n # remove None from the list\n res = list(filter(\n lambda item: item is not None, round_numbers))\n round_numbers = res\n else:\n break\n who_plays += 1\n # it means that Maria was the last one who could play\n if who_plays % 2 == 0:\n maria_score += 1\n # it means that Ben was the last one who could play\n elif who_plays % 2 == 1:\n ben_score += 1\n # restart for test purposes\n who_plays = 1\n if maria_score == ben_score:\n return None\n if maria_score > ben_score:\n return 'Maria'\n return 'Ben'\n","repo_name":"Uss-Momas/alx-interview","sub_path":"0x0A-primegame/0-prime_game.py","file_name":"0-prime_game.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37162701387","text":"from tkinter import *\nfrom PIL import ImageTk\nfrom tkinter import ttk, messagebox\nimport mysql.connector\n\nclass UpdateEmp:\n def __init__(self, win, entry_values):\n self.win = win\n self.win.geometry(\"540x500\")\n self.win.title(\"Update Employee Details\")\n self.win.configure(bg=\"#ffffff\")\n self.win.focus_force()\n\n self.empid = IntVar()\n self.name = StringVar()\n self.gender = StringVar()\n self.mail = StringVar()\n self.dob = StringVar()\n self.phone = StringVar()\n \n self.doj = StringVar()\n self.profile = StringVar()\n self.dept = StringVar()\n self.salary = DoubleVar()\n self.conveyance = DoubleVar()\n\n # Set Entry Variables Before Updation\n self.empid.set(entry_values[0])\n self.name.set(entry_values[1])\n self.gender.set(entry_values[3])\n self.mail.set(entry_values[5])\n self.dob.set(entry_values[2])\n self.phone.set(entry_values[4])\n \n self.doj.set(entry_values[6])\n self.profile.set(entry_values[7])\n self.dept.set(entry_values[8])\n self.salary.set(entry_values[9])\n self.conveyance.set(entry_values[10])\n\n title = Label(self.win,text=\"Update Employee Details\", font=('Calibari',11),fg=\"#ffffff\",bg='#0d6efd').place(x=0,y=0,height=30,width=540)\n\n #Personal Details Form\n prsnl_detail_frame = LabelFrame(self.win, text=\"Personal Details\",font=('Calibari',10),bg=\"#ffffff\")\n prsnl_detail_frame.place(x=20,y=50,width=500,height=150)\n\n emp_id_label = Label(prsnl_detail_frame,font=('Calibari',10), text=\"Employee ID\",bg=\"#ffffff\").place(x=10,y=20,height=20)\n emp_id_entry = Entry(prsnl_detail_frame,textvariable=self.empid,font=('Calibari',10),bg=\"#fffff0\").place(x=95,y=20,width=100,height=20)\n\n name_label = Label(prsnl_detail_frame,font=('Calibari',10), text=\"Name\",bg=\"#ffffff\").place(x=210,y=20,height=20)\n name_entry = Entry(prsnl_detail_frame,textvariable=self.name,font=('Calibari',10),bg=\"#fffff0\").place(x=255,y=20,width=225,height=20)\n\n gender_label = Label(prsnl_detail_frame,font=('Calibari',10), text=\"Gender\",bg=\"#ffffff\").place(x=10,y=55,height=20)\n gender_entry = Entry(prsnl_detail_frame,textvariable=self.gender,font=('Calibari',10),bg=\"#fffff0\").place(x=95,y=55,width=100,height=20)\n\n email_label = Label(prsnl_detail_frame,font=('Calibari',10), text=\"Email\",bg=\"#ffffff\").place(x=210,y=55,height=20)\n email_entry = Entry(prsnl_detail_frame,textvariable=self.mail,font=('Calibari',10),bg=\"#fffff0\").place(x=255,y=55,width=225,height=20)\n\n dob_label = Label(prsnl_detail_frame,font=('Calibari',10), text=\"Date Of Birth\",bg=\"#ffffff\").place(x=10,y=90,height=20)\n dob_entry = 
Entry(prsnl_detail_frame,textvariable=self.dob,font=('Calibari',10),bg=\"#fffff0\").place(x=95,y=90,width=100,height=20)\n\n phone_label = Label(prsnl_detail_frame,font=('Calibari',10), text=\"Mobile No.(add +91)\",bg=\"#ffffff\").place(x=210,y=90,height=20)\n pnone_entry = Entry(prsnl_detail_frame,textvariable=self.phone,font=('Calibari',10),bg=\"#fffff0\").place(x=340,y=90,width=140,height=20)\n\n\n #Comapany Details Form\n comp_detail_frame = LabelFrame(self.win, text=\"Company Details\",font=('Calibari',10),bg=\"#ffffff\")\n comp_detail_frame.place(x=20,y=220,width=500,height=150)\n print(type(comp_detail_frame))\n\n doj_label = Label(comp_detail_frame,font=('Calibari',10), text=\"Date Of Joining\",bg=\"#ffffff\").place(x=10,y=20,height=20)\n doj_entry = Entry(comp_detail_frame,textvariable=self.doj,font=('Calibari',10),bg=\"#fffff0\").place(x=110,y=20,width=100,height=20)\n\n profile_label = Label(comp_detail_frame,font=('Calibari',10), text=\"Work Profile\",bg=\"#ffffff\").place(x=220,y=20,height=20)\n profile_entry = Entry(comp_detail_frame,textvariable=self.profile,font=('Calibari',10),bg=\"#fffff0\").place(x=305,y=20,width=175,height=20)\n\n salary_label = Label(comp_detail_frame,font=('Calibari',10), text=\"Salary(in Rs)\",bg=\"#ffffff\").place(x=10,y=56,height=20)\n salary_entry = Entry(comp_detail_frame,textvariable=self.salary,font=('Calibari',10),bg=\"#fffff0\").place(x=110,y=56,width=100,height=20)\n\n dept_label = Label(comp_detail_frame,font=('Calibari',10), text=\"Department\",bg=\"#ffffff\").place(x=220,y=56,height=20)\n dept_entry = Entry(comp_detail_frame,textvariable=self.dept,font=('Calibari',10),bg=\"#fffff0\").place(x=305,y=56,width=175,height=20)\n\n conveyance_label = Label(comp_detail_frame,font=('Calibari',10), text=\"Conveyance(in Rs)\",bg=\"#ffffff\").place(x=10,y=92,height=20)\n conveyance_entry = Entry(comp_detail_frame,textvariable=self.conveyance,font=('Calibari',10),bg=\"#fffff0\").place(x=130,y=92,width=100,height=20)\n\n submit_btn = Button(self.win, text=\"Update\", font=('Cilibari', 10),bg=\"#0d6efd\",command=self.UpdateEmpData).place(x=210,y=390,height=30,width=120)\n\n date_label = Label(self.win,font=('Calibari',10), text=\"Date Format :- YYYY-MM-DD\",bg=\"#ffffff\").place(x=20,y=450)\n \n self.win.mainloop()\n\n #==========================================================================================================#\n # functions\n\n def UpdateEmpData(self):\n try:\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"root\",\n database=\"empmangsys\"\n )\n mycursor = mydb.cursor()\n\n if self.empid.get() == \"\" or self.name.get() == \"\" or self.gender.get() == \"\" or self.mail.get() == \"\" or self.dob.get() == \"\" or self.phone.get() == \"\" or self.doj.get() == \"\" or self.profile.get() == \"\" or self.dept.get() == \"\" or self.salary.get() == \"\" or self.conveyance.get() == \"\":\n messagebox.showerror(\"Error\", \"Fill All Required Details\", parent=self.win)\n else:\n query = \"SELECT * FROM employee WHERE EMPID = %s;\"\n value = (self.empid.get(),)\n\n mycursor.execute(query, value)\n row = mycursor.fetchone()\n print(row)\n \n if row == None:\n messagebox.showerror(\"Error\", \"Invalid Employee ID! 
Please Check and Try Again.\")\n return\n\n '''query = \"UPDATE employee SET Salary=35000.00 WHERE EMPID=1;\"'''\n\n '''\n Date Format : YYYY-MM-DD\n '''\n\n query = \"UPDATE employee SET Name = %s, DOB = %s, Gender = %s, Mobile = %s, Email = %s, DOJ = %s, Profile = %s, Department = %s, Salary = %s, Conveyance = %s WHERE EMPID = %s;\"\n vals = (self.name.get(), self.dob.get(), self.gender.get(), self.phone.get(), self.mail.get(), self.doj.get(), self.profile.get(), self.dept.get(),self.salary.get(),self.conveyance.get(),self.empid.get(),)\n\n mycursor.execute(query, vals)\n\n query1 = \"UPDATE salary SET Basic_Salary = %s, Conveyance_Rs = %s WHERE EMPID = %s;\"\n vals1 = (self.salary.get(),self.conveyance.get(),self.empid.get(),)\n\n mycursor.execute(query1, vals1)\n\n mydb.commit()\n\n messagebox.showinfo(\"Done\", \"Employee Data Updated Successfully\", parent=self.win)\n\n except Exception as Ex:\n messagebox.showerror(\"Error\", str(Ex))\n\nif __name__ == '__main__':\n root = Tk()\n new_win = UpdateEmp(root)","repo_name":"ashutosh288/EmpMonitor","sub_path":"update_emp_details.py","file_name":"update_emp_details.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16020495675","text":"\"\"\"Wrapped RandomForest for tabular datasets.\"\"\"\n\nimport logging\n\nfrom copy import copy\nfrom typing import Dict\nfrom typing import Tuple\nfrom typing import Union\n\nimport numpy as np\n\nfrom pandas import Series\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom ..pipelines.selection.base import ImportanceEstimator\nfrom ..utils.logging import get_stdout_level\nfrom ..validation.base import TrainValidIterator\nfrom .base import TabularDataset\nfrom .base import TabularMLAlgo\nfrom .tuning.base import Uniform\n\n\nlogger = logging.getLogger(__name__)\n\nRFModel = Union[RandomForestClassifier, RandomForestRegressor]\n\n\nclass RandomForestSklearn(TabularMLAlgo, ImportanceEstimator):\n \"\"\"Random forest algorigthm from Sklearn.\n\n default_params: All available parameters listed in lightgbm documentation:\n - https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html\n - https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html\n\n freeze_defaults:\n - ``True`` : params may be rewritten depending on dataset.\n - ``False``: params may be changed only manually or with tuning.\n timer: :class:`~lightautoml.utils.timer.Timer` instance or ``None``.\n \"\"\"\n\n _name: str = \"RFSklearn\"\n\n _default_params = {\n \"bootstrap\": True,\n \"ccp_alpha\": 0.0,\n \"max_depth\": None,\n \"max_features\": \"auto\",\n \"max_leaf_nodes\": None,\n \"max_samples\": None,\n \"min_samples_leaf\": 1,\n \"min_samples_split\": 2,\n \"min_weight_fraction_leaf\": 0.0,\n \"n_estimators\": 250,\n \"n_jobs\": 4,\n \"oob_score\": False,\n \"random_state\": 42,\n \"warm_start\": False,\n }\n\n def _infer_params(self) -> dict:\n \"\"\"Infer parameters for RF.\n\n Returns:\n Tuple (params, verbose).\n \"\"\"\n params = copy(self.params)\n\n # Logging\n if \"verbose\" not in params:\n level = get_stdout_level()\n if level <= logging.INFO:\n params[\"verbose\"] = 0\n elif level == logging.DEBUG:\n params[\"verbose\"] = 2\n else:\n params[\"verbose\"] = 1\n\n return params\n\n def init_params_on_input(self, train_valid_iterator: TrainValidIterator) -> dict:\n \"\"\"Get model parameters 
depending on dataset parameters.\n\n Args:\n train_valid_iterator: Classic cv-iterator.\n\n Returns:\n Parameters of model.\n \"\"\"\n rows_num = len(train_valid_iterator.train)\n features_num = len(train_valid_iterator.features)\n task = train_valid_iterator.train.task.name\n suggested_params = copy(self.default_params)\n\n if \"criterion\" not in suggested_params:\n suggested_params[\"criterion\"] = \"mse\" if ((task == \"reg\") or (task == \"multi:reg\")) else \"gini\"\n\n if self.freeze_defaults:\n # if user change defaults manually - keep it\n return suggested_params\n\n # just for speed training\n if rows_num <= 10000:\n suggested_params[\"n_estimators\"] = 500\n else:\n suggested_params[\"n_estimators\"] = 250\n\n # say no to overfitting\n if rows_num > 10000:\n suggested_params[\"min_samples_leaf\"] = 8 if ((task == \"reg\") or (task == \"multi:reg\")) else 16\n else:\n suggested_params[\"min_samples_leaf\"] = 32 if ((task == \"reg\") or (task == \"multi:reg\")) else 64\n\n # how many features to check\n if features_num > 50:\n suggested_params[\"max_features\"] = \"sqrt\"\n elif features_num > 10:\n suggested_params[\"max_features\"] = 0.75\n else:\n suggested_params[\"max_features\"] = 1.0\n\n return suggested_params\n\n def _get_default_search_spaces(self, suggested_params: Dict, estimated_n_trials: int) -> Dict:\n \"\"\"Sample hyperparameters from suggested.\n\n Args:\n suggested_params: Dict with parameters.\n estimated_n_trials: Maximum number of hyperparameter estimations.\n\n Returns:\n dict with sampled hyperparameters.\n\n \"\"\"\n optimization_search_space = {}\n\n optimization_search_space[\"min_samples_leaf\"] = Uniform(\n low=1,\n high=256,\n q=1,\n )\n\n optimization_search_space[\"max_depth\"] = Uniform(\n low=1,\n high=10,\n q=1,\n )\n\n return optimization_search_space\n\n def fit_predict_single_fold(self, train: TabularDataset, valid: TabularDataset) -> Tuple[RFModel, np.ndarray]:\n \"\"\"Implements training and prediction on single fold.\n\n Args:\n train: Train Dataset.\n valid: Validation Dataset.\n\n Returns:\n Tuple (model, predicted_values)\n \"\"\"\n params = self._infer_params()\n\n task = self.task.name\n\n if (task == \"reg\") or (task == \"multi:reg\"):\n model = RandomForestRegressor(**params)\n model.fit(train.data, train.target, train.weights)\n val_pred = model.predict(valid.data)\n else:\n model = RandomForestClassifier(**params)\n model.fit(train.data, train.target, train.weights)\n val_pred = model.predict_proba(valid.data)\n if task == \"binary\":\n val_pred = val_pred[:, 1]\n elif task == \"multilabel\":\n val_pred = np.moveaxis(np.array(val_pred)[:, :, 1], 1, 0)\n\n metric = self.task.losses[\"sklearn\"].metric_func\n score = metric(valid.target, val_pred, valid.weights)\n logger.info2(\"Score for RF model: {:5f}\".format(score))\n\n return model, val_pred\n\n def predict_single_fold(self, model: RFModel, dataset: TabularDataset) -> np.ndarray:\n \"\"\"Predict target values for dataset.\n\n Args:\n model: Lightgbm object.\n dataset: Test Dataset.\n\n Returns:\n Predicted target values.\n\n \"\"\"\n task = self.task.name\n if (task == \"reg\") or (task == \"multi:reg\"):\n pred = model.predict(dataset.data)\n else:\n pred = model.predict_proba(dataset.data)\n if task == \"binary\":\n pred = pred[:, 1]\n elif task == \"multilabel\":\n pred = np.moveaxis(np.array(pred)[:, :, 1], 1, 0)\n\n return pred\n\n def get_features_score(self) -> Series:\n \"\"\"Computes feature importance as mean values of feature importance provided by RandomForest per 
all models.\n\n Returns:\n Series with feature importances.\n \"\"\"\n imp = 0\n for model in self.models:\n imp = imp + model.feature_importances_\n\n imp = imp / len(self.models)\n\n return Series(imp, index=self.features).sort_values(ascending=False)\n\n def fit(self, train_valid: TrainValidIterator):\n \"\"\"Just to be compatible with :class:`~lightautoml.pipelines.selection.base.ImportanceEstimator`.\n\n Args:\n train_valid: Classic cv-iterator.\n \"\"\"\n self.fit_predict(train_valid)\n","repo_name":"sb-ai-lab/LightAutoML","sub_path":"lightautoml/ml_algo/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"67"} +{"seq_id":"26769891700","text":"import random\nimport torch\n\nclass TicTacGame():\n def __init__(self):\n self.winstates = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6],\n ]\n self.gameState = [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"]\n self.XTurnToPlay = True\n self.winner = \"TicTacToe Demo\"\n self.windex = -1\n self.gameHistory = []\n self.qTrain = []\n self.qVal = []\n self.qTest = []\n self.lTrain = []\n self.lVal = []\n self.lTest = []\n self.gameCount = 0\n self.pindex = -1\n\n def gameLoop(self):\n self.reset()\n self.getNextState()\n while not self.isBoardFilled():\n self.getNextState()\n if self.isWinState():\n break\n # if self.isWinState():\n # print((\"O\" if self.XTurnToPlay else \"X\"), \"wins\")\n # else:\n # print(\"game was a draw\")\n #self.printState()\n self.qUpdate()\n #print(\"game count =\", self.gameCount)\n\n def qUpdate(self):\n qHist = []\n if not self.isBoardFilled() and self.XTurnToPlay:\n return;\n for i in range(len(self.gameHistory)):\n qState = []\n gState = self.gameHistory.pop()\n for gp in gState:\n if gp == 'X':\n qState.append((1, 0))\n elif gp == 'O':\n qState.append((-1, 0))\n elif gp == '-':\n qState.append((0, 0))\n else:\n qState.append((1, 1))\n qHist = [[f for f, c in qState]\n , [c for f, c in qState].index(1)]\n # print(qHist[0])\n if self.gameCount % 5 == 0 and self.gameCount % 45 != 0:\n self.qTest.append(qHist[0])\n self.lTest.append(qHist[1])\n elif self.gameCount % 9 == 0:\n self.qVal.append(qHist[0])\n self.lVal.append(qHist[1])\n elif self.gameCount % 5 != 0 or self.gameCount % 9 != 0:\n self.qTrain.append(qHist[0])\n self.lTrain.append(qHist[1])\n self.gameCount += 1\n\n def reset(self):\n self.gameState = [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"]\n self.XTurnToPlay = True\n self.winner = \"TicTacToe Demo\"\n self.windex = -1\n self.gameHistory = []\n self.pindex = -1\n\n def gamePlay(self):\n if self.isWinState() or self.isBoardFilled():\n self.reset()\n self.getNextState()\n\n def getNextState(self):\n v = random.randint(0, 8)\n while self.gameState[v] != \"-\":\n v = random.randint(0, 8)\n if self.XTurnToPlay:\n self.gameState[v] = '*'\n # print(self.gameState)\n self.gameHistory.append([x for x in self.gameState])\n self.gameState[v] = 'X'\n else:\n self.gameState[v] = \"O\"\n self.XTurnToPlay = not self.XTurnToPlay\n self.winner = ((\"O\" if self.XTurnToPlay else \"X\") + \" wins\") if self.isWinState() else (\n \"game was a draw\" if self.isBoardFilled() else self.winner)\n # this.testWinState();\n\n def isWinState(self):\n winstate = False\n for i in range(len(self.winstates)):\n if (self.gameState[self.winstates[i][0]] != \"-\" and self.gameState[self.winstates[i][0]]\n == 
self.gameState[self.winstates[i][1]] and\n self.gameState[self.winstates[i][1]] == self.gameState[self.winstates[i][2]]):\n self.windex = i\n winstate = True\n break\n return winstate\n\n def isBoardFilled(self):\n return \"-\" not in self.gameState\n\n def printState(self):\n sb = \"\"\n for i in range(3):\n for j in range(3):\n sb += self.gameState[i * 3 + j]\n print(sb)\n sb = \"\"\n\n @staticmethod\n def minibatch():\n ttt=TicTacGame()\n for i in range(50):\n ttt.gameLoop()\n return torch.tensor(ttt.qTrain, dtype=torch.float), torch.tensor(ttt.lTrain, dtype=torch.long), \\\n torch.tensor(ttt.qVal, dtype=torch.float), torch.tensor(ttt.lVal, dtype=torch.long), \\\n torch.tensor(ttt.qTest, dtype=torch.float), torch.tensor(ttt.lTest, dtype=torch.long)\n\ndef main():\n ttt = TicTacGame()\n ttt.gameLoop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"u1273400/rpynotes","sub_path":"pytorch/trl/qGame.py","file_name":"qGame.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24248603260","text":"# Tornado libraries\nfrom tornado.ioloop import IOLoop\nfrom tornado.web import RequestHandler, Application, removeslash\nfrom tornado.httpserver import HTTPServer\nfrom tornado.gen import coroutine\n\n# Other libraries\nimport json\nimport os\nimport sys\nimport redis\nfrom utility import utility, cron\nfrom dotenv import load_dotenv\n# from urllib.parse import urlparse, parse_qs\n# from selenium import webdriver\n# driver = webdriver.Firefox()\n\n\n\n\n\"\"\"\n@api {get} /leaderboard org leaderboard \n@apiName org leaderboard\n@apiGroup all\n@apiParamExample {json} response-example\n{\n status: 200,\n message: \"OK\",\n payload: {\n L04DB4L4NC3R: 82,\n Angad Sharma: 16816,\n bhaveshgoyal27: 19,\n dependabot-preview[bot]: 3743,\n shashu421: 2150,\n HRITISHA: 1105,\n alan478: 8805,\n Krishn157: 930\n }\n}\n\"\"\"\nclass LeaderBoard(RequestHandler):\n def initialize(self, redis, token, org):\n self.token = token\n self.org = org\n self.redis = redis\n\n def set_default_headers(self):\n print(\"setting headers!!!\")\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')\n\n @coroutine\n def get(self):\n res = utility.leaderboard(self.token, self.org, self.redis)\n\n jsonData = {\n 'status': 200,\n 'message': 'OK',\n 'payload': res\n }\n self.write(json.dumps(jsonData))\n \n def write_error(self, status_code, **kwargs):\n jsonData = {\n 'status': int(status_code),\n 'message': \"Internal server error\",\n 'answer': 'NULL'\n }\n self.write(json.dumps(jsonData))\n\n def options(self):\n self.set_status(204)\n self.finish()\n \n\"\"\"\n@api {get} /topcontributors top contributors of the org\n@apiName top contributors of the org\n@apiGroup all\n@apiParamExample {json} response-example\n{\n status: 200,\n message: \"OK\",\n payload: {\n CodeCombat: \"Angad Sharma\",\n skin-cancer-detection: \"shashu421\",\n cc-website-prototype-19: \"HRITISHA\",\n github-orgs-api: \"Angad Sharma\",\n digital-beacon: \"Angad Sharma\",\n vit-tourist-guide: \"alan478\",\n DevSoc2K19-Website: \"Angad Sharma\",\n love-open-source: \"Angad Sharma\",\n notes-map-analytics: \"Angad Sharma\",\n smart-park: \"Angad Sharma\",\n webinars: \"L04DB4L4NC3R\"\n }\n}\n\"\"\"\nclass TopContributors(RequestHandler):\n def initialize(self, redis, token, org):\n self.token = token\n self.org = org\n self.redis = redis\n\n def set_default_headers(self):\n 
print(\"setting headers!!!\")\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')\n \n @coroutine\n def get(self):\n response = utility.topcontributor(self.token, self.org, self.redis)\n jsonData = {\n 'status' : 200,\n 'message' : 'OK',\n 'payload' : response\n \n }\n self.write(json.dumps(jsonData))\n \n def write_error(self, status_code, **kwargs):\n jsonData = {\n 'status': int(status_code),\n 'message': \"Internal server error\",\n 'answer': 'NULL'\n }\n self.write(json.dumps(jsonData))\n def options(self):\n self.set_status(204)\n self.finish()\n\n\"\"\"\n@api {get} /repos data related to repos\n@apiName data related to repos\n@apiGroup all\n@apiParamExample {json} response-example\n{\n status: 200,\n message: \"OK\",\n payload: [\n {\n ref: {\n target: {\n history: {\n edges: [\n {\n node: {\n deletions: 1,\n additions: 1,\n author: {\n date: \"2019-06-04T20:37:49+05:30\",\n name: \"Angad Sharma\"\n }\n }\n }\n ]\n }\n }\n },\n name: \"github-orgs-api\"\n }]\n\"\"\"\nclass Repos(RequestHandler):\n def initialize(self, redis, token, org):\n self.token = token\n self.org = org\n self.redis = redis\n def set_default_headers(self):\n print(\"setting headers!!!\")\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')\n @coroutine\n def get(self):\n response = utility.repos(self.token, self.org, self.redis)\n jsonData = {\n 'status' : 200,\n 'message' : 'OK',\n 'payload' : response\n \n }\n self.write(json.dumps(jsonData))\n def write_error(self, status_code, **kwargs):\n jsonData = {\n 'status': int(status_code),\n 'message': \"Internal server error\",\n 'answer': 'NULL'\n }\n self.write(json.dumps(jsonData))\n def options(self):\n self.set_status(204)\n self.finish()\n\n\"\"\"\n@api {get} /seed manually seed cache \n@apiName manually seed cache\n@apiGroup all\n\"\"\"\nclass ManualSeed(RequestHandler):\n def initialize(self, redis, token, org):\n self.token = token\n self.org = org\n self.redis = redis\n def set_default_headers(self):\n print(\"setting headers!!!\")\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')\n @coroutine\n def get(self):\n utility.cache_response(self.token, self.org, self.redis)\n self.write(\"Cache seeded\")\n def write_error(self, status_code, **kwargs):\n jsonData = {\n 'status': int(status_code),\n 'message': \"Internal server error\",\n 'answer': 'NULL'\n }\n self.write(json.dumps(jsonData))\n def options(self):\n self.set_status(204)\n self.finish()\n\nclass Welcome(RequestHandler):\n def set_default_headers(self):\n print(\"setting headers!!!\")\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header('Access-Control-Allow-Methods', 'GET, OPTIONS')\n @coroutine\n def get(self):\n self.write(\"Hello world\")\n def write_error(self, status_code, **kwargs):\n jsonData = {\n 'status': int(status_code),\n 'message': \"Internal server error\",\n 'answer': 'NULL'\n }\n self.write(json.dumps(jsonData))\n def options(self):\n self.set_status(204)\n self.finish()\n\n\n\nsettings = dict(\n debug=True\n)\n\n\nif __name__ == \"__main__\":\n load_dotenv(dotenv_path=\"./.env\", verbose=True)\n # token = os.environ.get(\"TOKEN\")\n # org = os.environ.get(\"ORGANIZATION\")\n # url=driver.current_url\n # parsed = urlparse(url)\n token=sys.argv[1]\n org=sys.argv[2]\n if len(sys.argv) > 1 and sys.argv[1] == \"--with-cache\":\n print(\"Connecting 
to redis....\")\n r = redis.from_url(os.environ.get(\"REDIS_URL\"))\n if r is None:\n print(\"[ERROR] cannot connect to caching layer\")\n exit(2)\n utility.cache_response(token=token, org=org, rd=r) # seed cache\n cron.start_cache_job(token, org, r)\n else:\n r = None\n \n if token is None or org is None:\n print(\"Token or Organization was null\")\n exit(1)\n \n # starting application\n application = Application([(r'/leaderboard', LeaderBoard, dict(redis=r, token=token, org=org)),\n (r'/topcontributors', TopContributors, dict(redis=r, token=token, org=org)),\n (r'/repos', Repos, dict(redis=r, token=token, org=org)),\n (r'/seed', ManualSeed, dict(redis=r, token=token, org=org)),\n (r'/', Welcome)\n ], **settings)\n server = HTTPServer(application)\n server.listen(os.environ.get(\"PORT\", 5000))\n IOLoop.current().start()\n","repo_name":"NavyaaSharma/git-and-me-hack-insider","sub_path":"python/api_server.py","file_name":"api_server.py","file_ext":"py","file_size_in_byte":8296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11972142594","text":"import numpy as np\nimport os\nimport torch\nfrom torch.utils.data import SequentialSampler,DistributedSampler,DataLoader\nfrom utils_glue import compute_metrics\nfrom tqdm import tqdm\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef predict(model,eval_datasets,step, eval_lang, args, taskname=None):\n eval_task = taskname\n eval_output_dir = args.output_dir\n lang_results = {}\n for lang,eval_dataset in zip(eval_lang, eval_datasets):\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n logger.info(\"Predicting...\")\n logger.info(\"***** Running predictions *****\")\n logger.info(\" task name = %s\", eval_task)\n logger.info(\" lang : %s\", lang)\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.predict_batch_size)\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.predict_batch_size)\n model.eval()\n\n pred_logits = []\n label_ids = []\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\", disable=None):\n token_type_ids = batch.get('token_type_ids',None)\n if token_type_ids is not None:\n token_type_ids = token_type_ids.to(args.device)\n input_ids, input_mask, labels = batch['input_ids'],batch['attention_mask'],batch['label']\n input_ids = input_ids.to(args.device)\n input_mask = input_mask.to(args.device)\n #segment_ids = segment_ids.to(args.device)\n with torch.no_grad():\n outputs= model(input_ids, input_mask,token_type_ids=token_type_ids)\n logits = outputs[0]\n pred_logits.append(logits.detach().cpu())\n label_ids.append(labels)\n pred_logits = np.array(torch.cat(pred_logits),dtype=np.float32)\n label_ids = np.array(torch.cat(label_ids),dtype=np.int64)\n\n preds = np.argmax(pred_logits, axis=1)\n results = compute_metrics(eval_task, preds, label_ids)\n\n logger.info(\"***** Eval results {} Lang {} *****\".format(step, lang))\n for key in sorted(results.keys()):\n logger.info(f\"{lang} {key} = {results[key]:.5f}\")\n lang_results[lang] = results\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n\n write_results(output_eval_file,step,lang_results, eval_lang)\n model.train()\n return lang_results\n\ndef write_results(output_eval_file,step,lang_results, eval_lang):\n with open(output_eval_file, \"a\") 
as writer:\n writer.write(f\"step: {step:<8d} \")\n line = \"Acc/F1:\"\n\n for lang in eval_lang:\n acc = lang_results[lang]['acc']\n if 'f1' in lang_results[lang]:\n f1 = lang_results[lang]['f1']\n line += f\"{lang}={acc:.5f}/{f1:.5f} \"\n else:\n line += f\"{lang}={acc:.5f} \"\n writer.write(line+'\\n')","repo_name":"airaria/GRAIN","sub_path":"scripts/glue/predict_function.py","file_name":"predict_function.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"36756901506","text":"# %% [markdown]\r\n# # **Baixar vídeos do Youtube**\r\n\r\n# %%\r\nfrom pytube import YouTube\r\n\r\nyoutube = YouTube('https://youtu.be/e-fNXcdDKNg')\r\n\r\nyoutube.streams.get_highest_resolution().download()\r\n\r\n# %% [markdown]\r\n# ### **Baixar áudio e converter para .MP3**\r\n# \r\n# ATENÇÃO: necessário passar o caminho da pasta, onde deseja salvar o arquivo.\r\n# \r\n# * **Exemplo: D:\\Users\\Micro3\\Downloads**\r\n\r\n# %%\r\nfrom pytube import YouTube\r\nimport os\r\nyt = YouTube(str(input(\"Digite o URL do vídeo do youtube: \\n \")))\r\nvideo = yt.streams.filter(only_audio=True).first()\r\nprint(\"Digite o endereço de destino (deixe em branco para salvar no diretório atual)\")\r\ndestination = str(input(\" \")) or '.'\r\nout_file = video.download(output_path=destination)\r\nbase, ext = os.path.splitext(out_file)\r\nnew_file = base + '.mp3'\r\nos.rename(out_file, new_file)\r\nprint(yt.title + \" Áudio baixado com sucesso!\")\r\n\r\n\r\n","repo_name":"C410-M/C0D3S","sub_path":"Youtube_Downloader/downloader_YoutuBIU.py","file_name":"downloader_YoutuBIU.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23604458553","text":"import argparse \nimport pandas as pd\nimport os\nfrom Bio import AlignIO \nfrom Bio import SeqIO\nimport numpy as np\n\ndef cut_function(msa,st,end):\n if(end > st):\n cutted = msa[:,int(st-1):int(end-1)] # biopython start from 0 !!\n output = msapath+'_'+str(st)+'_'+str(end)+'.fa'\n AlignIO.write(cutted,output,'fasta')\n\n else:\n last = msa.get_alignment_length()\n cut_first = msa[:,int(st-1):(last-1)] # biopython start from 0 !!\n cut_second =msa[:,0:int(end-1)]\n cutted = cut_first + cut_second\n output = msapath+'_'+str(st)+'_'+str(end)+'.fa'\n AlignIO.write(cutted,output,'fasta')\n\n\ndef pseudocircular(a,overlap):\n pseudocircular = np.pad(a, pad_width=(0, overlap-1), mode='wrap')\n return(pseudocircular)\n\ndef auto_split_points(msa,length,overlap = None):\n if overlap == None:\n overlap = int(round(length/2))\n\n ncols = msa.get_alignment_length()\n \n circular = np.arange(ncols)+1\n circular = pseudocircular(circular,overlap=length)\n \n ind_start = np.arange(start = 0,stop = len(circular)-1,step = overlap)\n ind_start = [i for i in ind_start if i i >= 0 and not B[i]:\n B[i] = True\n count += 1\n break\n return count\n\n\nimport sys\ninput = sys.stdin.readline\nn, k = map(int, input().split())\nA = input().rstrip()\nprint(answer(A))\n","repo_name":"kjh03160/Algorithm_Basic","sub_path":"practice/Greedy/Hamberger_19941.py","file_name":"Hamberger_19941.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14393796190","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport unittest\nimport requests\n\nclass QQtest(unittest.TestCase):\n '''qq号测试吉凶'''\n def setUp(self):\n 
'''Set up the test fixtures'''\n        self.url = \"http://japi.juhe.cn/qqevaluate/qq\"\n        self.headers={\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0\"\n        }\n        self.s = requests.session()\n\n    def tearDown(self):\n        '''Clean up test data'''\n        self.s.close()\n    def test001(self):\n        '''Test case 001'''\n        self.params ={\n            \"qq\":\"741841851\",\n            \"key\":\"49c47ae3d1e8e3e49578f03cee1e7a7a\"\n        }\n        r = self.s.get(self.url,params=self.params,headers=self.headers,verify=False)\n        result = r.json()\n        # print(result)\n        reason = result['reason']  # get the API status\n        data = result['result']['data']['conclusion']  # get the fortune conclusion data\n        self.assertEqual(reason,'success')\n\n    def test002(self):\n        '''Test case 002'''\n        self.params ={\n            \"qq\":\"\",\n            \"key\":\"49c47ae3d1e8e3e49578f03cee1e7a7a\"\n        }\n        r = self.s.get(self.url,params=self.params,headers=self.headers,verify=False)\n        result = r.json()\n        print(result)\n        reason = result['reason']  # get the API status\n        # data = result['result']['data']['conclusion']  # get the fortune conclusion data\n        self.assertEqual(reason,'错误的请求参数')  # the API replies with this Chinese message, meaning \"invalid request parameters\"\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"hhppyy/test_api","sub_path":"test_case/test_qqtest.py","file_name":"test_qqtest.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38876287948","text":"# Definition for a street.\n# class Street:\n#     def openDoor(self):\n#         pass\n#     def closeDoor(self):\n#         pass\n#     def isDoorOpen(self):\n#         pass\n#     def moveRight(self):\n#         pass\n#     def moveLeft(self):\n#         pass\nclass Solution:\n    def houseCount(self, street: Optional['Street'], k: int) -> int:\n        for i in range(k):\n            street.closeDoor()\n            street.moveLeft()\n        res = 0\n        while not street.isDoorOpen():\n            street.openDoor()\n            res += 1\n            street.moveLeft()\n        return res\n","repo_name":"kai3n/Daily-commit-project","sub_path":"jamespak/week30/2728. 
Count Houses in a Circular Street.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37897848806","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport time\n\nraf_mean=np.array([119.85, 136.35, 174.27])\n\nclass Vgg_face:\n def __init__(self):\n\n self.train = True\n self.vgg16_npy_path = './pre-train model/vgg16-save.npy'\n self.var_dict = {}\n\n def build(self, img, dropout):\n \"\"\"\n load variable from npy to build the VGG\n :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]\n \"\"\"\n self.data_dict = np.load(self.vgg16_npy_path, encoding='latin1').item()\n print(\"npy file loaded\")\n print(\"build model started\")\n\n assert img.get_shape().as_list()[1:] == [224, 224, 3]\n\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=img)\n\n img_norm = tf.concat(axis=3, values=[\n red - raf_mean[0],\n green - raf_mean[1],\n blue - raf_mean[2],\n ])\n\n self.conv1_1 = self.conv_layer(img_norm, 3, 64, \"conv1_1\", mode='fixed')\n self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, \"conv1_2\", mode='fixed')\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, 64, 128, \"conv2_1\", mode='fixed')\n self.conv2_2 = self.conv_layer(self.conv2_1, 128, 128, \"conv2_2\", mode='fixed')\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, 128, 256, \"conv3_1\", mode='fixed')\n self.conv3_2 = self.conv_layer(self.conv3_1, 256, 256, \"conv3_2\", mode='fixed')\n self.conv3_3 = self.conv_layer(self.conv3_2, 256, 256, \"conv3_3\", mode='fixed')\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, 256, 512, \"conv4_1\", mode='fixed')\n self.conv4_2 = self.conv_layer(self.conv4_1, 512, 512, \"conv4_2\", mode='fixed')\n self.conv4_3 = self.conv_layer(self.conv4_2, 512, 512, \"conv4_3\", mode='fixed')\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, 512, 512, \"conv5_1\", mode='fixed')\n self.conv5_2 = self.conv_layer(self.conv5_1, 512, 512, \"conv5_2\", mode='fixed')\n self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, \"conv5_3\", mode='fixed')\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, 25088, 2048, \"fc6\", mode='fixed') \n self.relu6 = tf.nn.relu(self.fc6)\n self.relu6 = tf.nn.dropout(self.relu6, dropout)\n\n self.fc7 = self.fc_layer(self.relu6, 2048, 1024, \"fc7\", mode='fixed')\n self.relu7 = tf.nn.relu(self.fc7)\n self.relu7 = tf.nn.dropout(self.relu7, dropout)\n\n self.fc8 = self.fc_layer(self.relu7, 1024, 8, \"fc8\", mode='fine-tune')\n\n self.prob = tf.nn.softmax(self.fc8, name=\"prob\")\n\n self.data_dict = None\n\n def avg_pool(self, bottom, name):\n return tf.nn.avg_pool(bottom, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def max_pool(self, bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def conv_layer(self, bottom, in_channels, out_channels, name, mode):\n\n with tf.variable_scope(name):\n\n filt, conv_biases = self.get_conv_var(3, in_channels, out_channels, name, mode)\n conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')\n bias = tf.nn.bias_add(conv, conv_biases)\n relu = tf.nn.relu(bias)\n\n return relu\n\n def fc_layer(self, bottom, in_size, out_size, name, mode):\n with 
tf.variable_scope(name):\n\n            weights, biases = self.get_fc_var(in_size, out_size, name, mode)\n            x = tf.reshape(bottom, [-1, in_size])\n            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n\n            return fc\n\n    def get_conv_var(self, filter_size, in_channels, out_channels, name, mode):\n\n        if mode == 'fixed' or self.train == False:\n\n            filters = self.data_dict[name][0]\n            biases = self.data_dict[name][1]\n\n        elif mode == 'fine-tune':\n\n            filters = tf.Variable(self.data_dict[name][0], name = name + '_weights')\n            biases = tf.Variable(self.data_dict[name][1], name = name + '_biases')\n        elif mode == 'retrain':\n\n            filters = tf.Variable(tf.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, 0.001), \\\n                name = name + '_weights')\n            biases = tf.Variable(tf.truncated_normal([out_channels], 0.0, 0.001), \\\n                name = name + '_biases')\n        else:\n            print ('Mode should be fixed/fine-tune/retrain')\n\n        self.var_dict[(name, 0)] = filters\n        self.var_dict[(name, 1)] = biases\n\n        return filters, biases\n\n    def get_fc_var(self, in_size, out_size, name, mode):\n\n        if mode == 'fixed' or self.train == False:\n\n            weights = self.data_dict[name][0]\n            biases = self.data_dict[name][1] \n        elif mode == 'fine-tune':\n\n            weights = tf.Variable(self.data_dict[name][0], name = name + '_weights')\n            biases = tf.Variable(self.data_dict[name][1], name = name + '_biases')\n        elif mode == 'retrain':\n\n            weights = tf.Variable(tf.truncated_normal([in_size, out_size], 0.0, 0.001), name = name + '_weights')\n            biases = tf.Variable(tf.truncated_normal([out_size], 0.0, 0.001), name = name + '_biases')\n        else:\n            print ('Mode should be fixed/fine-tune/retrain')\n\n        self.var_dict[(name, 0)] = weights\n        self.var_dict[(name, 1)] = biases\n\n        return weights, biases\n\n    def save_npy(self, sess, npy_path=\"../model/vgg16-save.npy\"):\n        assert isinstance(sess, tf.Session)\n\n        data_dict = {}\n\n        for (name, idx), var in list(self.var_dict.items()):\n            var_out = sess.run(var)\n            if name not in data_dict:\n                data_dict[name] = {}\n            data_dict[name][idx] = var_out\n\n        np.save(npy_path, data_dict)\n        print(\"file saved\", npy_path)\n        # return npy_path \n\nclass DGN:\n    \n    def __init__(self, path='./pre-train model/dgn_model.npy'):\n\n        self.var_dict = {}\n        self.data_dict = np.load(path, encoding='latin1').item()\n        self.train = True\n    \n    def build(self, lm, dropout):\n\n        self.fc1 = self.fc_layer(lm, 51*2, 128, 'fc1', mode = 'fixed')\n        self.relu1 = tf.nn.selu(self.fc1)\n        self.drop1 = tf.nn.dropout(self.relu1, dropout)\n\n        self.fc2 = self.fc_layer(self.drop1, 128, 256, 'fc2', mode = 'fixed')\n        self.relu2 = tf.nn.selu(self.fc2)\n        self.drop2 = tf.nn.dropout(self.relu2, dropout)\n\n        self.fc3 = self.fc_layer(self.drop2, 256, 8, 'fc3', mode = 'fine-tune')\n        self.prob = tf.nn.softmax(self.fc3, name='prob')\n\n    def fc_layer(self, bottom, in_size, out_size, name, mode):\n        with tf.variable_scope(name):\n\n            weights, biases = self.get_fc_var(in_size, out_size, name, mode)\n            x = tf.reshape(bottom, [-1, in_size])\n            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n\n            return fc\n\n    def get_fc_var(self, in_size, out_size, name, mode):\n\n        if mode == 'fixed' or self.train == False:\n\n            weights = self.data_dict[name][0]\n            biases = self.data_dict[name][1]\n        elif mode == 'fine-tune':\n\n            weights = tf.Variable(self.data_dict[name][0], name = name + '_weights')\n            biases = tf.Variable(self.data_dict[name][1], name = name + '_biases')\n        elif mode == 'retrain':\n\n            weights = tf.Variable(tf.truncated_normal([in_size, out_size], 0.0, 0.001), name = name + '_weights')\n            
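# 'retrain' mode starts from fresh truncated-normal values rather than the pretrained npy snapshot\n            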
biases = tf.Variable(tf.truncated_normal([out_size], 0.0, 0.001), name = name + '_biases')\n        else:\n            print ('Mode should be fixed/fine-tune/retrain')\n\n        self.var_dict[(name, 0)] = weights\n        self.var_dict[(name, 1)] = biases\n\n        return weights, biases\n\n    def save_npy(self, sess, npy_path=\"./model/dgn-save.npy\"):\n        # assert isinstance(sess, tf.Session)\n\n        data_dict = {}\n\n        for (name, idx), var in list(self.var_dict.items()):\n            var_out = sess.run(var)\n            if name not in data_dict:\n                data_dict[name] = {}\n            data_dict[name][idx] = var_out\n\n        np.save(npy_path, data_dict)\n        print(\"file saved\", npy_path)\n        return npy_path","repo_name":"gamborino/RoBoHoN","sub_path":"vgg_face.py","file_name":"vgg_face.py","file_ext":"py","file_size_in_byte":8451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5276068091","text":"from __future__ import annotations\nimport logging\n\nfrom pathlib import Path\nfrom sys import platform\nfrom typing import Optional, Type, Union\n\nfrom bs4 import BeautifulSoup\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.chrome.service import Service as ChromeDriverService\nfrom selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.edge.options import Options as EdgeOptions\nfrom selenium.webdriver.edge.service import Service as EdgeDriverService\nfrom selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom selenium.webdriver.firefox.service import Service as GeckoDriverService\nfrom selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.safari.options import Options as SafariOptions\nfrom selenium.webdriver.safari.webdriver import WebDriver as SafariDriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager\n\nBrowserOptions = Union[ChromeOptions, EdgeOptions, FirefoxOptions, SafariOptions]\n\nimport argparse\n\n\ndef scrape_text_with_selenium(url: str = \"https://verify.bmdc.org.bd/\") -> tuple[WebDriver, str]:\n    \"\"\"Scrape text from a website using selenium\n\n    Args:\n        url (str): The url of the website to scrape\n\n    Returns:\n        Tuple[WebDriver, str]: The webdriver and the text scraped from the website\n    \"\"\"\n    class Config:\n        selenium_web_browser = \"chrome\"\n        selenium_headless = False\n\n    config = Config()\n\n    logging.getLogger(\"selenium\").setLevel(logging.INFO)\n\n    options_available: dict[str, Type[BrowserOptions]] = {\n        \"chrome\": ChromeOptions,\n        \"edge\": EdgeOptions,\n        \"firefox\": FirefoxOptions,\n        \"safari\": SafariOptions,\n    }\n\n    options: BrowserOptions = options_available[config.selenium_web_browser]()\n    options.add_argument(\n        \"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 
Safari/537.36\"\n    )\n\n    if config.selenium_web_browser == \"firefox\":\n        if config.selenium_headless:\n            options.headless = True\n            options.add_argument(\"--disable-gpu\")\n        driver = FirefoxDriver(\n            service=GeckoDriverService(GeckoDriverManager().install()), options=options\n        )\n    elif config.selenium_web_browser == \"edge\":\n        driver = EdgeDriver(\n            service=EdgeDriverService(EdgeDriverManager().install()), options=options\n        )\n    elif config.selenium_web_browser == \"safari\":\n        # Requires a bit more setup on the user's end\n        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari\n        driver = SafariDriver(options=options)\n    else:\n        if platform == \"linux\" or platform == \"linux2\":\n            options.add_argument(\"--disable-dev-shm-usage\")\n            options.add_argument(\"--remote-debugging-port=9222\")\n\n        options.add_argument(\"--no-sandbox\")\n        if config.selenium_headless:\n            options.add_argument(\"--headless=new\")\n            options.add_argument(\"--disable-gpu\")\n\n        chromium_driver_path = Path(\"/usr/bin/chromedriver\")\n\n        driver = ChromeDriver(\n            service=ChromeDriverService(str(chromium_driver_path))\n            if chromium_driver_path.exists()\n            else ChromeDriverService(ChromeDriverManager().install()),\n            options=options,\n        )\n    driver.get(url)\n\n    WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.TAG_NAME, \"body\"))\n    )\n\n    # Get the HTML content directly from the browser's DOM\n    page_source = driver.execute_script(\"return document.body.outerHTML;\")\n    soup = BeautifulSoup(page_source, \"html.parser\")\n\n    for script in soup([\"script\", \"style\"]):\n        script.extract()\n\n    text = soup.get_text()\n    lines = (line.strip() for line in text.splitlines())\n    chunks = (phrase.strip() for line in lines for phrase in line.split(\"  \"))\n    text = \"\\n\".join(chunk for chunk in chunks if chunk)\n    return driver, text\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Give the url\")\n    parser.add_argument('-u', '--url', type=str, help='website', default='https://www.scrapethissite.com/')\n    args = parser.parse_args()\n\n    print(args.url)\n    _, text = scrape_text_with_selenium(url=args.url)\n\n    print(text)","repo_name":"shadman-shahid/bmdc_scraper","sub_path":"scraper_test.py","file_name":"scraper_test.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2513580657","text":"import logging\nfrom common.middleware.middleware import Middleware\nfrom common.rabbit.rabbit_exchange import RabbitExchange\nfrom common_utils.protocol.message import Message\n\nDATA_EXCHANGE = 'data'\nDATA_EXCHANGE_TYPE = 'direct'\nTRIPS_KEY = 'trips'\nSTATIONS_KEY = 'stations'\nWEATHER_KEY = 'weather'\nFLUSH_EXCHANGE_NAME = 'flush'\nFLUSH_EXCHANGE_TYPE = 'fanout'\n\n\nclass LoaderMiddleware(Middleware):\n    def __init__(self, hostname: str):\n        super().__init__(hostname)\n        self._data_exchange = RabbitExchange(\n            rabbit_connection=self._rabbit_connection,\n            exchange_name=DATA_EXCHANGE,\n            exchange_type=DATA_EXCHANGE_TYPE,\n        )\n        self._flush_exchange = None\n\n    def send_trips(self, message, routing_key_postfix):\n        self._data_exchange.publish(message, routing_key=f\"{TRIPS_KEY}_{routing_key_postfix}\")\n\n    def send_stations(self, message, routing_key_postfix):\n        self._data_exchange.publish(message, routing_key=f\"{STATIONS_KEY}_{routing_key_postfix}\")\n\n    def send_weather(self, message, routing_key_postfix):\n        self._data_exchange.publish(message, routing_key=f\"{WEATHER_KEY}_{routing_key_postfix}\")\n\n    def send_flush(self, message):\n        if self._flush_exchange:\n            self._flush_exchange.publish(message)\n\n    def create_flush_channel(self):\n        self._flush_exchange = RabbitExchange(\n            
rabbit_connection=self._rabbit_connection,\n            exchange_name=FLUSH_EXCHANGE_NAME,\n            exchange_type=FLUSH_EXCHANGE_TYPE,\n        )\n","repo_name":"leogm99/tp3-bike-rides-analyzer","sub_path":"server/common/loader/loader_middleware.py","file_name":"loader_middleware.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25600371163","text":"import os\nimport tempfile\n\nfrom snakemake.shell import shell\n\n\nwith tempfile.TemporaryDirectory() as tmpdir:\n    shell(\n        \"fastqc \"\n        \" --outdir {tmpdir}\"\n        \" --extract\"\n        \" --threads {snakemake.threads}\"\n        \" --dir {tmpdir}\"\n        \" {snakemake.input.reads}\"\n        \" > {snakemake.log.out}\"\n        \" 2> {snakemake.log.err}\"\n    )\n\n    PRESUMED_SUFFIX = \".fastq.gz\"\n    if not snakemake.input.reads.endswith(PRESUMED_SUFFIX):\n        raise ValueError(f\"{snakemake.input.reads} does not end with {PRESUMED_SUFFIX}\")\n\n    base_name = os.path.basename(snakemake.input.reads).replace(\".fastq.gz\", \"\")\n    html_path = os.path.join(tmpdir, f\"{base_name}_fastqc.html\")\n    zip_path = os.path.join(tmpdir, f\"{base_name}_fastqc.zip\")\n\n    fastqc_datapath = os.path.join(tmpdir, f\"{base_name}_fastqc\", \"fastqc_data.txt\")\n    summary_path = os.path.join(tmpdir, f\"{base_name}_fastqc\", \"summary.txt\")\n\n    shell(\"mv {html_path} {snakemake.output.html}\")\n    shell(\"mv {zip_path} {snakemake.output.zip}\")\n    shell(\"mv {fastqc_datapath} {snakemake.output.data}\")\n    shell(\"mv {summary_path} {snakemake.output.txt}\")\n\n","repo_name":"jbudis/snakelines","sub_path":"rules/paired_end/reads/report/quality_report/scripts/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"42083685172","text":"class Solution:\n    def combine(self, n, k):\n        \"\"\"\n        :type n: int\n        :type k: int\n        :rtype: List[List[int]]\n        \"\"\"\n        nums = [i for i in range(1,n+1)]\n        result = []\n\n        def backtrack(i, data):\n            if len(data) == k:\n                result.append(data.copy())\n            else:\n                for j in range(i+1, len(nums)):\n                    data.append(nums[j])\n                    backtrack(j, data)\n                    data.pop()\n\n        backtrack(-1, [])\n        return result\n\n\nprint(Solution().combine(6,5))","repo_name":"HNYuuu/Leetcode-","sub_path":"77.py","file_name":"77.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"5677744408","text":"import torch\nimport json\nfrom tqdm import tqdm\nfrom infer import ModelInference\n\n\ndef get_feature_from_json(json_list,\n                          save_name,\n                          model_inference,\n                          n=256,\n                          type=\"nmr\",\n                          flag_get_value=False):\n    context = []\n    print(\"start parse json\")\n    for file in json_list:\n        with open(file, \"r\") as f:\n            context_tmp = json.loads(f.read())\n            context_tmp = [\n                i[type][0] for i in tqdm(context_tmp) if len(i[type]) > 0\n            ]\n            context += context_tmp\n    print(\"Size of the library: \", len(context))\n    if flag_get_value == \"only\":\n        return context\n    if type == \"nmr\":\n        fn = model_inference.nmr_encode\n    elif type == \"smiles\":\n        fn = model_inference.smiles_encode\n    contexts = []\n    print(\"start load batch\")\n    for i in range(0, len(context), n):\n        contexts.append(context[i:i + n])\n    print(\"start encode batch\")\n    result = [fn(i).cpu() for i in tqdm(contexts)]\n    result = torch.cat(result, 0)\n    if flag_get_value is True:\n        if save_name is not None:\n            torch.save((result, context), save_name)\n        return result, context\n\n    if 
save_name is not None:\n        torch.save(result, save_name)\n    return result\n\n\ndef get_topK_result(nmr_feature, smiles_feature, topK):\n    indices = []\n    scores = []\n    with torch.no_grad():\n        for i in tqdm(nmr_feature):\n            nmr_smiles_distances_tmp = (\n                i.unsqueeze(0) @ smiles_feature.t()).cpu()\n            scores_, indices_ = nmr_smiles_distances_tmp.topk(topK,\n                                                              dim=1,\n                                                              largest=True,\n                                                              sorted=True)\n            indices.append(indices_)\n            scores.append(scores_)\n    indices = torch.cat(indices, 0)\n    scores = torch.cat(scores, 0)\n    return indices, scores\n\n\nif __name__ == \"__main__\":\n    # Load the model\n    config_path = \"models/2_5_w_model/8.json\"\n    pretrain_model_path = \"models/2_5_w_model/8.pth\"\n    model_inference = ModelInference(config_path=config_path,\n                                     pretrain_model_path=pretrain_model_path,\n                                     device=\"cpu\")\n\n    # C(O)C1=CC2(C)CCC3C(C)CCC32C1C\n    nmr_list = [\n        17.7, 20.0, 22.9, 28.9, 29.9, 35.8, 37.6, 39.7, 50.9, 57.3, 61.3, 64.1,\n        64.9, 134.0, 146.7\n    ]\n\n    # Extract the NMR spectral feature vector\n    nmr_feature = model_inference.nmr_encode(nmr_list)\n\n    # Construct a reference library by extracting structural feature vectors from SMILES strings\n    # This might take a long time\n    smiles_feature, smiles_list = get_feature_from_json(\n        json_list=[\"data/val.json\", \"data/train.json\"],\n        model_inference=model_inference,\n        n=64,\n        save_name=None,\n        type=\"smiles\",\n        flag_get_value=True)\n\n    # Get the top-10 candidates by searching the library\n    indices, scores = get_topK_result(nmr_feature, smiles_feature, 10)\n\n    # Print the result\n    for (sco, idx) in zip(scores, indices):\n        for ii, i in enumerate(idx):\n            print(\"top:\", ii, \"scores:\", sco[ii].item(), \"smiles:\",\n                  smiles_list[i])\n","repo_name":"Qihoo360/CReSS","sub_path":"example_search_library.py","file_name":"example_search_library.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"1288664946","text":"\"\"\"\nData from the Price Monitoring Center, NDRC was not available. As it turns out, the National Bureau of Statistics of China also does not\nprovide data about the average price of electricity. Right now, the function \"retrieve_data\" returns monthly data on \"Output of Electricity, \nCurrent Period(100 million kwh)\". If one wants to retrieve data for the Sichuan province only, he/she should look into the following two\nrepositories to understand how to change the API URLs below: https://github.com/khaeru/data/blob/master/cn_nbs.py and https://github.com/mbk-dev/nbsc.\nThe Sichuan province has code 510000. Therefore, one probably has to insert \"&reg=[{\"wdcode\":\"reg\",\"valuecode\":\"510000\"}]\" into the URL. \nThis code might be incomplete since I do not use this data. I also did not check if all days are included in the data for the specified data \nrange. One can get the needed code from the file google_trends_data.py, for example. Also, apparently, the URLs get constantly updated, but \nI think there are also ways to create permanently working URLs. It is best to look at one of the two mentioned GitHub repositories.\n\nThe function \"retrieve_data\" has the following arguments:\n- path: The path where the user intends to store the data. The default is \"\".\n- download: Whether the user wants to download the data or get them returned. 
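If download is False, the assembled DataFrame is returned to the caller instead of being written to disk. 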
The default is True.\n\"\"\"\n\n__all__ = [\"retrieve_data\"]\n\ndef retrieve_data(path=\"\", download=True):\n    import requests, pandas as pd, os\n    if path != \"\":\n        file_names = os.listdir(path)\n    else:\n        file_names = []  # no output folder given, so there is nothing to check against\n    # the website browser can be found at https://data.stats.gov.cn/english/index.htm\n    # there, one can also see the format for the date range to be specified in the API URL\n    # if one wishes to find the API URL of another data series, one has to open the developer tool, go to network, then Fetch/XHR, navigate to the desired data series in the browser, and then inspect the different queries\n    # for URL encoding: https://de.wikipedia.org/wiki/URL-Encoding\n    api_url = \"https://data.stats.gov.cn/english/easyquery.htm?m=QueryData&dbcode=hgyd&rowcode=zb&colcode=sj&wds=%5B%5D&dfwds=%5B%7B%22wdcode%22%3A%22zb%22%2C%22valuecode%22%3A%22A03010G%22%7D%5D&k1=1672874627369\"\n    # the value code for the data series here is A03010G\n    print(\"The API URL is: \" + api_url + \".\")\n    response = requests.get(api_url, verify=False)\n    meta_info = response.json()[\"returndata\"][\"wdnodes\"]\n    name = meta_info[0][\"nodes\"][0][\"cname\"]\n    print(name)\n    response = response.json()\n    data = []\n    dates = []\n    i = 0\n    while True:\n        dates.append(int(response[\"returndata\"][\"datanodes\"][i][\"wds\"][1][\"valuecode\"]))\n        data.append(response[\"returndata\"][\"datanodes\"][i][\"data\"][\"data\"])\n        if response[\"returndata\"][\"datanodes\"][i][\"wds\"][1][\"valuecode\"] == \"201101\":\n            break\n        i += 1\n    df = pd.DataFrame.from_dict({\"dates\": dates, \"Output of Electricity, Current Period(100 million kwh)\": data})\n    df[\"date\"] = pd.to_datetime(df[\"dates\"], format=\"%Y%m\")  # the \"dates\" column holds YYYYMM codes; \"data\" is a plain list of values\n    df = df.drop_duplicates(subset=\"date\")\n    df.set_index(\"date\", inplace=True, drop=True)\n    date_range = pd.date_range(start=df.index.min(), end=df.index.max(), freq=\"D\")  # derive the bounds from the index itself\n    df = df.reindex(date_range)\n    if download:\n        if \"nbsc_data.csv\" not in file_names:\n            df.to_csv(path + \"/nbsc_data.csv\")\n        else:\n            if input(\"The file already exists. Do you want to replace it? 
Y/N \") == \"Y\":\n                os.remove(path + \"/nbsc_data.csv\")\n                df.to_csv(path + \"/nbsc_data.csv\")\n            else:\n                print(\"Could not create a new file.\")\n    else: \n        return df","repo_name":"MarcGehring97/Crypto-Currency-Asset-Pricing","sub_path":"data_retrieval/nbsc.py","file_name":"nbsc.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2263843819","text":"def plot(txtaddress):\n    \"\"\"\n    @Author: Haoyang Ding\n    plot the word cloud image\n    :param txtaddress: file address, string\n    :return: None\n    \"\"\"\n\n    assert isinstance(txtaddress, str)\n    import numpy as np\n    from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\n    import matplotlib.pyplot as plt\n    from PIL import Image\n    import os\n\n    f=open(txtaddress).read()\n    earthpath=os.path.abspath(os.path.join(os.getcwd(), 'Data\\VisualizationData\\CO2_GDP', 'Equal_Earth_projection_SW2.jpg'))\n    earthfig=np.array(Image.open(earthpath))\n    stopwords = set(STOPWORDS)\n    stopwords.add(\"said\")\n    wordcloud = WordCloud(background_color=\"white\", mask=earthfig, stopwords=stopwords, collocations=False).generate(f)\n    fig, axes = plt.subplots(1, 1)\n    fig.set_size_inches(55, 50)\n    image_colors = ImageColorGenerator(earthfig)\n    axes.imshow(wordcloud.recolor(color_func=image_colors), interpolation=\"bilinear\")\n    axes.set_axis_off()\n    plt.savefig(r'Image\\CO2_GDP\\wordcloud.jpg', bbox_inches='tight')\n    plt.show()\n","repo_name":"yueyang1101/ece143team15","sub_path":"src/plots/CO2_GDP/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"72223223575","text":"from flask import Flask, render_template, request\nimport time\nfrom fetchData import fetchData\n \napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n    if request.method == 'POST':\n        start = time.time()\n        keyword = request.form.get('keyword')\n        school = request.form.get('school')\n        if (len(keyword) < 3 or not school):\n            return render_template('home.html', data=[], noResults=True)\n        result =fetchData(keyword, school)\n        noResults = True if len(result) == 0 else False\n        end = time.time()\n        return render_template('home.html', data=result, time=end-start, dataLength=len(result), noResults=noResults, keyword=keyword, school=school)\n    return render_template('home.html', data=[])\n\n@app.errorhandler(404) \ndef not_found(e): \n    return render_template(\"404.html\") \n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"femathic/PGS","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"13897776330","text":"import dns.resolver\nimport socket\nimport deets\n\ndef verify(dnsserver):\n    myResolver = dns.resolver.Resolver()\n    myResolver.timeout = 5\n    myResolver.lifetime = 5\n    try:\n        # Is IP?\n        socket.inet_aton(dnsserver)\n        # Then set our DNS server to the IP\n        myResolver.nameservers = [dnsserver]\n    except socket.error:\n        # Is not IP? 
Then resolve the hostname\n        dnsquery = myResolver.query(dnsserver, \"A\")\n        # We only want the 1st A record\n        arecord = str(dnsquery[0])  # nameservers expects plain IP strings\n        myResolver.nameservers = [arecord]\n    try:\n        # Resolve the FQDN we are testing\n        cdnip = myResolver.query(deets.monitoredsite, \"A\")  # assuming the monitored FQDN lives in the local deets module, which is otherwise unused\n        edgeip = str(cdnip[0])\n    except:\n        edgeip = \"Failure\"\n    return edgeip\n","repo_name":"lileddie/keepit100","sub_path":"monitorverify.py","file_name":"monitorverify.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10188640316","text":"# Integration: trapezoidal rule\n# Using a function fx()\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# INPUT\n\n\ndef fx(x): return np.sin(x)\n\n\n# integration interval\na = 0\nb = np.pi/2\ntramos = 100\n\n# PROCEDURE\n# Trapezoidal rule\n# Using equally spaced segments in the interval\nh = (b-a)/tramos\nxi = a\nsuma = fx(xi)\nfor i in range(0, tramos-1, 1):\n    xi = xi + h\n    suma = suma + 2*fx(xi)\nsuma = suma + fx(b)\narea = h*(suma/2)\n\n# OUTPUT\nprint('Segments: ', tramos)\nprint('Integral (area under the curve): ', area)\n\n# PLOT\n# Sample points\nmuestras = tramos + 1\nxi = np.linspace(a, b, muestras)\nfi = fx(xi)\n# Smooth line\nmuestraslinea = tramos*10 + 1\nxk = np.linspace(a, b, muestraslinea)\nfk = fx(xk)\n\n# Plotting\nplt.plot(xk, fk, label='f(x)')\nplt.plot(xi, fi, marker='o',\n         color='#ebdb34', label='samples')\n\nplt.xlabel('x')\nplt.ylabel('f(x)')\nplt.title('Trapezoidal method')\nplt.legend()\n\n# Trapezoids\nplt.fill_between(xi, 0, fi, color='#00948d')\nfor i in range(0, muestras, 1):\n    plt.axvline(xi[i], color='w')\n\nplt.show()\n","repo_name":"kiovahnleon/CuartoSemestre","sub_path":"metodosNum/Trapecio/trapecio2.py","file_name":"trapecio2.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27153862302","text":"#!/usr/bin/python\n#\n\nfrom bottle import route, run, request, abort\nimport redis\nimport time\nimport json\n\nr_server = redis.Redis(host='localhost', port=6380, db=0)\n\nIPranges = ['1023', '1037', '1038', '1039', '1044', '1045', '1049', '1050', '1051', '1055', '1056', '1061', '10200', '10221', '10230', '7768', '87104', '17228']\n\n\n@route('/v1/stats/err/:type/:callback', method='GET')\ndef getDecodeErr(type, callback):\n    errPrBoxArr = []\n    htmlErrArr = {}\n\n    searchString = 'IP16' + getCurrentTime()\n\n    for key in IPranges:\n        decodeErr = r_server.hget(searchString + key, type)\n        totalOnline = r_server.hget(searchString + key, 'totalOnline')\n\n        if decodeErr:\n            errPrBox = round(float(decodeErr)/float(totalOnline), 4)\n            errPrBox = str(round(errPrBox, 3))\n            errPrBoxArr.append(errPrBox)\n        else:\n            errPrBoxArr.append('0')\n\n    for i, elem in enumerate(errPrBoxArr):\n        IPrange = IPranges[i]\n        htmlErrArr[str(IPrange)] = str(elem)\n\n    return jsonp(callback, dict(htmlErrArr))\n\n\n@route('/status')\ndef status():\n    return {'status': 'online', 'servertime': time.time()}\n\n\ndef getCurrentTime():\n    timeNow = time.time()\n    datetimeUnix = str((int(timeNow)+120)//300*300) # round down to a strict 5 min interval\n    return str(int(datetimeUnix) - 300) # search the previous 5 min interval\n\n\ndef jsonp(request, dictionary):\n    return \"%s(%s)\" % (request, dictionary)\n\nrun(host='88.83.68.98', port=5124, 
reloader=True)\n","repo_name":"morathekid/beewatch","sub_path":"bin/statsApi.py","file_name":"statsApi.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5507114878","text":"class Solution:\r\n    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\r\n        # topological sort problem (I use dfs but you can also use BFS)\r\n        # find indegrees (number of prereqs) for each course\r\n        # have a dictionary with key of course that is a prereq and value of courses it's a prereq for\r\n        # (this is kind of the opposite dictionary layout from what you'd use in dfs)\r\n        # start with courses that have no prereqs\r\n        # pop a course that has no prereq and add it to the list order\r\n        # check which courses have that course as a prereq and get rid of it (decrement by 1)\r\n        # add any courses that now have 0 other prereqs\r\n\r\n        # return the list if its length is the same as numCourses\r\n        \r\n        # time complexity: O(E + V), space complexity: O(3V) => O(V)\r\n\r\n        indegrees = [0] * numCourses\r\n\r\n        prereq_for = {i: [] for i in range(numCourses)}\r\n\r\n        for c, p in prerequisites:\r\n            prereq_for[p].append(c)\r\n            indegrees[c] += 1\r\n        \r\n        res = []\r\n\r\n        no_in_stack = []\r\n\r\n        for idx, val in enumerate(indegrees):\r\n            if val == 0:\r\n                no_in_stack.append(idx)\r\n\r\n        while no_in_stack:\r\n            cur = no_in_stack.pop()\r\n            res.append(cur)\r\n\r\n            for course in prereq_for[cur]:\r\n                indegrees[course] -= 1\r\n                if indegrees[course] == 0:\r\n                    # have to do == 0 instead of <= 0 in case it goes negative\r\n                    no_in_stack.append(course)\r\n        \r\n        return res if len(res) == numCourses else []\r\n    \r\n# neetcode recursive dfs solution:\r\n# O(E + V) time complexity\r\n\r\n# a course has 3 possible states:\r\n# visited -> course has been added to output\r\n# visiting -> course not added to output yet, but added to cycle\r\n# unvisited -> course not added to output or cycle\r\n\r\n# have a visit set for nodes that have been fully visited and added to output\r\n# have a cycle set that is for nodes that are currently being checked, to make sure there are no cycles\r\n# for a node, add it to the cycle and then recursively check all its prereqs\r\n# if all its prereqs return True (all the prereqs have confirmed all their prereqs), then \r\n# remove the node from the cycle, add the node to visited, and add it to the output array and return True\r\n# do this for every node\r\n\r\nclass Solution:\r\n    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\r\n        prereq = {c: [] for c in range(numCourses)}\r\n        for crs, pre in prerequisites:\r\n            prereq[crs].append(pre)\r\n\r\n        output = []\r\n        visit, cycle = set(), set()\r\n\r\n        def dfs(crs):\r\n            if crs in cycle:\r\n                return False\r\n            if crs in visit:\r\n                return True\r\n\r\n            cycle.add(crs)\r\n            for pre in prereq[crs]:\r\n                if dfs(pre) == False:\r\n                    return False\r\n            cycle.remove(crs)\r\n            visit.add(crs)\r\n            output.append(crs)\r\n            return True\r\n\r\n        for c in range(numCourses):\r\n            if dfs(c) == False:\r\n                return []\r\n        return output","repo_name":"alexjooho/dsa","sub_path":"graphs/210_course_schedule_II.py","file_name":"210_course_schedule_II.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1853925483","text":"import random\n\n\ndef objective_function(vector):\n    return sum([x**2 for x in vector])\n\n\ndef random_within_search_space(lower_bound, 
upper_bound):\n return lower_bound + (upper_bound-lower_bound) * random.random()\n\n\ndef generate_random_vector(search_space):\n return [random_within_search_space(lower_bound, upper_bound) for lower_bound, upper_bound in search_space]\n\n\ndef mutate_with_information(candidate, beliefs, search_space):\n new_vector = [0] * len(candidate['vector'])\n for i in range(len(candidate['vector'])):\n new_vector[i] = random_within_search_space(\n beliefs['normative'][i][0], beliefs['normative'][i][1])\n new_vector[i] = max(new_vector[i], search_space[i][0])\n new_vector[i] = min(new_vector[i], search_space[i][1])\n\n return {'vector': new_vector}\n\n\ndef binary_tournament(population):\n i, j = random.randint(0, len(population) -\n 1), random.randint(0, len(population)-1)\n while j == i:\n j = random.randint(0, len(population)-1)\n return population[i] if population[i]['fitness'] < population[j]['fitness'] else population[j]\n\n\ndef initialize_beliefspace(search_space):\n beliefspace = {}\n beliefspace['situational'] = None\n beliefspace['normative'] = [search_space[i]\n for i in range(len(search_space))]\n\n return beliefspace\n\n\ndef update_beliefspace_situational(beliefspace, best_individual):\n current_leader = beliefspace['situational']\n if current_leader == None or best_individual['fitness'] < current_leader['fitness']:\n beliefspace['situational'] = best_individual\n\n\ndef update_beliefspace_normative(beliefspace, newly_accepted_beliefs):\n for i, normative_beliefspace_bounds in enumerate(beliefspace['normative']):\n normative_beliefspace_bounds[0] = min(\n newly_accepted_beliefs, key=lambda x: x['vector'][i])['vector'][i]\n normative_beliefspace_bounds[1] = max(\n newly_accepted_beliefs, key=lambda x: x['vector'][i])['vector'][i]\n\n\ndef search(max_gens, search_space, population_size, number_of_accepted_beliefs):\n population = [{'vector': generate_random_vector(\n search_space)} for _ in range(population_size)]\n\n beliefspace = initialize_beliefspace(search_space)\n\n for individual in population:\n individual['fitness'] = objective_function(individual['vector'])\n\n population = sorted(population, key=lambda x: x['fitness'])\n best = population[0]\n\n update_beliefspace_situational(beliefspace, best)\n\n for _ in range(max_gens):\n children = [mutate_with_information(\n individual, beliefspace, search_space) for individual in population]\n for child in children:\n child['fitness'] = objective_function(child['vector'])\n\n children = sorted(children, key=lambda x: x['fitness'])\n best = children[0]\n\n update_beliefspace_situational(beliefspace, best)\n population = [binary_tournament(\n population + children) for _ in range(population_size)]\n\n population = sorted(population, key=lambda x: x['fitness'])\n accepted = population[:number_of_accepted_beliefs]\n\n update_beliefspace_normative(beliefspace, accepted)\n\n return beliefspace['situational']\n\n\nif __name__ == '__main__':\n problem_size = 2\n search_space = [[-5, 5] for _ in range(problem_size)]\n max_gens = 200\n population_size = 100\n number_of_accepted_beliefs = round(population_size * 0.2)\n\n best = search(max_gens, search_space, population_size,\n number_of_accepted_beliefs)\n\n print(f\"done! 
Solution: f={best['fitness']}, s={best['vector']}\")\n","repo_name":"UPocek/NatureInspiredAlgorithms","sub_path":"physical/cultural_algorithm.py","file_name":"cultural_algorithm.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6989417935","text":"# -*- coding: utf-8 -*-\nfrom celery import task\nfrom celery.utils.log import get_task_logger\nimport time\nfrom course.models import *\n\n\nlogger = get_task_logger(__name__)\nlogger.setLevel('DEBUG')\n\n@task(name='save_event')\ndef save_event(user, message, type, lesson, text_size, text_color, text_align):\n    logger.info('Save message...')\n    e = Event()\n    e.tp = type\n    e.text = message\n    e.lesson = lesson\n    e.text_size = text_size\n    e.text_color = text_color\n    e.text_align = text_align\n    e.save()\n\n@task(name='save_chat_message')\ndef save_chat_message(user, message, lesson):\n    logger.info('Save chat message...')\n    m = ChatMessages()\n    m.text = message\n    m.lesson = lesson\n    m.user = user\n    m.save()\n\n","repo_name":"zdimon/quiz-angular-django","sub_path":"examples/co/course/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71065396373","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAdd a circle-selection tool to Chaco: pick a center point and select every data point within a given radius\n\"\"\"\nimport numpy as np\nfrom enthought.traits.api import HasTraits, Instance, Enum, Property, Float, Event\nfrom enthought.traits.ui.api import View, Item\nfrom enthought.enable.api import color_table\nfrom enthought.enable.component_editor import ComponentEditor \nfrom enthought.chaco.api import Plot, ArrayPlotData, AbstractController\nfrom enthought.chaco.api import AbstractOverlay, ScatterInspectorOverlay\n\n\nclass CircleSelectionOverlay(AbstractOverlay): \n    metadata = Property(depends_on = 'component') \n    \n    def _get_metadata(self):\n        return self.component.index.metadata \n    \n    def overlay(self, component, gc, view_bounds=None, mode=\"normal\"):\n        if self.metadata.has_key('circle_center'):\n            x, y = self.metadata['circle_center'] \n            r = self.metadata['circle_radius']\n            gc.save_state()\n            gc.set_alpha(0.4)\n            gc.set_fill_color(color_table[\"lightskyblue\"])\n            gc.set_stroke_color(color_table[\"dodgerblue\"])\n            gc.set_line_width(1)\n            gc.set_line_dash(None)\n            gc.arc(x, y, r, 0.0, 2*np.pi) \n            gc.draw_path()\n            gc.restore_state()\n\n\nclass CircleSelection(AbstractController): \n    metadata = Property(depends_on = 'component')\n    event_state = Enum('normal', 'selecting', 'selected', 'moving') \n    selection_update = Event \n    x = Float # X coordinate of the circle center\n    y = Float # Y coordinate of the circle center\n    r = Float # radius\n    mx = Float # mouse X coordinate when the drag starts\n    my = Float # mouse Y coordinate when the drag starts\n    x0 = Float # circle-center X coordinate when the drag starts\n    y0 = Float # circle-center Y coordinate when the drag starts\n\n    def _get_metadata(self):\n        return self.component.index.metadata\n    \n\n    def normal_left_down(self, event):\n        self.x, self.y = event.x, event.y\n        self.metadata['circle_center'] = self.x, self.y\n        self.metadata['circle_radius'] = 0\n        self.event_state = 'selecting'\n    \n    def selecting_mouse_move(self, event):\n        self.r = np.sqrt((self.x-event.x)**2 + (self.y-event.y)**2)\n        self.metadata['circle_radius'] = self.r\n        self._update_selection()\n    \n    def selecting_left_up(self, event):\n        self.event_state = 'selected'\n    \n    def selected_left_down(self, event):\n        r = np.sqrt((self.x-event.x)**2 + (self.y-event.y)**2)\n        if r > self.r:\n            del self.metadata['circle_center']\n            del self.metadata['circle_radius']\n            del 
self.metadata['selections']\n self.selection_update = True\n self.event_state = 'normal'\n else:\n self.mx, self.my = event.x, event.y\n self.x0, self.y0 = self.x, self.y\n self.event_state = 'moving'\n \n def moving_mouse_move(self, event):\n self.x = self.x0 + event.x - self.mx\n self.y = self.y0 + event.y - self.my\n self.metadata['circle_center'] = self.x, self.y\n self._update_selection()\n \n def moving_left_up(self, event):\n self.event_state = 'selected'\n \n \n def _update_selection(self):\n points = np.transpose(np.array((self.component.index.get_data(), \n self.component.value.get_data()))) \n screen_points = self.component.map_screen(points) \n tmp = screen_points - np.array([self.x, self.y])\n tmp **= 2\n dist = np.sum(tmp, axis=1)\n self.metadata['selections'] = dist < self.r*self.r \n self.selection_update = True\n\n\nclass CircleSelectionDemo(HasTraits):\n plot = Instance(Plot) \n data = Instance(ArrayPlotData)\n traits_view = View(\n Item('plot',editor=ComponentEditor(), show_label=False), \n width=500, height=500, resizable=True, title=\"Circle Selection Plot\")\n \n def __init__(self, **traits):\n super(CircleSelectionDemo, self).__init__(**traits)\n x = np.random.random(100)*2\n y = np.random.random(100)\n data = ArrayPlotData(x=x, y=y) \n plot = Plot(data) \n \n scatter = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n scatter.tools.append( CircleSelection(scatter) )\n scatter.overlays.append( ScatterInspectorOverlay(scatter, \n selection_color=\"red\", selection_marker=\"circle\", \n selection_outline_color = \"black\",\n selection_marker_size = 6) )\n scatter.overlays.append( CircleSelectionOverlay(scatter) )\n \n self.plot = plot\n self.data = data\n\nif __name__ == \"__main__\":\n p = CircleSelectionDemo()\n p.configure_traits() ","repo_name":"xiexiaoshinick/Python-scientific-computation","sub_path":"08/chaco_tools_circle.py","file_name":"chaco_tools_circle.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"67"} +{"seq_id":"37567473692","text":"import asyncio\nfrom gpgui import dash, dmc, idp, html, dash_player, dcc\nfrom gpgui.cbtools import cbm\nfrom quart import make_response\nfrom gpgui.sockets import Message, SocketComponentPath\nfrom dash_extensions import WebSocket\nfrom websockets.legacy import server\nimport plotly.express as px\nimport time\n\ndash.register_page(__name__)\nidp = idp.calibration\n\n\ndef otherthing():\n import numpy as np\n import asyncio\n from websockets.legacy import server\n from websockets.legacy.server import WebSocketServerProtocol\n import cv2\n\n async def serve_img():\n async def serve(websocket: WebSocketServerProtocol):\n for i in range(100000):\n img = np.zeros((1440, 2560, 3), dtype=np.uint8)\n img[i : i + 4, i : i + 4] = [255, 255, 255]\n data = cv2.imencode(\".png\", img)[1].tobytes()\n await websocket.send(data)\n await asyncio.sleep(0.05)\n\n async with server.serve(serve, \"10.53.58.89\", 9876):\n for i in range(100000):\n print(\"running\", i)\n await asyncio.sleep(1)\n\n if __name__ == \"__main__\":\n asyncio.run(serve_img())\n\n\nlayout = dmc.Paper(\n [\n WebSocket(id=idp.socket, url=\"ws://10.53.58.89:9876\"),\n dmc.Affix(\n html.Img(\n id=idp.img,\n src=\"\",\n width=\"100%\",\n height=\"100%\",\n ),\n position={\"top\": \"0\", \"left\": \"0\"},\n style={\"height\": \"100%\", \"width\": \"100%\"},\n zIndex=10000,\n ),\n ],\n p=\"xl\",\n)\n\n\n@cbm.js_callback(idp.img.as_output(\"src\"))\nasync def 
update_image(message: Message = idp.socket.as_input(\"message\")):\n \"\"\"\n if (message) {\n return message.data.arrayBuffer().then(buffer => {\n let b64 = btoa(String.fromCharCode(...new Uint8Array(buffer)));\n return \"data:image/png;base64,\" + b64;\n });\n } else {\n return no_update\n };\n \"\"\"\n","repo_name":"emillma/gpgui","sub_path":"test_project/pages/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38268721791","text":"\"\"\"Blogly application.\"\"\"\n\nfrom flask import Flask, redirect, render_template, request\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, connect_db, User\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\n\n\nconnect_db(app)\n\n# Use once to setup the database using the models\n# with app.app_context():\n# db.create_all()\n\napp.config['SECRET_KEY'] = \"Secret Secret Secret\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\ntoolbar = DebugToolbarExtension(app)\n\n\n@app.route(\"/\")\ndef root_route():\n \"\"\" Homepage\"\"\"\n return redirect(\"/users\")\n\n\n@app.route(\"/users\")\ndef users_route():\n \"\"\" List of Users\"\"\"\n users = User.query.order_by(User.last_name).all()\n\n return render_template(\"home.html\", users = users)\n\n@app.route(\"/users/new\", methods=[\"GET\"])\ndef create_user_form():\n \"\"\" Create a new User\"\"\"\n\n return render_template(\"new_user.html\")\n\n\n@app.route(\"/users/new\", methods=[\"POST\"])\ndef create_user_db():\n \"\"\" Create a new User\"\"\"\n new_user = User(\n first_name = request.form['first_name'],\n last_name = request.form['last_name'],\n image_url = request.form['image_url'] or '/static/img/default.jpg')\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")\n\n\n@app.route(\"/users/\")\ndef show_user_details(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"user_detail.html\", user=user)\n\n\n@app.route(\"/users//edit\", methods=[\"GET\"])\ndef edit_user_details_form(user_id):\n \n user = User.query.get_or_404(user_id)\n\n return render_template(\"user_detail_edit_form.html\", user=user)\n\n\n@app.route(\"/users//edit\", methods=[\"POST\"])\ndef edit_user_details(user_id):\n\n user = User.query.get_or_404(user_id)\n user.first_name = request.form[\"first_name\"]\n user.last_name = request.form[\"last_name\"]\n user.image_url = request.form['image_url'] or '/static/img/default.jpg'\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(f\"/users/{user.id}\")\n\n@app.route(\"/users//delete\", methods=[\"POST\"])\ndef delete_user(user_id):\n \n user = User.query.get_or_404(user_id)\n \n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")\n","repo_name":"kcsid7/blogly01","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13980241043","text":"from datetime import datetime\nfrom src.lib.Analysis import Analysis\n\nclass SimulationV5:\n def __init__(self, dataset, walletA, walletB):\n self.dataset = [[float(x) for x in d] for d in dataset]\n self.walletA = walletA\n self.walletB = walletB\n self.analysis = Analysis(self.dataset)\n self.buyPosition = False\n 
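# price levels for the currently open position; both are set when a buy signal fires in makeDecision\n        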
self.stopLoss = 0\n        self.takeProfit = 0\n        self.win = 0\n        self.loss = 0\n        self.risk = 1\n\n    def updateDataset(self, lastCandle):\n        self.dataset.pop(0)\n        self.dataset.append([float(x) for x in lastCandle])\n        self.analysis.setCandles(self.dataset)\n\n    def priceAction(self):\n        lastIndex = len(self.dataset) - 1\n        lastVolume = self.dataset[lastIndex]\n        for i in range(1, 3):\n            v = self.dataset[lastIndex - i]\n            if v[5] < lastVolume[5] and v[4] > lastVolume[4]:\n                lastVolume = v\n            \n            else:\n                return False\n        return True\n\n    def makeDecision(self, candle):\n        self.updateDataset(candle)\n        price = float(candle[1])\n        if self.priceAction() and not self.buyPosition:\n            self.takeProfit = price - (price * 0.01)\n            self.stopLoss = price + (price * 0.01)\n            self.walletA = (self.walletB * self.risk) * price\n            self.walletB = self.walletB - (self.walletB * self.risk)\n            self.buyPosition = True\n        \n        if self.buyPosition:\n            if price <= self.takeProfit:\n                print(datetime.fromtimestamp(int(candle[0])/1000), \"\\033[32m WIN\\033[39m\")\n                self.walletB = self.walletA / price\n                self.walletA = 0\n                self.buyPosition = False\n                self.risk = 1\n                self.win += 1\n            elif price >= self.stopLoss:\n                print(datetime.fromtimestamp(int(candle[0])/1000), \"\\033[31m LOSS\\033[39m\")\n                self.walletB = self.walletA / price\n                self.walletA = 0\n                self.buyPosition = False\n                if self.risk < 1:\n                    self.risk *= 2\n                self.loss += 1","repo_name":"SamihOuague/PythonBOT","sub_path":"src/simulations/simulationV5.py","file_name":"simulationV5.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70938632533","text":"# %%\ndoc = 'Assets/countries.csv'\nimport pandas as pd\n\ndf0 = pd.read_csv(doc)\n\ndf = df0[['code', 'name', 'id']].copy()\ndf['Countries'] = df['code'] + ' - ' + df['name']\ndf['Index'] = range(0,df.shape[0])\ndf['Index'] = df['Index'].apply(lambda x: str(x).zfill(4))\ndf.dropna(inplace=True)\nlst = []\n\nfor id, name in zip(df['Index'], df['Countries']):\n    lst.append(\n        {\"ID\": id, \"Country\": name})\n\nimport json\n\ncontent = json.dumps(lst,ensure_ascii=False)\nwith open('country_codes.json', 'w') as js:\n    js.write(content)\n\n# %%\n","repo_name":"edenmui/edenmui.github.io","sub_path":"get_countries.py","file_name":"get_countries.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16098392702","text":"\"\"\"\r\ninput file preparation for HYPHY\r\n\r\n\"\"\"\r\n__author__ = 'Zerodel_2'\r\n\r\n\r\nclass sequenceNotSameLength(Exception):\r\n    \"\"\"\r\n    raised when sequences have different lengths\r\n    \"\"\"\r\n    pass\r\n\r\nclass notStr(Exception):\r\n    pass\r\n\r\n# read dot-aln files and return the gene names in one aln file\r\ndef aln_reader(dot_aln_file):\r\n    \"\"\"\r\n    read an .aln file and report the genes in it.\r\n    Args:\r\n        1. dot_aln_file : file name of the .aln file, which contains an alignment of several genes\r\n    Return:\r\n        1. 
genes: a list of strings representing the genes\r\n    \"\"\"\r\n    with open(dot_aln_file, \"r\") as aln:\r\n        genes = [line.split()[0] for line in aln]\r\n\r\n    return genes\r\n\r\ndef lines_to_input(aln_lines, input_file_path):\r\n    \"\"\"\r\n    translate aln_lines from an .aln file into an input file : input_file_path\r\n    :param aln_lines:\r\n    :param input_file_path:\r\n    :return:\r\n    \"\"\"\r\n    hyphy_input_content = [\">%s\\n%s\\n\" % (line.split()[0], line.split()[1])\r\n                           for line in aln_lines]\r\n\r\n    with open(input_file_path, \"w\") as Hyphy_input_writer:\r\n        Hyphy_input_writer.writelines(hyphy_input_content)\r\n\r\n\r\ndef aln2input(dot_aln_file, hyphy_input_file):\r\n    \"\"\"translate dot-aln file to \"input\" file for HYPHY\r\n\r\n    Args:\r\n        dot_aln_file: file name of .aln alignment file\r\n        hyphy_input_file : translated input file for hyphy process\r\n\r\n    :Return:\r\n        nothing\r\n    \"\"\"\r\n    with open(dot_aln_file, 'r') as aln:\r\n\r\n        hyphy_input_content = [\">%s\\n%s\\n\" % (line.split()[0], line.split()[1])\r\n                               for line in aln]\r\n\r\n\r\n\r\n    with open(hyphy_input_file, \"w\") as Hyphy_input_writer:\r\n        Hyphy_input_writer.writelines(hyphy_input_content)\r\n\r\n\r\ndef aln2inputNogap(dot_aln_file, hyphy_input_file):\r\n    with open(dot_aln_file, \"r\") as aln:\r\n        contents = aln.readlines()\r\n    gene_titles = [line.split()[0].strip() for line in contents]\r\n    gene_sequences = [line.split()[-1].strip() for line in contents]\r\n\r\n    check_sequence_matrix(gene_sequences)\r\n    sequence_no_gap = remove_gaps_matrix(gene_sequences)\r\n\r\n    with open(hyphy_input_file, \"w\") as write_hyphy:\r\n        for index_i, gene_title in enumerate(gene_titles):\r\n            write_hyphy.write(\">\" + gene_title.strip() + \"\\n\")\r\n            write_hyphy.write(sequence_no_gap[index_i].strip() + \"\\n\")\r\n\r\n\r\ndef check_sequence_matrix(sequence_matrix):\r\n\r\n    length_1 = len(sequence_matrix[0])\r\n\r\n    for line in sequence_matrix:\r\n        if not isinstance(line, str):\r\n            raise notStr\r\n        if not length_1 == len(line):\r\n            raise sequenceNotSameLength\r\n\r\n\r\ndef remove_gaps_matrix(sequence_matrix):\r\n    check_sequence_matrix(sequence_matrix)\r\n    codon_sum = len(sequence_matrix[0])//3  # integer codon count, so range() below works on Python 3 as well\r\n    tmp_matrix_filter = [[] for line in sequence_matrix]\r\n\r\n    # using a sliding window over codons\r\n    for codon_i in range(codon_sum):\r\n        start_point = codon_i*3\r\n        end_point = start_point + 3\r\n        content_in_window = [line[start_point:end_point] for line in sequence_matrix]\r\n        # if \"-\" exists, reject the whole window\r\n        if \"-\" not in \"\".join(content_in_window):\r\n            # no \"-\"\r\n            for index_line, line_filtered in enumerate(tmp_matrix_filter):\r\n                line_filtered.append(content_in_window[index_line])\r\n\r\n    sequence_matrix_filter = [\"\".join(line) for line in tmp_matrix_filter]\r\n    return sequence_matrix_filter\r\n\r\ndef remove_gaps(sequence_string):\r\n    \"\"\"Remove \"---\" gaps from the given sequence string.\"\"\"\r\n    num_codon = len(sequence_string)//3  # integer codon count\r\n    seq_gap_free = []\r\n    for codon_i in range(num_codon):\r\n        codon_content = sequence_string[(codon_i*3):(codon_i*3 + 3)]\r\n        if '-' in codon_content:\r\n            continue\r\n        seq_gap_free.append(codon_content)\r\n    sequence_main = \"\".join(seq_gap_free)\r\n    return sequence_main\r\n\r\ndef aln_info(aln_file):\r\n    \"\"\"read a .aln file and report gene names and the length of each sequence\"\"\"\r\n    with open(aln_file, \"r\") as aln:\r\n        infos = [(line.split()[0], len(line.split()[1])) for line in aln]\r\n\r\n    genes = [info[0] for info in infos]\r\n    lengths = [info[1] for info in infos]\r\n\r\n    return genes, 
lengths\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    pass","repo_name":"zerodel/python-rna-structure","sub_path":"pyHYPHY/DataHYPHY.py","file_name":"DataHYPHY.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42855752896","text":"\"\"\"\nAggregate the measurement data\n\"\"\"\n# pylint: disable=W1510\nimport datetime\nimport math\nimport msvcrt\nimport os\nimport shutil\nimport sys\nimport subprocess\n\n\ndef call_cmd(command:str, **kwargs):\n    \"\"\"\n    Run a cmd command\n    \"\"\"\n    if isinstance(command, (str, list, tuple)):\n        if 'shell' not in kwargs:\n            kwargs.update({'shell':True})\n        _ = subprocess.run(command, **kwargs)\n    else:\n        print(\">>> [ERROR] Not a valid command : \" + str(command))\ncall_cmd(\"cls\")\n#call_cmd(\"color 0B\")\ncall_cmd(\"title Phi Dependency Automatic Calculator\")\n\n# command-line width\nCMD_TERMINAL_LENGTH = shutil.get_terminal_size().columns\n# current directory\nCWD = os.path.dirname(os.path.abspath(__file__))\n# so that ngraph.py can be imported\nsys.path.append(CWD)\n\n\n# debug mode (for internal testing)\n# do not change\nIS_DEBUG = not __debug__\nif IS_DEBUG:\n    print(\"Debug mode ON\")\n\n# startup screen\nprint('\\033[38;5;010m#'*CMD_TERMINAL_LENGTH)\nprint(\"Phi Dependency Automatic Calculator v2.00 (Update : 2021/01/24, Created by OHASHI)\")\nprint(\">>> \\033[38;5;011mSee \\\"How to use.txt\\\" for usage instructions.\\033[38;5;010m\")\nprint(\">>> \\033[38;5;011mWhen using the folder browser, the first load is a little slow.\\033[38;5;010m\")\nprint(\">>> \\033[38;5;011mIf the browse window or the progress bar does not show up, press any key.\\033[38;5;010m\")\nprint('#'*CMD_TERMINAL_LENGTH)\nsys.stdout.write(\"\\n\")\n\nFOR_VEE = len(sys.argv) > 1\nif FOR_VEE:\n    print(\">>>\\033[38;5;011m [LOG] Command-line arguments detected.\\033[38;5;010m\")\n\n\n# pylint: disable=wrong-import-position\nfrom glob import iglob\nfrom glob import escape as escape_chr\nfrom statistics import mode\nfrom tkinter import Tk\nfrom tkinter.filedialog import askdirectory\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tqdm\nfrom scipy import optimize as opt\nfrom sklearn import linear_model\nfrom time import sleep\n\nfrom lib.ngraph import NgraphWriter\nfrom lib.si import get_prefix, get_best_prefix\n# pylint: enable=wrong-import-position\n\n\n# The values below are changed internally, so do not touch them.\n#_____________from here_____________\n# for browsing: has a folder already been selected?\nDIR_SELECTED = False\n# directory path chosen in the browser\nINPUT_PATH = None\n# data output path\nDEST_PATH = None\n# Ngraph mode\nNGP_MODE = None\n# prefix\nPREFIX_V = []\nPREFIX_I = []\nPREFIX_G = []\n# excel(I-V) plot\nIV_DATA = {}\n#______________to here_____________\n\n# plt initialize\nplt.rcParams['xtick.direction'] = 'in'\nplt.rcParams['ytick.direction'] = 'in'\n\n##################################### EDIT HERE #####################################\nANGLE_ADD = 22.5 # change this to alter the rotation-angle step\n###################################################################################\n\n\n# pylint: disable=invalid-name\ndef browse():\n    \"\"\"\n    Browse for the input folder\n    \"\"\"\n    global INPUT_PATH # pylint: disable=global-statement\n    global DIR_SELECTED # pylint: disable=global-statement\n\n    if FOR_VEE:\n        INPUT_PATH = sys.argv[1]\n        if len(sys.argv) > 2:\n            for i in range(2, len(sys.argv)):\n                INPUT_PATH += ' ' + sys.argv[i]\n        DIR_SELECTED = True\n    elif not DIR_SELECTED:\n        # is there a log from the previous run?\n        find_path = os.path.join(CWD, '.record')\n        if os.path.exists(find_path):\n            with open(find_path, 'r') as f:\n                ask_dir = f.readline().rstrip()\n            if not os.path.exists(ask_dir):\n                ask_dir = CWD\n        else:\n            ask_dir = CWD\n\n        # open the directory browser\n        root = Tk()\n        root.withdraw()\n        INPUT_PATH = 
askdirectory(initialdir=ask_dir)\n if INPUT_PATH == \"\":\n print(\">>> キャンセルされました。\\033[0m\")\n sys.exit(0)\n DIR_SELECTED = True\n\n\ndef get_curtime(for_folder=True) -> str:\n \"\"\"\n 現在時刻を入手\n \"\"\"\n times = datetime.datetime.now()\n time_ymd = times.strftime('%y%m%d-%H%M') \\\n if for_folder else times.strftime('%Y/%m/%d %H:%M')\n\n return time_ymd\n\n\ndef write_pathlog():\n \"\"\"\n 読み込んだフォルダパスを書き込み(次回以降の参照先にするため)\n \"\"\"\n record_file = '.record'\n file_path = os.path.join(CWD, record_file)\n with open(file_path, 'w') as f:\n f.write(INPUT_PATH + '\\n')\n f.write('Last used:'+ get_curtime(False))\n\n\ndef convert_to_prefix_values(data, x_exp, y_exp):\n \"\"\"\n データを接頭辞に合わせる\n \"\"\"\n\n if isinstance(data, list):\n _data = np.array(data)\n else:\n _data = data.copy()\n \n _data[0] = _data[0]/10**x_exp\n _data[1] = _data[1]/10**y_exp\n\n return _data\n\n\ndef get_best_model(data):\n \"\"\"\n 決定係数がもっともよいモデルを取得し、予測値を取得\n \"\"\"\n\n count = len(data[0])>>1\n models = []\n scores = np.zeros((count, ), dtype=np.float32)\n for i in range(count):\n model = linear_model.LinearRegression()\n x = np.array(data[0][i:], dtype=np.float32).reshape(-1,1)\n y = np.array(data[1][i:], dtype=np.float32).reshape(-1,1)\n model.fit(x, y)\n\n models.append(model)\n scores[i] = model.score(x,y)\n\n model = models[np.argmax(scores)]\n x = np.array(data[0], dtype=np.float32).reshape(-1,1)\n y = model.predict(x).reshape(1,-1)[0]\n\n returns = np.vstack([x.reshape(1,-1)[0], y])\n\n return model, returns\n\n\ndef write_linear_graph(data, fname):\n \"\"\"\n 線形回帰から傾きと切片を求め、グラフを出力\n \"\"\"\n # 線形モデル\n global PREFIX_I, PREFIX_V, PREFIX_G, IV_DATA\n\n model, pred = get_best_model(data)\n v_pref = get_prefix(pred[0])\n i_pref = get_prefix(pred[1])\n\n coef, intercept = model.coef_[0][0], model.intercept_[0]\n\n cf_prefixes = get_prefix(coef)\n ic_prefixes = get_prefix(intercept)\n\n i_sc = intercept/10**ic_prefixes[1]\n cond = coef/10**cf_prefixes[1]\n\n\n liner_image_path = os.path.join(\n DEST_PATH, 'I-V characteristics',\n os.path.splitext(fname)[0]+'_plot.png')\n\n v_prefixes = get_prefix(intercept/coef)\n fix_data = convert_to_prefix_values(data, v_pref[1], i_pref[1])\n fix_pred = convert_to_prefix_values(pred, v_pref[1], i_pref[1])\n plt.title(f\"Slope:{cond:.3f} {cf_prefixes[0]}S, Intercept:{i_sc:.3f} {ic_prefixes[0]}A\")\n plt.xlabel(f'Voltage ({v_pref[0]}V)')\n plt.ylabel(f'Current ({i_pref[0]}A)')\n plt.plot(fix_data[0], fix_data[1], marker='o', linestyle='None', color='black')\n plt.plot(fix_pred[0], fix_pred[1], linestyle='solid', color='red')\n plt.savefig(liner_image_path)\n plt.close()\n\n PREFIX_I.append(ic_prefixes[1])\n PREFIX_V.append(v_prefixes[1])\n PREFIX_G.append(cf_prefixes[1])\n IV_DATA[os.path.splitext(fname)[0]] = {\n 'prefix' : [v_pref, i_pref],\n 'data' : np.vstack([fix_data[0], fix_data[1], fix_pred[1]]).T\n }\n return intercept/coef, -intercept, coef # 短絡電流はマイナスにしておく\n\n\n#___################################# 変更場所 #####################################\ndef formula(x, a, b, c):\n # ↑ 理論式の変数の個数に合わせて変数を追加する\n ###############################################################################\n \"\"\"\n 理論式(deg)\n 林本さん卒論3章 or 卒研フォルダを参照\n \"\"\"\n ##################################### 変更場所 #####################################\n # 理論式を変える場合はここを変える\n # 現在は(001)配向のBFOの偏光角依存性\n return a * np.sin(np.pi*(x/90. 
+ b/180.)) + c\n ###################################################################################\n\n\ndef calc_y_bars(plt_ax):\n \"\"\"\n y軸のスケール幅を設定する\n \"\"\"\n\n act_bool = plt_ax[0]*plt_ax[1] > 0 # 全て負 or 正\n yma = max(abs(plt_ax[0]), abs(plt_ax[1]))\n ymm = min(abs(plt_ax[0]), abs(plt_ax[1]))\n if IS_DEBUG:\n print(plt_ax, yma, ymm)\n if act_bool:\n # 基点を調整する\n ref_gap = (yma+ymm)/2\n yma -= ref_gap\n ymm -= ref_gap\n else:\n ref_gap = 0.\n ymm = -ymm\n npow = round(math.log10(yma))\n # 軸幅の設定\n if yma/10**npow < 1.5:\n step = 0.5*10**npow\n elif yma/10**npow < 3.5:\n step = 1.0*10**npow\n elif yma/10**npow <= 5.0:\n step = 2.0*10**npow\n else:\n step = 5.0*10**npow\n\n if step < 0.1:\n while step < 0.1:\n # 別に問題ではないが、ステップ値が小さすぎるとngraphの軸目盛が表示されない\n step *= 2\n\n def rangeset(value):\n if value == 0:\n return step\n n = abs(value)\n ret = step\n i = 2\n while n > ret:\n ret = step*i\n i += 1\n return ret if value >=0 else -ret\n\n if act_bool:\n # 基点をもとの場所に戻す\n fix_gap = rangeset(ref_gap)\n sgn = -1 if max(plt_ax) < 0 else 1\n y_min = rangeset(ymm) + fix_gap * sgn\n y_max = rangeset(yma) + fix_gap * sgn\n # さらに補正\n while (\n y_min > min(plt_ax)\n or y_max < max(plt_ax)\n ):\n y_min -= step\n y_max += step\n else:\n y_min = rangeset(ymm)\n y_max = rangeset(yma)\n # 上下の高さを統一する\n y_min = max(abs(y_max), abs(y_min)) * abs(y_min) / y_min\n y_max = max(abs(y_max), abs(y_min)) * abs(y_max) / y_max\n\n check = int(abs((y_max-y_min)/step))\n if check == 2:\n # 軸数が2つの場合は4つにする\n step /= 2\n elif check > 6:\n # 軸数が6より大きい場合は半分にする\n step *= 2\n return [y_min, y_max, step]\n\n\ndef correct_x(x):\n \"\"\"\n x軸の最大値を調節\n \"\"\"\n if abs(x) % 45 != 0:\n ret = ANGLE_ADD*2\n i = 2\n while abs(x) > ret:\n ret = ANGLE_ADD*2*i\n i += 1\n x_max = ret if x >=0 else -ret\n else:\n x_max = x\n\n return x_max\n\n\ndef update_ngp_mode(mode):\n \"\"\"\n NgraphWriterのモードを更新\n \"\"\"\n global NGP_MODE # pylint: disable=global-statement\n NGP_MODE = mode\n\n\ndef write_ngp_data(angles, y_data, y_scales, s_params, txtpath, prefix):\n \"\"\"\n Ngraphデータを生成\n \"\"\"\n with open(txtpath, 'w') as f:\n f.writelines([\n f'{angles[i]}\\t{y_data[i]}\\n' for i in range(len(angles))])\n\n directory = os.path.join('.', os.path.split(txtpath)[1]).replace(\"\\\\\", '/')\n\n def encode_escape(string):\n if string == '\\xb5':\n string = str(string.encode('unicode-escape')).split(\"'\")[1][1:]\n return string\n\n ngp = NgraphWriter(NGP_MODE)\n _txt = r\"'Short circuit current %f{HelvI}I%f{Helv}_sc@ \"+f\"({encode_escape(prefix)}A)'\"\\\n if NGP_MODE == 'i' else\\\n r\"'Open circuit voltage %f{HelvI}V%f{Helv}_oc@ \"+f\"({encode_escape(prefix)}V)'\"\n ##################################### 変更場所 #####################################\n # ([])内の要素を追加できます。\n # 各リストの1番目はクラス番号(何番目のクラスか)、2番目はクラス名(ngpファイルの\n # 「new ...」)、 3番目はクラス内の変数名、4番目は代入する変数名です。\n # 文字を代入する場合は\"''\"のようにアポストロフィを入れないといけない場合と、\n # 入れなくても\"\"か''でいいようなケースがあるので注意してください。\n # 間違っていると生成されたngpファイルを開いたらエラーが出ます。\n ngp.write([\n [0,'axis name:fX1', 'min', angles[0]],\n [0,'axis name:fX1', 'max', correct_x(angles[-1])],\n [0,'axis name:fX1', 'inc', ANGLE_ADD*2],\n [1,'axis name:fY1', 'min', y_scales[0]],\n [1,'axis name:fY1', 'max', y_scales[1]],\n [1,'axis name:fY1', 'inc', y_scales[2]],\n [5, 'file', 'file', f\"'{directory}'\"],\n # 理論式を変える場合はここも変える\n # 書き方はngpファイル参照(fit::equation)\n [6,'fit','equation',\n f\"'{s_params[0]}*\"\\\n f\"SIN(PI*(X/90+{s_params[1]}/180))+{s_params[2]}'\"],\n [7, 'file', 'file', f\"'{directory}'\"],\n [8, 'text', 'text', _txt]\n ])\n 
###################################################################################\n ngp.out(txtpath.replace('.txt', '.ngp'))\n\n\ndef write_polarization_graph(data):\n \"\"\"\n 偏光角-開放端電圧、偏光角-短絡電流のグラフを書き出す\n \"\"\"\n # 分割\n angles, voltages, currents = data\n\n # カーブフィッティング\n # formulaの変数を求める\n sv_params = opt.curve_fit(formula, angles, voltages)[0]\n sa_params = opt.curve_fit(formula, angles, currents)[0]\n\n # 曲線を書く用のx軸データ\n graph_xlim = correct_x(angles[-1])+(ANGLE_ADD*2)\n polar_angles = np.arange(\n angles[0], graph_xlim, step=2)\n\n # 曲線用のデータを作成\n pred_y = np.vstack([\n [formula(angle, *sv_params) for angle in polar_angles],\n [formula(angle, *sa_params) for angle in polar_angles]\n ])\n\n # 単位修正\n v_prefixes = get_best_prefix(PREFIX_V)\n i_prefixes = get_best_prefix(PREFIX_I)\n voltages = np.array(voltages)/10**v_prefixes[1]\n currents = np.array(currents)/10**i_prefixes[1]\n pred_y = convert_to_prefix_values(pred_y, v_prefixes[1], i_prefixes[1])\n for i in [0, 2]:\n sv_params[i] = sv_params[i]/10**v_prefixes[1]\n sa_params[i] = sa_params[i]/10**i_prefixes[1]\n\n # フィッテイング書き出し\n with open(os.path.join(DEST_PATH, 'fitting_result.txt'), 'w') as f:\n f.writelines([\n 'Voc fitting\\n',\n f'\\tAmplitude : {sv_params[0]:>10.6} {v_prefixes[0]}V\\n',\n f'\\t{\"Phase\".ljust(9)} : {sv_params[1]:>10.6} deg\\n',\n f'\\tIntercept : {sv_params[2]:>10.6} {v_prefixes[0]}V\\n',\n 'Isc fitting\\n',\n f'\\tAmplitude : {sa_params[0]:>10.6} {i_prefixes[0]}A\\n',\n f'\\t{\"Phase\".ljust(9)} : {sa_params[1]:>10.6} deg\\n',\n f'\\tIntercept : {sa_params[2]:>10.6} {i_prefixes[0]}A\\n'\n ])\n # グラフ書き出し\n fig, (graph_l, graph_r) = plt.subplots(ncols=2, figsize=(10,4))\n\n def plot_voltage():\n \"\"\"\n 偏光角-開放端電圧特性\n \"\"\"\n graph_l.plot(angles, voltages, linestyle='None', marker='o', color='green')\n graph_l.plot(polar_angles, pred_y[0],\n label=f'Voc={sv_params[0]:.3f}'\\\n f'sin(2φ{sv_params[1]*3.1415926535/180:+.1f}){sv_params[2]:+.1f}',\n color='black')\n if IS_DEBUG:\n print('voc:',sv_params)\n y_scales = calc_y_bars(list(graph_l.get_ylim()))\n if y_scales[2] < 1:\n dec_len = len(str(abs(y_scales[2])).split('.')[1])\n y_scales[1] = round(y_scales[1], dec_len)\n ##################################### 変更場所 #####################################\n # 特性グラフの軸の名前\n graph_l.set_title('Voc-φ')\n graph_l.set_xlabel('Angle of light polarization φ (deg)')\n graph_l.set_ylabel(f'Open circuit voltage ({v_prefixes[0]}V)')\n ###################################################################################\n graph_l.set_ylim(y_scales[:2])\n graph_l.set_yticks(\n np.arange(y_scales[0],y_scales[1]+y_scales[2], y_scales[2]))\n graph_l.set_xlim([angles[0], angles[-1]])\n graph_l.set_xticks(\n np.arange(angles[0], graph_xlim, ANGLE_ADD*2))\n graph_l.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0)\n # ngraph用のデータを書き出す\n update_ngp_mode('v')\n write_ngp_data(\n angles,\n voltages,\n y_scales,\n sv_params,\n os.path.join(DEST_PATH, 'Ngraph data', 'Voc_phi.txt'),\n v_prefixes[0])\n\n def plot_current():\n \"\"\"\n 偏光角-短絡電流特性\n \"\"\"\n graph_r.plot(angles, currents, linestyle='None', marker='o', color='blue')\n graph_r.plot(polar_angles, pred_y[1],\n label=f'Isc={sa_params[0]:.3f}'\\\n f'sin(2φ{sa_params[1]*math.pi/180:+.1f}){sa_params[2]:+.1f}',\n color='black')\n if IS_DEBUG:\n print('isc:',sa_params)\n y_scales = calc_y_bars(list(graph_r.get_ylim()))\n if y_scales[2] < 1:\n dec_len = len(str(abs(y_scales[2])).split('.')[1])\n y_scales[1] = round(y_scales[1], dec_len)\n 
##################################### 変更場所 #####################################\n # 特性グラフの軸の名前\n graph_r.set_title('Isc-φ')\n graph_r.set_xlabel('Angle of light polarization φ (deg)')\n graph_r.set_ylabel(f'Short circuit current ({i_prefixes[0]}A)')\n ###################################################################################\n graph_r.set_ylim(y_scales[:2])\n graph_r.set_yticks(\n np.arange(y_scales[0], y_scales[1]+y_scales[2], y_scales[2]))\n graph_r.set_xlim([angles[0], angles[-1]])\n graph_r.set_xticks(\n np.arange(angles[0], graph_xlim, ANGLE_ADD*2))\n graph_r.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0)\n update_ngp_mode('i')\n write_ngp_data(\n angles,\n currents,\n y_scales,\n sa_params,\n os.path.join(DEST_PATH, 'Ngraph data', 'Isc_phi.txt'),\n i_prefixes[0])\n\n plot_voltage()\n plot_current()\n fig.savefig(os.path.join(DEST_PATH, 'Voc_Isc_phi.png'))\n plt.close()\n\n\ndef write_to_excel(data):\n \"\"\"\n エクセルデータを出力(使用module:openpyxl)\n \"\"\"\n data = np.array(data).T\n prefix_arr = [PREFIX_V, PREFIX_I, PREFIX_G]\n prefix = []\n for i in range(2, len(data)):\n _prefix, num = get_best_prefix(prefix_arr[i-2])\n prefix.append(_prefix)\n data[i] = data[i]/10**num\n for_avg = data.copy()\n data = data.T.tolist()\n data += [[None, 'Average', np.average(for_avg[2]), np.average(for_avg[3]), np.average(for_avg[4])]]\n\n # pylint: disable=abstract-class-instantiated\n # pylint: disable=unsubscriptable-object\n output_file = os.path.join(DEST_PATH, f'{os.path.split(INPUT_PATH)[1]}_result.xlsx')\n with pd.ExcelWriter(output_file) as writer:\n ##################################### 変更場所 #####################################\n # excelの内容\n columns = [\n 'λ/2 plate angle (deg)',\n 'Polarization angle (deg)',\n f'Voltage ({prefix[0]}V)',\n f'Current ({prefix[1]}A)',\n f'Conductance ({prefix[2]}S)'\n ]\n ###################################################################################\n df_data = pd.DataFrame(data, columns=columns)\n df_data.to_excel(writer, sheet_name='result', engine='openpyxl')\n\n # エクセルのコラム幅を変える\n def change_cellwidth(_key, cells, xcolumns):\n worksheet = writer.book[_key]\n for i, cell in enumerate(cells):\n worksheet.column_dimensions[cell].width = len(columns[i])+1\n change_cellwidth('result', ['B', 'C', 'D', 'E', 'F'], columns)\n\n for _key in IV_DATA.keys():\n _pv, _pi = IV_DATA[_key]['prefix']\n columns = [\n f'Voltage ({_pv[0]}V)',\n f'Current(real) ({_pi[0]}A)',\n f'Current(fit) ({_pi[0]}A)']\n df_data = pd.DataFrame(IV_DATA[_key]['data'], columns=columns)\n df_data.to_excel(writer, sheet_name=_key, engine='openpyxl')\n change_cellwidth(_key, ['B', 'C', 'D'], columns)\n\n\ndef dat_to_ndarray(filepath):\n \"\"\"\n ゼロチェックのデータ以降を読み込む\n \"\"\"\n _array = np.loadtxt(filepath, delimiter=',')\n if _array[0][0] == 0.: # zchk\n _array = np.delete(_array, 0, 0)\n return _array.T\n\n\ndef main():\n \"\"\"\n データを作成(フォーマットは林本さんの形式)\n \"\"\"\n global DEST_PATH # pylint: disable=global-statement\n browse() # 参照(INPUT_PATHを更新)\n DEST_PATH = os.path.join(INPUT_PATH, get_curtime()+'_result')\n print('>>> パス : ' + INPUT_PATH)\n\n error_log = []\n nums = {}\n for _path in iglob(escape_chr(INPUT_PATH)+'/*'):\n if not os.path.isfile(_path):\n continue\n try:\n num = float(os.path.split(_path)[1])\n except ValueError:\n if os.path.splitext(os.path.split(_path)[1])[1].lower() != '.dat':\n continue\n try:\n num = float(\n os.path.splitext(os.path.split(_path)[1])[0])\n except ValueError as e:\n error_log.append(str(e) + '\\n')\n error_log.append(f\"Invalid 
file name : {_path}\\n\")\n print(f'\\033[38;5;009m>>> [ERROR] ファイルを読み込めません({os.path.split(_path)[1]})'\\\n '。ファイル名を確認してください。\\033[38;5;010m')\n continue\n nums[_path] = num\n if len(nums) == 0 and FOR_VEE:\n print(\">>>\\033[38;5;009m [ERROR] 有効なファイルがありませんでした。\\033[38;5;010m\")\n sleep(3)\n sys.exit(1)\n\n if len(nums) == 0:\n print(\">>>\\033[38;5;009m [ERROR] 有効なファイルがありませんでした。\\n\\033[38;5;010m>>> ほかのデータを参照しますか? (y/n):\", end='')\n return\n\n nums = sorted(nums.items(), key=lambda x: x[1])\n os.makedirs(DEST_PATH, exist_ok=True)\n os.makedirs(os.path.join(DEST_PATH, 'I-V characteristics'), exist_ok=True)\n os.makedirs(os.path.join(DEST_PATH, 'Ngraph data'), exist_ok=True)\n if len(error_log) > 0:\n output_file = os.path.join(DEST_PATH, 'Error.txt')\n with open(output_file, 'w', encoding='utf-8') as f:\n f.writelines(error_log)\n\n data = []\n polar_angle = 0.\n polarizations = [[],[],[]]\n for _nums in tqdm.tqdm(nums):\n v_oc, i_sc, cond = write_linear_graph(\n dat_to_ndarray(_nums[0]),\n os.path.split(_nums[0])[-1]\n )\n\n data += [[_nums[1], polar_angle, v_oc, i_sc, cond]]\n polarizations[0] += [polar_angle]\n polarizations[1] += [v_oc]\n polarizations[2] += [i_sc]\n polar_angle += ANGLE_ADD\n write_polarization_graph(polarizations)\n write_to_excel(data)\n write_pathlog()\n if IS_DEBUG:\n shutil.rmtree(DEST_PATH)\n print(sys.argv, '\\033[0m')\n sys.exit(0)\n\n # ファイルを開く\n if os.name == 'nt':\n DEST_PATH = DEST_PATH.replace('/', '\\\\')\n if __debug__:\n call_cmd(f\"explorer {DEST_PATH}\")\n\n if FOR_VEE:\n print(\">>> 完了!\\033[0m\")\n sys.exit(0)\n print(\">>> 完了!\\n>>> 続行しますか? (y/n):\", end='')\n\n\ndef reset():\n \"\"\"\n グローバル変数のリセット\n \"\"\"\n # pylint: disable=global-statement\n global DIR_SELECTED\n global DEST_PATH\n global PREFIX_V, PREFIX_I, PREFIX_G\n global IV_DATA\n # pylint: enable=global-statement\n\n DIR_SELECTED = False\n DEST_PATH = None\n PREFIX_V.clear()\n PREFIX_I.clear()\n PREFIX_G.clear()\n IV_DATA = {}\n\n\nif __name__=='__main__':\n main()\n\n while True:\n key = ord(msvcrt.getch())\n sys.stdout.write(f\"{chr(key)}\")\n if key==3 or chr(key).lower()=='n':\n print('\\033[0m')\n sys.exit(0)\n elif chr(key).lower()=='y':\n sys.stdout.write(\"\\n\")\n reset()\n main()\n else:\n print_write = \"\\n>>> 無効なキーです。「y」か「n」を押してください:\"\n if key == 0x1B:\n sys.stdout.write('0')\n sys.stdout.write(print_write)\n","repo_name":"ppza53893/Phi_Dependency_Automatic_Calculator","sub_path":"create.pyw","file_name":"create.pyw","file_ext":"pyw","file_size_in_byte":24151,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29048706366","text":"import logging\nimport ujson as json\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom pipeline.component_framework.constants import LEGACY_PLUGINS_VERSION\nfrom pipeline.contrib.statistics.models import (\n ComponentInTemplate,\n TemplateInPipeline,\n)\nfrom pipeline.contrib.statistics.tasks import pipeline_post_save_statistics_task, pipeline_archive_statistics_task\nfrom pipeline.contrib.statistics.utils import count_pipeline_tree_nodes\nfrom pipeline.core.constants import PE\nfrom pipeline.models import PipelineInstance, PipelineTemplate\nfrom pipeline.signals import post_pipeline_finish, post_pipeline_revoke\n\nlogger = logging.getLogger(\"root\")\n\n\n@receiver(post_save, sender=PipelineTemplate)\ndef template_post_save_handler(sender, instance, created, **kwargs):\n \"\"\"\n 模板执行保存处理\n :param sender:\n :param instance: 任务实例 
Instance.Object对象\n :param created: 是否是创建(可为更新)\n :param kwargs: 参数序列\n :return:\n \"\"\"\n template = instance\n template_id = template.template_id\n # 删除原先该项模板数据(无论是更新还是创建,都需要重新创建统计数据)\n ComponentInTemplate.objects.filter(template_id=template_id).delete()\n data = template.data\n component_list = []\n # 任务节点引用标准插件统计(包含间接通过子流程引用)\n for act_id, act in data[PE.activities].items():\n # 标准插件节点直接引用\n if act[\"type\"] == PE.ServiceActivity:\n component = ComponentInTemplate(\n component_code=act[\"component\"][\"code\"],\n template_id=template_id,\n node_id=act_id,\n version=act[\"component\"].get(\"version\", LEGACY_PLUGINS_VERSION),\n )\n component_list.append(component)\n # 子流程节点间接引用\n else:\n components = ComponentInTemplate.objects.filter(template_id=act[\"template_id\"]).values(\n \"subprocess_stack\", \"component_code\", \"node_id\", \"version\"\n )\n for component_sub in components:\n # 子流程的执行堆栈(子流程的执行过程)\n stack = json.loads(component_sub[\"subprocess_stack\"])\n # 添加节点id\n stack.insert(0, act_id)\n component = ComponentInTemplate(\n component_code=component_sub[\"component_code\"],\n template_id=template_id,\n node_id=component_sub[\"node_id\"],\n is_sub=True,\n subprocess_stack=json.dumps(stack),\n version=component_sub[\"version\"],\n )\n component_list.append(component)\n ComponentInTemplate.objects.bulk_create(component_list)\n\n # 统计流程标准插件个数,子流程个数,网关个数\n atom_total, subprocess_total, gateways_total = count_pipeline_tree_nodes(template.data)\n TemplateInPipeline.objects.update_or_create(\n template_id=template_id,\n defaults={\"atom_total\": atom_total, \"subprocess_total\": subprocess_total, \"gateways_total\": gateways_total},\n )\n\n\n@receiver(post_save, sender=PipelineInstance)\ndef pipeline_post_save_handler(sender, instance, created, **kwargs):\n try:\n if created:\n pipeline_post_save_statistics_task.delay(instance_id=instance.instance_id)\n except Exception:\n logger.exception(\"pipeline_post_save_handler[instance_id={}] send message error\".format(instance.id))\n\n\n@receiver(post_pipeline_finish, sender=PipelineInstance)\ndef pipeline_post_finish_handler(sender, instance_id, **kwargs):\n try:\n pipeline_archive_statistics_task.delay(instance_id=instance_id)\n except Exception:\n logger.exception(\"pipeline_post_finish_handler[instance_id={}] send message error\".format(instance_id))\n\n\n@receiver(post_pipeline_revoke, sender=PipelineInstance)\ndef pipeline_post_revoke_handler(sender, instance_id, **kwargs):\n try:\n pipeline_archive_statistics_task.delay(instance_id=instance_id)\n except Exception:\n logger.exception(\"pipeline_post_revoke_handler[instance_id={}] send message error\".format(instance_id))\n","repo_name":"TencentBlueKing/bamboo-engine","sub_path":"runtime/bamboo-pipeline/pipeline/contrib/statistics/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"67"} +{"seq_id":"39964118570","text":"from datetime import datetime\n\ntrabalhadores = {}\nano = datetime.now().year\n\ntrabalhadores['Nome'] = str(input('Nome: '))\ntrabalhadores['Idade'] = int(input('Ano de Nascimento: '))\ntrabalhadores['Idade'] = ano - trabalhadores['Idade']\ntrabalhadores['NumeroCtps'] = int(input('Carteira de Trabalho (0 nao tem): '))\n\nif trabalhadores['NumeroCtps'] == 0:\n\n print('-='*30)\n for k, v in trabalhadores.items():\n\n print(f' - {k} tem o valor {v}')\n\nelse:\n\n trabalhadores['Contrataçao'] = int(input('Ano de Contratação: '))\n 
trabalhadores['Salario'] = float(input('Salário: R$'))\n trabalhadores['Aposentadoria'] = trabalhadores['Idade'] + ((trabalhadores['Contrataçao'] + 30) - datetime.now().year)\n print('-='*30)\n for k, v in trabalhadores.items():\n\n print(f' - {k} tem o valor {v}')\n\n\n","repo_name":"Matheus0605/Estudos-Python","sub_path":"ExerciciosPython_M3/Ex092.py","file_name":"Ex092.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"25050634684","text":"import subprocess\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# Step 1: Compile the Fortran code\nfortran_file_path = \"numerical_solvers_fortran/three_body_leapfrog.f\"\nexecutable_path = \"numerical_solvers_fortran/three_body_leapfrog\"\noutput_file_path = \"numerical_solvers_fortran/leapfrog_output.txt\"\n\nsubprocess.run([\"gfortran\", \"-o\", executable_path, fortran_file_path])\n\n# Step 2: Input reasonable values and run the Fortran program\ninitial_values = [\n \"0 0 0 1 0 0\", # Body 1\n \"1 0 0 0 1 0\", # Body 2\n \"0 1 0 0 0 1\" # Body 3\n]\n\n# Change the current working directory\nos.chdir(\"numerical_solvers_fortran\")\n\n# Run the executable with inputs\nprocess = subprocess.Popen([\"./three_body_leapfrog\"], stdin=subprocess.PIPE, text=True)\nprocess.communicate(input='\\n'.join(initial_values))\n\n# Change back to the main directory\nos.chdir(\"..\")\n\n# Step 3: Parse the output file\ndata = np.loadtxt(output_file_path)\n\n# Extract positions for each body\ntimesteps, x1, y1, z1, x2, y2, z2, x3, y3, z3 = data.T\n\n# Step 4: Plot the results\nplt.figure(figsize=(10, 6))\n\nplt.plot(x1, y1, label=\"Body 1\", marker='o', markersize=2, linestyle='-')\nplt.plot(x2, y2, label=\"Body 2\", marker='o', markersize=2, linestyle='-')\nplt.plot(x3, y3, label=\"Body 3\", marker='o', markersize=2, linestyle='-')\n\nplt.xlabel(\"X Position\")\nplt.ylabel(\"Y Position\")\nplt.title(\"Three Body Leapfrog Simulation\")\nplt.legend()\nplt.grid(True)\nplt.show()\n\n","repo_name":"Sh1su1Uch1ha/three_body_problem","sub_path":"python_wrappers/run_three_body_fortran.py","file_name":"run_three_body_fortran.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40807296309","text":"'''show_platform.py\n\nJunOS parsers for the following show commands:\n * file list\n * file list {filename}\n * show version\n'''\n\n# Python\nimport re\n\n# Genie\nfrom genie.metaparser import MetaParser\nfrom genie.metaparser.util.schemaengine import Schema, Any, \\\n Optional, Use, SchemaTypeError\n\n\n# ===========================\n# Schema for:\n# * 'file list'\n# * 'file list {filename}'\n# ===========================\nclass FileListSchema(MetaParser):\n ''' Schema for:\n * 'file list'\n * 'file list {filename}'\n '''\n\n schema = {\n 'dir': \n {Any(): \n {Optional('files'): \n {Any(): \n {Optional('path'): str,\n },\n },\n },\n },\n }\n\n\n# ===========================\n# Parser for:\n# * 'file list'\n# * 'file list {filename}'\n# ===========================\nclass FileList(FileListSchema):\n ''' Parser for:\n * 'file list'\n * 'file list {filename}'\n '''\n\n cli_command = ['file list', 'file list {filename}']\n exclude = []\n\n def cli(self, filename='', output=None):\n\n # Execute command\n if output is None:\n if filename:\n out = self.device.execute(self.cli_command[1].format(filename=filename))\n else:\n out = 
self.device.execute(self.cli_command[0])\n        else:\n            out = output\n\n        # Init\n        ret_dict = {}\n\n        # /root/:\n        p1 = re.compile(r'^\\/(?P<dir>(\\S+))\\/\\:$')\n\n        # filename\n        # .profile@ -> /packages/mnt/os-runtime/root/.profile\n        p2 = re.compile(r'^(?P<file>([a-zA-Z0-9\\-\\_\\.\\@]+))(?: +\\-\\> +(?P<path>(.*)))?$')\n\n        # /root/filename999\n        # /root/filename999: No such file or directory\n        p3 = re.compile(r'^\\/(?P<dir>(\\S+))\\/(?P<file>([a-zA-Z0-9\\-\\_\\/]+))'\n                        '(?P<missing>(?:\\: +No +such +file +or +directory)?)$')\n\n        for line in out.splitlines():\n            line = line.replace('\\t', ' ')\n            line = line.strip()\n\n            # /root/:\n            m = p1.match(line)\n            if m:\n                dir_dict = ret_dict.setdefault('dir', {}).\\\n                    setdefault(m.groupdict()['dir'].strip(), {})\n                continue\n\n            # filename\n            # .profile@ -> /packages/mnt/os-runtime/root/.profile\n            m = p2.match(line)\n            if m:\n                group = m.groupdict()\n                files_dict = dir_dict.setdefault('files', {}).\\\n                    setdefault(group['file'].strip(), {})\n                if group['path']:\n                    files_dict['path'] = group['path'].strip()\n                continue\n\n            # /root/filename999\n            m = p3.match(line)\n            if m:\n                group = m.groupdict()\n                dir_dict = ret_dict.setdefault('dir', {}).\\\n                    setdefault(group['dir'].strip(), {})\n                if group['missing']:\n                    continue\n                files_dict = dir_dict.setdefault('files', {}).\\\n                    setdefault(group['file'].strip(), {})\n                continue\n\n        return ret_dict\n\n\n# ===========================\n# Schema for show version\n# ===========================\nclass ShowVersionSchema(MetaParser):\n    \"\"\"\n    schema = {\n        \"software-information\": {\n            \"host-name\": str,\n            \"junos-version\": str,\n            \"product-model\": str,\n            \"product-name\": str,\n            \"package-information\":\n                [\n                    {\n                        \"comment\": str,\n                        \"name\": str,\n                    }\n                ]\n        }\n    }\n    \"\"\"\n\n    def validate_package_info_list(value):\n        if not isinstance(value, list):\n            raise SchemaTypeError('package information is not a list')\n        package_info_schema = Schema(\n            {\n                \"comment\": str,\n                \"name\": str,\n            }\n        )\n\n        for item in value:\n            package_info_schema.validate(item)\n        return value\n\n    # main schema\n    schema = {\n        \"software-information\": {\n            \"host-name\": str,\n            \"junos-version\": str,\n            \"product-model\": str,\n            \"product-name\": str,\n            \"package-information\": Use(validate_package_info_list)\n        }\n    }\n\n\n# =========================\n# Parser for 'show version'\n# =========================\nclass ShowVersion(ShowVersionSchema):\n    \"\"\"Parser for show version\"\"\"\n    cli_command = 'show version'\n\n    def cli(self, output=None):\n        if output is None:\n            out = self.device.execute(self.cli_command)\n        else:\n            out = output\n\n        # Init vars\n        show_version_dict = {}\n\n        # -----------------------------------------------------------\n        # Regex patterns\n        # -----------------------------------------------------------\n\n        # Junos: 15.1R1-S1\n        p1 = re.compile(r'^Junos: +(?P<junosversion>\\S+)$')\n\n        # Model: ex4300-24p\n        p2 = re.compile(r'^Model: +(?P<productmodel>\\S+)$')\n\n        # Hostname: myJunosDevice\n        p3 = re.compile(r'^Hostname: +(?P<hostname>\\S+)$')\n\n        # JUNOS EX Software Suite [18.2R2-S1]\n        p4 = re.compile(r'^JUNOS +(?P<package>.*)$')\n\n        show_version_dict[\"software-information\"] = {}\n        show_version_dict[\"software-information\"][\"package-information\"] = []\n\n        # -----------------------------------------------------------\n        # Build parsed output\n        # -----------------------------------------------------------\n        for line in out.splitlines():\n            line = line.strip()\n\n            m = p1.match(line)\n            if m:\n                show_version_dict[\"software-information\"]['junos-version'] = \\\n                    m.groupdict()['junosversion']\n                continue\n\n            m = p2.match(line)\n            if m:\n                
show_version_dict[\"software-information\"]['product-model'] = \\\n m.groupdict()['productmodel']\n show_version_dict[\"software-information\"]['product-name'] = \\\n m.groupdict()['productmodel']\n continue\n\n m = p3.match(line)\n if m:\n show_version_dict[\"software-information\"]['host-name'] = \\\n m.groupdict()['hostname']\n continue\n\n m = p4.match(line)\n if m:\n # Cleaning name to remove multiple white spaces, lower case string,\n # remove JUNOS word, version between brakes (if present)\n # and replacing spaces for dashes\n\n name = re.sub(' +', ' ', m.groupdict()['package'].replace(\"JUNOS\", \"\"))\n\n if \"[\" in name:\n name = name.split(\"[\")[0].strip().lower().replace(\" \", \"-\")\n else:\n name = name.strip().lower().replace(\" \", \"-\").replace(\" \", \"-\")\n\n show_version_dict[\"software-information\"]['package-information'].append(\n {\n \"comment\": m.groupdict()['package'],\n \"name\": name\n }\n )\n continue\n\n # Check for empty input\n if 'junos-version' not in show_version_dict[\"software-information\"].keys():\n return {}\n\n return show_version_dict\n","repo_name":"Loicsnyers/Python3Experiments","sub_path":".local/lib/python3.8/site-packages/genie/libs/parser/junos/show_platform.py","file_name":"show_platform.py","file_ext":"py","file_size_in_byte":7570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23550254910","text":"from datetime import datetime\r\nhigh = []#put highs to test\r\nlow = []#put lows to test\r\n\r\n#just to have it initiated \r\nIchimokuCloudA = []\r\nIchimokuCloudB = []\r\n\r\ndef IchimokuCloud(high,low,IchimokuCloudA,IchimokuCloudB):\r\n\tIchimokuCloudA = []\r\n\tIchimokuCloudB = []\r\n\tconversion = []\r\n\tbase = []\r\n\tcounter = 26\r\n\twhile counter >= 0:\r\n\t\tperiod9High = 0\r\n\t\tperiod9Low = 0\r\n\t\t\t\r\n\t\tperiod26High = 0\r\n\t\tperiod26Low = 0\r\n\t\t\t\r\n\t\tperiod52High = 0\r\n\t\tperiod52Low = 0\r\n #Gets highest and lowest value from the last 9 periods\r\n\t\tfor i in range(9):\r\n\t\t\tif high[i+counter] > period9High:\r\n\t\t\t\tperiod9High = high[i+counter]\r\n\t\t\tif low[i+counter] < period9Low or period9Low == 0:\r\n\t\t\t\tperiod9Low = low[i+counter]\r\n\r\n #Gets highest and lowest value from the last 26 periods\r\n\t\tfor x in range (26):\r\n\t\t\tif high[x+counter] > period26High:\r\n\t\t\t\tperiod26High = high[x+counter]\r\n\t\t\tif low[x+counter] < period26Low or period26Low == 0:\r\n\t\t\t\tperiod26Low = low[x+counter]\r\n\r\n #Gets highest and lowest value from the last 52 periods\r\n\t\tfor y in range (52):\r\n\t\t\tif high[y+counter] > period52High:\r\n\t\t\t\tperiod52High = high[y+counter]\r\n\t\t\tif low[y+counter] < period52Low or period52Low == 0:\r\n\t\t\t\tperiod52Low = low[y+counter]\r\n\r\n\t #Calculates Conversion and Base Lines\r\n\t\tconversionLine = (period9High + period9Low) / 2\r\n\t\tbaseLine = (period26High + period26Low) / 2\r\n\r\n\t #Calculates the leading span\r\n\t\tleadingSpanA = (conversionLine + baseLine) / 2\r\n\t\tleadingSpanB = (period52High + period52Low) / 2\r\n\r\n #appends it to all the cloud values for the 26 counter\r\n\t\tIchimokuCloudA.append(leadingSpanA)\r\n\t\tIchimokuCloudB.append(leadingSpanB)\r\n\r\n\t\tif IchimokuCloudA[0] == 0:\r\n\t\t\tIchimokuCloudA.remove(0)\r\n\t\tif IchimokuCloudB[0] == 0:\r\n\t\t\tIchimokuCloudB.remove(0)\r\n\t\tcounter -= 1\r\n\t\t\r\n\treturn IchimokuCloudA, 
IchimokuCloudB,conversionLine,baseLine\r\n\r\n\r\n","repo_name":"henriquemgn/Indicators","sub_path":"Ichimoku.py","file_name":"Ichimoku.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"44254663189","text":"# Import the required library.\nimport easyocr\n\n# Initialize the OCR reader. We will read English (en) text here, so pass ['en'] as the argument.\n# Set gpu=False so the GPU is not used. If a GPU is available, setting gpu=True improves speed.\nreader = easyocr.Reader(['en'], gpu=False)\n\n# Read text from the image with the readtext method, which takes the path of an image file.\n# It finds every text region in the image and returns each region's bounding box and the text inside it.\nresult = reader.readtext('ocr_org.png')\n\n# Print the result.\nprint(result)\n\n","repo_name":"ikheee/python","sub_path":"ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71587475735","text":"import datetime\nimport json\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict, Optional, Union\n\nfrom junit_xml import TestCase, to_xml_report_string, TestSuite\n\nfrom .utils import CaseFailure, Utils\n\n\n@dataclass\nclass CaseFormatKeys:\n    case_name: str\n    severity_key: str\n    case_classname: str = None\n    case_category: str = None\n    case_timestamp: str = None\n    static_case_name: bool = False\n\n\nclass JsonJunitExporter:\n    DEFAULT_SEVERITY_LEVELS = (\"error\", \"critical\", \"fatal\")\n    REPORT_PREFIX = \"junit_report\"\n\n    def __init__(self,\n                 fmt: CaseFormatKeys,\n                 update_format_keys: bool = True,\n                 report_prefix: str = REPORT_PREFIX,\n                 export_on_success: bool = True,\n                 severity_export_values: Tuple[str, ...] = DEFAULT_SEVERITY_LEVELS):\n        self._format = fmt\n        self._report_prefix = report_prefix\n        self._export_on_success = export_on_success\n        self._severity_export_values = severity_export_values\n        if update_format_keys:\n            self._update_format_keys()\n\n    def _update_format_keys(self):\n        self._format.case_classname = self._format.case_classname or self._format.case_name\n        self._format.case_category = self._format.case_category or self._format.case_name\n\n    def _get_case_name(self, entry: Dict[str, str]) -> str:\n        return self._format.case_name if self._format.static_case_name else entry[self._format.case_name]\n\n    def _get_case_classname(self, entry: Dict[str, str]) -> str:\n        return self._format.case_name if self._format.static_case_name else entry[self._format.case_classname]\n\n    def _get_case_category(self, entry: Dict[str, str]) -> str:\n        return self._format.case_name if self._format.static_case_name else entry[self._format.case_category]\n\n    def _get_test_case(self, entry: Dict[str, str], severity: str) -> Union[TestCase, None]:\n        msg = json.dumps(entry, indent=4)\n        case = TestCase(name=self._get_case_name(entry),\n                        classname=self._get_case_classname(entry),\n                        category=self._get_case_category(entry),\n                        timestamp=entry.get(self._format.case_timestamp, str(datetime.datetime.now()))\n                        )\n\n        if severity in self._severity_export_values:\n            failure = CaseFailure(message=msg, output=msg, type=severity)\n            case.failures.append(failure)\n        elif not self._export_on_success:\n            return None\n\n        if not case.is_failure():\n            case.stdout = msg\n        return case\n\n    def collect(self, entries: List[Dict[str, str]],\n                suite_name: str,\n                report_dir: Optional[Path] = None,\n                xml_suffix: str = \"\"\n                ) -> str:\n        report_dir = Utils.get_report_dir(report_dir)\n\n        test_cases = list()\n        
for entry in entries:\n            test_case = self._get_test_case(entry, entry.get(self._format.severity_key, None))\n            if test_case is not None:\n                test_cases.append(test_case)\n\n        report_dir.mkdir(exist_ok=True)\n        xml_report = to_xml_report_string(test_suites=[TestSuite(name=suite_name,\n                                                                 test_cases=test_cases,\n                                                                 timestamp=self._get_suite_timestamp(test_cases))])\n        file_name = f\"{self._report_prefix}_{suite_name}{f'_{xml_suffix}' if xml_suffix else ''}.xml\"\n        with open(report_dir.joinpath(file_name), \"w\") as f:\n            f.write(xml_report)\n        return f.name\n\n    @classmethod\n    def _get_suite_timestamp(cls, test_cases: List[TestCase]) -> str:\n        return test_cases[0].timestamp if test_cases else str(datetime.datetime.now())\n","repo_name":"eliorerz/junit-report","sub_path":"src/junit_report/json_junit_exporter.py","file_name":"json_junit_exporter.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"}
{"seq_id":"10609948042","text":"from pathlib import Path\nfrom flask import Flask, Response, send_file\nfrom common.config.config import Config\nfrom watcher import Watcher\nimport io, threading\n\napp = Flask(__name__)\nconfig_path = Path(__file__).parent.parent / 'conf' / 'conf.yml'\nconfig = Config.from_yaml(config_path)\nwatcher = Watcher(config.doorbell_ip, config.user, config.password, config.event_retention_count)\n\n@app.route('/motion', methods = ['GET'])\ndef motion():\n    watcher_thread = threading.Thread(target = watcher.save_event_set, name = 'Watcher-Motion', args = ('motion',))\n    watcher_thread.start()\n    return Response(status = 200)\n\n@app.route('/ring', methods = ['GET'])\ndef ring():\n    watcher_thread = threading.Thread(target = watcher.save_event_set, name = 'Watcher-Ring', args = ('ring',))\n    watcher_thread.start()\n    return Response(status = 200)\n\n@app.route('/current_image', methods = ['GET'])\ndef current():\n    return send_file(io.BytesIO(watcher.images[-1]), mimetype = 'image/jpeg')\n\napp.run(host = '0.0.0.0', port = 80)","repo_name":"Mikey-Beep/Doorbird-Chime","sub_path":"doorbird_watcher/app/serv.py","file_name":"serv.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"71319696535","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nimport datetime as dt\r\n\r\n# Python SQL toolkit and Object Relational Mapper\r\nimport sqlalchemy\r\nfrom sqlalchemy.ext.automap import automap_base\r\nfrom sqlalchemy.orm import Session\r\nfrom sqlalchemy import create_engine, func, inspect\r\nfrom flask import Flask, jsonify\r\n\r\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\", connect_args={'check_same_thread': False})\r\n\r\n# reflect an existing database into a new model\r\nBase = automap_base()\r\n# reflect the tables\r\nBase.prepare(engine, reflect=True)\r\n\r\n# Save references to each table\r\nMeasurement = Base.classes.measurement\r\nStation = Base.classes.station\r\n\r\n# Create our session (link) from Python to the DB\r\nsession = Session(engine)\r\n\r\n# Flask Setup\r\napp = Flask(__name__)\r\n\r\n#Flask Routes\r\n@app.route(\"/\")\r\ndef welcome():\r\n\treturn(\r\n        f\"Available Routes:<br/>\"\r\n        f\"/api/v1.0/precipitation<br/>\"\r\n        f\"/api/v1.0/stations<br/>\"\r\n        f\"/api/v1.0/tobs<br/>\"\r\n        f\"/api/v1.0/temp/start<br/>\"\r\n        f\"/api/v1.0/temp/start/end\"\r\n    )\r\n\r\n@app.route(\"/api/v1.0/precipitation\")\r\ndef precipitation():\r\n    year = session.query(Measurement.date, Measurement.prcp).\\\r\n    filter(Measurement.date >= '2016-08-23').\\\r\n    filter(Measurement.date <= '2017-08-23').\\\r\n    order_by(Measurement.date).all()\r\n    return jsonify(year)\r\n\r\n\r\n\r\n@app.route(\"/api/v1.0/stations\")\r\ndef stations():\r\n    results = session.query(Station.station, Station.name).all()\r\n    stations = list(np.ravel(results))\r\n    return jsonify(stations)\r\n\r\n\r\n@app.route(\"/api/v1.0/tobs\")\r\ndef tobs():\r\n    temp = session.query(Measurement.date, Measurement.station, Measurement.tobs).\\\r\n    filter(Measurement.date >= '2016-08-23').\\\r\n    filter(Measurement.date <= '2017-08-23').all()\r\n    temps = list(np.ravel(temp))\r\n    return jsonify(temps)\r\n\r\n\r\n@app.route(\"/api/v1.0/temp/<start>\")\r\ndef startDateOnly(start):\r\n    result = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n    filter(Measurement.date >= start).all()\r\n    results = list(np.ravel(result))\r\n    return jsonify(results)\r\n\r\n\r\n@app.route(\"/api/v1.0/temp/<start>/<end>\")\r\ndef startDateEndDate(start,end):\r\n    result = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n    filter(Measurement.date >= start).\\\r\n    filter(Measurement.date <= end).all()\r\n    results = list(np.ravel(result))\r\n    return jsonify(results)\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)","repo_name":"jolivero707/SQL2","sub_path":"HW_8.py","file_name":"HW_8.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"2074777098","text":"from google.cloud import bigquery\n\nSERVICE_ACCOUNT_JSON = r'/Users/mikeqiu/keys/valued-lyceum-341108-b090826f2edc.json'\n\nclient = bigquery.Client.from_service_account_json(SERVICE_ACCOUNT_JSON)\n\ndataset_id = 'valued-lyceum-341108.dataset_py'\n\ndataset = bigquery.Dataset(dataset_id)\n\ndataset.location = 'asia-northeast1'\ndataset.description = 'dataset from Python'\n\ndataset_ref = client.create_dataset(dataset, timeout=30)\n\nprint('successfully created dataset {}.{}'.format(client.project, dataset_ref.dataset_id))\n","repo_name":"mikeqiu2011/bigquery-python-demo","sub_path":"create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"21125836058","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def lowestCommonAncestor(self, root: 'TreeNode', nodes: 'List[TreeNode]') -> 'TreeNode':\n        def lca(node):\n            if not node:\n                return None\n            if node in nodes:\n                return node\n\n            l,r = lca(node.left), lca(node.right)\n            if l and r:\n                return node\n            return l or r\n\n        return lca(root)","repo_name":"fxrcode/FG","sub_path":"1676-lowest-common-ancestor-of-a-binary-tree-iv/1676-lowest-common-ancestor-of-a-binary-tree-iv.py","file_name":"1676-lowest-common-ancestor-of-a-binary-tree-iv.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
{"seq_id":"33595250357","text":"import numpy as np\n\n\n# In[6]:\n\n\ndef sigmoid(x):\n    return 1/float((1+np.exp(-x)))\n\ndef sigmoidd(x):\n    sig=[]\n    for row in x:\n        r=[]\n        for value 
in row:\n            r.append(sigmoid(value))\n        sig.append(r)\n    return sig\n\ndef diff_sigmoid(x):\n    return sigmoid(x)*(1-sigmoid(x))\n\ndef diff_sigmoidd(x):\n    sig=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(diff_sigmoid(value))\n        sig.append(r)\n    return sig\n\n\n# In[7]:\n\n\ndef tanh(x):\n    return np.tanh(x)\n\ndef tanhh(x):\n    tan=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(tanh(value))\n        tan.append(r)\n    return tan\n\ndef diff_tanh(x):\n    return 1-x**2\n\ndef diff_tanhh(x):\n    diftan=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(diff_tanh(value))\n        diftan.append(r)\n    return diftan\n\n\n# In[4]:\n\n\ndef relu(x):\n    # note: this is the softplus function, a smooth approximation of ReLU\n    return np.log(1+np.exp(x))\n\ndef reluu(x):\n    rel=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(relu(value))\n        rel.append(r)\n    return rel\n\ndef diff_relu(x):\n    # derivative of softplus is the sigmoid\n    return 1/float(1+np.exp(-x))\n\ndef diff_reluu(x):\n    difrel=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(diff_relu(value))\n        difrel.append(r)\n    return difrel\n\n\n\n#Gaussian\n\ndef gauss(x):\n    return np.exp(-1*x**2)\n\ndef gausss(x):\n    gau=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(gauss(value))\n        gau.append(r)\n    return gau\n\ndef diff_gauss(x):\n    return -2*x*np.exp(-1*x**2)\n\ndef diff_gausss(x):\n    difgau=[]\n    for row in x:\n        r=[]\n        for value in row:\n            r.append(diff_gauss(value))\n        difgau.append(r)\n    return difgau","repo_name":"surajn581/machine_learning","sub_path":"Neural_Network/activation.py","file_name":"activation.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"33444927641","text":"import socket\nfrom src.nlp import NLP\nfrom src.channel import Channel\n\nclass PyIRC:\n\tdef __init__(self, hostname, port, channel, nick):\n\t\tself.hostname = hostname\n\t\tself.port = port\n\t\tself.channel = channel\n\t\tself.nick = nick\n\t\tself.nlp = NLP()\n\n\t\"\"\"\n\tSends a message.\n\t\"\"\"\n\tdef send(self, message):\n\t\tprint(\"SEND: %s\" % message)\n\t\tself.ircsock.send(message.encode())\n\n\t\"\"\"\n\tSends a private message.\n\t\"\"\"\n\tdef privmsg(self, channel, message):\n\t\tself.send(\"PRIVMSG %s :%s\\n\" % (channel, message))\n\n\t\"\"\"\n\tReturns the next available message on the socket.\n\t\"\"\"\n\tdef get_message(self):\n\t\tmessage = self.ircsock.recv(2048).decode()\n\t\tmessage = message.strip('\\n\\r')\n\n\t\tprint(\"RECV: %s\" % message)\n\n\t\treturn message\n\n\t\"\"\"\n\tChange the bot's nickname\n\t\"\"\"\n\tdef change_nick(self, nick):\n\t\tself.send(\"USER %s 8 * :Skylar\\'s Bot\\n\" % nick)\n\t\tself.send(\"NICK %s\\n\" % nick)\n\n\t\t# Make sure this is okay.\n\t\twhile 1:\n\t\t\tmessage = self.get_message()\n\t\t\tif message.find('004') != -1:\n\t\t\t\tbreak\n\n\t\"\"\"\n\tJoin a channel\n\t\"\"\"\n\tdef join(self, channel):\n\t\tself.send(\"JOIN \" + channel + \"\\n\")\n\n\t\"\"\"\n\tRun the bot\n\t\"\"\"\n\tdef run(self):\n\t\tself.ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.ircsock.connect((self.hostname, self.port))\n\n\t\tself.change_nick(self.nick)\n\t\tself.join(self.channel)\n\n\t\twhile 1:\n\t\t\tmessage = self.get_message()\n\n\t\t\tif message.find(\"PING :\") != -1:\n\t\t\t\tself.send(\"PONG :Pong\\n\")\n\t\t\t\tcontinue\n\n\t\t\tif message.find(' PRIVMSG ') !=-1:\n\t\t\t\tnick = message.split('!')[0][1:]\n\t\t\t\tperson = Channel(self, nick)\n\n\t\t\t\tchannel = message.split(' PRIVMSG ')[-1].split(' :')[0]\n\t\t\t\tchannel = Channel(self, channel)\n\n\t\t\t\tmessage = message.split(\" :\", 
1)[1]\n\t\t\t\tmessage = message.lower()\n\n\t\t\t\tbotname = self.nick.lower()\n\n\t\t\t\tif not self.nlp.is_subject(botname, message):\n\t\t\t\t\tprint(\"DET: Not the subject.\")\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Extract name (slice by length; lstrip/rstrip would strip characters, not the prefix).\n\t\t\t\tif message.startswith(botname):\n\t\t\t\t\tmessage = message[len(botname):]\n\t\t\t\telif message.endswith(botname):\n\t\t\t\t\tmessage = message[:-len(botname)]\n\n\t\t\t\tif nick == \"sky\" and self.nlp.match_any_ends(message, \"shutdown\"):\n\t\t\t\t\tbreak\n\n\t\t\t\t(module, arguments) = self.nlp.parse(message.strip(\" \"))\n\t\t\t\tmodule.recv(channel, person, arguments)\n","repo_name":"SkylarKelty/pyirc","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1757596582","text":"#!/usr/bin/env python3\n\"\"\"Print out the Ridgeback battery voltage, as reported by the Ridgeback node.\"\"\"\nimport rospy\nfrom diagnostic_msgs.msg import DiagnosticArray\n\n\ndef diag_cb(msg):\n    for component in msg.status:\n        if component.name == \"ridgeback_node: Battery\":\n            print(component)\n\n\ndef main():\n    rospy.init_node(\"battery_voltage_node\")\n    rb_diagnostics_sub = rospy.Subscriber(\"/diagnostics\", DiagnosticArray, diag_cb)\n    rospy.spin()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"utiasDSL/mobile_manipulation_central","sub_path":"scripts/logging/battery_voltage.py","file_name":"battery_voltage.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"43277886385","text":"from news import NewsGroup, NewsItem, has_class_name, get_page_source, get_filesafe_url\nfrom news import html_dir, json_dir\nimport bs4\nimport os\nimport datetime\n\nclass NewsItem_Inquirer(NewsItem):\n    BASE_URL = \"https://business.inquirer.net\"\n    URLS = [\n        \"https://business.inquirer.net\"\n    ]\n    @classmethod\n    def yield_news(cls, soup: bs4.BeautifulSoup, base_url: str):\n        for s in soup.findAll('div', {\"id\": \"cmr-bg\"}):\n            # print(s[\"class\"])\n            # print(['StoryCollection__story' in c for c in s[\"class\"]])\n            # print(s)\n            s = s.parent\n            # print(s)\n            if not s.find(\"h2\") is None:\n                header = s.find(\"h2\").text\n            else:\n                header = s.find(\"h3\").text\n            print(s)\n            try:\n                url = s['onclick'].replace(\"window.open('\", \"\").replace(\"','_blank');\", \"\")\n            except:\n                print(\"!!\")\n                print(s)\n                url = s.parent['href']\n\n            # if 'onclick' in s:\n            \n            # else:\n            \n            #     url = s.parent['href']\n            n = NewsItem_Inquirer(\n                base_url,\n                url,\n                s.find(\"h3\").text,\n                header,\n                \"\"\n            )\n            print(url)\n            print(\"------\")\n            yield n\n    \n    @classmethod\n    def cleanup_content(cls, content: list):\n        return content\n    \n    @classmethod\n    def cleanup_content_item(cls, c: str, i: int):\n        return c\n\n    def extract_header(self, soup: bs4.BeautifulSoup):\n        self.header = soup.find(\"h1\", {\"class\": \"entry-title\"}).text\n    \n    def extract_full_date(self, soup: bs4.BeautifulSoup):\n        a = (\n            soup.find(\"div\", {\"id\": \"byline_share\"})\n            .find(\"div\", {\"id\": \"art_plat\"})\n        )\n        for c in a.find_all(\"a\"):\n            c.decompose()\n        self.full_date = a.text\n        print(self.full_date)\n    \n    def extract_summary_content(self, soup: bs4.BeautifulSoup):\n        summary = list()\n        content = list()\n        from news import has_class_name\n        d = soup.find(\"div\", {\"id\": \"article_content\"})\n        if not d:\n            return\n        for d in d.findAll(\"p\"):\n            this_text = d.text\n            if not this_text is None:\n                if 
len(this_text) == 0:\n continue\n if \"CONTRIBUTED PHOTO\" in this_text:\n continue\n if \"Written By:\" in this_text:\n continue\n if \"By \" in this_text and \"@\" in this_text:\n continue\n if \"FEATURED STORIES\" in this_text:\n break\n this_text = this_text.replace(\"ADVERTISEMENT\\n\\n\\n\\n\\n\\n\",\"\").strip()\n if len(summary) < 2:\n print(this_text)\n summary.append(this_text)\n content.append(this_text)\n \n self.summary = ' '.join(summary)\n print(\"summary\", self.summary)\n \n self.content = list(content)\n self.content_raw = list(content)\n\n # print(\"content\", self.content)\n\n def extract_news_content(\n self,\n news_content_html_dir,\n sleep_s=0, hold_proc=True,\n use_selenium=False\n ):\n url = self.url\n page_source = get_page_source(\n url, os.path.join(news_content_html_dir, get_filesafe_url(url)),\n sleep_s=sleep_s, hold_proc=hold_proc,\n use_selenium=use_selenium\n )\n soup = bs4.BeautifulSoup(page_source, features='html.parser')\n \n self.extract_header(soup)\n\n self.extract_full_date(soup)\n\n self.extract_summary_content(soup)\n\n return self\n \n def cleanup_data(self):\n print(' '.join(self.full_date.split()[-3:]))\n # input(\"enter to continue\")\n self.date = datetime.datetime.strptime(' '.join(self.full_date.split()[-3:]),r\"%B %d, %Y\").isoformat()\n # self.content = NewsItem_Inquirer.cleanup_content(self.content)\n\nif __name__ == \"__main__\":\n for front_url in NewsItem_Inquirer.URLS:\n html_path, json_path, ng = NewsGroup.process(\n NewsItem_Inquirer, front_url,\n html_dir_=html_dir, json_dir_=json_dir\n )","repo_name":"jcosto/news_summary","sub_path":"news_sources/news_inquirer.py","file_name":"news_inquirer.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10208538310","text":"# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nfrom ._op import OpRun\n\n\ndef _apply_momentum(r, t, x, g, v, norm_coefficient, alpha, beta):\n # Add gradient of regularization term.\n g_regularized = norm_coefficient * x + g\n # Coefficient of gradient should be 1 at the first iteration.\n beta_adjusted = beta if t > 0 else 1\n # Update momentum.\n v_new = alpha * v + beta_adjusted * g_regularized\n # Apply SG with momentum update rule.\n x_new = x - r * v_new\n return x_new, v_new\n\n\nclass Momentum(OpRun):\n\n atts = {'alpha': 0,\n 'beta': 0,\n 'mode': b'standard',\n 'norm_coefficient': 0.}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRun.__init__(self, onnx_node, desc=desc,\n expected_attributes=Momentum.atts,\n **options)\n\n def _run(self, *data, attributes=None, verbose=0, fLOG=None): # pylint: disable=W0221\n if len(data) == 5:\n return self._run1(*data)\n n = (len(data) - 2) // 3\n xs = []\n vs = []\n for i in range(0, n):\n a, b = self._run1(*data[:2], data[2 + i],\n data[2 + n + i], data[2 + n * 2 + i])\n xs.append(a)\n vs.append(b)\n return tuple(xs + vs)\n\n def _run1(self, r, t, x, g, v): # pylint: disable=W0221\n x_new, v_new = _apply_momentum(\n r, t, x, g, v, self.norm_coefficient, self.alpha, self.beta)\n return x_new, v_new\n","repo_name":"sdpython/mlprodict","sub_path":"mlprodict/onnxrt/ops_cpu/op_momentum.py","file_name":"op_momentum.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"67"} +{"seq_id":"31818951898","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 7 
16:37:25 2023\n\n@author: Alumno\n\"\"\"\n\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# Create a new graph\ntaxonomia = nx.DiGraph()\n\n# Add nodes (categories or concepts) to the taxonomy\ntaxonomia.add_node(\"Animalia\")\ntaxonomia.add_node(\"Chordata\")\ntaxonomia.add_node(\"Mammalia\")\ntaxonomia.add_node(\"Carnivora\")\ntaxonomia.add_node(\"Felidae\")\ntaxonomia.add_node(\"Felis\")\n\n# Establish relationships between the nodes\ntaxonomia.add_edge(\"Animalia\", \"Chordata\")\ntaxonomia.add_edge(\"Chordata\", \"Mammalia\")\ntaxonomia.add_edge(\"Mammalia\", \"Carnivora\")\ntaxonomia.add_edge(\"Carnivora\", \"Felidae\")\ntaxonomia.add_edge(\"Felidae\", \"Felis\")\n\n# Visualize the taxonomy\npos = nx.spring_layout(taxonomia)\nnx.draw_networkx(taxonomia, pos=pos, with_labels=True, node_size=1000, node_color='lightblue', font_size=10, font_weight='bold', arrowsize=20)\nplt.axis('off')\nplt.show()\n","repo_name":"davidreyf/miprimerprograma","sub_path":"taxonomia.py","file_name":"taxonomia.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"1415740625","text":"'KappaPride (NinjaCode2k16[tm])'\nimport datetime\nimport json\nimport logging\nfrom operator import itemgetter\nfrom os.path import isfile\n\nimport discord.ext.commands as commands\n\nimport utils.checks as checks\nfrom utils.log import get_logger\n\n\ndef setup(bot):\n    'Adds the cog to the provided discord bot'\n    bot.add_cog(Timerboard(bot, 'fleetlist.json'))\n\n\nclass Timerboard:\n    '''A cog defining commands for controlling the\n    bot's timerboard functions'''\n\n    def __init__(self, bot, fname):\n        self.logger = get_logger(__name__, bot)\n        self.bot = bot\n        self.fname = fname\n\n    def loadjson(self, jsonname):\n        'A function which loads a json, given the filename'\n        try:\n            with open(jsonname) as data_file:\n                data = json.load(data_file)\n                self.logger.info('Json successfully loaded.')\n                return data\n        except FileNotFoundError:\n            return {\"fleets\": []}\n\n    def savejson(self, data, jsonname):\n        'A function which saves a json, given the filename'\n        try:\n            data['fleets'].sort(key=itemgetter('fleettime'))\n            with open(jsonname, 'wt') as outfile:\n                json.dump(data, outfile)\n            self.logger.info('Json successfully saved.')\n        except TypeError as error:\n            self.logger.error(error)\n\n    def listfleet(self, index, announce=False):\n        'Returns a string containing the fleet details given an index'\n        fleets = self.loadjson(self.fname)['fleets']\n        response = \"\"\n        if announce:\n            response += \"@everyone\\n\"\n        fleet = fleets[index]\n        response += \"**Fleet {}:**\\n\".format(index + 1)\n        ftime = datetime.datetime.strptime(fleet[\"fleettime\"],\n                                           '%Y-%m-%dT%H:%M:%S')\n        response += \"```\\nWhen: {}\\n\".format(ftime)\n        response += \"FC: {}\\n\".format(fleet[\"fc\"])\n        response += \"Type: {}\\n\".format(fleet['fleettype'])\n        response += \"Doctrine: {}\\n\".format(fleet['doctrine'])\n        response += \"Formup: {}```\\n\".format(fleet['formup'])\n        self.logger.info(response)\n        return response\n\n    def get_health(self):\n        '''Returns a string describing the status of this cog'''\n        if isfile(self.fname):\n            return '\\n \\u2714 {} exists'.format(self.fname)\n        else:\n            return '\\n \\u2716 No fleetlist file found'\n\n    @commands.command()\n    @commands.check(checks.is_admin)\n    async def addfleet(self, *args):\n        '''Adds a fleet to the list of fleets in the json.\n        Input fleets in the format\n        \"DD/MM/YYYY HH/MM FC FORMUP DOCTRINE FLEETTYPE'''\n        if not args or 
len(args) != 6:\n if args:\n response = \"You only entered {} argument(s)\".format(len(args))\n else:\n response = \"You didn't enter any arguments\"\n response += \". Please ensure all of the 6 arguments are entered.\"\n await self.bot.say(response)\n return\n try:\n fleetdtime = datetime.datetime.strptime((args[0] + args[1]),\n '%d/%m/%Y%H:%M')\n except ValueError:\n await self.bot.say(\"You entered an invalid date or time.\")\n return\n self.logger.info('Converted to datetime')\n if fleetdtime <= datetime.datetime.now():\n await self.bot.say(\"Date entered is before the current date.\")\n self.logger.warning(\"User entered an invalid date\")\n else:\n fleetjson = self.loadjson(self.fname)\n fleetjson[\"fleets\"].append({\n 'fleettime': fleetdtime.isoformat(),\n 'fc': args[2],\n 'formup': args[3],\n 'doctrine': args[4],\n 'fleettype': args[5],\n 'announced': False\n })\n self.savejson(fleetjson, self.fname)\n await self.bot.say(\"Fleet successfully added!\")\n\n @commands.command()\n @commands.check(checks.is_admin)\n async def removefleet(self, number: int = 0):\n 'Removes a fleet from the json via number on the list of fleets'\n fleetjson = self.loadjson(self.fname)\n if number > 0 and number <= len(fleetjson['fleets']):\n fleetjson['fleets'].pop(number - 1)\n self.savejson(fleetjson, self.fname)\n await self.bot.say(\"Fleet %d successfully removed.\" % number)\n else:\n await self.bot.say(\"You didn't enter a valid fleet number.\")\n\n @commands.command()\n @commands.check(checks.is_admin)\n async def listfleets(self):\n 'Lists all fleets to the chat in discord'\n fleets = self.loadjson(self.fname)['fleets']\n n_fleets = len(fleets)\n self.logger.info(fleets)\n listedfleets = 0\n for idx in range(n_fleets):\n if self.listfleet(idx) != []:\n await self.bot.say(self.listfleet(idx))\n listedfleets += 1\n if listedfleets == 0:\n await self.bot.say('No fleets to list.')\n\n @commands.command()\n @commands.check(checks.is_admin)\n async def announcefleets(self):\n 'Announces all un-announced fleets'\n fleetjson = self.loadjson(self.fname)\n n_fleets = len(fleetjson['fleets'])\n announced = False\n for idx in range(n_fleets):\n if not fleetjson['fleets'][idx][\"announced\"]:\n await self.bot.say(self.listfleet(idx, True))\n fleetjson['fleets'][idx][\"announced\"] = True\n self.savejson(fleetjson, self.fname)\n announced = True\n if not announced:\n await self.bot.say(\"All Fleets Announced!\")\n\n @commands.command()\n @commands.check(checks.is_admin)\n async def resetannouncefleets(self, number: str = ''):\n '''Resets the boolean specifying whether a fleet has been announced.\n Enter a fleet number to reset a specific fleet or \"all\" to reset all'''\n fleetjson = self.loadjson(self.fname)\n n_fleets = len(fleetjson['fleets'])\n if number.isdecimal():\n number = int(number)\n if number > 0 and number <= n_fleets:\n fleetjson['fleets'][int(number) - 1][\"announced\"] = False\n self.savejson(fleetjson, self.fname)\n await self.bot.say(\n \"Fleet %s's announcement status reset.\" % number)\n self.logger.info('User reset fleet %s\\'s announcement status',\n number)\n return\n elif number == '*' or number == 'all':\n for idx in range(n_fleets):\n fleetjson['fleets'][idx][\"announced\"] = False\n self.savejson(fleetjson, self.fname)\n await self.bot.say(\"All anouncement statuses reset.\")\n self.logger.info('User reset all announcement statuses')\n return\n\n error = \"Enter a valid fleet number to reset or * to reset all\"\n await 
self.bot.say(error)\n","repo_name":"randomic/antinub-gregbot","sub_path":"ext/timerboard.py","file_name":"timerboard.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10326309137","text":"class QueryInfo:\n\n def __init__(self, year, month, day, state, city):\n self.state = state\n self.city = city\n self.year = year\n self.month = month\n self.day = day\n\n def __str__(self):\n return \"Query for {day:02d}/{month:02d}/{year} location: {city} ({state})\".format(\n day=self.day, month=self.month, year=self.year, city=self.city, state=self.state)","repo_name":"DarioBernardo/temperature-bot","sub_path":"beans/QueryInfo.py","file_name":"QueryInfo.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20943319557","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport random\r\nimport threading\r\nimport re\r\n\r\n\r\nurl_xici = 'https://www.xicidaili.com/nn/'\r\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0', 'Connection':'keep-alive'}\r\nheader_ip3366 = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36'}\r\nheader_kuaidaili = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0'}\r\nurl_check = 'http://www.baidu.com/s?wd=ip'\r\n# url_check = 'https://movie.douban.com/subject/3878007/comments'\r\n\r\n\r\nclass MyThread(threading.Thread): # subclasses threading.Thread so the function's return value can be retrieved\r\n def __init__(self, func, args, name=''):\r\n threading.Thread.__init__(self)\r\n self.name = name\r\n self.func = func\r\n self.args = args\r\n\r\n def run(self):\r\n self.result = self.func(*self.args) # capture the function's return value\r\n\r\n def get_result(self):\r\n try:\r\n return self.result # return the result\r\n except Exception:\r\n return None\r\n\r\n\r\ndef threading_for_check_ip(uncheck_ip, ip_list):\r\n uncheck_ip_range = range(len(uncheck_ip))\r\n start_time = time.time()\r\n # First attempt at multithreading: create one thread per dict in the list, which spawns far too many threads\r\n # and makes validating the IPs barely faster than single-threaded - about 17s; trimming the for loops cuts it to roughly 10s\r\n threads = []\r\n for i in uncheck_ip_range:\r\n t = MyThread(check_ip, (uncheck_ip[i],), check_ip.__name__)\r\n t.start()\r\n threads.append(t)\r\n # for i in uncheck_ip_range:\r\n # threads[i].start()\r\n # for i in uncheck_ip_range:\r\n # threads[i].join()\r\n [t.join() for t in threads]\r\n # for i in uncheck_ip_range:\r\n # result = threads[i].get_result()\r\n # if result != None:\r\n # ip_list.append(result)\r\n # print(result)\r\n # else:\r\n # pass\r\n for t in threads:\r\n result = t.get_result()\r\n if result != None:\r\n ip_list.append(result)\r\n # print(result)\r\n else:\r\n pass\r\n print('Time_consuming:', time.time() - start_time)\r\n\r\n\r\n# The result is a list of dicts; each dict maps protocol to ip+port\r\n# dict example: {'HTTP': '125.105.105.231:9999'}\r\ndef get_ip_xici(html, uncheck_ip):\r\n soup = BeautifulSoup(html, 'html.parser')\r\n body = soup.find('table', {'id':'ip_list'})\r\n ip_list = body.find_all('tr', {'class':'odd'})\r\n for i in ip_list:\r\n data = i.find_all('td')\r\n pro = {}\r\n ip = data[1].string\r\n port = data[2].string\r\n protocol = data[5].string\r\n pro[protocol] = ip + ':' + port\r\n uncheck_ip.append(pro)
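\r\n\r\n\r\n# Illustrative (hypothetical) values: after get_ip_xici(html, uncheck_ip) the list\r\n# might hold entries such as [{'HTTP': '1.2.3.4:8080'}, {'HTTPS': '5.6.7.8:3128'}]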
\r\n\r\n# check_ip probes one proxy dict against url_check and returns the dict on success, None otherwise\r\ndef check_ip(pro_ip):\r\n try:\r\n response = requests.get(url=url_check, proxies=pro_ip, timeout=5)\r\n if response.status_code == 200:\r\n return pro_ip\r\n else:\r\n return None\r\n except:\r\n pass\r\n\r\n\r\ndef save_to_txt(ip_list):\r\n with open('ip_list.txt', 'a', encoding='utf-8')as f:\r\n for i in ip_list:\r\n # print(i)\r\n for key, value in i.items(): # iterate a dict's keys and values together\r\n f.writelines(key + ':' + value + u'\\n')\r\n # print(key + value)\r\n\r\n\r\ndef get_proxy_xici():\r\n start_time = time.time()\r\n page = 0\r\n next_page = url_xici\r\n ip_list = []\r\n uncheck_ip = []\r\n degree = 0\r\n while page < 2:\r\n try:\r\n html = requests.get(url=next_page, headers=header).content\r\n if html:\r\n get_ip_xici(html, uncheck_ip)\r\n # threading_for_check_ip(uncheck_ip, ip_list)\r\n page = page + 1\r\n if page == 2:\r\n break\r\n next_page = url_xici + str(page)\r\n page = int(page)\r\n time.sleep(20 + random.randint(20, 100) / 20)\r\n except Exception as e:\r\n print(e)\r\n if page > 0:\r\n page = page - 1\r\n time.sleep(40 + random.randint(20, 100) / 10)\r\n threading_for_check_ip(uncheck_ip, ip_list)\r\n # print(ip_list)\r\n end_time = time.time()\r\n print('Crawling finished, total crawl time:', end_time - start_time)\r\n return ip_list\r\n\r\n\r\ndef get_proxy_ip3366():\r\n start_time = time.time()\r\n url_ip3366 = 'http://www.ip3366.net/free/?stype=1&page={}'\r\n number = 1\r\n ip_list = []\r\n uncheck_ip_list = []\r\n while number < 3:\r\n try:\r\n html = requests.get(url_ip3366.format(str(number)), headers=header_ip3366).text\r\n ip_init = re.findall('<td>\\d+\\.\\d+\\.\\d+\\.\\d+</td>', html)\r\n port_init = re.findall('<td>\\d*</td>', html)\r\n proc_init = re.findall('<td>HTTP\\w?</td>', html)\r\n split_built_init_data(ip_init, port_init, proc_init, uncheck_ip_list)\r\n number = number + 1\r\n time.sleep(14 + random.randint(20, 100) / 20)\r\n except Exception as e:\r\n print(e)\r\n if number > 1:\r\n number = number - 1\r\n time.sleep(40 + random.randint(20, 100) / 10)\r\n threading_for_check_ip(uncheck_ip_list, ip_list)\r\n end_time = time.time()\r\n print('Crawling finished, total crawl time:', end_time - start_time)\r\n return ip_list\r\n\r\n\r\ndef get_proxy_kuaidaili():\r\n start_time = time.time()\r\n url_kuaidaili = 'https://www.kuaidaili.com/free/inha/{}/'\r\n number = 1\r\n ip_list = []\r\n uncheck_ip_list = []\r\n while number < 3:\r\n try:\r\n html = requests.get(url_kuaidaili.format(str(number)), headers=header_kuaidaili).text\r\n ip_init = re.findall('<td>\\d+\\.\\d+\\.\\d+\\.\\d+</td>', html)\r\n port_init = re.findall('<td>\\d*</td>', html)\r\n proc_init = re.findall('<td>HTTP\\w?</td>', html)\r\n split_built_init_data(ip_init, port_init, proc_init, uncheck_ip_list)\r\n number = number + 1\r\n time.sleep(14 + random.randint(20, 100) / 20)\r\n except Exception as e:\r\n print(e)\r\n if number > 1:\r\n number = number - 1\r\n time.sleep(40 + random.randint(20, 100) / 10)\r\n threading_for_check_ip(uncheck_ip_list, ip_list)\r\n end_time = time.time()\r\n print('Crawling finished, total crawl time:', end_time - start_time)\r\n return ip_list\r\n\r\n\r\n# Process the regex matches, which look like <td>113.122.169.10</td>\r\n# Split them first, extract the data, then assemble ip, port and proc into the form [{'HTTPS': '116.209.55.63:9999'}, {'HTTPS': '49.85.179.47:9999'}]\r\ndef split_built_init_data(ip_init, port_init, proc_init, uncheck_ip_list):\r\n for i in range(len(ip_init)):\r\n ip_init[i] = re.split('[><]+', ip_init[i])[2]\r\n port_init[i] = re.split('[<>]+', port_init[i])[2]\r\n proc_init[i] = re.split('[<>]', proc_init[i])[2]\r\n for i in range(len(ip_init)):\r\n proxy = {}\r\n proxy[proc_init[i]] = ip_init[i] + ':' + port_init[i]\r\n uncheck_ip_list.append(proxy)
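\r\n\r\n\r\n# Sketch of what split_built_init_data produces, with hypothetical inputs:\r\n# ip_init = ['<td>1.2.3.4</td>'], port_init = ['<td>8080</td>'], proc_init = ['<td>HTTP</td>']\r\n# -> uncheck_ip_list == [{'HTTP': '1.2.3.4:8080'}]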
\r\n\r\n\r\ndef main_get():\r\n number = [1, 2, 3]\r\n while True:\r\n flag = random.choice(number)\r\n # flag = 1\r\n ip_list = []\r\n if flag == 1:\r\n print('Using the xicidaili proxy site!')\r\n ip_list = get_proxy_xici()\r\n if ip_list == []:\r\n number.remove(1)\r\n else:\r\n return ip_list\r\n elif flag == 2:\r\n print('Using the ip3366 proxy site!')\r\n ip_list = get_proxy_ip3366()\r\n if ip_list == []:\r\n number.remove(2)\r\n else:\r\n return ip_list\r\n elif flag == 3:\r\n print('Using the kuaidaili proxy site!')\r\n ip_list = get_proxy_kuaidaili()\r\n if ip_list == []:\r\n number.remove(3)\r\n else:\r\n return ip_list\r\n\r\n\r\nif __name__ == '__main__':\r\n final_list = main_get()\r\n print(final_list)\r\n # save_to_txt(final_list)\r\n","repo_name":"zhangyongming13/Tupian","sub_path":"get_proxy.py","file_name":"get_proxy.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32943603099","text":"from machine import Pin, ADC, PWM\nimport utime, _thread, math\n\npot = ADC(Pin(28))\ntach = Pin(26, Pin.IN)\nout = PWM(Pin(22))\n\ncount = 0\nRPM = 0\n\nping1 = utime.ticks_ms()\nprint(\"Start time: \" + str(ping1))\n\nout.freq(25000) #25kHz\ndef counter(self):\n global count\n count += 1\n \ntach.irq(handler = counter, trigger = Pin.IRQ_FALLING)\n\nwhile True:\n #Check time difference, update RPM\n CurrentTime = utime.ticks_ms()\n timeDiff = utime.ticks_diff(CurrentTime, ping1)\n if timeDiff > 1000:\n #print(\"timeDiff = \"+ str(timeDiff))\n RPM = (1000/timeDiff * count) * 30\n count = 0\n ping1 = utime.ticks_ms()\n PotVal = pot.read_u16()\n PotPercent = round((PotVal / 65535)*100)\n #print(str(PotPercent) + \" \" + str(RPM) + \"rpm\")\n print(str(PotPercent) + \", \" +str(RPM))\n out.duty_u16(PotVal)\n utime.sleep(0.5)\n","repo_name":"scealux/PiPicoProjects","sub_path":"FanControlwTach.py","file_name":"FanControlwTach.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72581162132","text":"import pickle\nimport pandas as pd\nimport re\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom utils import *\nimport sklearn\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom sklearn.preprocessing import KBinsDiscretizer\n\nnssi_file = \"/datos/erisk/ml/data/nssicorpus.txt\"\n\ndef featurize(calculate_feats=False, normalize=False, discretize=False, scale=False, \n discretize_size=10, dis_strategy=\"kmeans\", include_feats=[], train_users=None, test_users=None, save=False):\n \n nssi_corpus = load_nssi_corpus()\n \n logger(\"Featurizing calculate_feats={}, normalize={}, discretize={}, discretize_size={}, include_feats={}\".format(calculate_feats, normalize, discretize, discretize_size, include_feats))\n \n if calculate_feats:\n if train_users is None or test_users is None:\n train_users = load_pickle(pickle_path, \"train_users.pkl\")\n test_users = load_pickle(pickle_path, \"test_users.pkl\")\n X_train = train_users[\"clean_text\"]\n X_test = test_users[\"clean_text\"]\n \n logger(\"Data size: {}, {}\".format(X_train.shape[0], train_users.shape[0]))\n logger(\"Data size: {}, {}\".format(X_test.shape[0], test_users.shape[0]))\n \n feats_train = calculate_features(X_train, train_users, nssi_corpus, include_feats)\n feats_test = calculate_features(X_test, test_users, nssi_corpus, include_feats)\n \n save_pickle(pickle_path, \"feats_train_original.pkl\", 
feats_train)\n save_pickle(pickle_path, \"feats_test_original.pkl\", feats_test)\n \n else:\n feats_train = load_pickle(pickle_path, \"feats_train_original.pkl\")\n feats_test = load_pickle(pickle_path, \"feats_test_original.pkl\")\n\n \n if normalize:\n logger(\"Normalizing features\")\n feats_train = normalize_features(feats_train)\n feats_test = normalize_features(feats_test)\n if scale: \n logger(\"Scaling features\")\n feats_train, feats_test = scale_features(feats_train, feats_test)\n if discretize:\n logger(\"Discretizing\")\n feats_train, feats_test = discretize_features(feats_train, feats_test, size=discretize_size, strategy=dis_strategy)\n \n if save:\n logger(\"Saving variables to memory\")\n save_pickle(pickle_path, \"feats_train.pkl\", feats_train)\n save_pickle(pickle_path, \"feats_test.pkl\", feats_test)\n \n return feats_train, feats_test\n \n\ndef calculate_features(X, users, nssi_corpus, include_features=[]):\n \n feats = pd.DataFrame()\n #text len\n feats['char_count'] = X.map(len)\n #word count\n feats['word_count'] = X.map(lambda x: len(x.split()))\n \n #special features\n #first prons\n if 'first_prons' in include_features:\n logger(\"Calculating first prons\")\n reg = r'\\bI\\b|\\bme\\b|\\bmine\\b|\\bmy\\b|\\bmyself\\b'\n feats['first_prons'] = X.map(lambda x: len(re.findall(reg, x)))\n # sentiment analysis\n if 'sentiment' in include_features:\n logger(\"Calculating sentiment\")\n sid = SentimentIntensityAnalyzer()\n feats['sentiment'] = X.map(lambda x: round(sid.polarity_scores(x)['compound'], 2))\n \n if 'nssi' in include_features:\n logger(\"Calculating NSSI words\")\n # nssi words\n for key, values in nssi_corpus.items():\n feats[key] = users['stems'].map(lambda x: sum((' '.join(x)).count(word) for word in values))\n \n return feats\n\n\ndef load_nssi_corpus():\n with open(nssi_file, 'r') as file:\n nssi_corpus_original = file.read()\n\n nssi_corpus = nssi_corpus_original.replace('*', '')\n nssi_corpus = nssi_corpus.replace(\"Methods of NSSI\", '')\n nssi_corpus = nssi_corpus.replace(\"NSSI Terms\", '')\n nssi_corpus = nssi_corpus.replace(\"Instruments Used\", '')\n nssi_corpus = nssi_corpus.replace(\"Reasons for NSSI\", '')\n\n keys = [\"methods\", \"terms\", \"instruments\", \"reasons\"]\n\n nssi_corpus = nssi_corpus.split(':')\n nssi_corpus.remove('')\n nssi_corpus = [corpus.split(\"\\n\") for corpus in nssi_corpus]\n new_nssi_corpus = {}\n for idx, corpus in enumerate(nssi_corpus):\n new_list = [word for word in corpus if word != \"\"]\n new_nssi_corpus[keys[idx]] = new_list\n\n return new_nssi_corpus\n\n\ndef select_features(feats_train, feats_test, exclude_feats=[], normalize=False, discretize=False, discretize_size=10):\n feats_train_ret = feats_train.copy()\n feats_test_ret = feats_test.copy()\n \n for feat in exclude_feats:\n feats_train_ret.drop(feat, inplace=True, axis=1)\n feats_test_ret.drop(feat, inplace=True, axis=1)\n \n if normalize:\n feats_train_ret = normalize_features(feats_train_ret)\n feats_test_ret = normalize_features(feats_test_ret)\n \n if discretize:\n feats_train_ret, feats_test_ret = discretize_features(feats_train_ret, feats_test_ret, size=discretize_size)\n else:\n feats_train_ret = feats_train_ret.values\n feats_test_ret = feats_test_ret.values\n \n return feats_train_ret, feats_test_ret\n\n\n\ndef scale_features(feats_train, feats_test):\n scaler = StandardScaler()\n train_features = scaler.fit_transform(feats_train)\n \n test_features = scaler.transform(feats_test)\n\n train_features = np.clip(train_features, -5, 5)\n 
test_features = np.clip(test_features, -5, 5)\n \n logger('Training features shape: {}'.format(train_features.shape))\n logger('Test features shape: {}'.format(test_features.shape))\n \n return train_features, test_features\n\n\nnormalize_exceptions = ['char_count', 'word_density']\ndef normalize_features(feats):\n text_length = feats[\"char_count\"]\n \n norm_feats = feats.copy()\n for feature in feats.columns:\n if feature not in normalize_exceptions:\n norm_feats[feature] = feats[feature] / text_length\n \n return norm_feats\n\n\ndef discretize_features(train_feats, test_feats, size=10, strategy='kmeans', encode='onehot-dense'):\n est = KBinsDiscretizer(n_bins=size, encode=encode, strategy=strategy)\n train = est.fit_transform(train_feats)\n test = est.transform(test_feats)\n\n return train, test","repo_name":"ele94/early-risk-ensemble","sub_path":"featurizing.py","file_name":"featurizing.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35781606897","text":"import os\nimport json\nimport requests\nfrom pprint import pprint\n\ndef getWallets():\n\t\"\"\"\n\tRetrieve a list of wallets\n\n\tReturns:\n\t\twallets (list) if error None\n\t\"\"\"\n\n\t# REQUIRED: replace environment variables for your URL and ADMIN_API_KEY\n\tapi = os.environ[\"LNBITS_URL\"] + \"/usermanager/api/v1/wallets\"\n\theaders = {\"X-Api-Key\": os.environ[\"LNBITS_ADMIN_API_KEY\"]} \n\n\tresp = requests.get(api, headers=headers)\n\n\tif resp.status_code == 200:\n\t\treturn json.loads(resp.text)\n\telse:\n\t\tprint (f\"Error code : {resp.status_code}\")\n\t\tprint (f\"Error message : {resp.text}\")\n\t\treturn None\n\ndef getWalletBalance(apikey):\n\t\"\"\"\n\tRetrieve a balance for a given user key\n\n\tArgs:\n\t\tapikey (str): a user's api key\n\tReturns:\n\t\tsats (int) if error None\n\t\"\"\"\n\n\tapi = os.environ[\"LNBITS_URL\"] + \"/api/v1/wallet\"\n\t# REQUIRED: NOT ADMIN_API_KEY but USER_API_KEY for the user\n\theaders = {\"X-Api-Key\": apikey}\n\n\tresp = requests.get(api, headers=headers)\n\n\tif resp.status_code == 200:\n\t\t# returned value is msats, so convert it into sats\n\t\treturn int(json.loads(resp.text)[\"balance\"]/1000)\n\telse:\n\t\tprint (f\"Error code : {resp.status_code}\")\n\t\tprint (f\"Error message : {resp.text}\")\n\t\treturn None\n\ndef createInvoice(receiver, sats):\n\t\"\"\"\n\tCreate an invoice for a given user key and amount in sats\n\n\tArgs:\n\t\treceiver (dict): a receiver's wallet name and apikey\n\t\tsats (int): amount in satoshis\n\tReturns:\n\t\tpayment hash (str) if error None\n\t\"\"\"\n\n\tapi = os.environ[\"LNBITS_URL\"] + \"/api/v1/payments\"\n\n\theaders = {\"X-Api-Key\": receiver[\"apikey\"]}\n\tmemo = f\"Created invoice from {receiver['walletname']}\"\n\n\tparams = {\"out\": False, \"amount\": sats, \"memo\": memo,\n\t\t\t\"unit\": \"sat\", \"lnurl_callback\": True}\n\n\tresp = requests.post(api, headers=headers, json=params)\n\t\n\tif resp.status_code == 201:\n\t\treturn json.loads(resp.text)[\"payment_hash\"]\n\telse:\n\t\tprint (f\"Error code : {resp.status_code}\")\n\t\tprint (f\"Error message : {resp.text}\")\n\t\treturn None\n\ndef getInvoice(receiver, paymentHash):\n\t\"\"\"\n\tRetrieve an invoice for a given payment hash and api key\n\n\tArgs:\n\t\treceiver (dict): a receiver's wallet name and apikey\n\t\tpaymentHash (str): the payment hash returned by createInvoice\n\tReturns:\n\t\tbolt11 invoice (str) if error None\n\t\"\"\"\n\n\tapi = os.environ[\"LNBITS_URL\"] 
+ \"/api/v1/payments\"\n\theaders = {\"X-Api-Key\": receiver[\"apikey\"]}\n\n\tresp = requests.get(api + f\"/{paymentHash}\", headers=headers)\n\n\tif resp.status_code == 200:\n\t\treturn json.loads(resp.text)[\"details\"][\"bolt11\"]\n\telse:\n\t\tprint (f\"Error code : {resp.status_code}\")\n\t\tprint (f\"Error message : {resp.text}\")\n\t\treturn None\n\ndef payInvoice(sender, receiver, invoice, msats):\n\t\"\"\"\n\tPay invoice from sender to receiver\n\n\tArgs:\n\t\tsender (dict): a sender's wallet name and apikey\n\t\treceiver (dict): a receiver's wallet name and apikey\n\t\tinvoice (str): bolt11 invoice\n\t\tmsats (int): milli sats (1 sat == 1000 msats)\n\tReturns:\n\t\tTrue if the payment succeeded (HTTP 201), False otherwise\n\t\"\"\"\n\tapi = os.environ[\"LNBITS_URL\"] + \"/api/v1/payments\"\n\n\theaders = {\"X-Api-Key\": sender[\"apikey\"]}\n\tsats = msats/1000\n\tmemo = f\"Sender ({sender['walletname']} sends {sats} sats to {receiver['walletname']})\"\n\n\n\tparams = {\"out\": True, \"bolt11\": invoice, \"memo\": memo}\n\tresp = requests.post(api, headers=headers, json=params)\n\treturn resp.status_code == 201\n\ndef run():\n\tif os.environ[\"LNBITS_URL\"] == \"\" or \\\n\t\tos.environ[\"LNBITS_ADMIN_API_KEY\"] == \"\" or \\\n\t\tos.environ[\"LNBITS_USER_API_KEY\"] == \"\":\n\t\tprint (\"Set variables first!\")\n\t\treturn\n\n\tSATS = 1\n\n\t#1. get wallets from LNbits\n\twallets = getWallets()\n\tprint (\"wallets: \")\n\tpprint (wallets)\n\n\t#2. handle invoices\n\tsender = {\"walletname\": \"Admin\", \"apikey\": os.environ[\"LNBITS_ADMIN_API_KEY\"]}\n\treceiver = {\"walletname\": \"User\", \"apikey\": os.environ[\"LNBITS_USER_API_KEY\"]}\n\n\tbalance = getWalletBalance(os.environ[\"LNBITS_USER_API_KEY\"])\n\tprint (\"balance of user before receiving sats from admin: \", balance)\n\tpaymentHash = createInvoice(receiver, SATS)\n\tprint (\"payment hash: \", paymentHash)\n\tinvoice = getInvoice(receiver, paymentHash)\n\tprint (\"bolt11 invoice: \", invoice)\n\tisPaid = payInvoice(sender, receiver, invoice, SATS)\n\tprint (\"pay invoice result \", isPaid)\n\tbalance = getWalletBalance(os.environ[\"LNBITS_USER_API_KEY\"])\n\tprint (\"balance of user after receiving sats from admin: \", balance)\n\t\nif __name__ == \"__main__\":\n\tos.environ[\"LNBITS_URL\"] = \"http://localhost:3007\"\n\tos.environ[\"LNBITS_ADMIN_API_KEY\"] = \"\"\n\tos.environ[\"LNBITS_USER_API_KEY\"] = \"\"\n\n\trun()\n","repo_name":"sudalofsatoshi/lnutils","sub_path":"lnbits.py","file_name":"lnbits.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8138230852","text":"class BankAccount:\r\n \r\n accounts = []\r\n\r\n def __init__(self, int_rate, bankBalance): \r\n self.int_rate = int_rate\r\n self.bankBalance = bankBalance\r\n BankAccount.accounts.append(self)\r\n def deposit(self, amount):\r\n self.bankBalance += amount\r\n return self\r\n def withdraw(self, amount):\r\n if (self.bankBalance-amount) >= 0:\r\n self.bankBalance -= amount\r\n else: \r\n print(\"insufficient funds.\")\r\n return self\r\n def display_account_info(self):\r\n print('Balance is: ',self.bankBalance)\r\n\r\n def yield_interest(self):\r\n if self.bankBalance>0:\r\n self.bankBalance=(self.bankBalance*self.int_rate) + self.bankBalance\r\n return self\r\n\r\n @classmethod\r\n def print_all_accounts(cls):\r\n for account in cls.accounts:\r\n account.display_account_info()\r\n\r\n\r\naccount1 = BankAccount(0.3, 1300)\r\naccount2 = BankAccount(0.2, 
1000)\r\naccount1.deposit(200).deposit(100).deposit(300).withdraw(500).display_account_info()\r\naccount2.deposit(100).deposit(10).deposit(30).withdraw(50).display_account_info()\r\nBankAccount.print_all_accounts()\r\n","repo_name":"dhsinjari/Python-OOP","sub_path":"BankAccount.py","file_name":"BankAccount.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2774787799","text":"# Given a string of words, you need to find the highest scoring word.\n\n# Each letter of a word scores points according to its position in the alphabet: a = 1, b = 2, c = 3 etc.\n\n# You need to return the highest scoring word as a string.\n\n# If two words score the same, return the word that appears earliest in the original string.\n\n# All letters will be lowercase and all inputs will be valid.\n\n# https://www.codewars.com/kata/highest-scoring-word\n\nimport operator\n\ndef high(x):\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n x_list = x.split()\n higher_word_list = []\n count = 0\n for word in x_list:\n count -= 1\n value = 0\n for letter in word:\n value = value + alphabet.index(letter) + 1\n higher_word_list.append([value, count, word])\n return sorted(higher_word_list, reverse=True, key=operator.itemgetter(0, 1))[0][2]","repo_name":"felipelfb/Python-Katas","sub_path":"6 kyu/highest_scoring_word.py","file_name":"highest_scoring_word.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28908353004","text":"class Solution:\n def minMoves2(self, nums: List[int]) -> int:\n mid = (len(nums) - 1) // 2\n result = 0\n nums = sorted(nums)\n for i in nums:\n result += abs(i - nums[mid])\n return result\n\n \n def twoline(self, nums):\n median = sorted(nums)[len(nums) // 2]\n return sum(abs(num - median) for num in nums)\n","repo_name":"longhao54/leetcode","sub_path":"normal/462.py","file_name":"462.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32680556210","text":"import numpy as np\r\nimport random as rnd\r\noptions = ['1', '2', '3']\r\nMINIMUM_POSSIBLE_VALUE = 1\r\nMAXIMUM_POSSIBLE_VALUE = 99\r\n\r\n\r\ndef numeric_input():\r\n try:\r\n number = int(input())\r\n except:\r\n print('Please use numeric digits!')\r\n return numeric_input()\r\n return number\r\n\r\n\r\ndef input_order():\r\n print('Enter the order of your matrix: ')\r\n order = numeric_input()\r\n if order > 0:\r\n return order\r\n print('The order should be greater than zero!')\r\n return input_order()\r\n\r\n\r\ndef interval_input():\r\n print('Enter the endpoints of the interval: ')\r\n a, b = numeric_input(), numeric_input()\r\n if (a <= b) and (a >= MINIMUM_POSSIBLE_VALUE and b <= MAXIMUM_POSSIBLE_VALUE):\r\n return a, b\r\n print('The interval must satisfy 1 <= a <= b <= 99!')\r\n return interval_input()\r\n\r\n\r\ndef generate_order():\r\n a, b = interval_input()\r\n n = rnd.randint(a, b)\r\n return n\r\n\r\n\r\ndef init_matrix(matrix, order):\r\n element = 1\r\n n = order\r\n for row in range(order):\r\n for column in range(order):\r\n if(column >= n and row != 0):\r\n continue\r\n matrix[row][column] = element\r\n n -= 1\r\n element += 1\r\n return matrix\r\n\r\n\r\ndef print_matrix(matrix, order):\r\n for row in range(order):\r\n for column in range(order):\r\n if matrix[row][column]/10 >= 1:\r\n print(int(matrix[row][column]), end=\" \")\r\n continue\r\n print(int(matrix[row][column]), end=\" \")\r\n print('\\n')
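\r\n\r\n\r\n# Note: as written, init_matrix never resets n per row, so for order 3 the result is\r\n# [[1, 2, 3], [0, 0, 0], [0, 0, 0]] - only the first row gets filled.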
\r\n\r\n\r\ndef users_menu():\r\n users_choice = input(\r\n 'Choose an option:\n1 - to enter the order of your matrix\n2 - to get the order of your matrix randomly generated\n3 - to exit\n')\r\n if not(users_choice in options):\r\n print('Choose an option from the menu!')\r\n return users_menu()\r\n else:\r\n if users_choice == '3':\r\n print('The session is over!')\r\n return\r\n if users_choice == '1':\r\n order = input_order()\r\n if users_choice == '2':\r\n order = generate_order()\r\n matrix = np.zeros((order, order))\r\n init_matrix(matrix, order)\r\n print_matrix(matrix, order)\r\n return users_menu()\r\n\r\n\r\nusers_menu()","repo_name":"alexolashyn/python-practice-3term","sub_path":"програмування/task2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71329347413","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom pymongo import MongoClient\n\n\nclass JobparserPipeline:\n def __init__(self):\n client = MongoClient('localhost', 27017)\n self.mongo_base = client.vacancy\n \n \n def process_item(self, item, spider):\n collection = self.mongo_base[spider.name]\n \n src = spider.name\n salary = item['item_salary']\n url = item['item_url']\n \n if spider.name == 'hhru':\n item['_id'], item['link'] = self.process_hh_url(url, src)\n item['min_salary'], item['max_salary'], item['currency'] = self.process_hh_salary(salary) \n elif spider.name == 'sjru':\n item['_id'], item['link'] = self.process_sj_url(url, src)\n item['min_salary'], item['max_salary'], item['currency'] = self.process_sj_salary(salary) \n else:\n pass\n \n del item['item_salary']\n del item['item_url']\n\n collection.update_one({'_id': item['_id']}, {'$set': item}, upsert = True)\n return item\n \n \n def process_hh_salary(self, salary):\n if len(salary) == 1:\n min_salary, max_salary, currency = None, None, None\n elif len(salary) == 7:\n min_salary = int(salary[1].replace('\xa0', ''))\n max_salary = int(salary[3].replace('\xa0', ''))\n currency = salary[5]\n elif len(salary) == 5:\n if salary[0] == 'от ':\n min_salary = int(salary[1].replace('\xa0', ''))\n max_salary = None\n currency = salary[3]\n elif salary[0] == 'до ':\n min_salary = None\n max_salary = int(salary[1].replace('\xa0', ''))\n currency = salary[3]\n else: # catch any variant I may have missed\n min_salary, max_salary, currency = 'missed_from_to', 'missed_from_to', 'missed_from_to'\n else: # catch any other missed variant here as well\n min_salary, max_salary, currency = 'missed_len', 'missed_len', 'missed_len'\n return min_salary, max_salary, currency\n \n def process_hh_url(self, url, src):\n _id = src + '_' + url[url.rindex('https://kaliningrad.hh.ru/vacancy/') + 34 : url.rindex('?')]\n url = url[: url.rindex('?')]\n return _id, url
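\n # Example with a hypothetical url:\n # 'https://kaliningrad.hh.ru/vacancy/12345678?from=search'\n # -> ('hhru_12345678', 'https://kaliningrad.hh.ru/vacancy/12345678')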
\n \n def process_sj_salary(self, salary):\n if len(salary) == 1:\n min_salary, max_salary, currency = None, None, None\n elif len(salary) == 4:\n min_salary = int(salary[0].replace('\xa0', ''))\n max_salary = int(salary[1].replace('\xa0', ''))\n currency = salary[3]\n \n elif len(salary) == 3:\n if salary[0] == 'от':\n min_salary = int(salary[2][: salary[2].rindex('\xa0')].replace('\xa0', ''))\n max_salary = None\n currency = salary[2][salary[2].rindex('\xa0') + 1:]\n elif salary[0] == 'до':\n min_salary = None\n max_salary = int(salary[2][: salary[2].rindex('\xa0')].replace('\xa0', ''))\n currency = salary[2][salary[2].rindex('\xa0') + 1:]\n else: \n min_salary = int(salary[0])\n max_salary = int(salary[0])\n currency = salary[2]\n else: # catch any other missed variant here as well\n min_salary, max_salary, currency = 'missed_len', 'missed_len', 'missed_len'\n return min_salary, max_salary, currency\n \n def process_sj_url(self, url, src):\n _id = src + '_' + url[url.rindex('-') + 1 : url.rindex('.')]\n url = url\n return _id, url\n \n","repo_name":"hellge83/AI_collect_data","sub_path":"les06/jobparser/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36056589456","text":"\"\"\"\nComplete the function to print spiral order traversal of a tree. For below tree, function\nshould print 1, 2, 3, 4, 5, 6, 7.\n\"\"\"\n\n\nclass TreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def __repr__(self):\n return '{}'.format(self.data)\n\n\ndef height(node: TreeNode):\n if not node:\n return 0\n return 1 + max(height(node.left), height(node.right))\n\n\ndef level_order_traversal(node: TreeNode, level: int):\n if not node:\n return\n if level == 1:\n print(node)\n else:\n level_order_traversal(node.left, level - 1)\n level_order_traversal(node.right, level - 1)\n\n\ndef spiral_order_traversal(node: TreeNode, level: int, left_to_right: bool):\n if not node:\n return\n if level == 1:\n print(node)\n else:\n if left_to_right:\n spiral_order_traversal(node.left, level - 1, left_to_right)\n spiral_order_traversal(node.right, level - 1, left_to_right)\n else:\n spiral_order_traversal(node.right, level - 1, left_to_right)\n spiral_order_traversal(node.left, level - 1, left_to_right)\n\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.left.left = TreeNode(7)\n root.left.right = TreeNode(6)\n root.right.left = TreeNode(5)\n root.right.right = TreeNode(4)\n\n left_to_right = False\n\n for i in range(1, height(root) + 1):\n spiral_order_traversal(node=root, level=i, left_to_right=left_to_right)\n left_to_right = not left_to_right\n","repo_name":"smartinsert/CodingProblem","sub_path":"amazon/spiral_order_traversal.py","file_name":"spiral_order_traversal.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9692534439","text":"from flask import Flask, request, jsonify\nfrom charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,GT,pair\nfrom charm.schemes.abenc.abenc_yct14 import EKPabe\nfrom charm.core.engine.util import objectToBytes, bytesToObject\nimport logging\nimport base64\nimport json\nimport os\nimport ast\n\nos.environ['PYTHONUTF8'] = '1'\n\nlogging.basicConfig(level=logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\napp = Flask(__name__)\n\n\ngroup = PairingGroup('MNT224')\nkpabe = EKPabe(group)\n\nwith open('data.json', 'r') as file:\n existing_data = json.load(file)\n\n@app.route(\"/check\")\ndef hello():\n try:\n print('test api')\n group = PairingGroup('MNT224')\n kpabe = EKPabe(group)\n attributes = [ 'ONE1', 'two', 'THREE']\n (master_public_key, master_key) = kpabe.setup(attributes)\n policy = '(ONE1 or THREE) and (THREE or two)'\n secret_key = kpabe.keygen(master_public_key, master_key, policy)\n print(policy,attributes)\n print('secret type',type(secret_key))\n msg = 
\"Some Random Message\"\n cipher_text = kpabe.encrypt(master_public_key, msg.encode(\"utf-8\"), attributes)\n print('cipher type',type(cipher_text))\n decrypted_msg = kpabe.decrypt(cipher_text, secret_key)\n if(msg==decrypted_msg):\n print('msg is same',decrypted_msg)\n else:\n print('not same msg')\n\n return jsonify({'decrypty':str(decrypted_msg)})\n except Exception as error:\n print('error is:',error)\n\n@app.route(\"/encryption\", methods=['POST'])\ndef encryption():\n try:\n global existing_data\n patient_encrypted = {}\n input_json = request.get_json(force=True)\n p_dict = input_json['patient']\n p_dict['user'] = str(p_dict['user'])\n p_dict['status'] = str(p_dict['status'])\n patient_attributes = [input_json['doctor']['username'].upper(),input_json['patient']['treatment_type'].upper(),input_json['doctor']['department'].upper()]\n policy = '('+input_json['doctor']['username'].upper()+' or '+input_json['doctor']['department'].upper()+') and ('+input_json['doctor']['department'].upper()+' or '+input_json['patient']['treatment_type'].upper()+')'\n print('check policy and attributes')\n print('making master keys.....',policy,patient_attributes)\n (master_public_key, master_key) = kpabe.setup(patient_attributes)\n print('master key done, generating secret key')\n secret_key = kpabe.keygen(master_public_key, master_key, policy)\n print('secret key generated',type(secret_key))\n for key,value in p_dict.items():\n print('check value',key,value,type(value))\n if value:\n cipher_text = kpabe.encrypt(master_public_key, value.encode(\"utf-8\"), patient_attributes)\n patient_encrypted[key] = str(objectToBytes(cipher_text,group))\n patient_encrypted['secret_key'] = str(objectToBytes(secret_key,group))\n existing_data[str(p_dict['user'])] = patient_encrypted['secret_key']\n with open('data.json', 'w') as file:\n json.dump(existing_data, file, indent=4)\n return jsonify(patient_encrypted)\n except Exception as error:\n logger.error(error)\n\n\n@app.route(\"/decryption\", methods=['POST'])\ndef decryption():\n # try:\n global group,kpabe\n all_obj = []\n input_json = request.get_json(force=True)\n patients = input_json['patient']\n for patient in patients:\n decrypted_obj = {}\n print(patient['user_id'])\n secret_key = bytesToObject(ast.literal_eval(patient['decryption']),group)\n print('secret type',type(secret_key))\n print('check type of address',type(ast.literal_eval(patient['address'])))\n address_encrypted = bytesToObject(ast.literal_eval(patient['address']),group)\n check_address = kpabe.decrypt(address_encrypted, secret_key)\n print('check address',check_address.decode('utf-8'))\n\n admitDate_encrypted = bytesToObject(ast.literal_eval(patient['admitDate']),group)\n check_admitDate = kpabe.decrypt(admitDate_encrypted, secret_key)\n print('check admitDate',check_admitDate.decode('utf-8'))\n\n status_encrypted = bytesToObject(ast.literal_eval(patient['status']),group)\n check_status = kpabe.decrypt(status_encrypted, secret_key)\n print('check status',check_status.decode('utf-8'))\n\n treatment_type_encrypted = bytesToObject(ast.literal_eval(patient['treatment_type']),group)\n treatment_type_status = kpabe.decrypt(treatment_type_encrypted, secret_key)\n print('check treatment_type',treatment_type_status.decode('utf-8'))\n\n bp_1s_encrypted = bytesToObject(ast.literal_eval(patient['bp_1s']),group)\n bp_1s_status = kpabe.decrypt(bp_1s_encrypted, secret_key)\n print('check bp_1s',bp_1s_status.decode('utf-8'))\n \n cholesterol_level_encrypted = 
bytesToObject(ast.literal_eval(patient['cholesterol_level']),group)\n cholesterol_level_status = kpabe.decrypt(cholesterol_level_encrypted, secret_key)\n print('check cholesterol_level',cholesterol_level_status.decode('utf-8'))\n\n notes_encrypted = bytesToObject(ast.literal_eval(patient['notes']),group)\n notes_status = kpabe.decrypt(notes_encrypted, secret_key)\n print('check notes',notes_status.decode('utf-8'))\n\n weight_lb_encrypted = bytesToObject(ast.literal_eval(patient['weight_lb']),group)\n weight_lb_status = kpabe.decrypt(weight_lb_encrypted, secret_key)\n print('check weight_lb',weight_lb_status.decode('utf-8'))\n\n decrypted_obj={\n 'user_id': str(patient['user_id']),\n 'weight_lb': str(weight_lb_status.decode('utf-8')),\n 'notes': str(notes_status.decode('utf-8')),\n 'cholesterol_level' : str(cholesterol_level_status.decode('utf-8')),\n 'bp_1s' : str(bp_1s_status.decode('utf-8')),\n 'treatment_type' : str(treatment_type_status.decode('utf-8')),\n 'status' : str(check_status.decode('utf-8')),\n 'address' : str(check_address.decode('utf-8')),\n }\n print(decrypted_obj)\n logger.info('decrypted object :'+str(decrypted_obj)) \n all_obj.append(decrypted_obj)\n print(all_obj)\n # logger.info('all decrypted object :'+type(all_obj))\n logger.info('all decrypted object :'+str(all_obj)) \n logger.info('all decrypted jsonified object :'+ json.dumps(all_obj) )\n\n return json.dumps(all_obj)\n # except Exception as error:\n # logger.error('decryption error :'+str(error))\n\n\nif __name__ == \"__main__\":\n app.run(host='172.29.0.16', port=5010,debug=True, use_reloader=False)\n","repo_name":"cyber-evangelists/ECDH-ABE-Project","sub_path":"flask_abe/abe.py","file_name":"abe.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18360572553","text":"import shutil\n\nimport numpy as np\nimport pytest\n\nfrom merlin.core.dispatch import make_df\nfrom merlin.schema import ColumnSchema, Schema\nfrom merlin.systems.dag.ensemble import Ensemble\nfrom merlin.systems.dag.ops.faiss import QueryFaiss, setup_faiss\n\nTRITON_SERVER_PATH = shutil.which(\"tritonserver\")\npytest.importorskip(\"merlin.dataloader.tf_utils\")\nfrom merlin.dataloader.tf_utils import configure_tensorflow # noqa\n\ntritonclient = pytest.importorskip(\"tritonclient\")\ngrpcclient = pytest.importorskip(\"tritonclient.grpc\")\n\nfrom merlin.systems.triton.utils import run_ensemble_on_tritonserver # noqa\n\nconfigure_tensorflow()\n\nimport tensorflow as tf # noqa\n\nfrom merlin.systems.dag.ops.tensorflow import PredictTensorflow # noqa\nfrom merlin.systems.dag.runtimes.triton import TritonExecutorRuntime # noqa\nfrom merlin.table import TensorTable # noqa\n\n\n@pytest.mark.skipif(not TRITON_SERVER_PATH, reason=\"triton server not found\")\ndef test_faiss_in_triton_executor_model(tmpdir):\n # Simulate a user vector with a TF model\n model = tf.keras.models.Sequential(\n [\n tf.keras.Input(name=\"user_id\", dtype=tf.int32, shape=(1,)),\n tf.keras.layers.Dense(128, activation=\"relu\", name=\"output\"),\n ]\n )\n\n model.compile(\n optimizer=\"adam\",\n loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.metrics.SparseCategoricalAccuracy()],\n )\n\n faiss_path = tmpdir / \"faiss.index\"\n item_ids = np.arange(0, 100)\n item_embeddings = np.random.rand(100, 128)\n # cannot turn a list column in cudf directly to numpy so must delegate to pandas as bridge\n df = make_df({\"item_id\": item_ids, \"embedding\": 
item_embeddings.tolist()}, device=\"cpu\")\n setup_faiss(df, faiss_path)\n\n request_schema = Schema(\n [\n ColumnSchema(\"user_id\", dtype=np.int32, dims=(None, 1)),\n ]\n )\n\n request_data = TensorTable(\n {\n \"user_id\": np.array([[1]], dtype=np.int32),\n }\n )\n\n retrieval = [\"user_id\"] >> PredictTensorflow(model) >> QueryFaiss(faiss_path)\n\n ensemble = Ensemble(retrieval, request_schema)\n ensemble_config, _ = ensemble.export(tmpdir, runtime=TritonExecutorRuntime())\n\n response = run_ensemble_on_tritonserver(\n tmpdir,\n ensemble.input_schema,\n request_data,\n ensemble.output_schema.column_names,\n ensemble_config.name,\n )\n\n assert response is not None\n assert len(response[\"candidate_ids\"]) == 10\n","repo_name":"NVIDIA-Merlin/systems","sub_path":"tests/unit/systems/ops/faiss/test_executor.py","file_name":"test_executor.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"67"} +{"seq_id":"16836655268","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 31 20:27:12 2019\r\n\r\n@author: Zakaria Dahi\r\n@Institution: Zakaria Dahi\r\n\r\nthis file performs the initialisation in simulation mode \r\n\r\nThe initialisation of a Quantum Circuit of \"qbits\" Qbits and executed \"shots\" times\r\nqbits: the # of qbits to be generated\r\nshots: the number of individuals in the population\r\n\"\"\"\r\n\r\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, Aer, execute\r\n \r\ndef initialisation_Py_Func(shots,qbits):\r\n # the integer when coming from matlab is considered float: need to convert it to \"int\"\r\n shots = int(shots)\r\n qbits = int(qbits)\r\n #create a quantum circuit (score) with a quantum register and a classical register \r\n q = QuantumRegister(qbits)\r\n c = ClassicalRegister(qbits)\r\n qc = QuantumCircuit(q, c)\r\n #Apply hadamard gate and measurement on each qbit in the quantum register\r\n #1/sqrt(2) initialisation: 1/2 probability to get 0 or 1\r\n # create the superposition of states\r\n for i in range(qbits):\r\n #hadamard gate: create superposition\r\n qc.h(q[i])\r\n # measurement\r\n qc.measure(q[i],c[i])\r\n # enable the QASM simulator \r\n backend = Aer.get_backend('qasm_simulator')\r\n # execute the quantum circuit\r\n job_sim = execute(qc, backend, shots=shots)\r\n sim_result = job_sim.result()\r\n # get the results of the simulation\r\n results_m = sim_result.get_counts(qc)
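\r\n # e.g. with qbits=3 and shots=100, results_m is a dict of bitstring counts,\r\n # roughly uniform over the 2**3 states, like {'000': 12, ..., '111': 14} (illustrative values)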
\r\n # return the result of the simulation\r\n return(results_m);","repo_name":"Zakaria-Dahi/Quantum_Solver_For_User_Tracking","sub_path":"Proposal/IBMQ-cQGA/Using_QASM_simulator/initialisation.py","file_name":"initialisation.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4471631663","text":"import praw\r\nimport random\r\n\r\nreddit = praw.Reddit(client_id=\"Client ID\",\r\n client_secret=\"Client Secret\",\r\n user_agent=\"User Agent\")\r\n\r\nprint(f\"Logged in as {reddit.user.me()}\")\r\n\r\n\r\ndef meme(subreddit):\r\n subreddit = reddit.subreddit(f\"{subreddit}\")\r\n memes = []\r\n for submission in subreddit.hot(limit=50):\r\n if not submission.stickied:\r\n memes.append(submission)\r\n\r\n choice = random.choice(range(len(memes)))\r\n\r\n title = memes[choice].title\r\n url = memes[choice].url\r\n\r\n result = {\r\n 'title': title,\r\n 'url': url,\r\n }\r\n return result\n","repo_name":"arunkumarp1947/doremon","sub_path":"pyfiles/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35512233130","text":"#!/usr/bin/env python\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\nimport sys\nVersion = \"0.3\"\n\nif float(\"%d.%d\" % sys.version_info[:2]) < 2.6:\n sys.stderr.write(\"Your Python version %d.%d.%d is not supported.\\n\" % sys.version_info[:3])\n sys.stderr.write(\"pyhantom requires Python 2.6 or newer.\\n\")\n sys.exit(1)\n\nsetup(name='pyhantom',\n version=Version,\n description='AWS Autoscale clone-ish',\n author='Nimbus Development Team',\n author_email='workspace-user@globus.org',\n url='http://www.nimbusproject.org/',\n packages=find_packages(),\n keywords = \"Nimbus auto scale\",\n long_description=\"\"\"Some other time\"\"\",\n license=\"Apache2\",\n install_requires = [\"simplejson == 2.3.2\", \"boto >= 2.6\", \"dashi\", \"ceiclient\", \"sqlalchemy == 0.7.6\", \"wsgiref\", \"webob\", \"cherrypy >= 3.2\", \"mysql-python == 1.2.3\"],\n entry_points = {\n 'console_scripts': [\n 'phantomwsgiref = pyhantom.execs.simpleref:main',\n 'phantomcherrypy = pyhantom.execs.cherrypy_exe:main',\n 'phantomparse = pyhantom.execs.parse_launch_plan:main',\n 'phantomclient = pyhantom.execs.client:main'\n\n ],},\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Topic :: System :: Clustering',\n 'Topic :: System :: Distributed Computing',\n ],\n )\n","repo_name":"nimbusproject/Phantom","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"38272883220","text":"# replace line by line\n# opening with 'w' creates the file if it is missing and truncates it if it exists\nimport os\nwith open('a.txt','r') as read_f,open('.a.txt.swap','w') as write_f:\n for line in read_f:\n line=line.replace('mac','linux')\n write_f.write(line)\n\nos.remove('a.txt')\nos.rename('.a.txt.swap','a.txt')","repo_name":"justzzk/python_camp","sub_path":"day5/replace2.py","file_name":"replace2.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10879620538","text":"from flask import Flask, render_template, Response\r\nimport pyautogui\r\nimport cv2\r\nimport numpy as np\r\n\r\napp = Flask(__name__)\r\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\r\nwidth, height = pyautogui.size()\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\ndef generate():\r\n while True:\r\n img = pyautogui.screenshot()\r\n frame = np.array(img)\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n ret, jpeg = cv2.imencode('.jpg', frame)\r\n if ret:\r\n yield (b'--frame\\r\\n'\r\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + jpeg.tobytes() + b'\\r\\n\\r\\n')\r\n\r\n@app.route('/video_feed')\r\ndef video_feed():\r\n return Response(generate(),\r\n mimetype='multipart/x-mixed-replace; boundary=frame')\r\n\r\nif 
__name__ == '__main__':\r\n app.run(debug=False,host='0.0.0.0')\r\n","repo_name":"Unity11991/Flask-Screen-Streaming-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26300890273","text":"from Tkinter import *\n\ndef tmp():\n root = Tk()\n\n root.mainloop()\n\ndef tk_grid_layout():\n root = Tk()\n\n lb1 = Label(root, text='Name')\n lb2 = Label(root, text='Password')\n entry1 = Entry(root)\n entry2 = Entry(root)\n\n root.mainloop()\n\ndef tk_Frames():\n root = Tk()\n\n topFrame = Frame(root)\n topFrame.pack()\n\n bottomFrame = Frame(root)\n bottomFrame.pack(side=BOTTOM)\n\n btn1 = Button(topFrame, text=\"One\", bg='green', fg='white')\n btn2 = Button(topFrame, text=\"Two\", bg='blue', fg='white')\n btn3 = Button(topFrame, text=\"Three\", bg='green', fg='white')\n btn4 = Button(bottomFrame, text=\"Four\", bg='blue', fg='white')\n btn5 = Button(bottomFrame, text=\"Five\", bg='green', fg='white')\n btn6 = Button(bottomFrame, text=\"Six\", bg='blue', fg='white')\n\n btn1.pack(side=LEFT)\n btn2.pack(side=LEFT)\n btn3.pack(side=LEFT)\n btn4.pack(side=LEFT)\n btn5.pack(side=BOTTOM)\n btn6.pack(side=BOTTOM)\n\n root.mainloop()\n\ndef tk_Labels():\n root = Tk()\n\n lb1 = Label(root, text=\"Hello World!\", bg=\"green\", fg='white')\n lb1.pack(fill=X)\n\n lb2 = Label(root, text=\"Welcome to Tkinter\", bg=\"blue\", fg='yellow')\n lb2.pack(fill=X)\n\n root.mainloop()\n","repo_name":"losacii/ubuntuMemo","sub_path":"pycode/mod/tkMod.py","file_name":"tkMod.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29861836882","text":"n = int(input())\npeople = list(map(int, input().split()))\npeople.sort()\n\nsum = 0\ntemp = 0\nfor i in people:\n temp += i\n sum += temp\n # print(f'sum:{sum} i:{i} temp:{temp} ')\n\nprint(sum)\n","repo_name":"Sora-CodingTestStudy/our-code","sub_path":"greedy/jungrye/070_ATM.py","file_name":"070_ATM.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16326859779","text":"# Optimization of the denoising autoencoder by genetic algorithm for anomaly detection (DAEGA)\n\nfrom cgi import print_directory\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score,roc_curve\n\nfrom Dateloader import *\n\nnp.random.seed(0)\n\ninput_size = 10\nhidden_size = 5\nnum_epoch = 1\n\n# number of generations\nnum_generations = 100\n# chromosome length\nchr_size = 2*input_size + input_size*hidden_size*2 + hidden_size\n# number of chromosomes per population\nchr_per_pop = 100 \n# population size: (number of chromosomes, chromosome length).\npopulation_size = (chr_per_pop, chr_size)\n# initialize the population.\nnew_population = np.random.uniform(low=-1.0, high=1.0, size=population_size)\n# number of elite chromosomes kept for mating\nnum_parents_mating = 30\n\n# probability that an offspring gene comes from the first parent\np = 0.5\n# max number of mutated genes per chromosome (must not exceed chr_size)\nnum_mutation = int(chr_size * 0.1)\n\n\n# Denoising autoencoder\n# input: a chromosome and the input x\n# output: y, the reconstruction of x\ndef DAE(chromosome, input):\n\n noise = chromosome[0 : input_size]\n inputAddNoise = input + noise\n\n w_index1 = input_size + input_size*hidden_size\n w_index2 = w_index1 + hidden_size*input_size\n b_index1 = w_index2 + hidden_size\n b_index2 = b_index1 + input_size\n\n w_in_to_hidden = chromosome[input_size : w_index1].reshape(hidden_size, input_size)\n w_hidden_to_out = chromosome[w_index1 : w_index2].reshape(input_size, hidden_size)
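\n # Worked layout example for input_size=10, hidden_size=5 (chr_size = 20 + 100 + 5 = 125):\n # noise = chromosome[0:10], w_in = chromosome[10:60], w_out = chromosome[60:110],\n # b_hidden = chromosome[110:115], b_out = chromosome[115:125]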
\n b_in_to_hidden = chromosome[w_index2 : b_index1]\n b_hidden_to_out = chromosome[b_index1 : b_index2]\n\n # print(\"inputAddNoise: \", inputAddNoise)\n # print(\"w_in_to_hidden: \", w_in_to_hidden)\n # print(\"w_hidden_to_out: \", w_hidden_to_out)\n # print(\"b_in_to_hidden: \", b_in_to_hidden)\n # print(\"b_hidden_to_out: \", b_hidden_to_out)\n\n hidden = relu(np.dot(w_in_to_hidden, inputAddNoise) + b_in_to_hidden)\n # print(\"w_in_to_hidden*inputAddNoise: \", np.dot(w_in_to_hidden, inputAddNoise))\n # print(\"hidden: \",hidden)\n output = relu(np.dot(w_hidden_to_out, hidden) + b_hidden_to_out)\n\n return output\n\ndef relu(array):\n return np.maximum(array, 0)\n\n# Compute fitness: the distance between the input x and its reconstruction y; the smaller the value, the better the chromosome\ndef col_pop_fitness(x, pop):\n fitness = []\n for i in range(chr_per_pop):\n chr = pop[i, :] # chromosome\n y = DAE(chr, x)\n dist = np.sqrt(np.sum(np.square(x - y)))\n fitness.append(dist)\n fitness = np.array(fitness)\n return fitness\n\ndef select_mating_pool(pop, fitness, num_parents):\n # Selecting the best chromosomes in the current generation as parents for producing the offspring of the next generation.\n parents = np.empty((num_parents, chr_size))\n for parent_num in range(num_parents):\n min_fitness_idx = np.where(fitness == np.min(fitness))\n min_fitness_idx = min_fitness_idx[0][0]\n parents[parent_num, :] = pop[min_fitness_idx, :]\n fitness[min_fitness_idx] = float('inf')\n return parents\n\n# Offspring size for the elites' crossover: (total chromosomes - number of elites, chromosome length)\ndef crossover(parents, offspring_size):\n offspring = np.empty(offspring_size)\n\n for k in range(offspring_size[0]):\n # Index of the first parent to mate.\n parent1_idx = np.random.randint(0, parents.shape[0])\n # Index of the second parent to mate\n parent2_idx = np.random.randint(0, parents.shape[0])\n\n is_from_a = np.random.random(chr_size) < p\n offspring[k, :] = np.where(is_from_a, parents[parent1_idx, :], parents[parent2_idx, :])\n\n return offspring\n\ndef mutation(offspring_crossover):\n # Mutation changes some genes in each offspring randomly.\n for idx in range(offspring_crossover.shape[0]):\n random_value = np.random.uniform(-1.0, 1.0, num_mutation)\n offspring_crossover[idx, np.random.randint(0, chr_size, num_mutation)] = random_value\n return offspring_crossover
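\n\n# Note on mutation(): np.random.randint samples gene indices with replacement, so duplicate\n# indices can occur and at most (not always exactly) num_mutation genes change per chromosome.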
\n\ndef train(date):\n\n # batch_avg_fitness_list = []\n\n for batch in range(num_epoch):\n\n # batch_fitness = []\n\n for idx, x in enumerate(date):\n\n # compute fitness\n fitness = col_pop_fitness(x, new_population)\n\n # select the chromosomes with the smallest fitness as elites.\n parents = select_mating_pool(new_population, fitness, num_parents_mating)\n\n # produce the next generation by crossover.\n offspring_crossover = crossover(parents, offspring_size=(population_size[0]-parents.shape[0], chr_size))\n\n # mutate genes.\n offspring_mutation = mutation(offspring_crossover)\n\n # assemble the next generation.\n new_population[0:parents.shape[0], :] = parents\n new_population[parents.shape[0]:, :] = offspring_mutation\n\n fitness = col_pop_fitness(x, new_population)\n # batch_fitness.append(np.min(fitness))\n\n if idx%5000 == 0: \n print(\"Generation : \", idx)\n print(\"Best result : \", np.min(fitness))\n\n # Get the best solution after all generations have finished\n # At first, the fitness is calculated for each solution in the final generation\n \n fitness = col_pop_fitness(x, new_population)\n # batch_avg_fitness = np.mean(batch_fitness)\n # batch_avg_fitness_list.append(batch_avg_fitness)\n\n # Then return the index of the solution corresponding to the best fitness\n best_match_idx = np.where(fitness == np.min(fitness))[0][0]\n best_chromosome = new_population[best_match_idx, :]\n # print(\"Best_match_idx : \", best_match_idx)\n print(\"Best solution : \", best_chromosome)\n print(\"Best solution fitness : \", fitness[best_match_idx])\n\n # plt.plot(range(len(batch_avg_fitness_list)), batch_avg_fitness_list)\n # plt.show()\n return best_chromosome\n\ndef predict(traindate, test_date):\n\n best_chromosome = train(date=traindate)\n\n test_date_after = np.array([[]]*input_size).T\n # print(test_date_after.shape)\n for i in test_date:\n new_i = DAE(best_chromosome, i)\n new_i = np.array(new_i).reshape(1,10)\n test_date_after = np.vstack([test_date_after, new_i]) \n\n print(\"test_date_after.shape: \", test_date_after.shape)\n\n return test_date_after\n\ndef distance(x, y):\n distance = []\n for i, j in zip(x, y):\n dist = np.sqrt(np.sum(np.square(i - j)))\n distance.append(dist)\n return distance\n\ndef datetrans(distance_arr, label_thre, threshold):\n distance_arr = np.array(distance_arr)\n label = np.array([0]*len(distance_arr))\n y = np.array([0]*len(distance_arr))\n\n label[distance_arr < label_thre] = 1\n y[distance_arr < threshold] = 1\n return label, y\n\ndef getmatrix(x, y):\n acc = accuracy_score(x,y)\n f1 = f1_score(x, y)\n pre = precision_score(x, y)\n print(\"accuracy is: \", acc)\n print(\"f1 score is: \", f1)\n print(\"precision is: \", pre)\n\nif __name__ == '__main__':\n # District 1 raw data - before the time-window split\n testdate_huifu1_1 = redate_local(testdate_1_1, 10, num_user1)\n testdate_huifu1_2 = redate_local(testdate_1_2, 10, num_user1)\n testdate_huifu1_3 = redate_local(testdate_1_3, 10, num_user1)\n testdate_huifu1_4 = redate_local(testdate_1_4, 10, num_user1)\n # District 2 raw data - before the time-window split\n testdate_huifu2_1 = redate_local(testdate_2_1, 10, num_user2)\n testdate_huifu2_2 = redate_local(testdate_2_2, 10, num_user2)\n testdate_huifu2_3 = redate_local(testdate_2_3, 10, num_user2)\n testdate_huifu2_4 = redate_local(testdate_2_4, 10, num_user2)\n \n # District 1 predictions - after the time-window split\n test_date_after1_1 = predict(traindate_1_1, testdate_1_1)\n test_date_after1_2 = predict(traindate_1_2, testdate_1_2)\n test_date_after1_3 = predict(traindate_1_3, testdate_1_3)\n test_date_after1_4 = predict(traindate_1_4, testdate_1_4)\n # District 2 predictions - after the time-window split\n test_date_after2_1 = predict(traindate_2_1, testdate_2_1)\n test_date_after2_2 = predict(traindate_2_2, testdate_2_2)\n test_date_after2_3 = predict(traindate_2_3, testdate_2_3)\n test_date_after2_4 = predict(traindate_2_4, testdate_2_4)\n \n # District 1 predictions - restored to the pre-split format\n testdate_predict1_1 = redate_local(test_date_after1_1, 10, num_user1)\n testdate_predict1_2 = redate_local(test_date_after1_2, 10, num_user1)\n testdate_predict1_3 = redate_local(test_date_after1_3, 10, num_user1)\n testdate_predict1_4 = redate_local(test_date_after1_4, 10, num_user1)\n\n # District 2 predictions - restored to the pre-split format\n testdate_predict2_1 = redate_local(test_date_after2_1, 10, num_user2)\n testdate_predict2_2 = redate_local(test_date_after2_2, 10, num_user2)\n testdate_predict2_3 = redate_local(test_date_after2_3, 10, num_user2)\n testdate_predict2_4 = redate_local(test_date_after2_4, 10, num_user2)\n\n # District 1 - merge the columns back into the pre-dateprocess format (n, 4)\n test1_origin_col1 = testdate_huifu1_1.reshape(-1,1)\n test1_origin_col2 = testdate_huifu1_2.reshape(-1,1)\n test1_origin_col3 = testdate_huifu1_3.reshape(-1,1)\n test1_origin_col4 = testdate_huifu1_4.reshape(-1,1)\n\n test1_origin = np.hstack([test1_origin_col1, test1_origin_col2, test1_origin_col3, test1_origin_col4])\n print(\"test1_origin.shape\", test1_origin.shape)\n\n test1_predict_col1 = 
testdate_predict1_1.reshape(-1,1)\n test1_predict_col2 = testdate_predict1_2.reshape(-1,1)\n test1_predict_col3 = testdate_predict1_3.reshape(-1,1)\n test1_predict_col4 = testdate_predict1_4.reshape(-1,1)\n\n test1_predict = np.hstack([test1_predict_col1, test1_predict_col2, test1_predict_col3, test1_predict_col4])\n print(\"test1_predict.shape\", test1_predict.shape)\n\n # District 2 - merge the columns back into the pre-dateprocess format (n, 4)\n test2_origin_col1 = testdate_huifu2_1.reshape(-1,1)\n test2_origin_col2 = testdate_huifu2_2.reshape(-1,1)\n test2_origin_col3 = testdate_huifu2_3.reshape(-1,1)\n test2_origin_col4 = testdate_huifu2_4.reshape(-1,1)\n\n test2_origin = np.hstack([test2_origin_col1, test2_origin_col2, test2_origin_col3, test2_origin_col4])\n print(\"test2_origin.shape\", test2_origin.shape)\n\n test2_predict_col1 = testdate_predict2_1.reshape(-1,1)\n test2_predict_col2 = testdate_predict2_2.reshape(-1,1)\n test2_predict_col3 = testdate_predict2_3.reshape(-1,1)\n test2_predict_col4 = testdate_predict2_4.reshape(-1,1)\n\n test2_predict = np.hstack([test2_predict_col1, test2_predict_col2, test2_predict_col3, test2_predict_col4])\n print(\"test2_predict.shape\", test2_predict.shape)\n\n distance1 = distance(test1_origin, test1_predict)\n distance2 = distance(test2_origin, test2_predict)\n\n label1, y1 = datetrans(distance1, 1.4, 5)\n label2, y2 = datetrans(distance2, 0.55, 5)\n \n getmatrix(label1, y1)\n getmatrix(label2, y2) ","repo_name":"re-begin/DAEGA","sub_path":"Optimize_DAE_Using_GA.py","file_name":"Optimize_DAE_Using_GA.py","file_ext":"py","file_size_in_byte":10939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28385640196","text":"\"\"\"\nSemantic theme/colors.\n\nThe basic idea is that when you want a color/attribute you pass a list of possible\ntheme variable names. For example, a map application could do something like\n\nwaterC = theme.color(\"myApp_lake\",\"lake\",\"water\",\"blue\") or (0.5,0.5,0.8,1)\n\"\"\"\n\nimport functools\n\nclass ColorDict(dict):\n def color(self, *args):\n \"\"\"Supply it with a list of color names, it will\n return the first color that matches your name, trying each\n fallback in order. If it can't find any color, returns None.\n \"\"\"\n return next((self[item] for item in args if item in self), None)\n\ntheme = ColorDict(**{\n 'primary': (0.0,0.1,0.7,1),\n 'secondary': (0.7,0.1,0,1),\n 'success': (0.4,0.9,0.2,1),\n 'warning': (0.85,0.8,0.2,1),\n 'danger': (0.9,0.2,0,1),\n 'info': (0.0,0.75,0.9,1),\n})\n#Have opinions on this default color scheme? I don't! 
Do a pull request.\n","repo_name":"kuzen/ImageViewer","sub_path":"test_image/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"33147591251","text":"# Chiamaka Ugbaja\r\n# 1772427\r\nclass FoodItem:\r\n\r\n # Defined constructor with parameters to initialize\r\n # attributes (name, fat, carbs, protein)\r\n def __init__(self, name=\"None\", fat=0.0, carbs=0.0, protein=0.0):\r\n self.name = name\r\n self.fat = fat\r\n self.carbs = carbs\r\n self.protein = protein\r\n\r\n def get_calories(self, num_servings): # calculated total calories based on formula\r\n # Calorie formula\r\n calories = ((self.fat * 9) + (self.carbs * 4) + (self.protein * 4)) * num_servings\r\n return calories\r\n\r\n def print_info(self): # printed nutritional information with user input\r\n print('Nutritional information per serving of {}:'.format(self.name))\r\n print(' Fat: {:.2f} g'.format(self.fat))\r\n print(' Carbohydrates: {:.2f} g'.format(self.carbs))\r\n print(' Protein: {:.2f} g'.format(self.protein))\r\n\r\n\r\nif __name__ == \"__main__\": # executes first to fill variables and run before class FoodItem\r\n first_food_item = FoodItem()\r\n\r\n item_name = input()\r\n amount_fat = float(input())\r\n amount_carbs = float(input())\r\n amount_protein = float(input())\r\n\r\n second_food_item = FoodItem(item_name, amount_fat, amount_carbs, amount_protein)\r\n num_servings = float(input())\r\n\r\n first_food_item.print_info()\r\n print('Number of calories for {:.2f} serving(s): {:.2f}'.format(num_servings,\r\n first_food_item.get_calories(num_servings)))\r\n\r\n print()\r\n\r\n second_food_item.print_info()\r\n print('Number of calories for {:.2f} serving(s): {:.2f}'.format(num_servings,\r\n second_food_item.get_calories(num_servings)))\r\n","repo_name":"ChiUgb/CIS2348-15664","sub_path":"Homework 3/hw10.11.py","file_name":"hw10.11.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12723386893","text":"from collections import Counter\nfrom typing import Callable, Dict, Any\nfrom dataclasses import dataclass\n\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.tuning_job_state import (\n TuningJobState,\n)\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common import (\n TrialEvaluations,\n PendingEvaluation,\n INTERNAL_METRIC_NAME,\n)\nfrom syne_tune.optimizer.schedulers.searchers.utils.hp_ranges import (\n HyperparameterRanges,\n)\n\n\n@dataclass\nclass MapReward:\n forward: Callable[[float], float]\n reverse: Callable[[float], float]\n\n def __call__(self, x: float) -> float:\n return self.forward(x)\n\n\ndef map_reward_const_minus_x(const=1.0) -> MapReward:\n \"\"\"\n Factory for map_reward argument in GPMultiFidelitySearcher.\n \"\"\"\n\n def const_minus_x(x):\n return const - x\n\n return MapReward(forward=const_minus_x, reverse=const_minus_x)\n\n\nSUPPORTED_INITIAL_SCORING = {\"thompson_indep\", \"acq_func\"}\n\n\nDEFAULT_INITIAL_SCORING = \"thompson_indep\"\n\n\ndef encode_state(state: TuningJobState) -> Dict[str, Any]:\n trials_evaluations = [\n {\"trial_id\": x.trial_id, \"metrics\": x.metrics} for x in state.trials_evaluations\n ]\n pending_evaluations = [\n {\"trial_id\": x.trial_id, \"resource\": x.resource}\n if x.resource is not None\n else {\"trial_id\": x.trial_id}\n for x in state.pending_evaluations\n ]\n enc_state = {\n 
\"config_for_trial\": state.config_for_trial,\n \"trials_evaluations\": trials_evaluations,\n \"failed_trials\": state.failed_trials,\n \"pending_evaluations\": pending_evaluations,\n }\n return enc_state\n\n\ndef decode_state(\n enc_state: Dict[str, Any], hp_ranges: HyperparameterRanges\n) -> TuningJobState:\n trials_evaluations = [\n TrialEvaluations(**x) for x in enc_state[\"trials_evaluations\"]\n ]\n pending_evaluations = [\n PendingEvaluation(**x) for x in enc_state[\"pending_evaluations\"]\n ]\n return TuningJobState(\n hp_ranges=hp_ranges,\n config_for_trial=enc_state[\"config_for_trial\"],\n trials_evaluations=trials_evaluations,\n failed_trials=enc_state[\"failed_trials\"],\n pending_evaluations=pending_evaluations,\n )\n\n\ndef _get_trial_id(\n hp_ranges: HyperparameterRanges,\n config: Dict[str, Any],\n config_for_trial: Dict[str, Any],\n trial_for_config: Dict[str, Any],\n) -> str:\n match_str = hp_ranges.config_to_match_string(config, skip_last=True)\n trial_id = trial_for_config.get(match_str)\n if trial_id is None:\n trial_id = str(len(trial_for_config))\n trial_for_config[match_str] = trial_id\n config_for_trial[trial_id] = config\n return trial_id\n\n\ndef decode_state_from_old_encoding(\n enc_state: Dict[str, Any], hp_ranges: HyperparameterRanges\n) -> TuningJobState:\n \"\"\"\n Decodes ``TuningJobState`` from encoding done for the old definition of\n ``TuningJobState``. Code maintained for backwards compatibility.\n\n Note: Since the old ``TuningJobState`` did not contain ``trial_id``, we need\n to make them up here. We assign these IDs in the order\n ``candidate_evaluations``, ``failed_candidates``, ``pending_candidates``,\n matching for duplicates.\n\n :param enc_state:\n :param hp_ranges:\n :return:\n \"\"\"\n config_for_trial = dict()\n trial_for_config = dict()\n trials_evaluations = []\n for cand_eval in enc_state[\"candidate_evaluations\"]:\n config = cand_eval[\"candidate\"]\n trial_id = _get_trial_id(hp_ranges, config, config_for_trial, trial_for_config)\n trials_evaluations.append(TrialEvaluations(trial_id, cand_eval[\"metrics\"]))\n failed_trials = []\n for failed_cand in enc_state[\"failed_candidates\"]:\n failed_trials.append(\n _get_trial_id(hp_ranges, failed_cand, config_for_trial, trial_for_config)\n )\n pending_evaluations = []\n resource_attr_name = hp_ranges.name_last_pos\n for pending_cand in enc_state[\"pending_candidates\"]:\n resource = None\n if resource_attr_name is not None and resource_attr_name in pending_cand:\n # Extended config (multi-fidelity)\n resource = int(pending_cand[resource_attr_name])\n pending_cand = pending_cand.copy()\n del pending_cand[resource_attr_name]\n trial_id = _get_trial_id(\n hp_ranges, pending_cand, config_for_trial, trial_for_config\n )\n pending_evaluations.append(PendingEvaluation(trial_id, resource))\n return TuningJobState(\n hp_ranges=hp_ranges,\n config_for_trial=config_for_trial,\n trials_evaluations=trials_evaluations,\n failed_trials=failed_trials,\n pending_evaluations=pending_evaluations,\n )\n\n\nclass ResourceForAcquisitionMap:\n \"\"\"\n In order to use a standard acquisition function (like expected improvement)\n for multi-fidelity HPO, we need to decide at which ``r_acq`` we would like\n to evaluate the AF, w.r.t. 
the posterior distribution over ``f(x, r=r_acq)``.\n This decision can depend on the current state.\n \"\"\"\n\n def __call__(self, state: TuningJobState, **kwargs) -> int:\n raise NotImplementedError()\n\n\nclass ResourceForAcquisitionBOHB(ResourceForAcquisitionMap):\n \"\"\"\n Implements a heuristic proposed in the BOHB paper: ``r_acq`` is the\n largest ``r`` such that we have at least ``threshold`` observations at\n ``r``. If there are less than ``threshold`` observations at all levels,\n the smallest level is returned.\n \"\"\"\n\n def __init__(self, threshold: int, active_metric: str = INTERNAL_METRIC_NAME):\n self.threshold = threshold\n self.active_metric = active_metric\n\n def __call__(self, state: TuningJobState, **kwargs) -> int:\n assert (\n state.num_observed_cases(self.active_metric) > 0\n ), f\"state must have data for metric {self.active_metric}\"\n all_resources = []\n for cand_eval in state.trials_evaluations:\n all_resources += [\n int(r) for r in cand_eval.metrics[self.active_metric].keys()\n ]\n histogram = Counter(all_resources)\n return self._max_at_least_threshold(histogram)\n\n def _max_at_least_threshold(self, counter: Counter) -> int:\n \"\"\"\n Get largest key of ``counter`` whose value is at least ``threshold``.\n\n :param counter: Dict[str, Any] with keys that support comparison operators\n :return: largest key of ``counter``\n \"\"\"\n return max(\n filter(lambda r: counter[r] >= self.threshold, counter.keys()),\n default=min(counter.keys()),\n )\n\n\nclass ResourceForAcquisitionFirstMilestone(ResourceForAcquisitionMap):\n \"\"\"\n Here, ``r_acq`` is the smallest rung level to be attained by a config\n started from scratch.\n \"\"\"\n\n def __call__(self, state: TuningJobState, **kwargs) -> int:\n assert \"milestone\" in kwargs, (\n \"Need the first milestone to be attained by the new config \"\n + \"passed as kwargs['milestone']. 
Use a scheduler which does \"\n + \"that (e.g., HyperbandScheduler)\"\n )\n return kwargs[\"milestone\"]\n\n\nclass ResourceForAcquisitionFinal(ResourceForAcquisitionMap):\n \"\"\"\n Here, ``r_acq = r_max`` is the largest resource level.\n \"\"\"\n\n def __init__(self, r_max: int):\n self._r_max = r_max\n\n def __call__(self, state: TuningJobState, **kwargs) -> int:\n return self._r_max\n\n\nSUPPORTED_RESOURCE_FOR_ACQUISITION = {\"bohb\", \"first\", \"final\"}\n\n\ndef resource_for_acquisition_factory(\n kwargs: Dict[str, Any], hp_ranges: HyperparameterRanges\n) -> ResourceForAcquisitionMap:\n resource_acq = kwargs.get(\"resource_acq\", \"bohb\")\n assert (\n resource_acq in SUPPORTED_RESOURCE_FOR_ACQUISITION\n ), f\"resource_acq = {resource_acq} not supported, must be in \" + str(\n SUPPORTED_RESOURCE_FOR_ACQUISITION\n )\n if resource_acq == \"bohb\":\n threshold = kwargs.get(\"resource_acq_bohb_threshold\", hp_ranges.ndarray_size)\n resource_for_acquisition = ResourceForAcquisitionBOHB(threshold=threshold)\n elif resource_acq == \"first\":\n assert resource_acq == \"first\", \"resource_acq must be 'bohb' or 'first'\"\n resource_for_acquisition = ResourceForAcquisitionFirstMilestone()\n else:\n r_max = kwargs[\"max_epochs\"]\n resource_for_acquisition = ResourceForAcquisitionFinal(r_max=r_max)\n return resource_for_acquisition\n","repo_name":"awslabs/syne-tune","sub_path":"syne_tune/optimizer/schedulers/searchers/gp_searcher_utils.py","file_name":"gp_searcher_utils.py","file_ext":"py","file_size_in_byte":8445,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"4613752429","text":"from setuptools import setup\n\ndescription = \"Simplified tool for re-ranking images using CLIP given a text\"\n\nsetup(\n name=\"clip_rerank\",\n version=\"0.1.0\",\n author=\"Mehdi Cherti\",\n description=description,\n license=\"MIT\",\n url=\"https://github.com/mehdidc/clip_rerank\",\n zip_safe=False, # the package can run out of an .egg file\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n scripts=['clip_rerank'],\n include_package_data=True,\n install_requires=['clize'],\n)\n\n","repo_name":"mehdidc/clip_rerank","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"38569504072","text":"import requests\nimport json\n\n# import urllib.parse as codec\n\n\nclass BusStation:\n def __init__(self):\n pass\n\n @classmethod\n def search_station(cls, searchText):\n wincId = \"\"\n stid_url = (\n \"https://businfo.daegu.go.kr:8095/dbms_web_api/bs/search?searchText=\"\n + searchText\n + \"&wincId=\"\n + wincId\n )\n json_txt = requests.get(stid_url).text\n stations_json = json.loads(json_txt)[\"body\"]\n station_list = []\n for station in stations_json:\n station_ = {}\n station_[\"bsNm\"] = station[\"bsNm\"]\n station_[\"bsId\"] = station[\"bsId\"]\n station_[\"routeList\"] = station[\"routeList\"]\n\n station_list.append(station_)\n\n # for i in range(len(stations_json)):\n # stations_json[i]['bsNm'] = codec.unquote(stations_json[i]['bsNm'])\n\n return station_list\n\n 
@classmethod\n def get_bus_info(cls, stid):\n station_url = (\n \"https://businfo.daegu.go.kr:8095/dbms_web_api/realtime/arr2/\" + stid\n )\n\n bus_txt = requests.get(station_url).text\n bus_info = json.loads(bus_txt)[\"body\"][\"list\"]\n\n bus_list = []\n for bus in bus_info:\n bus_ = {}\n bus_[\"routeNo\"] = bus[\"routeNo\"]\n bus_[\"arrState\"] = []\n for arr in bus[\"arrList\"]:\n bus_[\"arrState\"].append(arr[\"arrState\"])\n\n bus_list.append(bus_)\n\n return bus_list\n","repo_name":"blueberrysmoooothie/bus_station","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15245267265","text":"\"\"\"Utility functions to display the pose detection results.\"\"\"\n\nimport cv2\nimport numpy as np\nfrom tflite_support.task import processor\nimport settings #global variables\n\n_MARGIN = 10 # pixels\n_ROW_SIZE = 10 # pixels\n_FONT_SIZE = 1\n_FONT_THICKNESS = 1\n_TEXT_COLOR = (50, 205, 50) # rectangle color\n\ndef visualize(\n image: np.ndarray,\n detection_result: processor.DetectionResult,\n) -> np.ndarray:\n \"\"\"Draws bounding boxes on the input image and return it.\n\n Args:\n image: The input RGB image.\n detection_result: The list of all \"Detection\" entities to be visualize.\n\n Returns:\n Image with bounding boxes.\n \"\"\"\n settings.init()\n \n for detection in detection_result.detections:\n # Draw bounding_box\n bbox = detection.bounding_box\n start_point = bbox.origin_x, bbox.origin_y\n end_point = bbox.origin_x + bbox.width, bbox.origin_y + bbox.height\n cv2.rectangle(image, start_point, end_point, _TEXT_COLOR, 2)\n\n # Draw label and score\n category = detection.categories[0]\n category_name = category.category_name\n probability = round(category.score, 2)\n result_text = category_name + ' (' + str(probability) + ')'\n text_location = (_MARGIN + bbox.origin_x,\n _MARGIN + _ROW_SIZE + bbox.origin_y)\n cv2.putText(image, result_text, text_location, cv2.FONT_HERSHEY_PLAIN,\n _FONT_SIZE, _TEXT_COLOR, _FONT_THICKNESS)\n \n # Define global variables\n settings.name = category_name\n settings.precision = probability\n \n if settings.name == 'red_empty' or settings.name == 'white_empty' or settings.name == 'black_empty':\n settings.inner_part = 'false'\n else: settings.inner_part = 'true'\n \n if settings.name == 'red_empty' or settings.name == 'red_full':\n settings.color = 'red'\n if settings.name == 'white_empty' or settings.name == 'white_full':\n settings.color = 'white'\n if settings.name == 'black_empty' or settings.name == 'black_full':\n settings.color = 'black'\n \n return image\n","repo_name":"ikornev01/detail-detection","sub_path":"raspberry_pi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43104927462","text":"from fastapi import APIRouter, Depends\n\nfrom src.aws.api.container import create, delete, get\nfrom src.aws.domain.entities import aws as schemas_aws\n\nrouter = APIRouter()\n\n\n@router.post(\"/\", status_code=200)\nasync def create_new_aws_profile(\n create_aws_profile: schemas_aws.AwsAsumeProfile = Depends(\n create.create_new_aws_profile\n ),\n):\n return create_aws_profile\n\n\n@router.get(\"/\")\nasync def get_all_aws_accounts(\n get_aws_profile: schemas_aws.AwsAsumeProfile = Depends(get.get_all_aws_accounts),\n):\n return 
get_aws_profile\n\n\n@router.delete(\"/{aws_account_id}\")\nasync def delete_aws_account_by_id(\n delete_aws_profile: schemas_aws.AwsAsumeProfile = Depends(delete.aws_account_by_id),\n):\n return delete_aws_profile\n","repo_name":"D10S0VSkY-OSS/Stack-Lifecycle-Deployment","sub_path":"sld-api-backend/src/aws/api/v1/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"67"} +{"seq_id":"32140963666","text":"from random import choices\nfrom django.shortcuts import render\nfrom tasks.models import Task\nfrom django.shortcuts import redirect \n\n\ndef add_view(request):\n if request.method == 'GET':\n choices = Task.CHOICES\n return render(request,'task_create.html',context={\n 'choices': choices\n })\n task_data = {\n 'description': request.POST.get('description'),\n 'status': request.POST.get('status'),\n 'date_deadline': request.POST.get('date_deadline')\n }\n Task.objects.create(**task_data)\n return redirect('/')\n\ndef del_view(request):\n if request.method == 'GET':\n id_task = request.GET.get('id')\n task = Task.objects.get(pk=id_task)\n task.delete()\n return redirect('/')\n\n\ndef edit_view(request):\n if request.method == 'GET':\n id_task = request.GET.get('id')\n task = Task.objects.get(pk=id_task)\n choices = Task.CHOICES\n context = {\n \"task\": task,\n 'choices': choices\n }\n return render(request, 'task_edit.html', context=context)\n task = Task.objects.get(id=request.GET.get('id'))\n task.description = request.POST.get('description')\n task.status = request.POST.get('status')\n task.date_deadline = request.POST.get('date_deadline')\n task.save()\n \n return redirect('/')\n \n","repo_name":"sined862/p-7_hw52_yugai_denis","sub_path":"webproject/tasks/views/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41705858461","text":"# we need a dict of entity -> id for tracking ids and making sure no entity has the same id\n#e.g. 
\"COVID-19\" -> 0\nentity2id = {}\nidCounter = 0\n\nwith open(\"output.txt\") as file:\n line = file.readline()\n writeFile = open(\"output.cypher\", \"w+\")\n writeFile.write(\":begin\" + \"\\n\")\n writeFile.write(\"CREATE CONSTRAINT ON (node:`UNIQUE IMPORT LABEL`) ASSERT (node.`UNIQUE IMPORT ID`) IS UNIQUE;\" + \"\\n\")\n writeFile.write(\":commit\" + \"\\n\")\n while line:\n line = line.replace(\"\\n\", \"\")\n arr = line.split(\" \")\n pair = arr[0]\n relation = arr[1]\n #split pair into two separate entities and put both into entity2id\n entities = pair.split(\"/\")\n firstEntity = str(entities[0])\n secEntity = str(entities[1])\n firstEntityId = 0\n secEntityId = 0\n\n firstTimeFirst = False\n firstTimeSec = False\n \n if firstEntity in entity2id:\n firstEntity = entity2id[firstEntity]\n else:\n firstEntityId = idCounter\n entity2id[firstEntity] = firstEntityId\n idCounter += 1\n firstTimeFirst = True\n\n if secEntity in entity2id:\n secEntityId = entity2id[secEntity]\n else:\n secEntityId = idCounter\n entity2id[secEntity] = secEntityId\n idCounter += 1\n firstTimeSec = True\n\n if firstTimeFirst == True or firstTimeSec == True:\n creationArr = []\n writeFile.write(\":begin\" + \"\\n\")\n if firstTimeFirst == True:\n # newObj = {}\n # newObj[\"_id\"] = firstEntityId\n # newObj[\"properties\"] = {}\n # newObj[\"properties\"][\"name\"] = str(firstEntity)\n str1 = \"{_id:\" + str(firstEntityId) + \", properties:{name:\" + '\"' + str(firstEntity) + '\"' + \"}}\"\n creationArr.append(str1)\n # creationArr.append(newObj)\n if firstTimeSec == True:\n # newObj = {}\n # newObj[\"_id\"] = secEntityId\n # newObj[\"properties\"] = {}\n # newObj[\"properties\"][\"name\"] = str(secEntity)\n # creationArr.append(newObj)\n str2 = \"{_id:\" + str(secEntityId) + \", properties:{name:\" + '\"' + str(secEntity) + '\"' + \"}}\"\n creationArr.append(str2)\n creationArr = map(str, creationArr) \n line = \", \".join(creationArr)\n line = \"[\" + line + \"]\"\n writeFile.write(\"UNWIND \" + line + \" AS row\" + \"\\n\")\n writeFile.write(\"CREATE (n:`UNIQUE IMPORT LABEL`{`UNIQUE IMPORT ID`: row._id}) SET n += row.properties SET n:Entity;\" + \"\\n\")\n writeFile.write(\":commit\" + \"\\n\")\n\n \n # writeFile.write(\":begin\" + \"\\n\")\n\n # writeFile.write(\"UNWIND [{_id:\" + str(firstEntityId) + \", properties:{name:\" + '\"' + str(firstEntity)\n # + '\"' + \"}}, {_id:\" + str(secEntityId) + \", properties:{name:\" + '\"' + str(secEntity) + '\"' + \"}}] AS row\" + \"\\n\")\n # writeFile.write(\"CREATE (n:`UNIQUE IMPORT LABEL`{`UNIQUE IMPORT ID`: row._id}) SET n += row.properties SET n:Entity;\" + \"\\n\")\n # writeFile.write(\":commit\" + \"\\n\")\n\n writeFile.write(\":begin\" + \"\\n\")\n writeFile.write(\"UNWIND [{start: {_id:\" + str(firstEntityId) + \"}, end: {_id:\" + str(secEntityId) + \"}, properties:{}}] AS row\" + \"\\n\")\n writeFile.write(\"MATCH (start:`UNIQUE IMPORT LABEL`{`UNIQUE IMPORT ID`: row.start._id})\" + \"\\n\")\n writeFile.write(\"MATCH (end:`UNIQUE IMPORT LABEL`{`UNIQUE IMPORT ID`: row.end._id})\" + \"\\n\")\n writeFile.write(\"CREATE (start)-[r:\" + relation + \"]->(end) SET r += row.properties;\" + \"\\n\")\n writeFile.write(\":commit\" + \"\\n\")\n\n line = file.readline()\n\n writeFile.write(\":begin\" + \"\\n\")\n writeFile.write(\"MATCH (n:`UNIQUE IMPORT LABEL`) WITH n LIMIT 20000 REMOVE n:`UNIQUE IMPORT LABEL` REMOVE n.`UNIQUE IMPORT ID`;\" + \"\\n\")\n writeFile.write(\":commit\" + \"\\n\")\n\n\n writeFile.write(\":begin\" + \"\\n\")\n writeFile.write(\"DROP CONSTRAINT 
ON (node:`UNIQUE IMPORT LABEL`) ASSERT (node.`UNIQUE IMPORT ID`) IS UNIQUE;\" + \"\\n\")\n writeFile.write(\":commit\" + \"\\n\")","repo_name":"yu-andy/covid19-kg","sub_path":"OpenNRE/custom/convertToCypher.py","file_name":"convertToCypher.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43276702785","text":"# recreate the game the google uses when a page isn't loading\n# game board\n# person jumping on a button press (maybe longer or shorter jumps)\n# partially random things in way moving towards the user\nimport random\nimport threading\nimport time\nimport copy\nimport Object\nimport Board\n\n# should probably place a importance level for an object and those will get placed last so they are seen, this means i wil have to sort the render_list by reverse importance level in the render stage\n\n\ndef out_of_bounds_check(cord, board_obj):\n width, height = board_obj.get_boundary()\n x, y = cord\n if x < 0 or y < 0:\n return True\n elif x >= width-1 or y >= height:\n return True\n else:\n return False\n\n\ndef place(board_obj, object_rendering_map, board_frame):\n\n for obj, locations in object_rendering_map:\n for cord in locations:\n # only place a cord if it is in bounds\n if out_of_bounds_check(cord, board_obj):\n continue\n # round the cord cause it can be a float\n cord = (round(cord[0]), round(cord[1]))\n x, y = board_obj.get_relative_index(cord)\n board_frame[y][x] = obj.encode('utf-8')\n\n return board_frame\n\n# object to game board\n\n\ndef render(board_obj, render_list):\n board_frame = copy.deepcopy(board_obj.get_board())\n\n # updating the board frame allows us to have multiple items show up\n for object_rendering_map in render_list:\n board_frame = place(board_obj, object_rendering_map, board_frame)\n\n # after you've placed all the items for the frame display them\n board_obj.display(board_frame)\n\n\nobstacles = {\n # '*':[(3,0)],\n # '$':[(10,0)],\n # 'â–„':[1,0],\n 'â–‚': [(2, 0)], 'â–…': [(2, 0)], 'â–€': [(2, 0)], '🮃': [(2, 0)], 'â–”': [(2, 0)], 'â–ƒ': [(2, 1)], 'â–…': [(2, 0)],\n\n}\n\n\nclass Player():\n def __init__(self, obj):\n pass\n\n\nsquare_map = {'@': [(0, 0), (0, 1), (1, 0), (1, 1)]}\ntriangle_map = {'*': [(3, 0), (1, 1), (5, 1), (0, 2), (2, 2), (4, 2), (6, 2)]}\n\npoint_map = {'*': [(0, 0)]}\n# square = Object.Object(square_map, (5, 5))\n# triangle = Object.Object(triangle_map, (10, 10))\n\nb1 = Board.Board(50, 25, 2, '#')\n# for i in range(10):\n# for i in obstacles.items():\n# time.sleep(.1)\n# obstacle_overlay(b1, [ ('D',[(1,10)]) ])\n#import threading\n#thread = threading.Thread(target=obstacle_overlay,args=(b1, square.get_rendering_map()))\n\n# point = Object.Object(point_map, (0, 0))\n## PASSED TEST ##\n\"\"\"\nfor i in range(50):\n for j in range(20):\n render_list = [point.get_rendering_map()]\n render(b1, render_list)\n point.update_origin_location((i,j))\n time.sleep(.1)\n\"\"\"\n###\n\n### TEST OF MOVING OBEJECTS###\n\"\"\"\ndef mainloop():\n while True:\n time.sleep(.02)\n # updates every other so it looks choppy. 
I need some sort of pool of objects to render\n # give the renderer a list of things\n render_list = [triangle.get_rendering_map(),\n square.get_rendering_map(), ]\n render(b1, render_list)\n #render(b1, triangle.get_rendering_map())\n\n\n# rather than sleeping it should probably just check to se how much time has passed.\ndef move_objects():\n i = 0\n while True:\n square.update_origin_location((25-i, 25-i))\n triangle.update_origin_location((0+i, 0+i))\n i += 1\n time.sleep(.5)\n\n\nthread1 = threading.Thread(target=move_objects)\nthread1.start()\nthread2 = threading.Thread(target=mainloop)\nthread2.start()\n\"\"\"\n\n### make random squares fall ###\n\n\ndef mainloop(board_obj, obj_list):\n while True:\n time.sleep(.001)\n render_list = [obj.get_rendering_map() for obj in obj_list]\n render(board_obj, render_list)\n\n\n# point = Object.Object(point_map, origin_location=(\n# 0, 0), physics_on=True, velocity=(.1, .1))\n\n# point1 = Object.Object(point_map, origin_location=(\n# 3, 0), physics_on=True, velocity=(.1, .1))\n\n# point2 = Object.Object(point_map, origin_location=(\n# 4, 0), physics_on=True, velocity=(.2, .1))\n\n# point3 = Object.Object(point_map, origin_location=(\n# 7, 0), physics_on=True, velocity=(.3, 0))\n\n# point4 = Object.Object(point_map, origin_location=(\n# 8, 8), physics_on=True, velocity=(-.1, .1))\n\n# point5 = Object.Object(point_map, origin_location=(\n# 15, 14), physics_on=True, velocity=(.1, -.1))\n\n# point6 = Object.Object(point_map, origin_location=(\n# 20, 6), physics_on=True, velocity=(-.2, .1))\n\n# point7 = Object.Object(point_map, origin_location=(\n# 7, 0), physics_on=True, velocity=(-.3, 0))\nobj_list = []\nfor i in range(8):\n # get random location for width and heigh from b1.get_boundary()\n width, height = b1.get_boundary()\n x = random.randint(0, width)\n y = random.randint(0, height)\n # get random velocity in range of -1 to 1 as a float\n x_vel = random.uniform(-.2, .2)\n y_vel = random.uniform(-.2, .2)\n\n point = Object.Object(triangle_map, origin_location=(\n x, y), physics_on=True, velocity=(x_vel, y_vel))\n obj_list.append(point)\n\n# print(obj_list))\n\nmainloop(b1, obj_list)\n### move object to inside of the the render call ranther than just the get_rendering_map() ###\n### this will allow me to update out of bounds attributes fo the object ###\n\ninput()\n","repo_name":"jcourson8/Command-Line-Game-Engine","sub_path":"testing/cljump.py","file_name":"cljump.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"356838267","text":"#!/usr/bin/python\n\n#to be executed in the lib directory where all the needed libraries are installed \n# usage :\n# python .py list_of_slcio_files\n# or\n# python .py -d directory -r list_of_run_numbers\n#\n\n#########################################################################################\n## Documentation\n##\n## experience describes the experimental setup (which DIF is where, ... 
)\n## BIFtriggerWindow is the clock tick shift between signal of the particle\n## interaction in the Cerenkov detector on the beam line\n## and the one of particles interacting in the detector\n## lcReader is the LCIO file event reader\n##\n## The DataListener architecture is the following\n## master (directly connected to lcReader)\n## ^ ^ ^\n## | | |\n## | | timePlotListener (plot where are negative time stamps, can be omitted in production)\n## | |\n## | BIFListener (used to determine the BIFtriggerWindow, can be omitted in production)\n## |\n## trivent (the time clustering algorithm)\n## ^ ^\n## | |\n## | BIFListener_check (used to cross check the BIFtriggerWindow, can be omitted in production)\n## |\n## filter (filters events selected by trivent, see below)\n## ^\n## |\n## LCIOoutputWriter (Write LCIO output : both original RawCalorimeterHit ans CalorimeterHit collection\n##\n## the filters\n##\n## Current filters are for september 2022 SDHCAL test beam\n##\n## first filter is bigNoiseFilter : reject events that have more than 15000 hits.\n## second filter is CL_Filter : reject events with less than 6 consecutive fired layer (allowing one hole among the consecutive layer count)\n## third filter is RamfullFilter : reject events containing a DIF with more than 36 RAMfull channels (channel 29 and 31) if this DIF represents more than 80% of all hits in the event.\n## fourth filter is singleAsicNoise_Filter : reject events if a single ASIC contains more than 80 percent of all the event hits.\n## fifth filter is multipleAsicNoise_Filter : reject events containing at least 3 ASICs having more than 80 hits each.\n## \n## Filter status for GIF like analysis was :\n## first filter is doubleFilter (planFilter,CL_Filter) : reject events rejected by the 2 filters :\n## planFilter : reject events with no hits in layer 0,1,2\n## CL_Filter : reject events with less than 6 consecutive fired layer (allowing one hole among the consecutive layer count)\n## second filter is RamfullFilter : reject events containing a DIF with more than 36 RAMfull channels (channel 29 and 31) if this DIF represents more than 80% of all hits in the event.\n##\n#########################################################################################\n\nfrom InputOutputFileNames import generateFileList\nimport sys\nimport ROOT\nROOT.gROOT.Reset()\nROOT.gSystem.Load('liblcio')\nROOT.gSystem.Load('libGRPC_RawHit_Readout_Analyser')\nROOT.gSystem.Load('libGRPC_RawHit_Readout_Analyser_dict')\nif ROOT.gROOT.GetVersion()[0]=='6':\n #ROOT 6.08.02 don't understand non template dictionnary without it (don't know why)\n dummy=ROOT.intervalle('unsigned int')()\n\nIOnames=generateFileList()\n \ninputFileNames=ROOT.vector(\"string\")()\n\nfilelist=set(IOnames[0])\nif len(filelist)==0:\n sys.exit(\"No input files to process\")\n \nfor f in filelist:\n inputFileNames.push_back(f);\n\nfor file in inputFileNames:\n print (file)\n\n\n \noutputFileName=IOnames[1]+'.slcio'\nprint (\"output file name is \", outputFileName)\n\n \n#experience=ROOT.CERN_SPS_Sept2018_SDHCAL_ExperimentalSetup()\nexperience=ROOT.CERN_SPS_H2_Sept2022_part1_SDHCAL_ExperimentalSetup()\nnumeroBIFs=[experience.getBIF(x) for x in range(experience.nBIFs())]\n\nprint (numeroBIFs)\n#For the moment, put the same time window for all BIFs\nBIFtriggerWindow=ROOT.intervalle('int')(-9,-5)\n\n \n#start LCIO reader\nlcReader=ROOT.IOIMPL.LCFactory.getInstance().createLCReader()\n\n#create architecture of 
listeners\nmasterReader=ROOT.RawHit_SDHCAL_Data_Reader_From_LCEvent(\"DHCALRawHits\",True,10)\nlcReader.registerLCEventListener(masterReader)\n\nBIFListeners=[ROOT.BIF_Data_Listener(x) for x in numeroBIFs]\nBIFListeners_timer=[ROOT.Time_Decorator_For_RawHit_SDHCAL_Data_Listener(x,\"BIFListener_BIF_{}\".format(x.BIF_number())) for x in BIFListeners]\nfor x in BIFListeners_timer:\n masterReader.registerDataListener(x)\n\ntimePlotListener=ROOT.RawHit_TimePlot_Listener(experience)\ntimePlotListener_timer=ROOT.Time_Decorator_For_RawHit_SDHCAL_Data_Listener(timePlotListener,\"timePlotListener\")\nmasterReader.registerDataListener(timePlotListener_timer)\n\ntrivent=ROOT.RawHit_SDHCAL_Data_Reader_Trivent(2,25) # 2=event windows half size, 25=threshold number for hits\ntrivent.setSkipIfBIFisOutsideReadout(False)\nfor x in BIFListeners:\n trivent.setBIFparameters(x.BIF_number(),BIFtriggerWindow) # event window = [-2,2] so BIF window = [-9,-4]\n#trivent.setUltraVerboseDebugOutput(True)\ntrivent_timer=ROOT.Time_Decorator_For_RawHit_SDHCAL_Data_Listener(trivent,\"trivent\")\nmasterReader.registerDataListener(trivent_timer)\n\n\nBIFListeners_check=[ROOT.BIF_Data_Listener(x) for x in numeroBIFs]\nBIFListeners_check_timer=[ROOT.Time_Decorator_For_RawHit_SDHCAL_Data_Listener(x,\"BIFListener_check_BIF_{}\".format(x.BIF_number())) for x in BIFListeners_check]\nfor x in BIFListeners_check_timer:\n trivent.registerDataListener(x)\n\n#Filters\nfilter=ROOT.RawHit_SDHCAL_Data_Reader_Event_Filter()\n\nHighNhitFilter=ROOT.MinNumberOfHits_Filter(15000)\nbigNoiseFilter=ROOT.Inverse_Filter(HighNhitFilter)\nfilter.addRejectConditions(bigNoiseFilter)\n\n#planDomain=ROOT.UI_domain(ROOT.UI_intervalle(0,2))\n#planFilter=ROOT.NoHitInLayers_Filter(planDomain,experience)\nCL_Filter=ROOT.ConsecutiveLayers_Filter(6,experience)\n#doubleFilter=ROOT.Both_Reject_Filter(planFilter,CL_Filter)\n#filter.addRejectConditions(doubleFilter)\nfilter.addRejectConditions(CL_Filter)\n\nRamfullFilter=ROOT.RamFull_Filter(36,0.8)\nfilter.addRejectConditions(RamfullFilter)\n\nsingleAsicNoise_Filter=ROOT.HitFractionInOneAsicAbove_Filter(0.8)\nfilter.addRejectConditions(singleAsicNoise_Filter)\n\nmultipleAsicNoise_Filter=ROOT.MaxNumberASICwithMoreThanHits_Filter(80,3)\nfilter.addRejectConditions(multipleAsicNoise_Filter)\n\ntrivent.registerDataListener(filter)\n\n\nLCIOoutputWriter=ROOT.RawHit_SDHCAL_Data_LCWriter_RawCalorimeterHit()\nLCIOoutputWriter.open(outputFileName)\nLCIOoutputWriter_timer=ROOT.Time_Decorator_For_RawHit_SDHCAL_Data_Listener(LCIOoutputWriter,\"LCIOoutputWriter\")\nfilter.registerDataListener(LCIOoutputWriter_timer)\n\n\n\nLCIOoutputWriter_extra=ROOT.RawHit_SDHCAL_Data_LCWriter_CalorimeterHit(experience)\nLCIOoutputWriter_extra.setCorrectHR2(True)\nLCIOoutputWriter.addLCWriter(LCIOoutputWriter_extra)\n\n#open file and event loop\nlcReader.open( inputFileNames )\n#File DHCAL_744193_I0_0.slcio has Tricot data at least in readout 130 / 179 / 214 / 446 / 45223 / 525 / 575 / 678 / 749 / 778 / 793 / \n#Only one hit per trivent event\n#For readout 130, the corresponding event is at event 28 (over 87)\n#lcReader.skipNEvents(129)\n#lcReader.readStream(1)\nlcReader.readStream()\n\n#end of event loop\nfor x in BIFListeners_check:\n x.printMaxDelay()\n\nrootFile=ROOT.TFile(IOnames[1]+\"_check.root\" , \"RECREATE\")\n#load ROOT library missing\nROOT.TH1F\n\nd=rootFile.mkdir(\"BIFDelay\")\nfor x in BIFListeners:\n sub_d=d.mkdir(\"BIF_{}\".format(x.BIF_number()))\n x.saveTo(sub_d)\n\ndd=rootFile.mkdir(\"BIFDelay_check\")\nfor x in 
BIFListeners_check:\n    sub_dd=dd.mkdir(\"BIF_{}\".format(x.BIF_number()))\n    x.saveTo(sub_dd)\n\ndtime=rootFile.mkdir(\"TimePlot\")\ntimePlotListener.saveTo(dtime)\n#timePlotListener.printReport()\nrootFile.Close()\n\nmasterReader.writeSpillInfoStatShort()\nmasterReader.writeSpillInfoStatExtendedInFile()\n\nlcReader.close()\nLCIOoutputWriter.close()\n","repo_name":"SDHCAL/GRPC_RawHit_Readout_Analyse","sub_path":"python/run_trivent.py","file_name":"run_trivent.py","file_ext":"py","file_size_in_byte":7786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35877553819","text":"#!/usr/bin/env python\n# coding:utf-8\nimport cyclone.web\nfrom toughwlan.manage.base import BaseHandler, MenuRes\nfrom toughlib.permit import permit\nfrom toughwlan.manage.resource import template_forms\nfrom toughwlan import models\n\n@permit.route(r\"/template\", u\"认证模版管理\", MenuRes, order=7.0000, is_menu=True)\nclass TemplateHandler(BaseHandler):\n    @cyclone.web.authenticated\n    def get(self):\n        tpl_list = self.db.query(models.TrwTemplate)\n        self.render(\"template.html\",tpl_list=tpl_list)\n\n\n@permit.route(r\"/template/add\", u\"模版新增\", MenuRes, order=7.0001)\nclass TemplateAddHandler(BaseHandler):\n    @cyclone.web.authenticated\n    def get(self):\n        form = template_forms.tpl_add_form()\n        self.render(\"base_form.html\", form=form)\n\n    @cyclone.web.authenticated\n    def post(self, *args, **kwargs):\n        form = template_forms.tpl_add_form()\n        if not form.validates(source=self.get_params()):\n            return self.render(\"base_form.html\", form=form)\n\n        tpl = models.TrwTemplate()\n        tpl.tpl_name = form.d.tpl_name\n        tpl.tpl_desc = form.d.tpl_desc\n        self.db.add(tpl)\n\n        self.db.commit()\n        self.redirect(\"/template\")\n\n\n@permit.route(r\"/template/update\", u\"模版修改\", MenuRes, order=7.0002)\nclass TemplateUpdateHandler(BaseHandler):\n    @cyclone.web.authenticated\n    def get(self):\n        tpl_id = self.get_argument(\"tpl_id\")\n        form = template_forms.tpl_update_form()\n        tpl = self.db.query(models.TrwTemplate).get(tpl_id)\n        form.fill(tpl)\n        self.render(\"base_form.html\",form=form)\n\n    @cyclone.web.authenticated\n    def post(self, *args, **kwargs):\n        form = template_forms.tpl_update_form()\n        if not form.validates(source=self.get_params()):\n            return self.render(\"base_form.html\", form=form)\n\n        tpl = self.db.query(models.TrwTemplate).get(form.d.id)\n        if tpl:\n            tpl.tpl_name = form.d.tpl_name\n            tpl.tpl_desc = form.d.tpl_desc\n            self.db.commit()\n\n        self.redirect(\"/template\")\n\n\n@permit.route(r\"/template/delete\", u\"模版删除\", MenuRes, order=7.0003)\nclass TemplateDeleteHandler(BaseHandler):\n    @cyclone.web.authenticated\n    def get(self, *args, **kwargs):\n        tpl_id = self.get_argument(\"tpl_id\")\n        self.db.query(models.TrwTemplate).filter_by(id=tpl_id).delete()\n        self.db.commit()\n        self.redirect(\"/template\")\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"talkincode/toughwlan","sub_path":"toughwlan/manage/resource/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"3751986793","text":"# Answer 1- Time complexity O(m+n)\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        def search(i ,j):\n            if 0 <= i < len(matrix) and 0 <= j < len(matrix[0]):\n                if matrix[i][j] == target:\n                    return True\n                elif matrix[i][j] > target:\n                    return search(i-1,j)\n                else:\n                    return search(i,j+1)\n            else:\n                return False\n        return 
search(len(matrix)-1,0)\n\n# Answer 2 - Time complexity O(mlogn)\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        for i in range(len(matrix)):\n            lo,hi = 0,len(matrix[i])-1\n            while lo <= hi and 0<=lo<len(matrix[i]):\n                mid = (lo+hi)//2\n                if matrix[i][mid] == target:\n                    return True\n                elif matrix[i][mid] > target:\n                    hi = mid-1\n                else:\n                    lo = mid+1\n        return False\n    \n# Answer 3 - Time complexity O(mn)\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        for i in range(len(matrix)):\n            for j in range(len(matrix[i])):\n                if matrix[i][j] == target:\n                    return True\n        return False\n","repo_name":"amankitsingh/Programming","sub_path":"search-a-2d-matrix-ii/search-a-2d-matrix-ii.py","file_name":"search-a-2d-matrix-ii.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"4568678858","text":"\"\"\"Interact with Google AppEngine.\n\nSee https://googleapis.dev/python/storage/latest/client.html\n\"\"\"\n\nimport json\nimport pathlib\nfrom datetime import datetime, UTC\nfrom typing import Any\nfrom typing import Iterable\nfrom typing import List\nfrom typing import cast\nimport cherrypy\nfrom plugins import decorators\n\n\nclass Plugin(cherrypy.process.plugins.SimplePlugin):\n    \"\"\"A CherryPy plugin for interacting with Google AppEngine.\"\"\"\n\n    def __init__(self, bus: cherrypy.process.wspbus.Bus) -> None:\n        cherrypy.process.plugins.SimplePlugin.__init__(self, bus)\n\n    def start(self) -> None:\n        \"\"\"Define the CherryPy messages to listen for.\n\n        This plugin owns the gcp prefix.\n        \"\"\"\n\n        self.bus.subscribe(\n            \"gcp:appengine:ingest_file\",\n            self.ingest_file\n        )\n\n    @staticmethod\n    def full_storage_path(path: pathlib.Path) -> pathlib.Path:\n        \"\"\"Get an absolute path to a file within the storage root.\"\"\"\n\n        storage_root = cherrypy.engine.publish(\n            \"registry:first:value\",\n            \"config:storage_root\",\n            as_path=True\n        ).pop()\n\n        return cast(pathlib.Path, storage_root / path)\n\n    def ingest_file(\n        self,\n        storage_path: pathlib.Path,\n        batch_size: int = 100\n    ) -> None:\n        \"\"\"Match a log file to a processor based on its file path.\"\"\"\n\n        request_top_path = (\"appengine.googleapis.com\", \"request_log\")\n\n        if not storage_path.parts[0:2] == request_top_path:\n            return\n\n        line_count = self.process_request_log(storage_path, batch_size)\n\n        unit = \"line\" if line_count == 1 else \"lines\"\n\n        cherrypy.engine.publish(\n            \"applog:add\",\n            \"gcp_appengine\",\n            f\"{line_count} {unit} ingested from {storage_path}\"\n        )\n\n        cherrypy.engine.publish(\"scheduler:add\", 5, \"logindex:parse\")\n\n    @decorators.log_runtime\n    def process_request_log(\n        self,\n        storage_path: pathlib.Path,\n        batch_size: int = 100\n    ) -> int:\n        \"\"\"\n        Add the lines of an hourly request log in JSON format to the\n        logindex database.\n\n        This is similar to the ingestion process for log files in\n        combined format, even though it means additional parsing\n        (going from JSON to combined here, and then parsing combined\n        back down to individual fields in the logindex plugin). 
Doing\n it this way reduces duplication with the logindex plugin.\n \"\"\"\n\n log_path = self.full_storage_path(storage_path)\n\n if not log_path.is_file():\n return 0\n\n line_count = 0\n batch: List[Any] = []\n with open(log_path, \"r\", encoding=\"utf-8\") as file_handle:\n while True:\n line = file_handle.readline()\n\n if not line:\n break\n\n offset = file_handle.tell()\n\n json_line = json.loads(line)\n\n payload = json_line.get(\"protoPayload\")\n\n if not payload:\n continue\n\n combined_line = self.json_to_combined(payload)\n\n record_hash = cherrypy.engine.publish(\n \"hasher:value\",\n line,\n algorithm=\"md5\"\n ).pop()\n\n batch.append((\n str(storage_path),\n offset,\n record_hash,\n combined_line\n ))\n\n if len(batch) > batch_size:\n self.publish_lines(batch)\n line_count += len(batch)\n batch = []\n if batch:\n self.publish_lines(batch)\n line_count += len(batch)\n batch = []\n\n return line_count\n\n @staticmethod\n def combined_quoted(value: str = \"\") -> str:\n \"\"\"Wrap a value in quotes unless it is empty.\"\"\"\n\n if value:\n quoteless_value = value.replace('\"', '[DOUBLEQUOTE]')\n return f'\"{quoteless_value}\"'\n\n return \"-\"\n\n @staticmethod\n def combined_pair(key: str, value: str = \"\") -> str:\n \"\"\"Pair a key and its quoted value or suppress both.\"\"\"\n\n if value:\n quoteless_value = value.replace('\"', '[DOUBLEQUOTE]')\n return f'{key}=\"{quoteless_value}\"'\n return \"\"\n\n def json_to_combined(self, payload: Any) -> str:\n \"\"\"Format a JSON-formatted string in combined log format.\"\"\"\n\n resource = \" \".join((\n payload[\"method\"],\n payload[\"resource\"],\n payload[\"httpVersion\"]\n ))\n\n formats = (\n \"%Y-%m-%dT%H:%M:%S.%f%z\",\n \"%Y-%m-%dT%H:%M:%S%z\"\n )\n\n for format in formats:\n try:\n parsed_timestamp = datetime.strptime(\n payload[\"startTime\"],\n format\n ).replace(tzinfo=UTC)\n break\n except ValueError:\n pass\n\n formatted_timestamp = parsed_timestamp.strftime(\n \"%d/%b/%Y:%H:%M:%S:%f %z\"\n )\n\n fields = (\n payload[\"ip\"],\n \"-\",\n \"-\",\n f'[{formatted_timestamp}]',\n self.combined_quoted(resource),\n str(payload[\"status\"]),\n payload.get(\"responseSize\", \"0\"),\n self.combined_quoted(payload.get(\"referrer\")),\n self.combined_quoted(payload.get(\"userAgent\")),\n self.combined_quoted(payload.get(\"host\")),\n self.combined_pair(\"latency\", payload.get(\"latency\")),\n self.combined_pair(\"end_time\", payload.get(\"endTime\")),\n self.combined_pair(\"version\", payload.get(\"versionId\")),\n self.combined_pair(\"request_id\", payload.get(\"requestId\"))\n )\n\n return \" \".join(fields).strip()\n\n @staticmethod\n def publish_lines(batch: Iterable[str]) -> None:\n \"\"\"Send a batch of request logs in combined format to the logindex\n plugin.\n\n \"\"\"\n cherrypy.engine.publish(\n \"logindex:insert_line\",\n batch\n )\n","repo_name":"lovett/medley","sub_path":"plugins/gcp_appengine.py","file_name":"gcp_appengine.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"30349009469","text":"import sys\nsys.setrecursionlimit(5000)\n\nm, n = list(map(int, input().split(' ')))\nmatrix = []\n\nfor i in range(m):\n new_line = list(map(int, input().split(' ')))\n matrix.append(new_line)\n\ndp = [[-1] * n for _ in range(m)]\n\n\ndef solution(i, j):\n global m, n\n\n if (i, j) == (m - 1, n - 1):\n return 1\n\n if dp[i][j] != -1:\n return dp[i][j]\n\n d = [[1, 0], [0, 1], [-1, 0], [0, -1]]\n\n possible = 0\n for 
dx, dy in d:\n if 0 <= i + dx < m and 0 <= j + dy < n and matrix[i][j] > matrix[i + dx][j + dy]:\n possible += solution(i + dx, j + dy)\n dp[i][j] = possible\n return dp[i][j]\n\n\nsolution(0, 0)\nprint(dp[0][0])\n","repo_name":"Kim-Young-Hoo/boj_algorithms","sub_path":"백준/Gold/1520. 내리막 길/내리막 길.py","file_name":"내리막 길.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4425334208","text":"\"\"\"\nProgram to calculate and display\nthe electricity bill. Inputs will be price per kWh in cents,\ndaily use in kWh and the number of days in the billing period\n\"\"\"\nelectricity_price = int(input(\"Enter the price of electricity in cents per kilowatt hour: \"))\ndaily_use = float(input(\"Enter the kilowatt hours of electricity use per day: \"))\nnumber_days = int(input(\"Enter the number of days in the billing period: \"))\n\nprint(\"The estimated electricity bill is $\", electricity_price * daily_use * number_days /100)\n\nprint(\"Thank you.\")\n","repo_name":"philipwoodward/Practicals","sub_path":"Prac01/electricityBillEstimator.py","file_name":"electricityBillEstimator.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29638838568","text":"import logging\n\n# Create a logger\nlogger = logging.getLogger(__name__)\n\n# Set the log level\nlogger.setLevel(logging.INFO)\n\n# Create a file handler\nhandler = logging.FileHandler('app.log')\n\n# Set the log level of the handler\nhandler.setLevel(logging.INFO)\n\n# Create a formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# Set the formatter for the handler\nhandler.setFormatter(formatter)\n\n# Add the handler to the logger\nlogger.addHandler(handler)\n","repo_name":"JaggedGem/virus-total-antivirus","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"24789887753","text":"'''\nGiven a measured impedance in polar coordinates, prints out the\nassociated parameters that can be calculated.\n'''\nif 1: # Copyright, license\n # These \"trigger strings\" can be managed with trigger.py\n #∞copyright∞# Copyright (C) 2014 Don Peterson #∞copyright∞#\n #∞contact∞# gmail.com@someonesdad1 #∞contact∞#\n #∞license∞#\n # Licensed under the Open Software License version 3.0.\n # See http://opensource.org/licenses/OSL-3.0.\n #∞license∞#\n #∞what∞#\n # Calculates various measures from a complex electrical impedance\n #∞what∞#\n #∞test∞# #∞test∞#\n pass\nif 1: # Imports\n import getopt\n import os\n import sys\n #from math import tan, sin, cos, pi, isinf, radians #xx\n from pdb import set_trace as xx\nif 1: # Custom imports\n from wrap import dedent\n #from sig import sig #xx\n from f import flt, tan, sin, cos, pi, isinf, radians\n from fpformat import FPFormat\n if 1:\n import debug\n debug.SetDebugger()\nif 1: # Global variables\n ii = isinstance\ndef Error(*msg, status=1):\n print(*msg, file=sys.stderr)\n exit(status)\ndef Usage(d, status=1):\n print(dedent(f'''\n Usage: {sys.argv[0]} [options] Z theta\n Given a measured impedance with magnitude Z in ohms and phase angle\n theta in degrees, prints out the associated parameters. 
You can use\n a cuddled SI prefix after the number for Z if you wish (example:\n 1.23k means 1230 ohms).\n Options\n -d n Use n significant digits for output [{d[\"-d\"]}]\n -f f Specify measurement frequency in Hz. You can use a cuddled SI \n prefix after the number. [{d[\"-f\"]} Hz]\n '''))\n exit(status)\ndef ParseCommandLine(d):\n d[\"-d\"] = 3 # Number of significant digits\n d[\"-f\"] = 1000 # Measurement frequency in Hz\n if len(sys.argv) < 2:\n Usage(d)\n try:\n optlist, args = getopt.getopt(sys.argv[1:], \"d:f:\")\n except getopt.GetoptError as e:\n msg, option = e\n print(msg)\n exit(1)\n for o, a in optlist:\n if o == \"-d\":\n try:\n d[\"-d\"] = int(a)\n except ValueError:\n Error(\"-d option invalid\")\n if not(1 <= d[\"-d\"] <= 15):\n Error(\"-d option must be between 1 and 15\")\n if o == \"-f\":\n try:\n d[\"-f\"] = Interpret(a)\n except ValueError:\n Error(\"-f option invalid\")\n if d[\"-f\"] <= 0:\n Error(\"-f option must be > 0\")\n #sig.digits = d[\"-d\"]\n flt(0).n = d[\"-d\"]\n if len(args) != 2:\n Usage(d)\n return args\ndef Interpret(s):\n '''Return the value given in the string s as a float. A single\n trailing character may be an optional SI prefix.\n '''\n prefix = {\n \"y\": -24, \"z\": -21, \"a\": -18, \"f\": -15, \"p\": -12, \"n\": -9, \"u\":\n -6, \"m\": -3, \"c\": -2, \"d\": -1, \"h\": 2, \"k\": 3, \"M\": 6, \"G\": 9,\n \"T\": 12, \"P\": 15, \"E\": 18, \"Z\": 21, \"Y\":24}\n if not s:\n raise ValueError(\"Empty string in Interpret()\")\n m = 1\n if s[-1] in prefix:\n m = 10**prefix[s[-1]]\n s = s[:-1]\n return flt(s)*m\nif __name__ == \"__main__\":\n d = {} # Options dictionary\n z, theta_d = ParseCommandLine(d)\n fp = FPFormat(d[\"-d\"])\n inf = flt(float(\"inf\"))\n # Check the angle\n t = flt(theta_d)\n theta = radians(t)\n if not (-90 <= t <= 90):\n Error(\"Angle must be between -90° and 90°\")\n w = 2*pi*d[\"-f\"] # Angular frequency in radians/s\n Z = Interpret(z) # Magnitude of impedance in ohms\n a = 1/(w*Z)\n if t == 90:\n Rs = 0\n Rp = inf\n Q = inf\n D = 0\n Cs = a/sin(theta)\n elif t == -90:\n Rs = 0\n Rp = -inf\n Q = -inf\n D = 0\n Cs = a/sin(theta)\n else:\n theta = radians(flt(theta_d))\n Rs = Z*cos(theta)\n Rp = Z/cos(theta)\n Q = tan(abs(theta))\n D = inf if not Q else 1/Q\n Cs = inf if not theta else a/sin(theta)\n Cp = a*sin(theta)\n a = Z/w\n Ls = a*sin(theta)\n Lp = inf if not theta else a/sin(theta)\n # Correct capacitances to get conventional sign\n Cs *= -1\n Cp *= -1\n # Print report\n E = fp.engsi\n fr = E(d[\"-f\"]) + \"Hz\"\n if 0:\n # Old method for python 2.7 and later\n print(\"Impedance(%s) =\" % fr, z, \"ohms @\", theta_d, \"deg\")\n X = Z*sin(theta)\n print(\" Rs = \", E(Rs), \"ohm = ESR\", sep=\"\")\n print(\" Rp = \", E(Rp), \"ohm\", sep=\"\")\n print(\" X = \", E(X), \"ohm\", sep=\"\")\n if isinf(Cs):\n print(\" Cs = inf\")\n else:\n print(\" Cs = \", E(Cs), \"F\", sep=\"\")\n print(\" Cp = \", E(Cp), \"F\", sep=\"\")\n print(\" Ls = \", E(Ls), \"H\", sep=\"\")\n if isinf(Lp):\n print(\" Lp = inf\")\n else:\n print(\" Lp = \", E(Lp), \"H\", sep=\"\")\n if ii(Q, flt):\n print(\" Q =\", sig(Q))\n print(\" D =\", sig(D))\n else:\n print(\" Q =\", Q)\n print(\" D =\", D)\n else:\n # Use f-strings\n o = \"Ω\"\n if isinf(Rs):\n Rs = \"∞ {o}\"\n else:\n Rs = f\"{E(Rs)}{o}\"\n if isinf(Rp):\n Rp = f\"∞ {o}\"\n else:\n Rp = f\"{E(Rp)}{o}\"\n X = E(Z*sin(theta)) + o\n if isinf(Cs):\n Cs = f\"∞ F\"\n else:\n Cs = f\"{E(Cs)}F\"\n Cp = f\"{E(Cp)}F\"\n if isinf(Ls):\n Ls = \"-∞ H\" if Ls == -inf else \"∞ H\"\n else:\n Ls = 
f\"{E(Ls)}H\"\n if isinf(Lp):\n Lp = \"-∞ H\" if Lp == -inf else \"∞ H\"\n else:\n Lp = f\"{E(Lp)}H\"\n if isinf(Q):\n Q = \"-∞\" if Q == -inf else \"∞\"\n else:\n Q = f\"{Q}\" \n if isinf(D):\n D = \"-∞\" if D == -inf else \"∞\"\n else:\n D = f\"{D}\"\n n = 22\n print(dedent(f'''\n Impedance({fr}) = {z}Ω @ {theta_d}°\n {Rs:>{n}s} Rs = Equivalent series resistance\n {Rp:>{n}s} Rp = Equivalent parallel resistance\n {X :>{n}s} X = Reactance\n {Cs:>{n}s} Cs = Equivalent series capacitance\n {Cp:>{n}s} Cp = Equivalent parallel capacitance\n {Ls:>{n}s} Ls = Equivalent series inductance\n {Lp:>{n}s} Lp = Equivalent parallel inductance\n {Q :>{n}s} Q = Quality factor\n {D :>{n}s} D = Dissipation factor'''))\n","repo_name":"someonesdad1/hobbyutil","sub_path":"elec/impedance.py","file_name":"impedance.py","file_ext":"py","file_size_in_byte":6513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30855638687","text":"import sqlite3\nimport time\nimport csv\n\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\ndef Add_Record(db, data):\n #Insert record into table\n with sqlite3.connect(db) as conn:\n conn.row_factory = dict_factory\n conn.text_factory = str\n\n cursor = conn.cursor()\n\n query = \"INSERT INTO country_index ({cols}) VALUES ({vals});\".format(cols=\",\".join(data.keys()),\n vals=str([data[i] for i in data]).strip('[]'))\n cursor.execute(query)\n\n\ndef Load_Data(file_name):\n records = list()\n with open(file_name, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n records.append(list(row.values()))\n return records \n\n\nif __name__ == \"__main__\":\n t = time.time()\n\n db = 'db.sqlite' # Database filename\n\n file_name = \"data/BLI_28032019144925238.csv\"\n\n data = Load_Data(file_name) # Get data from CSV\n\n #For every record, format and insert to table\n for i in data:\n record = {\n \"code\": i[0],\n \"code_desc\": i[1],\n \"indicator\": i[2],\n \"indicator_desc\": i[3],\n \"measure\": i[4],\n \"measure_desc\": i[5],\n \"inequality\": i[6],\n \"inequality_desc\": i[7],\n \"unit_code\": i[8],\n \"unit_code_desc\": i[9],\n \"powercode_code\": i[10],\n \"powercode_desc\": i[11],\n \"reference_period\": i[12],\n \"reference_period_desc\": i[13],\n \"value\": float(i[14]),\n \"flag_code\": i[15],\n \"flag_code_desc\": i[16],\n }\n \n Add_Record(db, record)\n\n print(\"Time elapsed: \" + str(time.time() - t) + \" s.\")\n","repo_name":"ChrisVidal10/aivo-challenge","sub_path":"scripts/load_script.py","file_name":"load_script.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23375036522","text":"import random\n\nfrom config import TOKEN\nimport telebot\nfrom database import cursor\nfrom telebot import types\nfrom sql.tablets_sql import tabletSQL\nfrom sql.ointments_sql import ointSQL\nfrom sql.syrups_sql import syrupSQL\nfrom datetime import datetime\nfrom random import randint\nbot = telebot.TeleBot(TOKEN)\n\n\nimg_list = ['img/doctor.jpg','img/doctor2.jpg','img/doctor3.jpg','img/johny.jpg','img/johny_doctor.jpeg']\n\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome_message(message):\n text = f\"\"\"\n Здравствуйте, {message.chat.first_name}, вас приветствует Pharmacy_bot!\n \"\"\"\n markup = types.InlineKeyboardMarkup()\n medicines = types.InlineKeyboardButton('Лекарства', 
callback_data='medicines')\n ordering = types.InlineKeyboardButton('Добавить лекарство', callback_data=\"order\")\n markup.row_width = 2\n markup.add(medicines, ordering)\n bot.send_photo(message.chat.id, open(img_list[random.randint(0,len(img_list)-1)], 'rb'))\n bot.send_message(message.chat.id, text=text, reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"back\")\ndef answer_conatcts_callback(call):\n message = call.message\n if call.data == \"back\":\n send_welcome_message(message)\n\n\n\n\n@bot.callback_query_handler(func= lambda call: call.data=='order')\ndef get_info(call):\n message = call.message\n bot.send_message(chat_id=message.chat.id,\n text = 'Напишите название лекарства, которое хотите заказать')\n\n bot.register_next_step_handler(message, add_order_review)\n\ndef add_order_review(message):\n medicine = message.text\n user = message.from_user.first_name\n message_time = message.date\n created = datetime.fromtimestamp(message_time).strftime('%d-%m-%Y %H:%M:%S')\n with open('orders.txt', 'a', encoding='utf-8') as file:\n info = f\"\"\"\n Дата создания отзыва: {created}\n Имя пользователя: {user}\n Лекарство: {medicine}\n \"\"\"\n file.write(info)\n bot.send_message(message.chat.id, text=\"Спасибо, мы с вами свяжемся в течении недели!\")\n\n\n\n\n\n@bot.callback_query_handler(func= lambda call: call.data=='medicines')\ndef send_all_genres(call):\n message = call.message\n markup = types.InlineKeyboardMarkup()\n tablets = types.InlineKeyboardButton('Таблетки', callback_data='tablets')\n ointments = types.InlineKeyboardButton('Мази', callback_data='ointments')\n syrups = types.InlineKeyboardButton('Сиропы', callback_data='syrups')\n back_button = types.InlineKeyboardButton('Назад',callback_data='back')\n markup.row_width = 2\n markup.add(tablets,ointments,syrups,back_button)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=\"Выберите тип лекарства\",\n message_id=message.id,\n reply_markup=markup)\n\n\n\n\n\n@bot.callback_query_handler(func= lambda call: call.data=='tablets')\ndef send_all_tablets(call):\n message = call.message\n tablets_manager = tabletSQL(cursor=cursor)\n tablets = tablets_manager.extract_all_data()\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n for id,name,price,description in tablets:\n print(id)\n button = types.InlineKeyboardButton(name, callback_data=f'tablet_{id}')\n markup.add(button)\n button2 = types.InlineKeyboardButton('Назад', callback_data='tablets_back')\n markup.add(button2)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=\"Выберите лекарство\",\n message_id=message.id,\n reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"tablets_back\")\ndef answer_tablets_callback(call):\n if call.data == \"tablets_back\":\n send_all_genres(call)\n\n@bot.callback_query_handler(func= lambda call: str(call.data).startswith(\"tablet_\"))\ndef send_tablet_info(call):\n message = call.message\n id = str(call.data).split(\"_\")[1]\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n manager = tabletSQL(cursor=cursor)\n tablet = manager.extract_one_tablet(int(id))[0]\n text = f\"\"\"\n {tablet[0]}\n {tablet[1]}\n {tablet[2]}\n {tablet[3]}\n \"\"\"\n button2 = types.InlineKeyboardButton('Назад', callback_data='tablets_info_back')\n markup.add(button2)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=text,\n message_id=message.id,\n reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"tablets_info_back\")\ndef 
answer_tablet_callback(call):\n if call.data == \"tablets_info_back\":\n send_all_tablets(call)\n\n\n\n\n\n\n\n\n@bot.callback_query_handler(func= lambda call: call.data=='ointments')\ndef send_all_ointments(call):\n message = call.message\n ointments_manager = ointSQL(cursor=cursor)\n ointments = ointments_manager.extract_all_data()\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n for id,name,price,description in ointments:\n button = types.InlineKeyboardButton(name, callback_data=f'ointment_{id}')\n markup.add(button)\n button2 = types.InlineKeyboardButton('Назад', callback_data='ointments_back')\n markup.add(button2)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=\"Выберите лекарство\",\n message_id=message.id,\n reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"ointments_back\")\ndef answer_ointments_callback(call):\n if call.data == \"ointments_back\":\n send_all_genres(call)\n\n@bot.callback_query_handler(func= lambda call: str(call.data).startswith('ointment_'))\ndef send_ointment_info(call):\n message = call.message\n id = str(call.data).split(\"_\")[1]\n manager = ointSQL(cursor=cursor)\n ointment = manager.extract_one_ointment(int(id))[0]\n text = f\"\"\"\n {ointment[0]}\n {ointment[1]}\n {ointment[2]}\n {ointment[3]}\n \"\"\"\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n button2 = types.InlineKeyboardButton('Назад', callback_data='ointments_info_back')\n markup.add(button2)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=text,\n message_id=message.id,\n reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"ointments_info_back\")\ndef answer_ointments_info_callback(call):\n if call.data == \"ointments_info_back\":\n send_all_ointments(call)\n\n\n\n\n\n\n\n\n@bot.callback_query_handler(func= lambda call: call.data=='syrups')\ndef send_all_syrups(call):\n message = call.message\n syrups_manager = syrupSQL(cursor=cursor)\n syrups = syrups_manager.extract_all_data()\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n for id,name,price,description in syrups:\n button = types.InlineKeyboardButton(name, callback_data=f'syrup_{id}')\n markup.add(button)\n button2 = types.InlineKeyboardButton('Назад', callback_data='syrups_back')\n markup.add(button2)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=\"Выберите лекарство\",\n message_id=message.id,\n reply_markup=markup)\n\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"syrups_back\")\ndef answer_syrups_callback(call):\n if call.data == \"syrups_back\":\n send_all_genres(call)\n\n\n\n@bot.callback_query_handler(func= lambda call: str(call.data).startswith(\"syrup_\"))\ndef send_syrup_info(call):\n message = call.message\n id = str(call.data).split(\"_\")[1]\n manager = syrupSQL(cursor=cursor)\n syrup = manager.extract_one_syrup(int(id))[0]\n text = f\"\"\"\n {syrup[0]}\n {syrup[1]}\n {syrup[2]}\n {syrup[3]}\n \"\"\"\n markup = types.InlineKeyboardMarkup()\n markup.row_width = 1\n button2 = types.InlineKeyboardButton('Назад', callback_data='syrups_info_back')\n markup.add(button2)\n bot.edit_message_text(\n chat_id=message.chat.id,\n text=text,\n message_id=message.id,\n reply_markup=markup)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"syrups_info_back\")\ndef answer_syrups_info_callback(call):\n if call.data == \"syrups_info_back\":\n 
send_all_syrups(call)\n\n\nbot.infinity_polling()","repo_name":"Islashik/pharmacy_project","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35802685735","text":"import torch\nimport torchvision\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport random\nimport sys\nimport os\nfrom sklearn.metrics import classification_report\n\nsys.path.append('model_adjust')\nfrom 卷积神经网络调整 import CNN, MyDataset, read_data\n\ndata = read_data('data//MNIST//raw//mnist_test.csv')\nallData = MyDataset(data)\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\nBATCH_SIZE = len(data)\nEPOCHS = 10\n\nallDataLoader = torch.utils.data.DataLoader(dataset=allData,\n batch_size=BATCH_SIZE,\n shuffle=True)\n\noptimizers = [\n \"ASGD\", \"SGD\", \"Adagrad\", \"Adadelta\", \"RMSprop\", \"Adamax\", \"Adam\", \"Rprop\"\n]\nlearning_rate = 0.001\noptimizer_name = \"Adam\"\nnet = CNN()\nnet.to(device)\nprint(f\"optimizer_name={optimizer_name}, learning_rate={learning_rate}! \")\nlossF = torch.nn.CrossEntropyLoss()\noptimizers = {\n \"ASGD\": torch.optim.ASGD,\n \"SGD\": torch.optim.SGD,\n \"Adagrad\": torch.optim.Adagrad,\n \"Adadelta\": torch.optim.Adadelta,\n \"RMSprop\": torch.optim.RMSprop,\n \"Adamax\": torch.optim.Adamax,\n \"Adam\": torch.optim.Adam,\n \"Rprop\": torch.optim.Rprop,\n}\n\nif optimizer_name not in optimizers:\n raise ValueError(f\"Unknown optimizer: {optimizer_name}\")\n\noptimizer_class = optimizers[optimizer_name]\noptimizer = optimizer_class(net.parameters(), lr=learning_rate)\n\ncorrect = 0\nnet.train(False)\n\npath = f\"save_model//CNN//{learning_rate}\"\nos.makedirs(path, exist_ok=True)\nmodel_path = path\nnet.load_state_dict(\n torch.load(f\"{model_path}//{optimizer_name}_cnn.pth\")) # same as above: load the trained weights\n\ncorrect, totalLoss = 0, 0\nnet.train(False)\n\nwith torch.no_grad():\n for step, (testImgs, labels) in enumerate(allDataLoader):\n testImgs = testImgs.to(device)\n labels = labels.to(device)\n net.zero_grad()\n outputs = net(testImgs)\n loss = lossF(outputs, labels)\n predictions = torch.argmax(outputs, dim=1)\n print(\n classification_report(labels.cpu().numpy(),\n predictions.cpu().numpy(),\n digits=3))\n accuracy = torch.sum(predictions == labels) / labels.shape[0]\n print(accuracy)","repo_name":"sunwuzhou03/Basic-Machine-Learning-Algorithm","sub_path":"Machine Learning Course Design 2/机器学习实验课设/model_test/卷积神经网络测试.py","file_name":"卷积神经网络测试.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73412528213","text":"import torch\nimport numpy as np\nfrom torchvision import transforms\nimport random\nimport cv2\nimport os\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append('./model')\nfrom PIL import Image\n\ndef calc_patch_size(func):\n def wrapper(args):\n if args.scale == 2:\n args.patch_size = 48\n elif args.scale == 3:\n args.patch_size = 48\n elif args.scale == 4:\n args.patch_size = 48\n else:\n raise Exception('Scale Error', args.scale)\n return func(args)\n return wrapper\n\ndef convert_rgb_to_y(img, dim_order='hwc'):\n if dim_order == 'hwc':\n return 16. + (64.738 * img[..., 0] + 129.057 * img[..., 1] + 25.064 * img[..., 2]) / 256.\n else:\n return 16. 
+ (64.738 * img[0] + 129.057 * img[1] + 25.064 * img[2]) / 256.\n\n\ndef convert_rgb_to_ycbcr(img, dim_order='hwc'):\n if dim_order == 'hwc':\n y = 16. + (64.738 * img[..., 0] + 129.057 * img[..., 1] + 25.064 * img[..., 2]) / 256.\n cb = 128. + (-37.945 * img[..., 0] - 74.494 * img[..., 1] + 112.439 * img[..., 2]) / 256.\n cr = 128. + (112.439 * img[..., 0] - 94.154 * img[..., 1] - 18.285 * img[..., 2]) / 256.\n else:\n y = 16. + (64.738 * img[0] + 129.057 * img[1] + 25.064 * img[2]) / 256.\n cb = 128. + (-37.945 * img[0] - 74.494 * img[1] + 112.439 * img[2]) / 256.\n cr = 128. + (112.439 * img[0] - 94.154 * img[1] - 18.285 * img[2]) / 256.\n\n return np.array([y, cb, cr]).transpose([1, 2, 0]) ### chw -> hwc; the returned array has float dtype\n\ndef convert_ycbcr_to_rgb(img, dim_order='hwc'):\n if dim_order == 'hwc':\n r = 298.082 * img[..., 0] / 256. + 408.583 * img[..., 2] / 256. - 222.921\n g = 298.082 * img[..., 0] / 256. - 100.291 * img[..., 1] / 256. - 208.120 * img[..., 2] / 256. + 135.576\n b = 298.082 * img[..., 0] / 256. + 516.412 * img[..., 1] / 256. - 276.836\n else:\n r = 298.082 * img[0] / 256. + 408.583 * img[2] / 256. - 222.921\n g = 298.082 * img[0] / 256. - 100.291 * img[1] / 256. - 208.120 * img[2] / 256. + 135.576\n b = 298.082 * img[0] / 256. + 516.412 * img[1] / 256. - 276.836\n return np.array([r, g, b]).transpose([1, 2, 0])\n\n'''\n# =======================================\n# image processing process on numpy image\n# channel_convert(in_c, tar_type, img_list):\n# rgb2ycbcr(img, only_y=True):\n# bgr2ycbcr(img, only_y=True):\n# ycbcr2rgb(img):\n# modcrop(img_in, scale):\n# =======================================\n'''\ndef rgb2ycbcr(img, only_y=True):\n '''same as matlab rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img = img.astype(np.float32)\n if in_img_type != np.uint8: #### if the input is not uint8 it is a float in [0, 1], so scale it by 255\n img *= 255.\n # convert\n if only_y:\n rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0\n else:\n rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],\n [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef ycbcr2rgb(img):\n '''same as matlab ycbcr2rgb\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img = img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],\n [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\ndef bgr2ycbcr(img, only_y=True):\n '''bgr version of rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img = img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n if only_y:\n rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0\n else:\n rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],\n [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\ndef modcrop(img_in, scale):\n # img_in: Numpy, HWC or HW\n img = np.copy(img_in)\n if img.ndim == 2:\n H, W = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r]\n elif img.ndim == 3:\n H, W, C = img.shape\n H_r, 
W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r, :]\n else:\n raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))\n return img\n\ndef shave(img_in, border=0):\n # img_in: Numpy, HWC or HW\n img = np.copy(img_in)\n h, w = img.shape[:2]\n img = img[border:h - border, border:w - border]\n return img\n\ndef preprocess_to_y(img, device='cpu'):\n \"\"\"\n Convert an image to its y-channel data.\n \"\"\"\n img = np.array(img).astype(np.float32) ## convert the PIL image to a numpy array of dtype np.float32\n ycbcr = convert_rgb_to_ycbcr(img)\n x = ycbcr[..., 0]\n x /= 255.\n x = torch.from_numpy(x).to(device)\n x = x.unsqueeze(0).unsqueeze(0)\n return x, ycbcr #### converted to a tensor here because this produces the test input images\ndef preprocess_to_tesnor(img, device):\n \"\"\"\n Convert an image to a tensor.\n img: the input may be in PIL or numpy format\n \"\"\"\n img=transforms.ToTensor()(img).to(device)\n if img.dim()==3:\n img=img.unsqueeze(0)\n return img\ndef preprocess_to_pil(tensor):\n if tensor.dim()>3:\n tensor=tensor.squeeze(0)\n img=transforms.ToPILImage()(tensor)\n\n return img\ndef get_hr_lr_bicubic(path,scale):\n image = Image.open(path).convert('RGB')\n image_width = (image.width // scale) * scale\n image_height = (image.height // scale) * scale\n hr = image.resize((image_width, image_height), resample=Image.BICUBIC)\n\n lr = hr.resize((hr.width // scale, hr.height // scale), resample=Image.BICUBIC)\n bicubic = lr.resize((lr.width *scale, lr.height *scale), resample=Image.BICUBIC)\n return hr,lr,bicubic\n\ndef merge(y,ycbcr):\n \"\"\"\n Stitch the y-channel data back together with the cb/cr channels.\n y:\n ycbcr: the array produced by convert_rgb_to_ycbcr\n \"\"\"\n if torch.is_tensor(y):\n y=y.squeeze(0)\n y=transforms.ToPILImage()(y)\n y=np.array(y)\n output = np.array([y, ycbcr[..., 1], ycbcr[..., 2]]).transpose([1, 2, 0]) ###hwc chw\n output = np.clip(convert_ycbcr_to_rgb(output), 0.0, 255.0).astype(np.uint8)\n output = Image.fromarray(output)\n return output\n\ndef calc_psnr(img1, img2):\n # imdff = torch.clamp(img1, 0, 1) - torch.clamp(img2, 0, 1)\n \"\"\"\n img1:\n img2: both are tensors\n \"\"\"\n return 10. * torch.log10(1. / torch.mean((img1 - img2) ** 2))\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n\ndef random_vertically_flip(image: np.ndarray, p: float = 0.5) -> np.ndarray:\n \"\"\"Randomly flip the image vertically.\n Args:\n image (np.ndarray): Image read with OpenCV\n p (optional, float): Vertically flip probability. 
Default: 0.5\n\n Returns:\n vertically_flip_image (np.ndarray): image after vertically flip\n\n \"\"\"\n if random.random() < p:\n vertically_flip_image = cv2.flip(image, 0)\n else:\n vertically_flip_image = image\n\n return vertically_flip_image\n\ndef random_crop_pil(image, crop_shape):\n \"\"\"\n Randomly crop an image.\n Note: the cropping is done with PIL, so image must be a PIL image.\n crop_shape: must be a (width, height) sequence\n \"\"\"\n ### image is read with PIL; crop_shape is the crop size\n nw = random.randint(0, image.size[0] - crop_shape[0]) ## coordinates of the crop inside the original image\n nh = random.randint(0, image.size[1] - crop_shape[1])\n image_crop = image.crop((nw, nh, nw + crop_shape[0], nh + crop_shape[1]))\n\n return image_crop\ndef center_crop(image: np.ndarray, image_size: int) -> np.ndarray:\n \"\"\"Crop small image patches from one image center area.\n\n Args:\n image (np.ndarray): The input image for `OpenCV.imread`.\n image_size (int): The size of the captured image area.\n\n Returns:\n patch_image (np.ndarray): Small patch image\n\n \"\"\"\n image_height, image_width = image.shape[:2]\n\n # Just need to find the top and left coordinates of the image\n top = (image_height - image_size) // 2\n left = (image_width - image_size) // 2\n\n # Crop image patch\n patch_image = image[top:top + image_size, left:left + image_size, ...]\n\n return patch_image\n\n\ndef random_crop(image: np.ndarray, image_size: int) -> np.ndarray:\n \"\"\"Crop small image patches from one image at a random position.\n\n Args:\n image (np.ndarray): The input image for `OpenCV.imread`.\n image_size (int): The size of the captured image area.\n\n Returns:\n patch_image (np.ndarray): Small patch image\n\n \"\"\"\n image_height, image_width = image.shape[:2]\n\n # Just need to find the top and left coordinates of the image\n top = random.randint(0, image_height - image_size)\n left = random.randint(0, image_width - image_size)\n\n # Crop image patch\n patch_image = image[top:top + image_size, left:left + image_size, ...]\n\n return patch_image\n\n\ndef random_rotate(image,\n angles: list,\n center = None,\n scale_factor = 1.0) -> np.ndarray:\n \"\"\"Rotate an image by a random angle.\n\n Args:\n image (np.ndarray): Image read with OpenCV\n angles (list): Rotation angle range\n center (optional, tuple[int, int]): High resolution image selection center point. Default: ``None``\n scale_factor (optional, float): scaling factor. Default: 1.0\n\n Returns:\n rotated_image (np.ndarray): image after rotation\n\n \"\"\"\n image_height, image_width = image.shape[:2]\n\n if center is None:\n center = (image_width // 2, image_height // 2)\n\n # Random select specific angle\n angle = random.choice(angles)\n matrix = cv2.getRotationMatrix2D(center, angle, scale_factor)\n rotated_image = cv2.warpAffine(image, matrix, (image_width, image_height))\n\n return rotated_image\n\n\ndef random_horizontally_flip(image: np.ndarray, p: float = 0.5) -> np.ndarray:\n \"\"\"Flip the image horizontally at random.\n\n Args:\n image (np.ndarray): Image read with OpenCV\n p (optional, float): Horizontally flip probability. 
Default: 0.5\n\n Returns:\n horizontally_flip_image (np.ndarray): image after horizontally flip\n\n \"\"\"\n if random.random() < p:\n horizontally_flip_image = cv2.flip(image, 1)\n else:\n horizontally_flip_image = image\n\n return horizontally_flip_image\n\n\n\ndef psnr(img1, img2,border=0):\n \"\"\"\n function: compute the PSNR of two images\n img1: may be a tensor, a numpy array, or a PIL image\n \"\"\"\n ## the border is usually stripped off here\n if torch.is_tensor(img1):\n if img1.dim() > 3:\n # more than 3 dims means there is a batch dimension that must be removed\n img1 = img1.squeeze(0)\n img1 = transforms.ToPILImage()(img1)\n if torch.is_tensor(img2):\n if img2.dim() > 3:\n img2 = img2.squeeze(0)\n img2 = transforms.ToPILImage()(img2)\n img1=np.array(img1)\n img2=np.array(img2)\n h, w = img1.shape[:2]\n img1 = img1[border:h - border, border:w - border]\n img2 = img2[border:h - border, border:w - border]\n\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n mse = np.mean((img1 - img2) ** 2)\n if mse == 0:\n return float('inf')\n return 20 * np.log10(255.0 / np.sqrt(mse))\n # imdff = np.array(img2).astype(np.float64) - np.array(img1).astype(np.float64)\n # rmse = np.sqrt(np.mean(imdff**2))\n # ps = 20*np.log10(255/rmse)\n # return ps\n\n\ndef calculate_ssim(img1, img2):\n \"\"\"\n Compute SSIM.\n \"\"\"\n C1 = (0.01 * 255) ** 2\n C2 = (0.03 * 255) ** 2\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n kernel = cv2.getGaussianKernel(11, 1.5)\n window = np.outer(kernel, kernel.transpose())\n mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid\n mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]\n mu1_sq = mu1 ** 2\n mu2_sq = mu2 ** 2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq\n sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq\n sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n (sigma1_sq + sigma2_sq + C2))\n return ssim_map.mean()\n\ndef ssim(img1, img2,border=0):\n '''calculate SSIM\n the same outputs as MATLAB's\n img1, img2: [0, 255]\n inputs may be numpy arrays or tensors\n '''\n if torch.is_tensor(img1):\n if img1.dim()>3:\n img1=img1.squeeze(0)\n img1=transforms.ToPILImage()(img1)\n if torch.is_tensor(img2):\n if img2.dim()>3:\n img2=img2.squeeze(0)\n img2=transforms.ToPILImage()(img2)\n img1=np.array(img1).astype(np.float64)\n img2=np.array(img2).astype(np.float64)\n if not img1.shape == img2.shape:\n raise ValueError('Input images must have the same dimensions.')\n h, w = img1.shape[:2]\n img1 = img1[border:h - border, border:w - border]\n img2 = img2[border:h - border, border:w - border]\n if img1.ndim == 2:\n return calculate_ssim(img1, img2)\n elif img1.ndim == 3:\n if img1.shape[2] == 3:\n ssims = []\n for i in range(3):\n ssims.append(calculate_ssim(img1[..., i], img2[..., i]))\n return np.array(ssims).mean()\n elif img1.shape[2] == 1:\n return calculate_ssim(np.squeeze(img1), np.squeeze(img2))\n else:\n raise ValueError('Wrong input image dimensions.')\n\ndef show_tensor_img(tensor_img):\n to_pil = transforms.ToPILImage()\n img1 = tensor_img.cpu().clone()\n if img1.dim()==4:\n img1=img1.squeeze(0)\n img1 = to_pil(img1)\n plt.imshow(img1,cmap='gray')\n\ndef plot_data_loader_image(data_loader):\n \"\"\"\n Show every image in a dataloader.\n col: how many images to show per row\n \"\"\"\n batch_size = data_loader.batch_size\n col = batch_size\n row=int(batch_size*len(data_loader)/col)\n for i,data in enumerate(data_loader):\n lr, hr = data ### data is one batch\n img=hr\n for j in range(col):\n plt.subplot(row, col, i*col+j+1)\n plt.xticks([]) # remove the x-axis ticks\n 
plt.yticks([]) # remove the y-axis ticks\n # plt.title('{}batch {} figure'.format(i,i*col+j+1))\n show_tensor_img(img[j])\n if (i * col + j+1) >=(batch_size * len(data_loader)):\n break\n plt.show()\n\n\ndef seed_torch(seed=1218):\n seed = int(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.enabled = False\n\ndef save_checkpoint(path,model, epoch, train_loss, test_loss, optimizer,test_psnr,test_ssim,best_epoch=0):\n model_out_path = os.path.join(path , \"model.pth\") ### destination path for the checkpoint\n state = {\"epoch\": epoch,\n \"model\": model,\n 'train_loss': train_loss,\n 'test_loss': test_loss,\n 'optimizer': optimizer.state_dict(),\n 'test_psnr':test_psnr,\n 'test_ssim':test_ssim,\n 'best_epoch':best_epoch\n }\n # check path status\n if not os.path.exists(path):\n os.makedirs(path)\n # save model\n torch.save(state, model_out_path)\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in ['jpeg', 'JPEG', 'jpg', 'png', 'JPG', 'PNG', 'gif','bmp'])\n\ndef train_lr_transform(crop_size,scale):\n return transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize(crop_size//scale,interpolation=Image.BICUBIC),\n transforms.ToTensor()\n ])\n\ndef train_hr_transform(crop_size):\n return transforms.Compose([\n # transforms.CenterCrop(crop_size),\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.ToTensor()\n ])\n\ndef test_hr_transform():\n return transforms.Compose([\n transforms.ToTensor() ###hr.shape[2],hr.shape[1]\n ])\ndef test_lr_transform(w,h,scale):\n w=int(w/scale)\n h=int(h/scale)\n return transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((h,w),interpolation=Image.BICUBIC),\n transforms.ToTensor()\n ])\n\n","repo_name":"laity-sir/basic_sr","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35602713904","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 17 12:12:02 2020\n\n@author: ashutosh.k\n\"\"\"\n\n\"\"\"\nHousing Price Prediction\nPython Script (from Jupyter Notebook)\nData Source: https://www.kaggle.com/c/house-prices-advanced-regression-techniques\n\"\"\"\n\n#Import Libraries\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.linear_model import Lasso\nfrom sklearn import metrics\n\n\n### * * * * * * * CONFIG * * * * * * * * * * \n\nDATAPATH = \"../data/HousingPrediction/\"\nTRAIN_FILE = 'train.csv'\nTEST_FILE = 'test.csv'\n\nTARGET = 'SalePrice'\n## Features to keep\nKEEP = ['MSSubClass', 'MSZoning', 'Neighborhood',\n 'OverallQual', 'OverallCond', 'YearRemodAdd',\n 'RoofStyle', 'MasVnrType', 'BsmtQual', 'BsmtExposure',\n 'HeatingQC', 'CentralAir', '1stFlrSF', 'GrLivArea',\n 'BsmtFullBath', 'KitchenQual', 'Fireplaces', 'FireplaceQu',\n 'GarageType', 'GarageFinish', 'GarageCars', 'PavedDrive',\n 'LotFrontage','YrSold'] #Final feature to keep in data\n\nNUMERICAL_FEATURES = ['LotFrontage'] #Numerical\nCATEGORICAL_FEATURES = ['MasVnrType', 'BsmtQual', 'BsmtExposure','FireplaceQu', \n 'GarageCars','GarageType', 
'GarageFinish','MSZoning','BsmtFullBath',\n 'KitchenQual'] #Categorical\n\nFEATURES_TO_ENCODE = ['MSZoning', 'Neighborhood', 'RoofStyle', 'MasVnrType','BsmtQual', \n 'BsmtExposure', 'HeatingQC', 'CentralAir','KitchenQual', 'FireplaceQu', \n 'GarageType', 'GarageFinish','PavedDrive'] #Features to Encode\n\nTEMPORAL_FEATURES = ['YearRemodAdd']\nTEMPORAL_COMPARISON = 'YrSold'\n\nLOG_FEATURES = ['LotFrontage', '1stFlrSF', 'GrLivArea'] #Features for Log Transform\n\nDROP_FEATURES = ['YrSold'] #Features to Drop\n\n\n### * * * * * * * CONFIG ENDS * * * * * * * * * * \n\n\n\n## Read Data\ntrain = pd.read_csv(DATAPATH+TRAIN_FILE)\ntest = pd.read_csv(DATAPATH+TEST_FILE)\n\n#separating SalePrice in Y\ny = train[TARGET]\ntrain.drop([TARGET], axis=1, inplace=True)\n\n#Combine train and test data\ndata = pd.concat([train,test], axis=0)\n\ndata = data[KEEP].copy()\n\n#Numerical Imputer\nfor var in NUMERICAL_FEATURES:\n data[var].fillna(data[var].mode()[0], inplace=True)\n\n\n#Categorical Imputer\nfor var in CATEGORICAL_FEATURES:\n data[var].fillna(data[var].mode()[0], inplace=True)\n\n#Rare label Categorical Imputer\nencoder_dict_ = {}\ntol=0.05\n\nfor var in FEATURES_TO_ENCODE:\n # the encoder will learn the most frequent categories\n t = pd.Series(data[var].value_counts() / float(len(data)))\n # frequent labels:\n encoder_dict_[var] = list(t[t >= tol].index)\n \nfor var in FEATURES_TO_ENCODE:\n data[var] = np.where(data[var].isin(\n encoder_dict_[var]), data[var], 'Rare')\n \n#Ordinal encoding of the categorical features\nencoder_dict_ ={}\nfor var in FEATURES_TO_ENCODE:\n t = data[var].value_counts().sort_values(ascending=True).index #Sorting on freq, should be done on target, just saving some time here\n encoder_dict_[var] = {k:i for i,k in enumerate(t,0)}\n \n## Mapping using the encoder dictionary\nfor var in FEATURES_TO_ENCODE:\n data[var] = data[var].map(encoder_dict_[var])\n \n\n#Temporal Variables\nfor var in TEMPORAL_FEATURES:\n data[var] = data[var]-data[TEMPORAL_COMPARISON]\n \n# Log Transformations\nfor var in LOG_FEATURES:\n data[var] = np.log(data[var])\n \n# Drop Features\ndata.drop(DROP_FEATURES, axis=1, inplace=True)\n\n##############################################################################\n\n\n## Split Train and Test\n\ntrain_clean = data.iloc[:train.shape[0],:]\ntest_clean = data.iloc[train.shape[0]:,:]\n\n#Split Train and Test\nX_train, X_test, y_train, y_test = train_test_split(train_clean, y, \n random_state=42, test_size=0.15)\ny_train = np.log(y_train)\ny_test = np.log(y_test)\n\n# Run Model\nmodel = Lasso(alpha=0.005, random_state=0)\nmodel.fit(X_train,y_train)\npred = model.predict(X_test)\n\n# Model Evaluation\n#MSE\nprint(\"MSE : \",metrics.mean_squared_error(pred, y_test))\n#MAE\nprint(\"MAE : \",metrics.mean_absolute_error(pred, y_test))\n#RMSE\nprint(\"RMSE : \",np.sqrt(metrics.mean_squared_error(pred, y_test)))\n#R2\nprint(\"R-sq : \",metrics.r2_score(pred, y_test))\n\n\n# Prediction on actual Test Data\n#test_clean is the transformed original test data; x_test is the 15% split from training data, \n#apologies for similar names\npred_test = np.exp(model.predict(test_clean))\n\nprint(\"Top 10 predictions: \",pred_test[1:10])","repo_name":"ashukumar27/blog_codes","sub_path":"03_python_script.py","file_name":"03_python_script.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"37406660104","text":"time = input(\"Enter 24hr time in HH:MM:SS format:\")\ncolons = time.count(':')\nif 
colons != 2:\n print(\"Incorrect format, please use the exact HH:MM:SS format.\")\n quit()\n\ntimeNoColons = time.replace(\":\", \"\")\ntimeIsNumeric = timeNoColons.isdigit()\nif not timeIsNumeric:\n print(\"Please use only numbers for time values.\")\n quit()\n\ndef twoDigitCheck(value):\n if len(value) == 2:\n return True\n else:\n return False\n\ntimeBreakUp = time.split(\":\")\nhours = int(timeBreakUp[0])\nminutes = int(timeBreakUp[1])\nseconds = int(timeBreakUp[2])\n\nfor value in timeBreakUp:\n if twoDigitCheck(value) == False:\n print(\"Incorrect format. Please enter values in two digit format (e.g. \\\"04:15:09\\\")\")\n quit()\n\nif hours < 0 or hours > 23:\n print(\"Hours value must be between 0 and 23.\")\n quit()\n\nif minutes < 0 or minutes > 59:\n print(\"Minutes value must be between 0 and 59.\")\n quit()\n\nif seconds < 0 or seconds > 59:\n print(\"Seconds value must be between 0 and 59.\")\n quit()\n\nprint(timeNoColons)\n","repo_name":"newkstime/PythonLabs","sub_path":"Lab10/Lab10P3.py","file_name":"Lab10P3.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26125978471","text":"# import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport plotly.express as px\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n#🎯for checking purpose uncomment these lines\r\n# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\ntemp = pd.read_csv('data/yesdeepakmittal_tweets.csv')\r\ntemp = pd.DataFrame(temp.groupby('time')['sent_value'].sum())\r\ntemp.index.name = ''\r\ntemp = temp.reset_index()\r\ntemp.columns = ['time','sent_value']\r\nfig = px.line(x=temp['time'], y=temp['sent_value'],)\r\nfig.update_layout(template='plotly_white',height = 400,margin={\"r\": 150, \"t\": 50, \"l\": 150, \"b\": 50},\r\n title='')\r\n\r\ndel temp\r\n#🎯for checking purpose, use app.layout instead of layout\r\nlayout = html.Div([ \r\n html.H1(children='Social Media'), \r\n html.H6(children='Visualising Customer Tweets Sentiment Score'), \r\n dcc.Graph(id='tw-sentiment', figure=fig),\r\n ],\r\n style={'color': 'navy', 'textAlign': 'center'})\r\n\r\n#🎯for checking purpose uncomment these lines\r\n# if __name__ == '__main__':\r\n# app.run_server(debug=True, port=8049, host='127.0.0.1')","repo_name":"yesdeepakmittal/SupplyChainModel","sub_path":"socialmedia.py","file_name":"socialmedia.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"9964896877","text":"from __future__ import annotations\n\nimport dataclasses\nimport pickle\n\nimport requests\n\n\n@dataclasses.dataclass\nclass App:\n client_id: str\n client_secret: str\n irc_token: str\n app_token: str\n redirect_url: str\n webhook_secret: bytes\n\n def store(self) -> None:\n with open(\"tokens/_app.token\", \"wb\") as handle:\n pickle.dump(self, handle)\n\n @classmethod\n def load(cls) -> App:\n with open(\"tokens/_app.token\", \"rb\") as handle:\n data = pickle.load(handle)\n\n if not isinstance(data, App):\n raise TypeError(\"Found incorrect token type: \" + str(type(data)))\n\n return data\n\n\n@dataclasses.dataclass\nclass Token:\n user_id: int\n user: str\n access_token: str\n refresh_token: str\n\n def store(self) -> None:\n with open(f\"tokens/{self.user}.token\", \"wb\") as handle:\n pickle.dump(self, handle)\n\n 
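# Exchange the stored refresh token for a new access/refresh token pair at Twitch's OAuth endpoint; stores the result and returns False if the grant was rejected.\n 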
def renew(self, app: App) -> bool:\n token_request = requests.post(\n \"https://id.twitch.tv/oauth2/token\",\n {\n \"client_id\": app.client_id,\n \"client_secret\": app.client_secret,\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n },\n timeout=15,\n )\n\n token = token_request.json()\n\n if \"access_token\" not in token:\n return False\n\n self.access_token = token[\"access_token\"]\n self.refresh_token = token[\"refresh_token\"]\n\n self.store()\n\n return True\n\n @classmethod\n def load(cls, user: str) -> Token:\n with open(f\"tokens/{user}.token\", \"rb\") as handle:\n data = pickle.load(handle)\n\n if not isinstance(data, Token):\n raise TypeError(\"Found incorrect token type: \" + str(type(data)))\n\n return data\n\n\ndef refresh_app_token() -> App:\n app = App.load()\n\n response = requests.post(\n \"https://id.twitch.tv/oauth2/token\",\n {\n \"client_id\": app.client_id,\n \"client_secret\": app.client_secret,\n \"grant_type\": \"client_credentials\",\n \"scope\": \"channel:read:redemptions\",\n },\n timeout=15,\n )\n\n app.app_token = response.json()[\"access_token\"]\n app.store()\n\n return app\n","repo_name":"javajawa/snerge-bot","sub_path":"src/snerge/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38562136161","text":"import boto3\n\nec2_tags= boto3.client(service_name='ec2', region_name='us-east-1')\n\nsts = boto3.client(service_name='sts', region_name='us-east-1')\n\nresponse = sts.get_caller_identity()\naccount_id = response.get('Account')\n\n#my_snaps = ec2_tags.describe_snapshots(OwnerIds=[account_id])\n\n\n\nfor each_item in ec2_tags.describe_volumes()['Volumes']:\n\tif not \"Tags\" in each_item and each_item['State']=='available':\n\t\tprint('Deleting ',each_item['VolumeId'])\n\t\tec2_tags.delete_volume(VolumeId=each_item['VolumeId'])\nprint(\"Delete all unused and untagged volumes.\")","repo_name":"AlCode88/01-boto3-lambda-repo","sub_path":"02-project-boto-lambda-examples/Session/lambda_terraform_projects/lambda_SDK_codes/list_tags.py","file_name":"list_tags.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12913908827","text":"import logging\nimport getopt\nimport sys\n\nfrom pyswing.utils.Logger import Logger\nfrom pyswing.objects.market import Market\nfrom pyswing.objects.equity import Equity\nfrom pyswing.objects.indicators.indicatorSMA import IndicatorSMA\nfrom pyswing.objects.indicators.indicatorEMA import IndicatorEMA\nfrom pyswing.objects.indicators.indicatorBB20 import IndicatorBB20\nfrom pyswing.objects.indicators.indicatorROC import IndicatorROC\nfrom pyswing.objects.indicators.indicatorMACD import IndicatorMACD\nfrom pyswing.objects.indicators.indicatorSTOCH import IndicatorSTOCH\nfrom pyswing.objects.indicators.indicatorRSI import IndicatorRSI\nfrom pyswing.objects.indicators.indicatorAROON import IndicatorAROON\nfrom pyswing.objects.indicators.indicatorADX import IndicatorADX\nfrom pyswing.objects.indicators.indicatorDX import IndicatorDX\nfrom pyswing.objects.indicators.indicatorADI import IndicatorADI\nfrom pyswing.utils.TeamCity import TeamCity\n\nimport pyswing.database\n\n\ndef updateIndicators(argv):\n \"\"\"\n Update Indicators.\n\n :param argv: Command Line Parameters.\n\n -n = Name\n\n Example:\n\n python -m pyswing.UpdateIndicators -n asx\n \"\"\"\n\n Logger.log(logging.INFO, 
\"Log Script Call\", {\"scope\":__name__, \"arguments\":\" \".join(argv)})\n Logger.pushLogData(\"script\", __name__)\n\n marketName = \"\"\n\n try:\n shortOptions = \"n:dh\"\n longOptions = [\"marketName=\", \"debug\", \"help\"]\n opts, __ = getopt.getopt(argv, shortOptions, longOptions)\n except getopt.GetoptError as e:\n Logger.log(logging.ERROR, \"Error Reading Options\", {\"scope\": __name__, \"exception\": str(e)})\n usage()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in (\"-d\", \"--debug\"):\n Logger().setLevel(logging.DEBUG)\n elif opt in (\"-h\", \"--help\"):\n print(\"?\")\n usage()\n sys.exit()\n elif opt in (\"-n\", \"--marketName\"):\n marketName = arg\n\n if marketName != \"\":\n\n pyswing.database.initialiseDatabase(marketName)\n\n Logger.log(logging.INFO, \"Update Indicators\", {\"scope\":__name__, \"market\":marketName})\n\n tickerCodesRelativeFilePath = \"resources/%s.txt\" % (marketName)\n\n market = Market(tickerCodesRelativeFilePath)\n\n # Market Indicators\n adiIndicator = IndicatorADI()\n adiIndicator.updateIndicator()\n\n # Equity Indicators\n for index, row in market.tickers.iterrows():\n tickerCode = row[0]\n equity = Equity(tickerCode)\n equityDataFrame = equity.dataFrame()\n\n smaIndicator = IndicatorSMA(equityDataFrame, tickerCode)\n smaIndicator.updateIndicator()\n\n emaIndicator = IndicatorEMA(equityDataFrame, tickerCode)\n emaIndicator.updateIndicator()\n\n bbIndicator = IndicatorBB20(equityDataFrame, tickerCode)\n bbIndicator.updateIndicator()\n\n rocIndicator = IndicatorROC(equityDataFrame, tickerCode)\n rocIndicator.updateIndicator()\n\n macdIndicator = IndicatorMACD(equityDataFrame, tickerCode)\n macdIndicator.updateIndicator()\n\n stochIndicator = IndicatorSTOCH(equityDataFrame, tickerCode)\n stochIndicator.updateIndicator()\n\n rsiIndicator = IndicatorRSI(equityDataFrame, tickerCode)\n rsiIndicator.updateIndicator()\n\n adxIndicator = IndicatorADX(equityDataFrame, tickerCode)\n adxIndicator.updateIndicator()\n\n aroonIndicator = IndicatorAROON(equityDataFrame, tickerCode)\n aroonIndicator.updateIndicator()\n\n dxIndicator = IndicatorDX(equityDataFrame, tickerCode)\n dxIndicator.updateIndicator()\n\n TeamCity.setBuildResultText(\"Updated Indicators\")\n\n else:\n Logger.log(logging.ERROR, \"Missing Options\", {\"scope\": __name__, \"options\": str(argv)})\n usage()\n sys.exit(2)\n\n\ndef usage():\n print(\"\")\n print(\"usage:\")\n print(\" UpdateIndicators.py -n name [-d] [-h]\")\n print(\"\")\n print(\"arguments:\")\n print(\" -n, --name Name\")\n print(\"\")\n print(\"optional arguments:\")\n print(\" -d, --debug Change the Log Level to Debug\")\n print(\" -h, --help Display Usage Information\")\n\n\nif __name__ == \"__main__\":\n updateIndicators(sys.argv[1:])","repo_name":"garyjoy/pyswing","sub_path":"pyswing/UpdateIndicators.py","file_name":"UpdateIndicators.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14074409944","text":"from channels.auth import AuthMiddlewareStack\nfrom rest_framework_simplejwt.tokens import TokenError\nfrom rest_framework_simplejwt.tokens import AccessToken\nfrom rest_framework_simplejwt.backends import TokenBackend\nfrom django.contrib.auth.models import AnonymousUser\nfrom prosocial.models import CustomMember\nfrom channels.db import database_sync_to_async\n\n\n@database_sync_to_async\ndef get_user(valid_data):\n return CustomMember.objects.get(id=valid_data['user_id'])\n\nclass TokenAuthMiddleware:\n 
\"\"\"\n Custom middleware (insecure) that takes user IDs from the query string.\n \"\"\"\n\n def __init__(self, inner):\n # Store the ASGI application we were passed\n self.inner = inner\n\n def __call__(self, scope):\n return TokenAuthMiddlewareInstance(scope, self)\n \n\nclass TokenAuthMiddlewareInstance:\n \"\"\"\n Token authorization middleware for Django Channels 2\n \"\"\"\n\n def __init__(self, scope, middleware):\n self.middleware = middleware\n self.scope = dict(scope)\n self.inner = self.middleware.inner\n\n async def __call__(self, send, receive):\n headers = dict(self.scope['headers'])\n print(\"Headers\", headers)\n if b'sec-websocket-protocol' in headers:\n\n try:\n token_name, token_key = headers[b'sec-websocket-protocol'].decode().split('%space%')\n if token_name == 'Bearer':\n valid_data = TokenBackend(algorithm='HS256').decode(token_key, verify=False)\n print(valid_data['user_id'])\n # print(\"Access_token\", access_token)\n # user = CustomMember.objects.get(id=valid_data['user_id'])\n user = await get_user(valid_data)\n print(\"Token key: \", token_key)\n self.scope['user'] = user\n print(user)\n print(\"Embedded user to header\")\n except TokenError:\n self.scope['user'] = AnonymousUser()\n inner = self.inner(self.scope)\n return await inner(receive, send)\n\nTokenAuthMiddlewareStack = lambda inner: TokenAuthMiddleware(AuthMiddlewareStack(inner))","repo_name":"proptitclub/prosocial-backend","sub_path":"chat/jwt_authentication.py","file_name":"jwt_authentication.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11948917062","text":"import sys\n\nif __name__ == '__main__':\n N = int(input())\n homework = []\n check = [False] * N\n last_day = 0\n\n for _ in range(N):\n d, w = map(int, sys.stdin.readline().split())\n last_day = max(last_day, d)\n homework.append((w, d))\n homework.sort(reverse=True)\n ans = 0\n for day in range(last_day, 0, -1):\n for i in range(N):\n if check[i]: continue\n if homework[i][1] >= day:\n check[i] = True\n ans += homework[i][0]\n break\n print(ans)","repo_name":"Team-NTO/NTO","sub_path":"HyeonJinGithub/2021-03-11/13904 과제.py","file_name":"13904 과제.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"15499631123","text":"# Write your code here\nt = int(input())\n \n \ndef bellman_ford(size, edges):\n costs = [float('-inf')] * size\n costs[0] = 0\n for _ in range(size - 1):\n for u, v, w in edges:\n costs[v] = max(costs[v], costs[u] + w)\n for u, v, w in edges:\n if costs[v] < costs[u] + w:\n return True\n return False\n \n \nfor _ in range(t):\n n, m = map(int, input().strip().split())\n trade = []\n for _ in range(m):\n i, j, c = map(int, input().strip().split())\n i -= 1\n j -= 1\n trade.append((i, j, c))\n print('Yes' if bellman_ford(n, trade) else 'No')\n","repo_name":"Chiki1601/Hackerearth-Solutions","sub_path":"Algorithms/Graphs/Shortest Path Algorithms/Monk's Business Day.py","file_name":"Monk's Business Day.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"37406660104","text":"#---------------------------------------------------------------------------\n# \n#\n# Author: Anders Olson \n#\n# Usage: Requires arcpy and python 3, run as stand-alone script.\n#\n# Description: Script creates x,y,z... 
add description of script...\n# ---------------------------------------------------------------------------\n\nimport arcpy\nimport datetime\nfrom datetime import datetime\n\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\n# ================================#\n# Define variables and environments\n# ================================#\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\n\n# Set the arcpy overwriteOutput ON\narcpy.gp.overwriteOutput = True\n\n# Create output messages for arcpy\ndef outputMessage(msg):\n print(msg)\n arcpy.AddMessage(msg)\n\ndef outputError(msg):\n print(msg)\n arcpy.AddError(msg)\n\n# Start a timer\nstartTime = datetime.now()\n\n# Define some variables\nvarX = 'Some text'\nvarY = 45.2\nworkspace = r'C:\\\Users\\\andolson\\\someGDB.gdb'\n\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\n# ================================#\n# Define functions\n# ================================#\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\n\n'''\nSummary: Function creates x,y,z thing...\n\nParameters:\ninputData -- \ninVar -- \n\nReturns:\noutputData -- \n\n'''\n\ndef someFunction0 (inputData, inVar, outputData):\n varSum = inputData + inVar\n outputMessage(varSum)\n return outputData\n\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\n# ================================#\n# Call Functions & Run Code\n# ================================#\n##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##\n\n# Run a function\nsomeFunction0(4, 3, r'C:\\\Users\\\andolson')\n\n# Run some code\nx = len(varX)\n\nfor n in range(x):\n outputMessage(n)\n\n# Print timer progress\noutputMessage('Task Completed!\\\nFinal run time is: {}'.format(datetime.now() - startTime))\n","repo_name":"andersolson/Python-Projects","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27110405489","text":"import datetime, random\n\ninput_words = []\nwith open(\"words.txt\") as f:\n lines = f.readlines()\n for val in lines:\n input_words.append(val)\n\ndef bubblesort(num_list):\n for i in range(len(num_list)):\n for j in range(len(num_list) - 1, i, -1):\n if (num_list[j] < num_list[j - 1]):\n swap(num_list, j, j - 1)\n\n\ndef swap(A, x, y):\n tmp = A[x]\n A[x] = A[y]\n A[y] = tmp\n\n\ndef performance_bubble_sort(array_size, iterations, input_data):\n array_size_values = []\n time_values = []\n for num in range(6):\n starting_time = datetime.datetime.now()\n for val in range(iterations):\n random_array = [input_words[random.randint(0,len(input_data)-1)] for i in range(array_size)]\n bubblesort(random_array)\n finishing_time = datetime.datetime.now()\n elapsed_us = int((finishing_time - starting_time).total_seconds() * 1e6) # total elapsed microseconds, not just the microsecond component\n array_size_values.append(array_size)\n time_values.append(elapsed_us)\n print(elapsed_us, \" microseconds taken by bubble sort to sort an array of size \", array_size , \"for iterations \", iterations)\n array_size *= 10\n return array_size_values, time_values\n\nperformance_bubble_sort(10,10,input_words)\n","repo_name":"samhassan20/Algorithms","sub_path":"Sorting_Algo_Compare/bubble_String.py","file_name":"bubble_String.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13511016203","text":"def gen(string):\n\twhile True:\n\t\ta = string[:5]\n\t\tb = string[5:]\n\t\tstring = yield a, b\n\n\n\ndef main():\n\tg = gen('1234567890')\n\ta, b = g.send(None)\n\tprint(a, b)\n\n\ta, b = g.send('abcdifghijk')\n\tprint(a, b)\n\nif __name__ == 
'__main__':\n\tmain()","repo_name":"Fire-5/py-proj","sub_path":"OLD/ttt2.py","file_name":"ttt2.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29989918540","text":"def checkAlphabet(inp):\r\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\r\n for i in inp:\r\n if i in alphabet:\r\n return i\r\n\r\ndef sortAlphabet(inp):\r\n for i in range(len(inp)):\r\n for j in range(len(inp)-i-1):\r\n if checkAlphabet(inp[j]) > checkAlphabet(inp[j+1]):\r\n inp[j], inp[j+1] = inp[j+1], inp[j]\r\ninp = input('Enter Input : ').split()\r\nsortAlphabet(inp)\r\nprint(*inp)\r\n","repo_name":"Pskmax/dataStruc","sub_path":"Python/Sorting/sortByAlphabet.py","file_name":"sortByAlphabet.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15755608685","text":"# software entities should be open for extension but closed for modification\n\n#****************************************************\n## let's assume we want to integrate *\n## one more payment method: PayPal *\n#****************************************************\n\nfrom abc import ABC, abstractmethod\n\nclass Order:\n items = []\n quantities =[]\n prices = []\n status = 'open'\n\n def add_item(self, name, quantity, price):\n self.items.append(name)\n self.quantities.append(quantity)\n self.prices.append(price)\n\n def total_price(self):\n total = 0\n for i in range(len(self.prices)):\n total += self.quantities[i] * self.prices[i]\n return total\n\nclass PaymentProcessor(ABC):\n\n @abstractmethod\n def pay(self, order, security_code):\n pass \n\nclass DebitPaymentProcessor(PaymentProcessor):\n def pay(self, order, security_code):\n print('processing debit payment type')\n print(f'verifying security code:{security_code}')\n order.status = \"paid\"\n\nclass CreditPaymentProcessor(PaymentProcessor):\n def pay(self, order, security_code):\n print('processing credit payment type')\n print(f'verifying security code:{security_code}')\n order.status = \"paid\"\n\n\n# adding the PayPal method without changing any existing class \nclass PaypalPaymentProcessor(PaymentProcessor):\n def pay(self, order, security_code):\n print('processing PayPal payment type')\n print(f'verifying security code:{security_code}')\n order.status = \"paid\" \n\nif __name__ == \"__main__\":\n print(\"\", end=\"\\n \\n \\n\")\n print(\"bill\".center(40,\"*\"))\n order = Order()\n order.add_item(\"Keyboard\", 1, 50)\n order.add_item(\"SSD\", 1, 150)\n order.add_item(\"USB cable\", 2, 5)\n\n\n print(f\"total : {order.total_price()}\".center(40))\n processor = PaypalPaymentProcessor()\n processor.pay(order, \"123456\")\n\n print(\"\".center(40,\"*\"))\n print(\"\", end=\"\\n \\n \\n\")","repo_name":"BhuwanSingh248/System-design","sub_path":"solid_principles/order_bill/open_close.py","file_name":"open_close.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20135509733","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_GET\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.core.paginator import Paginator, EmptyPage\n\nfrom .models import Question\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import 
authenticate, login\nfrom .forms import AskForm, AnswerForm, NewUserForm, LoginForm\n\nfrom django.contrib.auth.decorators import login_required\nimport logging\n\n\ndef test(request, *args, **kwargs):\n return HttpResponse('OK')\n\n@require_GET\ndef index(request, *args, **kwargs):\n\n\treturn render(request, 'question/index.html', {'questions': paginate(request, Question.objects.resent_questions()),})\n\n@require_GET\ndef popular(request, *args, **kwargs):\n\treturn render(request, 'question/index.html', {'questions': paginate(request, Question.objects.popular_questions()),})\n\n\n@require_GET\ndef question_details(request, question_id):\n\tquestion = get_object_or_404(Question, id=question_id)\n\tform = AnswerForm(initial = {'question': question_id})\n\t\n\treturn render(request, 'question/details.html', {'question': question, 'form': form})\n\n# @login_required\ndef question_add(request):\n\tif request.method == 'POST':\n\t\tform = AskForm(request.POST)\n\t\tform.instance.author = request.user\n\t\tif form.is_valid():\n\t\t\tquestion = form.save()\n\t\t\turl = question.get_absolute_url()\n\t\t\treturn HttpResponseRedirect(url)\n\telse:\n\t\tform = AskForm()\n\treturn render(request, 'question/add.html', {'form' : form})\n\ndef signup(request):\n\tif request.method == 'POST':\n\t\tlogger = logging.getLogger(__name__)\n\t\tform = NewUserForm(request.POST)\n\t\tlogger.error(str(form))\n\t\tif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tuser = authenticate(username=user.username, password=form.cleaned_data['password'])\n\t\t\tif user is not None:\n\t\t\t\t# log the new user in\n\t\t\t\tlogin(request, user)\n\t\t\t\t# send the new user to the main page\n\t\t\t\treturn HttpResponseRedirect('/')\n\telse:\n\t\tform = NewUserForm()\n\treturn render(request, 'user/signup.html', {'form' : form})\n\ndef login_view(request):\n\tif request.method == 'POST':\n\t\tform = LoginForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])\n\t\t\tif user is not None:\n\t\t\t\tif user.is_active:\n\t\t\t\t\tlogin(request, user)\n\t\t\t\t\t# send the user to the main page\n\t\t\t\t\treturn HttpResponseRedirect('/')\n\telse:\n\t\tform = LoginForm()\n\treturn render(request, 'user/login.html', {'form' : form})\n\n\n# @login_required\ndef answer_add(request):\n\tif request.method == 'POST':\n\t\tform = AnswerForm(request.POST)\n\t\tform.instance.author = request.user\n\t\tif form.is_valid():\n\t\t\tanswer = form.save()\n\t\t\turl = answer.question.get_absolute_url()\n\t\t\treturn HttpResponseRedirect(url)\n\telse:\n\t\tform = AnswerForm()\n\treturn render(request, 'answer/add.html', {'form' : form})\n\ndef paginate(request, qs):\n\ttry:\n\t\tlimit = int(request.GET.get('limit', 10))\n\texcept ValueError:\n\t\tlimit = 10\n\tif limit > 10:\n\t\tlimit = 10\n\ttry:\n\t\tpage = int(request.GET.get('page',1))\n\texcept ValueError:\n\t\traise Http404\n\tpaginator = Paginator(qs, limit)\n\ttry:\n\t\tpage = paginator.page(page)\n\texcept EmptyPage:\n\t\tpage = paginator.page(paginator.num_pages)\n\treturn page\n","repo_name":"NikolayMarkin/StepicWebTechologyCourse","sub_path":"web/ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"69915708375","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 
Fri Jan 18 16:52:12 2019\nOrthogonal Collocation on Finite Elements\nHomework Exercise #2\n\nObjective: Compare orthogonal collocation on finite elements with 3 nodes \nwith a numerical integrator (e.g. ODE15s in MATLAB or ODEINT in Python). \nCalculate the error at each of the solution points for the equation \n(same as for Exercise 1) \n\n@author: rob\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom gekko import GEKKO\n\n# Initial conditions\nx0 = 0.0\ntf = 1.0\nans = np.zeros(5)\ni = 0\nt = np.linspace(0,tf,20)\nx_a = np.zeros(20)\n\nplt.figure(2, clear=True)\n\n# Analytic solution\nfor i in range(20):\n x_a[i] = x0 + 2.0*np.tanh(t[i]*2.0/5.0)\nplt.plot(t,x_a,'r--',label='Analytic')\n \n# solve with ODEINT \ndef model(x,t):\n return (-x**2 + 4.0)/5.0\n\nx_ode = odeint(model,x0,t)\nplt.plot(t,x_ode,'g.',label='ODEINT')\n\n# Solve with 3-node collocation in GEKKO\n# Create model\nm = GEKKO(remote=False)\nu = m.Param(value=4)\nx1 = m.Var()\nx2 = m.Var()\ndx1 = m.Var()\ndx2 = m.Var()\n# Equations (use N array)\nm.Equations([5 * dx1 == -x1**2 + u, \\\n 5 * dx2 == -x2**2 + u, \\\n 0.75*dx1-0.25*dx2==x1-x0, \\\n 1.00*dx1+0.0*dx2==x2-x0])\nm.options.IMODE = 1 # Non-Dynamic Simulation\nm.solve(disp=False)\nt = np.array([0.0,0.5,1.0])\nx_g = np.array([x0,x1[-1],x2[-1]])\nplt.plot(t,x_g,'ro',label='3-node Collocation')\n\nplt.legend(loc='best')\nplt.ylabel('x(t)')\nplt.xlabel('time')\n\nplt.show()","repo_name":"areed145/dyn_opt_class","sub_path":"Homework-B/colloc_ex2.py","file_name":"colloc_ex2.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"71115955734","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 24 19:08:51 2020\n\n@author: grago\n\"\"\"\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\n\niris = datasets.load_iris()\n\nX = iris[\"data\"][:, (2,3)]\ny = (iris[\"target\"] == 2).astype(np.float64)\n\nsvm_clf = Pipeline([\n (\"scaler\", StandardScaler()),\n (\"linear_svc\", LinearSVC(C = 1, loss = \"hinge\")),\n ])\nsvm_clf.fit(X,y)\nprint(svm_clf.predict([[5.5,1.7]]))\n\nfrom sklearn.preprocessing import PolynomialFeatures\n\nX,y = datasets.make_moons(n_samples = 100, noise = 0.15, random_state = 42)\n\npolynomial_svc_clf = Pipeline([\n (\"poly_features\", PolynomialFeatures(degree = 3)),\n (\"scaler\", StandardScaler()),\n (\"svm_clf\", LinearSVC(C = 10, loss = \"hinge\", max_iter= 2000))\n ])\n\npolynomial_svc_clf.fit(X,y)\n\nfrom sklearn.svm import SVC\npoly_kernel_svm_clf = Pipeline([\n (\"scaler\", StandardScaler()),\n (\"svm_clf\", SVC(kernel = \"poly\", degree = 3, coef0 = 1, C =5))\n ])\npoly_kernel_svm_clf.fit(X,y)\n\nrbf_kernel_svm_clf = Pipeline([\n (\"scaler\", StandardScaler()),\n (\"svm_clf\", SVC(kernel = \"rbf\", gamma = 5, C =0.001))\n ])\nrbf_kernel_svm_clf.fit(X,y)\n","repo_name":"ihatebroccoli/HandsOnMachineLearning","sub_path":"SVM - day4.py","file_name":"SVM - day4.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13575433707","text":"#!/usr/bin/env python\nimport getopt, sys, os\nimport numpy as np\nimport pyfits\nfrom pylab import matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes\nfrom 
mpl_toolkits.axes_grid.inset_locator import mark_inset\n\n#fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits'\nfname_ext = sys.argv[1]\nfname = fname_ext.split('.')[0]\nout_fname = fname + '.png'\nprint('displaying ' + fname)\ntitle_str = fname.split(os.sep)[-1]\nt = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T)\nfig = plt.figure(1, [5,4])\nax = fig.add_subplot(111)\n\n#imshow(t , interpolation=\"nearest\")\n#imshow((t - t.min())) ** .25, interpolation=\"nearest\")\ntt = t ** .25\ntt[np.isnan(tt)] = 0\nextent = [0., 192., 0., 192.]\nax.imshow(tt, extent=extent, interpolation=\"nearest\")\n\ntzoom = tt[135:155, 80:100,]\naxins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6\nextent = [80., 100., 192. - 155., 192. - 135, ]\nim = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\")\nim.set_clim([tt.min(), tt.max()])\nplt.xticks(visible=False)\nplt.yticks(visible=False)\n#x1, x2, y1, y2 = 80., 100., 135., 155.,\n#axins.set_xlim(x1, x2)\n#axins.set_ylim(y1, y2)\nmark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\")\n\n#plt.title(title_str)\n#plt.colorbar()\n#plt.xlabel('Right Ascension')\n#plt.ylabel('Declination')\nplt.show()\nfig.savefig(out_fname)\n","repo_name":"nbarbey/csh","sub_path":"tests/fits2png_zoom.py","file_name":"fits2png_zoom.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"27062509969","text":"\nfrom random import random\nfrom sqlite3 import Cursor\nfrom mock_data import catalog\nfrom flask import Flask, abort, request\nfrom about_me import me \nimport json \nfrom flask_cors import CORS\nfrom config import db\nfrom bson import ObjectId\n\n#create the server/app\napp = Flask(\"server\")\nCORS(app)\n\n@app.route(\"/\", methods=[\"get\"])\ndef home_page():\n return \"Under Construction\"\n\n@app.route(\"/about\")\ndef about_me():\n return \"Nicholas Lucien\"\n\n@app.route(\"/test\")\ndef test():\n return \"Simple Test\"\n\n@app.route(\"/myaddress\")\ndef get_address():\n address = me[\"address\"]\n # return address[\"street\"]\n return f\"{address['street']} {address['city']}\"\n\n#start the server \n\n@app.route(\"/api/catalog\")\ndef get_catalog():\n\n cursor = db.products.find({})\n results = []\n for prod in cursor:\n results.append(prod)\n prod[\"_id\"] = str(prod[\"_id\"])\n \n\n return json.dumps(results)\n \n\n@app.route(\"/api/catalog\", methods=[\"Post\"])\ndef save_product():\n product = request.get_json() # read the payload as a dictionary from json string \n\n # validate \n # title and longer than 5 chars\n if not \"title\" in product or len(product[\"title\"]) < 5:\n return abort(400, \"There should be title. 
Title should be atr least 5 char longs\")\n \n # should have a price\n if not \"price\" in product:\n return abort(400, \"Price is required\")\n\n # if the price is not and int and not a float, error \n if not isinstance(product[\"price\"], int) and not isinstance(product[\"price\"], float):\n return abort(400, \"It is not a price\")\n\n # the price should be greater than zero \n if product[\"price\"] < 0: \n return abort(400, \"Can not be less that zero\")\n \n db.products.insert_one(product)\n\n\n #hack to fix the _id \n product[\"_id\"] = str(product[\"_id\"])\n return json.dumps(product) \n\n#get /api/catalog/count\n@app.route(\"/api/catalog/count\")\ndef get_catalogcount(): \n cursor = db.products.find({}) \n count = len(cursor)\n for prod in cursor:\n count += 1\n return json.dumps(cursor.count())\n\n@app.route(\"/api/catalog/sum\")\ndef get_sum():\n total = 0\n for prod in catalog:\n total += prod[\"price\"]\n\n res = \"$\" + str(total)\n return json.dumps(res)\n\n@app.route(\"/api/product/\")\ndef get_product(id):\n prod = db.products.find_one({\"_id\": ObjectId(id)})\n if not ObjectId.is_valid(id):\n return abort(400, \"id is not a valid ObjectId\")\n if not prod:\n return abort(404, \"Product not found\")\n\n prod[\"_id\"] = str(prod[\"_id\"])\n return json.dumps(prod)\n \n #return abort(404) # 400 not found\n\n@app.route(\"/api/product/most_expensive\")\ndef get_most_expensive():\n pivot = catalog[0]\n cursor = db.products.find({})\n for prod in cursor:\n if prod[\"price\"] > pivot[\"price\"]:\n pivot = prod\n\n\n pivot[\"_id\"] = str(pivot[\"_id\"])\n return json.dumps(pivot)\n\n# get /api/categories\n# return a list of strings, representing the UNIQUE categories \n\n@app.route(\"/api/categories\")\ndef get_categories():\n\n res = []\n cursor = db.products.find({})\n # 2 - print each category \n for prod in cursor:\n category = prod[\"category\"]\n if not category in res:\n res.append(category)\n return json.dumps(res)\n\n\n@app.route(\"/api/catalog/\")\ndef get_allproducts(category):\n res=[]\n cursor = db.products.find({\"category\": category})\n for prod in cursor:\n prod[\"_id\"] = str(prod[\"_id\"])\n res.append(prod)\n\n return json.dumps(res)\n\ncoupons = []\n#API Methods for Coupon Code\n#code: Nick \n# get all get /api/coupons \n#save new post /api/coupons\n#get by code get /api/coupons/\n\n@app.route(\"/api/coupons\")\ndef get_coupons():\n cursor = db.discounts.find({})\n results = []\n for prod in cursor:\n prod[\"_id\"] = str(prod[\"_id\"])\n return json.dumps(results)\n\n@app.route(\"/api/coupons\", methods=[\"Post\"])\ndef save_coupons():\n coupon = request.get_json()\n\n db.coupons.insert_one(coupon)\n\n coupon[\"_id\"] = str(coupon[\"_id\"])\n return json.dumps(coupon)\n\n@app.route(\"/api/coupon/\")\ndef get_couponsbycode(code):\n coupon = db.products.find({\"code\": code})\n if not coupon:\n return abort(404, 'Coupon not found for code: ' + code)\n coupon[\"_id\"] = str(coupon['_id'])\n return json.dumps(coupon)\n \n\n@app.route(\"/api/saveOrders\", methods = [\"Post\"])\ndef save_orders():\n order = request.get_json()\n\n db.orders.insert_one(order)\n\n order[\"_id\"] = str(order[\"_id\"])\n return json.dumps(order)\n\n\n@app.route(\"/api/retrieveOrders\")\ndef get_orders():\n cursor = db.orders.find({})\n results = []\n for order in cursor:\n order[\"_id\"] = str(order[\"_id\"])\n return json.dumps(results)\n\n\n@app.route(\"/api/retrieveOrders/\")\ndef get_ordersbyId(user_id):\n order = db.orders.find_one({\"user_id\": user_id})\n if not order:\n return 
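A quick sketch of why get_product in the server.py record above checks ObjectId.is_valid before building an ObjectId: the constructor raises on malformed input, so validation has to come first. The sample ids here are hypothetical:

from bson import ObjectId

for candidate in ['507f1f77bcf86cd799439011', 'not-an-id']:
    if ObjectId.is_valid(candidate):
        print(candidate, '->', ObjectId(candidate))
    else:
        print(candidate, '-> rejected before touching the database')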
abort(404, \"Not correct ID\")\n order[\"_id\"] = str(order['_id'])\n return json.dumps(order)\n\n#start the server \napp.run(debug=True)","repo_name":"nick3325/Unit-108-Assignment","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18951291032","text":"f = open('transform-the-array.txt')\ndef input():\n return f.readline()\n\nT = int(input().strip())\nfor t in range(T):\n N = int(input().strip())\n numbers = [int(x) for x in input().strip().split(' ')]\n\n i1 = 0\n while True:\n while i1 < N - 1 and numbers[i1] == 0 :\n i1 += 1\n\n if i1 == N - 1:\n break\n\n i2 = i1 + 1\n\n while i2 < N and numbers[i2] == 0 :\n i2 += 1\n\n if i2 == N:\n break\n\n if numbers[i1] == numbers[i2]:\n numbers[i1] *= 2\n numbers[i2] = 0\n else:\n i1 += 1\n i2 += 1\n\n\n i1 = 0\n while i1 < N and numbers[i1] != 0:\n i1 += 1\n\n i2 = i1\n while True:\n while i1 < N and numbers[i1] == 0:\n i1 += 1\n if i1 == N:\n break\n\n numbers[i2] = numbers[i1]\n numbers[i1] = 0\n i2 += 1\n\n print(numbers)\n\n","repo_name":"chvjak/hr-practice","sub_path":"transform-the-array.py","file_name":"transform-the-array.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27165324529","text":"import sys\n\ndef read_input(file):\n yield file.split()\n\ndef main(s, separator='\\t'):\n data = read_input(s)\n file = open('file1.txt', 'w')\n\n for words in data:\n for word in words:\n file.write('%s%s%d\\n' % (word, separator, 1))\n print('%s%s%d' % (word, separator, 1))\n\n file.close()\n\n\ns = \"abc ddd hello abc yyy def hello\"\nif __name__ == \"__main__\":\n main(s)\n","repo_name":"diana-tym/DevOps-course","sub_path":"mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40606587408","text":"'''\n子集\n\n给你一个整数数组 nums ,数组中的元素 互不相同 。返回该数组所有可能的子集(幂集)。\n\n解集 不能 包含重复的子集。你可以按 任意顺序 返回解集。\n'''\n\nimport copy\n#dfs+ always append\ndef subsets(nums):\n\tresult=[]\n\tfindsubset(nums,0,[],result)\n\treturn result\n\ndef findsubset(nums,start,path,result):\n\tresult.append(copy.copy(path))\n\t\n\tif start==len(nums):\n\t\treturn\n\n\t#dfs\n\tfor idx in range(start,len(nums)):\n\t\tpath=path+[nums[idx]]\n\t\tfindsubset(nums,idx+1,path,result)\n\t\tpath.pop()#撤销\n\n\n#扩展法\ndef subsets2(nums):\n\tresult=[[]]\n\tfor i in range(len(nums)):\n\t\tresult+=[sub+[nums[i]] for sub in result]\n\treturn result\n\n\ndef subsets3(nums):\n\tresult=[[]]\n\t#for every possible end, we need backtrack\n\tfor end in range(1,len(nums)+1):\n\t\tfindsubset3(nums,0,end,[],result)\n\treturn result\n\n\n#backtracking + 剪枝\ndef findsubset3(nums,start,end,path,result):\n\tif start==end:#this is 剪枝of backtracking\n\t\tresult.append(copy.copy(path))\n\t\treturn\n\n\tfor i in range(start,len(nums)):\n\t\tpath=path+[nums[i]]\n\t\tfindsubset3(nums,i+1,end,path,result)\n\t\tpath.pop()\n\nif __name__==\"__main__\":\n\tnums=[1,2,3]\n\tout=subsets(nums)\n\tprint('dfs all subsets ',out)\n\n\tout=subsets2(nums)\n\tprint('expanding all subsets2 ',out)\n\n\tout=subsets3(nums)\n\tprint('backtracking all subsets3 
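The docstring in the 78_subsets.py record above states the power-set property; a small self-check, assuming the expansion-style subsets2 defined there, confirms that n distinct elements yield 2**n subsets:

def subsets2(nums):
    result = [[]]
    for x in nums:
        # each existing subset spawns a copy extended with the new element
        result += [sub + [x] for sub in result]
    return result

out = subsets2([1, 2, 3])
assert len(out) == 2 ** 3
print(out)  # [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]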
',out)\n","repo_name":"pecanjk/leetcode_solution","sub_path":"78_subsets.py","file_name":"78_subsets.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27988270706","text":"# -*- coding: utf-8 -*-\n# Bibliotecas para o modelo\nimport tensorflow as tf\nimport keras_preprocessing\nfrom keras_preprocessing import image\nfrom keras_preprocessing.image import ImageDataGenerator\nimport pickle\nimport numpy as np\n\ndef treino():\n diretorio_raiz = 'data/dados_tratados'\n\n # Defina os diretórios de treinamento e validação\n train_path = diretorio_raiz+'/train_path'\n validation_path = diretorio_raiz+'/validation_path'\n\n TRAINING_DIR = train_path\n training_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n\n VALIDATION_DIR = validation_path\n validation_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = training_datagen.flow_from_directory(\n TRAINING_DIR,\n target_size=(96, 96),\n class_mode='categorical',\n batch_size=126\n )\n\n validation_generator = validation_datagen.flow_from_directory(\n VALIDATION_DIR,\n target_size=(96, 96),\n class_mode='categorical',\n batch_size=126\n )\n\n model = tf.keras.models.Sequential([\n # Note the input shape is the desired size of the image 96x96 with 3 bytes color\n # This is the first convolution\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(96, 96, 3)),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The second convolution\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The third convolution\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The fourth convolution\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n # Flatten the results to feed into a DNN\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n # 512 neuron hidden layer\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(17, activation='softmax')\n ])\n\n summary = model.summary()\n\n score = model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop', metrics=['accuracy'])\n \n history = model.fit(train_generator, epochs=25, steps_per_epoch=20, \n validation_data = validation_generator, verbose = 1, validation_steps=3)\n \n with open(\"models/toxic_to_pet.pkl\", \"wb\") as f:\n pickle.dump(model, f)\n\n return score, history, summary\n\nif __name__ == \"__main__\":\n treino()\n\n\n\n\n \n","repo_name":"thiagosilva85/Sistemas_Inteligentes","sub_path":"app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1752864938","text":"import cv2\nimg = cv2.imread('lena.bmp')\n\nh, w, _ = img.shape\n\noutput = cv2.resize(img, (int(h/2), int(w/2)))\n\ncv2.imshow('Result', output)\ncv2.waitKey(0) \ncv2.destroyAllWindows()\n\ncv2.imwrite('e.png', output)","repo_name":"isbecky27/NTU-Computer-Vision","sub_path":"HW01/Code/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3961247196","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nerror_msg = \"Connection 
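A short illustration of the (width, height) argument order that the e.py record above relies on; the synthetic image is a stand-in for lena.bmp:

import numpy as np
import cv2

img = np.zeros((100, 200, 3), dtype=np.uint8)  # height 100, width 200
h, w, _ = img.shape
out = cv2.resize(img, (w // 2, h // 2))  # dsize is (width, height)
print(out.shape)  # (50, 100, 3)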
timed out\"\nunwanted_log_file = ['/var/log/sssd/sssd.log',\n '/var/log/sssd/ldap_child.log', '/var/log/sssd/sssd_implicit_files.log', '/var/log/sssd/sssd_nss.log', '/var/log/sssd/sssd_pam.log']\nfor sssd_log_file in glob.glob(\"/var/log/sssd/*\"):\n if sssd_log_file not in unwanted_log_file:\n # Open log_file for reading purpose\n fo = open(sssd_log_file)\n # Read the first line from the file\n line = fo.readline()\n # Initialize counter for line number\n line_no = 1\n # Loop until EOF\n while line != '' :\n #search for sdap_async_sys_connect request failed/Connection timed out\n index = line.find(error_msg)\n if ( index != -1) :\n #print(\"[\", line_no, \",\", index, \"] \", line, sep=\"\")\n print(line)\n #read the next line\n line = fo.readline()\n #increment line_counter\n line_no += 1\n #close the files\n fo.close()\n","repo_name":"Roy214/Python","sub_path":"check_timeout.py","file_name":"check_timeout.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35206202524","text":"from dash import dcc, html, Input, Output, no_update, dash_table, callback\nimport dash\nimport plotly.express as px\nimport pandas as pd\nimport numpy as np\nfrom . import Pre_processing\nfrom sklearn.linear_model import LinearRegression\nimport plotly.graph_objects as go\nimport itertools\nimport dash_bootstrap_components as dbc\n# from dash_table.Format import Format, Scheme\nfrom plotly.subplots import make_subplots\n\n\ndfString = Pre_processing.dfString\n\nstringency_parameters = ['Facial Coverings', 'Testing Policy',\n 'Income Support', #'public_information_campaigns',\n 'Cancel Public Events'\n ,'Close Public Transport', 'Stay Home Requirements', 'Workplace Closures' \n ,'Vaccination Policy']\n\ny_axis_parameters = ['New Cases Per Million', 'New Deaths Per Million']\n # \"total_vaccinations_per_hundred\",'total_tests_per_thousand',\n # 'hosp_patients_per_million', 'stringency_index']\n \ndash.register_page(__name__,\n relative_path='/Polynomial_Regression',\n title='Polynomial Regression',\n name='Polynomial Regression')\n \n##################################################################################################### functions:\n \ndef rss(y, y_hat):\n RSS = sum(np.power((y - y_hat), 2))\n return RSS\n\n\n# Calculate Mean Absolute error based on observed (sampled) and model-predicted life expectancy\ndef MAE(observed_life, predicted_life):\n n = len(observed_life)\n differences = observed_life - predicted_life\n absolute_differences = differences.abs()\n return absolute_differences.sum() / n\n \n\n\n# function that generates feature matrix containing polynomial with given powers of a single input attribute\ndef create_feature_matrix(input_attr_col, powers):\n df = pd.DataFrame()\n for i in powers:\n col = \"h\" + str(i)\n df[col] = np.power(input_attr_col, i)\n\n return df\n\n# Prediction is a dot product of feature matrix and weight vector\ndef predict(input_features, weights):\n return input_features.dot(weights)\n\n# For printing ease\ndef poly_print(weights):\n print(np.poly1d(np.flip(weights, 0)))\n print(\"\")\n \n# Find which attributes have non-zero weights, print out their names and weights:\ndef print_nonzero_weights(all_features, lasso_w):\n i = 0\n result = {}\n for x in all_features:\n if (lasso_w[i] == 0):\n pass\n else:\n result[all_features[i]] = lasso_w[i]\n i = i+1\n \n return result\n\n############################################################################################ 
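A minimal sketch of the feature-matrix / dot-product prediction defined by create_feature_matrix and predict in the Polynomial_Regression record above; the inputs, powers, and weights are made up for illustration:

import numpy as np
import pandas as pd

x = pd.Series([1.0, 2.0, 3.0])
features = pd.DataFrame({'h' + str(i): np.power(x, i) for i in range(3)})  # h0, h1, h2
weights = np.array([0.5, -1.0, 2.0])  # hypothetical fitted weights
y_hat = features.dot(weights)  # same dot product as predict()
print(y_hat.tolist())  # [1.5, 6.5, 15.5]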
\n\nlayout = html.Div([\n dbc.Row([\n dbc.Row([\n html.Div(['In this part, we use Polynomial Regression to find the effects of each Stringency Policy individually on the new deaths and new cases in different countries.'],\n style={'font-size': '100%', 'background-color':'#d9ead3'}),\n\n html.Div(['The best polynomial model from linear to 8th grade is found and shown in the graph below. You can choose to include the points in the graph or remove them.'],\n style={'font-size': '100%', 'background-color':'#d9ead3'}),\n \n ]), \n \n dbc.Row(html.Br()),\n \n dbc.Row([\n dbc.Col([\n html.Div(\"Choose if you want to see the data points in the graph or not:\"),\n ], width = {\"size\": 4}),\n \n dbc.Col([\n dcc.RadioItems( [\"dont include points\", \"include points\"], \"dont include points\", id='include_points', \n labelStyle={'background':'#A5D6A7', # style of the